From 4ed12e16f882008388c007c6e86be3ce038d8751 Mon Sep 17 00:00:00 2001
From: Andrew Geissler
Date: Fri, 5 Jun 2020 18:00:41 -0500
Subject: poky: subtree update:a35bf0e5d3..b66b9f7548
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

backport: meson 0.54.2: backport upstream patch for boost libs

Adrian Bunk (1):
      libubootenv: Remove the DEPENDS on mtd-utils

Alex Kiernan (2):
      openssh: Upgrade 8.2p1 -> 8.3p1
      systemd: upgrade v245.5 -> v245.6

Alexander Kanavin (68):
      btrfs-tools: upgrade 5.4.1 -> 5.6.1
      build-compare: upgrade to latest revision
      ccache: upgrade 3.7.7 -> 3.7.9
      createrepo-c: upgrade 0.15.7 -> 0.15.10
      dpkg: upgrade 1.19.7 -> 1.20.0
      librepo: upgrade 1.11.2 -> 1.11.3
      python3-numpy: upgrade 1.18.3 -> 1.18.4
      python3-cython: upgrade 0.29.16 -> 0.29.19
      python3-gitdb: upgrade 4.0.4 -> 4.0.5
      python3-mako: upgrade 1.1.1 -> 1.1.3
      python3-pygments: upgrade 2.5.2 -> 2.6.1
      python3-smmap: upgrade 2.0.5 -> 3.0.4
      python3-subunit: upgrade 1.3.0 -> 1.4.0
      python3-testtools: upgrade 2.3.0 -> 2.4.0
      python3: upgrade 3.8.2 -> 3.8.3
      strace: upgrade 5.5 -> 5.6
      vala: upgrade 0.46.6 -> 0.48.6
      cups: upgrade 2.3.1 -> 2.3.3
      gawk: upgrade 5.0.1 -> 5.1.0
      libsolv: upgrade 0.7.10 -> 0.7.14
      man-pages: upgrade 5.05 -> 5.06
      msmtp: upgrade 1.8.8 -> 1.8.10
      stress-ng: upgrade 0.11.01 -> 0.11.12
      stress-ng: mark as incompatible with musl
      sudo: upgrade 1.8.31 -> 1.9.0
      adwaita-icon-theme: upgrade 3.34.3 -> 3.36.1
      gtk+3: upgrade 3.24.14 -> 3.24.20
      cogl-1.0: upgrade 1.22.4 -> 1.22.6
      mesa: upgrade 20.0.2 -> 20.0.7
      mesa: merge the .bb content into .inc
      piglit: upgrade to latest revision
      waffle: upgrade 1.6.0 -> 1.6.1
      pixman: upgrade 0.38.4 -> 0.40.0
      kmod: upgrade 26 -> 27
      powertop: upgrade 2.10 -> 2.12
      alsa-plugins: upgrade 1.2.1 -> 1.2.2
      alsa-tools: upgrade 1.1.7 -> 1.2.2
      alsa-utils: split the content into .inc
      alsa-topology/ucm-conf: update to 1.2.2
      x264: upgrade to latest revision
      puzzles: upgrade to latest revision
      libcap: upgrade 2.33 -> 2.34
      libical: upgrade 3.0.7 -> 3.0.8
      libunwind: upgrade 1.3.1 -> 1.4.0
      rng-tools: upgrade 6.9 -> 6.10
      babeltrace: correct the git SRC_URI
      libexif: update to 0.6.22
      ppp: update 2.4.7 -> 2.4.8
      gettext: update 0.20.1 -> 0.20.2
      ptest-runner: fix upstream version check
      automake: 1.16.1 -> 1.16.2
      bison: 3.5.4 -> 3.6.2
      cmake: update 3.16.5 -> 3.17.3
      gnu-config: update to latest revision
      jquery: update to 3.5.1
      json-c: update 0.13.1 - > 0.14
      libmodulemd: update 2.9.2 -> 2.9.4
      meson: upgrade 0.53.2 -> 0.54.2
      shared-mime-info: fix upstream version check
      mpg123: fix upstream version check
      ethtool: upgrade 5.4 -> 5.6
      libcpre2: update 10.34 -> 10.35
      help2man-native: update to 1.47.15
      apt: update to 1.8.2.1
      asciidoc: bump PV to 8.6.10
      pulseaudio: exclude pre-releases from version checks
      xinetd: switch to a maintained opensuse fork
      lz4: disable static library

Andreas Müller (1):
      vte: Pack ${libexecdir}/vte-urlencode-cwd to vte-prompt

Anuj Mittal (1):
      linux-yocto: bump genericx86 kernel version to v5.4.40

Bruce Ashfield (5):
      linux-yocto/5.4: update to v5.4.42
      linux-yocto-rt/5.4: update to rt24
      linux-yocto/5.4: temporarily revert IKHEADERS in standard kernels
      linux-yocto: gather reproducibility configs into a fragment
      linux-yocto/5.4: update to v5.4.43

Christian Eggers (2):
      librsvg: Extend for nativesdk
      tiff: Extend for nativesdk

Hongxu Jia (1):
      rpm: fix rpm -Kv xxx.rpm failed if signature header is larger than 64KB

Jacob Kroon (1):
      bitbake: doc: More explanation to tasks that recursively depend on themselves

Jan Luebbe (1):
      classes/buildhistory: capture package config
Jens Rehsack (2):
      initscripts/init-system-helpers: fix mountnfs.sh dependency
      init-system-helpers: avoid superfluous update-rc.d

Joshua Watt (2):
      layer.conf: Bump OE-Core layer version
      wic: Add --offset argument for partitions

Junling Zheng (3):
      buildstats.bbclass: Remove useless variables
      buildstats.bbclass: Do not recalculate build start time
      security_flags: Remove stack protector flag from LDFLAGS

Kai Kang (1):
      bitbake: bitbake-user-manual-metadata.xml: fix a minor error

Khem Raj (4):
      make-mod-scripts: Fix a rare build race condition
      go-1.14: Update to 1.14.3 minor release
      armv8/tunes: Set TUNE_PKGARCH_64 based on ARMPKGARCH
      ltp: Disable sigwaitinfo tests relying on undefined behavior

Konrad Weihmann (8):
      qemurunner: fix ip fallback detection
      sysfsutils: rem leftover settings for libsysfs-dev
      debianutils: whitespace fixes
      libjpeg-turbo: whitespace fixes
      cairo: remove trailing whitespace
      gtk-doc: remove trailing whitespace
      libxt: fix whitespaces
      cogl: point to correct HOMEPAGE

Lee Chee Yang (4):
      re2c: fix CVE-2020-11958
      bind: fix CVE-2020-8616/7
      glib-2.0: 2.64.2 -> 2.64.3
      glib-networking: 2.64.2 -> 2.64.3

Marco Felsch (1):
      util-linux: alternatify rtcwake

Mark Hatle (1):
      sstate.bbclass: When siginfo or sig files are missing, stop fetcher errors

Martin Jansa (6):
      devtool: use -f and don't use --exclude-standard when adding files to workspace
      meta-selftest: add test of .gitignore in tarball
      lib/oe/patch: prevent applying patches without any subject
      lib/oe/patch: GitApplyTree: save 1 echo in commit-msg hook
      Revert "lib/oe/patch: fix handling of patches with no header"
      meta-selftest: add test for .patch file with long filename and without subject

Mauro Queirós (3):
      bitbake: git.py: skip smudging if lfs=0 is set
      bitbake: git.py: LFS bitbake note should not be printed if need_lfs is not set.
      bitbake: git.py: Use the correct branch to check if the repository has LFS objects.

Ming Liu (2):
      u-boot.inc: fix some inconsistent coding style
      u-boot: introduce UBOOT_INITIAL_ENV

Paul Barker (5):
      archiver: Fix test case for srpm archiver mode
      oe-selftest: Allow overriding the build directory used for tests
      oe-selftest: Support verbose log output
      oe-selftest: Recursively patch test case paths
      bitbake: fetch2: Add the ability to list expanded URL data

Peter Kjellerstedt (1):
      cairo: Do not try to remove nonexistent directories

Pierre-Jean Texier (1):
      diffoscope: upgrade 144 -> 146

Ralph Siemsen (1):
      cve-check: include epoch in product version output

Richard Purdie (7):
      lib/classextend: Drop unneeded comment
      poky.ent: Update UBUNTU_HOST_PACKAGES_ESSENTIAL to match recent changes
      maintainers: Update Ross' email address
      logrotate: Drop obsolete setting/comment
      oeqa/targetcontrol: Rework exception handling to avoid warnings
      patchelf: Add patch to address corrupt shared library issue
      poky.ent: Update XXX_HOST_PACKAGES_ESSENTIAL to include mesa for other distros

Robert P. J. Day (1):
      bitbake.conf: Remove unused DEPLOY_DIR_TOOLS variable

Tim Orling (1):
      bitbake: toaster-requirements.txt: require Django 2.2

Trevor Gamblin (1):
      qemuarm: check serial consoles vs /proc/consoles

Wang Mingyu (13):
      less: upgrade 551 -> 562
      liburcu: upgrade 0.12.0 -> 0.12.1
      alsa-lib: upgrade 1.2.1.2 -> 1.2.2
      alsa-utils: upgrade 1.2.1 -> 1.2.2
      python3-six: upgrade 1.14.0 -> 1.15.0
      util-linux: upgrade 2.35.1 -> 2.35.2
      xf86-input-libinput: upgrade 0.29.0 -> 0.30.0
      ca-certificates: upgrade 20190110 -> 20200601
      dbus: upgrade 1.12.16 -> 1.12.18
      libyaml: upgrade 0.2.4 -> 0.2.5
      sqlite: upgrade 3.31.1 -> 3.32.1
      valgrind: upgrade 3.15.0 -> 3.16.0
      dbus-test: upgrade 1.12.16 -> 1.12.18

akuster (2):
      poky.ent: Update OPENSUSE_HOST_PACKAGES_ESSENTIAL to include mesa-dri-devel
      yocto-docs: Add SPDX headers in scripts and Makefile

hongxu (1):
      core-image-minimal-initramfs: keep restriction with initramfs-module-install

zangrc (3):
      python3-pycairo:upgrade 1.19.0 -> 1.19.1
      python3-pygobject:upgrade 3.34.0 -> 3.36.1
      python3-setuptools:upgrade 45.2.0 -> 47.1.1

zhengruoqin (2):
      gdb: upgrade 9.1 -> 9.2
      libyaml: upgrade 0.2.2 -> 0.2.4

Signed-off-by: Andrew Geissler
Signed-off-by: Patrick Williams
Change-Id: I60e616be0c30904f8cfc947089ed2e4f5e84bc60
---
 poky/meta/recipes-devtools/meson/meson.inc | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'poky/meta/recipes-devtools/meson/meson.inc')

diff --git a/poky/meta/recipes-devtools/meson/meson.inc b/poky/meta/recipes-devtools/meson/meson.inc
index 0a58c971a..12bc08648 100644
--- a/poky/meta/recipes-devtools/meson/meson.inc
+++ b/poky/meta/recipes-devtools/meson/meson.inc
@@ -9,16 +9,14 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=3b83ef96387f14655fc854ddc3c6bd57"
 
 SRC_URI = "https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${PV}.tar.gz \
            file://0001-gtkdoc-fix-issues-that-arise-when-cross-compiling.patch \
-           file://0002-gobject-introspection-determine-g-ir-scanner-and-g-i.patch \
            file://0003-native_bindir.patch \
            file://0001-python-module-do-not-manipulate-the-environment-when.patch \
            file://disable-rpath-handling.patch \
            file://cross-prop-default.patch \
-           file://0001-mesonbuild-environment.py-check-environment-for-vari.patch \
            file://0001-modules-python.py-do-not-substitute-python-s-install.patch \
+           file://0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch \
            "
-SRC_URI[sha256sum] = "3e8f830f33184397c2eb0b651ec502adb63decb28978bdc84b3558d71284c21f"
-SRC_URI[md5sum] = "80303535995fcae72bdb887df102b421"
+SRC_URI[sha256sum] = "a7716eeae8f8dff002e4147642589ab6496ff839e4376a5aed761f83c1fa0455"
 
 SRC_URI_append_class-native = " \
     file://0001-Make-CPU-family-warnings-fatal.patch \
--
cgit v1.2.3


From 32654a436bd7904c10204251299a50cac55df0fb Mon Sep 17 00:00:00 2001
From: Andrew Geissler
Date: Thu, 11 Jun 2020 15:44:54 -0500
Subject: meson: backport library ordering fix

meson had a bug where it started looking for static boost libraries first
instead of the previous default of preferring shared libraries. This broke
projects that assume shared libraries are found first, because a shared
boost library automatically pulls in the other shared libraries it depends
on, while a static archive does not. Such projects began failing to link
with undefined references to other boost libraries. The fix was initially
carried in the meta-phosphor layer as a bbappend, but it really belongs in
poky as a backport of the upstream fix. This commit backports the upstream
fix and removes the meta-phosphor patch.
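For illustration, a minimal Python sketch (not part of the patch, using a
hypothetical trimmed-down attribute set) of why moving the static flag forward
in the comparison tuple fixes the ordering: Python compares tuples element by
element, so once the static flag sits immediately after the module name, a
shared library (static=False, and False < True) always sorts ahead of a static
one, regardless of the version and architecture fields that follow.

    # Illustrative only -- simplified from BoostLibraryFile.__lt__; the real
    # class compares many more attributes (mt, runtime_static, debug, ...).
    from typing import NamedTuple

    class Lib(NamedTuple):
        mod_name: str
        version_lib: str
        arch: str
        static: bool

    candidates = [
        Lib("boost_system", "1.71.0", "x86_64", static=True),
        Lib("boost_system", "1.72.0", "x86_64", static=False),
    ]

    # Old key: static is compared last, so a static library can still sort
    # ahead of a shared one whenever an earlier field differs.
    old_key = lambda lib: (lib.mod_name, lib.version_lib, lib.arch, lib.static)
    # Fixed key: static follows mod_name, so shared always wins for a module.
    new_key = lambda lib: (lib.mod_name, lib.static, lib.version_lib, lib.arch)

    print(sorted(candidates, key=old_key)[0].static)  # True  -> static chosen
    print(sorted(candidates, key=new_key)[0].static)  # False -> shared chosen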
Signed-off-by: Andrew Geissler Signed-off-by: Andrew Geissler Change-Id: Id50eb02fa8ad519903498ace2ef825d55fa1b033 --- ...ways-sort-shared-before-static-fixes-7171.patch | 32 -------------------- .../recipes-devtools/meson/meson_0.54.2.bbappend | 3 -- poky/meta/recipes-devtools/meson/meson.inc | 1 + ...ways-sort-shared-before-static-fixes-7171.patch | 35 ++++++++++++++++++++++ 4 files changed, 36 insertions(+), 35 deletions(-) delete mode 100644 meta-phosphor/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch delete mode 100644 meta-phosphor/recipes-devtools/meson/meson_0.54.2.bbappend create mode 100644 poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch (limited to 'poky/meta/recipes-devtools/meson/meson.inc') diff --git a/meta-phosphor/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch b/meta-phosphor/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch deleted file mode 100644 index 08cb103e5..000000000 --- a/meta-phosphor/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 5862ad6965c60caa861dfdcd29e499c34c4d00da Mon Sep 17 00:00:00 2001 -From: Daniel Mensinger -Date: Thu, 21 May 2020 13:35:27 +0200 -Subject: [PATCH] boost: Always sort shared before static (fixes #7171) - ---- - mesonbuild/dependencies/boost.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/mesonbuild/dependencies/boost.py b/mesonbuild/dependencies/boost.py -index 6e85c534..38497041 100644 ---- a/mesonbuild/dependencies/boost.py -+++ b/mesonbuild/dependencies/boost.py -@@ -189,13 +189,13 @@ class BoostLibraryFile(): - def __lt__(self, other: T.Any) -> bool: - if isinstance(other, BoostLibraryFile): - return ( -- self.mod_name, self.version_lib, self.arch, self.static, -+ self.mod_name, self.static, self.version_lib, self.arch, - not self.mt, not self.runtime_static, - not self.debug, self.runtime_debug, self.python_debug, - self.stlport, self.deprecated_iostreams, - self.name, - ) < ( -- other.mod_name, other.version_lib, other.arch, other.static, -+ other.mod_name, other.static, other.version_lib, other.arch, - not other.mt, not other.runtime_static, - not other.debug, other.runtime_debug, other.python_debug, - other.stlport, other.deprecated_iostreams, --- -2.26.2 - diff --git a/meta-phosphor/recipes-devtools/meson/meson_0.54.2.bbappend b/meta-phosphor/recipes-devtools/meson/meson_0.54.2.bbappend deleted file mode 100644 index 4880c90a8..000000000 --- a/meta-phosphor/recipes-devtools/meson/meson_0.54.2.bbappend +++ /dev/null @@ -1,3 +0,0 @@ -FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" - -SRC_URI += "file://0001-boost-Always-sort-shared-before-static-fixes-7171.patch" diff --git a/poky/meta/recipes-devtools/meson/meson.inc b/poky/meta/recipes-devtools/meson/meson.inc index 12bc08648..a0b54f57d 100644 --- a/poky/meta/recipes-devtools/meson/meson.inc +++ b/poky/meta/recipes-devtools/meson/meson.inc @@ -15,6 +15,7 @@ SRC_URI = "https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${P file://cross-prop-default.patch \ file://0001-modules-python.py-do-not-substitute-python-s-install.patch \ file://0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch \ + file://0001-boost-Always-sort-shared-before-static-fixes-7171.patch \ " SRC_URI[sha256sum] = "a7716eeae8f8dff002e4147642589ab6496ff839e4376a5aed761f83c1fa0455" diff --git 
a/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch b/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch new file mode 100644 index 000000000..217218180 --- /dev/null +++ b/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch @@ -0,0 +1,35 @@ +From 5862ad6965c60caa861dfdcd29e499c34c4d00da Mon Sep 17 00:00:00 2001 +From: Daniel Mensinger +Date: Thu, 21 May 2020 13:35:27 +0200 +Subject: [PATCH] boost: Always sort shared before static (fixes #7171) + +Upstream-Status: Backport [https://github.com/mesonbuild/meson/commit/5862ad6965c60caa861dfdcd29e499c34c4d00da] + +Signed-off-by: Andrew Geissler +--- + mesonbuild/dependencies/boost.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/mesonbuild/dependencies/boost.py b/mesonbuild/dependencies/boost.py +index 6e85c534..38497041 100644 +--- a/mesonbuild/dependencies/boost.py ++++ b/mesonbuild/dependencies/boost.py +@@ -189,13 +189,13 @@ class BoostLibraryFile(): + def __lt__(self, other: T.Any) -> bool: + if isinstance(other, BoostLibraryFile): + return ( +- self.mod_name, self.version_lib, self.arch, self.static, ++ self.mod_name, self.static, self.version_lib, self.arch, + not self.mt, not self.runtime_static, + not self.debug, self.runtime_debug, self.python_debug, + self.stlport, self.deprecated_iostreams, + self.name, + ) < ( +- other.mod_name, other.version_lib, other.arch, other.static, ++ other.mod_name, other.static, other.version_lib, other.arch, + not other.mt, not other.runtime_static, + not other.debug, other.runtime_debug, other.python_debug, + other.stlport, other.deprecated_iostreams, +-- +2.26.2 + -- cgit v1.2.3 From 475cb72d2bb2f40ca5e9f4edba6d49d6c7afbd3e Mon Sep 17 00:00:00 2001 From: Andrew Geissler Date: Fri, 10 Jul 2020 16:00:51 -0500 Subject: poky: subtree update:5951cbcabe..968fcf4989 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Alejandro Hernandez (3): baremetal-helloworld: Use do_image_complete instead of do_deploy baremetal-image.bbclass: Create a class for baremetal applications or an RTOS baremetal-helloworld: Use baremetal-image class to deploy the application Alejandro del Castillo (2): opkg-utils: upgrade to 0.4.3 opkg: upgrade to version 0.4.3 Alexander Kanavin (30): dnf: upgrade 4.2.21 -> 4.2.23 meson: upgrade 0.54.2 -> 0.54.3 libdnf: update 0.47.0 -> 0.48.0 ffmpeg: disable altivec on ppc by default dropbear: update 2019.78 -> 2020.79 elfutils: upgrade 0.179 -> 0.180 gnu-config: update to latest revision libgpg-error: update 1.37 -> 1.38 perl: update 5.30.2 -> 5.32.0 gst-examples: upstream releases are even numbered bison: upgrade 3.6.3 -> 3.6.4 python3-cython: upgrade 0.29.19 -> 0.29.20 stress-ng: upgrade 0.11.12 -> 0.11.14 piglit: upgrade to latest revision linux-firmware: upgrade 20200519 -> 20200619 systemtap: upgrade 4.2 -> 4.3 alsa-lib: upgrade 1.2.2 -> 1.2.3.1 alsa-topology-conf: upgrade 1.2.2 -> 1.2.3 alsa-ucm-conf: upgrade 1.2.2 -> 1.2.3 alsa-utils: upgrade 1.2.2 -> 1.2.3 puzzles: upgrade to latest revision diffoscope: upgrade 147 -> 148 libcheck: upgrade 0.14.0 -> 0.15.0 rsync: update 3.1.3 -> 3.2.1 sudo: upgrade 1.9.0 -> 1.9.1 python3-numpy: update 1.18.5 -> 1.19.0 mesa: update 20.0.7 -> 20.1.2 go-binary-native: fix upstream version check Revert "python3-setuptools: patch entrypoints for faster initialization" python3-setuptools: upgrade 47.1.1 -> 47.3.1 Alistair Francis (1): opensbi: 
Update to OpenSBI v0.8 release Andreas Müller (3): nfs-utils: upgrade 2.4.3 -> 2.5.1 ccache: merge ccache.inc into recipe ccache: upgrade 3.7.9 -> 3.7.10 Andrej Valek (2): busybox: 1.31.1 -> 1.32.0 dropbear: update to 2020.80 Andrey Zhizhikin (1): kernel/yocto: fix search for defconfig from src_uri Armin Kuster (1): wpa-supplicant: Security fix CVE-2020-12695 Bjarne Michelsen (1): devtool: default to empty string, if LIC_FILES_CHKSUM is not available Bruce Ashfield (10): kernel/yocto: ensure that defconfigs are processed first linux-yocto/5.4: update to v5.4.45 linux-yocto-rt/5.4: update to rt25 linux-yocto/5.4: update to v5.4.46 linux-yocto/5.4: update to v5.4.47 linux-yocto/5.4: update to v5.4.49 and -rt28 yocto-bsps: bump reference boards to v5.4.49 linux-yocto/5.4: update to v5.4.50 linux-yocto-dev: bump to 5.8-rc lttng-modules: bump devupstream to v2.12.1+ Changqing Li (5): xinit: add rxvt-unicode in RDEPENDS modutils-initscripts: update postinst initscripts: update postinst gtk-icon-cache.bbclass: add runtime dependency logrotate.py: fix testimage occasionally failure Chen Qi (2): oescripts.py: fix typo oescripts: ignore whitespaces when comparing lines Chris Laplante (2): bitbake: contrib/vim: synchronize from kergoth/vim-bitbake rev 4225ee8b4818d7e4696520567216a3a031c26f7d bitbake: ui/teamcity: don't use removed logging classes Christian Eggers (1): libnl: Extend for native/nativesdk Damian Wrobel (1): rootfs: do not let ldconfig to create symlinks Daniel Klauer (2): uboot-sign: Refactor do_deploy prefunc to do_deploy_prepend deploy.bbclass: Clean DEPLOYDIR before do_deploy David Khouya (2): bitbake: lib/ui/taskexp: Validate gi import bitbake: lib/ui/taskexp: Fix missing Gtk import Hannu Lounento (1): openssl: move ${libdir}/[...]/openssl.cnf to ${PN}-conf Hongxu Jia (1): iso-codes: switch upstream branch master -> main Jason Wessel (1): runqemu: If using a vmtype image do not add the -no-reboot flag Joe Slater (1): jquery: use ${S} Joshua Watt (4): bitbake: hashserv: Chunkify large messages bitbake: siggen: Fix error when hash equivalence has an exception classes/archiver: run do_unpack_and_patch after do_preconfigure classes/archive: do_configure should not depend on do_ar_patched Khem Raj (2): musl: Update to tip of master rxvt-unicode: Disable wtmp on musl Konrad Weihmann (2): systemd: remove kernel-install from base pkg bitbake.conf: fix whitespace issues Lee Chee Yang (3): json-c: fix CVE-2020-12762 qemu: fix CVE-2020-10761 oeqa/core/loader: refine regex to find module Lili Li (1): kernel.bbclass: Fix Module.symvers support Matt Madison (1): kernel.bbclass: add gzip-native to do_deploy dependencies Max Krummenacher (2): cogl-1.0: : don't require eglmesaext.h cogl-1.0: cope with missing x11 headers Mingli Yu (2): python3-libarchive-c: add the missing rdepends python3: add ldconfig rdepends for python3-ctypes Nicolas Dechesne (1): checklayer: parse LAYERDEPENDS with bb.utils.explode_dep_versions2() Pierre-Jean Texier (3): libubootenv: bump to revision 86bd30a curl: upgrade 7.71.0 -> 7.71.1 diffoscope: upgrade 148 -> 150 Rahul Kumar (1): bzip2: Add test suite for bzip2 Rasmus Villemoes (1): coreutils: don't split stdbuf to own package with single-binary Richard Purdie (13): pseudo: Switch to oe-core branch in git repo pseudo: merge in fixes for setfacl issue oeqa/selftest: Clean up separate builddir in success case when non-threaded populate_sdk_ext: Fix to use python3, not python bitbake: taskdata: Improve handling of regex in ASSUME_PROVIDED bitbake: runqueue: Avoid unpickle 
errors in rare cases bitbake: msg: Avoid issues where paths have relative components oeqa/selftest: recipetool/devtool: Avoid load_plugin test race oeqa/targetcontrol: Attempt to fix log closure warning message rootfs-postcommands: Improve/fix rootfs_check_host_user_contaminated spdx: Remove the class as its obsolete adwaita-icon-theme: Add missing license files to LIC_FILES_CHKSUM bitbake: server/process: Increase timeout for commands Ross Burton (3): ovmf: build natively everywhere common-licenses: fix filename of BSD-2-Clause-Patent gtk+3: fix reproducible build failure Timon Ulrich (2): kernel.bbclass: add lz4 dependency and fix the call to lz4 kernel.bbclass: make dependency on lzop-native conditional Vacek, Patrick (1): oeqa/core/loader: fix regex to include numbers Wang Mingyu (1): gtk+3: upgrade 3.24.20 -> 3.24.21 Yanfei Xu (1): classes/kernel: Use a copy of image for kernel*.rpm if fs doesn't support symlinks akuster (5): libuv: update to the last version in meta-oe bitbake: test/fetch: change to better svn source overview-manual: add SPDX license header mega-manual: Add SPDX license headers ref-manual: Add SPDX license headers hongxu (2): qemu: switches from libcap to libcap-ng for PACAKGECONFIG virtfs cpio: add nativesdk support zangrc (1): libjpeg-turbo:upgrade 2.0.4 -> 2.0.5 Signed-off-by: Andrew Geissler Change-Id: I41e066e5957aa74c9a24e86a6c214bcf96e9c46b --- poky/bitbake/contrib/vim/LICENSE.txt | 18 + poky/bitbake/contrib/vim/ftdetect/bitbake.vim | 4 +- poky/bitbake/contrib/vim/ftplugin/bitbake.vim | 15 +- poky/bitbake/contrib/vim/plugin/newbb.vim | 14 +- poky/bitbake/contrib/vim/plugin/newbbappend.vim | 46 + poky/bitbake/contrib/vim/syntax/bitbake.vim | 16 +- poky/bitbake/lib/bb/msg.py | 2 +- poky/bitbake/lib/bb/runqueue.py | 9 +- poky/bitbake/lib/bb/server/process.py | 4 +- poky/bitbake/lib/bb/siggen.py | 1 + poky/bitbake/lib/bb/taskdata.py | 9 +- poky/bitbake/lib/bb/tests/fetch.py | 2 +- poky/bitbake/lib/bb/ui/taskexp.py | 13 +- poky/bitbake/lib/bb/ui/teamcity.py | 2 - poky/bitbake/lib/hashserv/__init__.py | 22 + poky/bitbake/lib/hashserv/client.py | 43 +- poky/bitbake/lib/hashserv/server.py | 105 +- poky/bitbake/lib/hashserv/tests.py | 23 + .../mega-manual/mega-manual-customization.xsl | 1 + poky/documentation/mega-manual/mega-manual.xml | 2 +- poky/documentation/mega-manual/mega-style.css | 2 + .../overview-manual/overview-manual-concepts.xml | 1 + .../overview-manual-customization.xsl | 2 + .../overview-manual-development-environment.xml | 1 + .../overview-manual/overview-manual-intro.xml | 1 + .../overview-manual/overview-manual-style.css | 2 + .../overview-manual/overview-manual-yp-intro.xml | 1 + .../overview-manual/overview-manual.xml | 1 + poky/documentation/ref-manual/faq.xml | 1 + poky/documentation/ref-manual/migration.xml | 1 + poky/documentation/ref-manual/ref-classes.xml | 1 + .../ref-manual/ref-devtool-reference.xml | 1 + poky/documentation/ref-manual/ref-features.xml | 1 + poky/documentation/ref-manual/ref-images.xml | 1 + poky/documentation/ref-manual/ref-kickstart.xml | 1 + .../ref-manual/ref-manual-customization.xsl | 2 + poky/documentation/ref-manual/ref-qa-checks.xml | 1 + .../ref-manual/ref-release-process.xml | 1 + poky/documentation/ref-manual/ref-structure.xml | 1 + poky/documentation/ref-manual/ref-style.css | 3 + .../ref-manual/ref-system-requirements.xml | 1 + poky/documentation/ref-manual/ref-tasks.xml | 1 + poky/documentation/ref-manual/ref-terms.xml | 1 + poky/documentation/ref-manual/ref-variables.xml | 1 + 
poky/documentation/ref-manual/ref-varlocality.xml | 1 + poky/documentation/ref-manual/resources.xml | 1 + .../baremetal-examples/baremetal-helloworld_git.bb | 94 +- .../recipes-kernel/linux/linux-yocto_5.4.bbappend | 16 +- poky/meta/classes/archiver.bbclass | 4 +- poky/meta/classes/baremetal-image.bbclass | 99 + poky/meta/classes/deploy.bbclass | 1 + poky/meta/classes/gtk-icon-cache.bbclass | 19 +- poky/meta/classes/kernel-yocto.bbclass | 34 +- poky/meta/classes/kernel.bbclass | 34 +- poky/meta/classes/populate_sdk_ext.bbclass | 2 +- poky/meta/classes/rootfs-postcommands.bbclass | 12 +- poky/meta/classes/spdx.bbclass | 360 --- poky/meta/classes/uboot-sign.bbclass | 11 +- poky/meta/conf/bitbake.conf | 4 +- poky/meta/conf/machine/include/riscv/qemuriscv.inc | 2 +- .../meta/files/common-licenses/BSD-2-Clause-Patent | 47 + .../files/common-licenses/BSD-2-Clause-Patent.txt | 47 - poky/meta/lib/oe/rootfs.py | 4 +- poky/meta/lib/oeqa/core/loader.py | 2 +- poky/meta/lib/oeqa/core/utils/concurrencytest.py | 23 +- poky/meta/lib/oeqa/runtime/cases/logrotate.py | 4 - poky/meta/lib/oeqa/selftest/cases/devtool.py | 6 +- poky/meta/lib/oeqa/selftest/cases/oescripts.py | 4 +- poky/meta/lib/oeqa/selftest/cases/recipetool.py | 6 +- poky/meta/lib/oeqa/selftest/context.py | 36 +- poky/meta/lib/oeqa/targetcontrol.py | 1 + poky/meta/recipes-bsp/opensbi/opensbi_0.6.bb | 48 - poky/meta/recipes-bsp/opensbi/opensbi_0.8.bb | 47 + poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb | 2 +- .../recipes-connectivity/libuv/libuv_1.34.2.bb | 19 - .../recipes-connectivity/libuv/libuv_1.38.0.bb | 19 + ...001-cacheio-use-intmax_t-for-formatted-IO.patch | 38 - .../nfs-utils/nfs-utils_2.4.3.bb | 147 -- .../nfs-utils/nfs-utils_2.5.1.bb | 144 ++ .../recipes-connectivity/openssl/openssl_1.1.1g.bb | 4 +- ...-not-allow-event-subscriptions-with-URLs-.patch | 151 ++ ...x-event-message-generation-using-a-long-U.patch | 62 + ...ndle-HTTP-initiation-failures-for-events-.patch | 50 + .../wpa-supplicant/wpa-supplicant_2.9.bb | 5 +- .../recipes-core/busybox/busybox-inittab_1.31.0.bb | 35 - .../recipes-core/busybox/busybox-inittab_1.32.0.bb | 35 + .../busybox/0001-Remove-stime-function-calls.patch | 85 - ...all-wrappers-around-clock_gettime-closes-.patch | 120 - ...-date-Use-64-prefix-syscall-if-we-have-to.patch | 53 - ...-time-Use-64-prefix-syscall-if-we-have-to.patch | 43 - ...runsv-Use-64-prefix-syscall-if-we-have-to.patch | 46 - .../busybox/busybox-udhcpc-no_deconfig.patch | 26 +- poky/meta/recipes-core/busybox/busybox_1.31.1.bb | 55 - poky/meta/recipes-core/busybox/busybox_1.32.0.bb | 50 + poky/meta/recipes-core/coreutils/coreutils_8.32.bb | 15 +- poky/meta/recipes-core/dropbear/dropbear.inc | 2 +- .../dropbear/dropbear-disable-weak-ciphers.patch | 39 +- .../meta/recipes-core/dropbear/dropbear_2019.78.bb | 4 - .../meta/recipes-core/dropbear/dropbear_2020.80.bb | 3 + .../recipes-core/initscripts/initscripts_1.0.bb | 2 +- poky/meta/recipes-core/meta/signing-keys.bb | 2 - ...dynamic-linker-a-relative-symlink-to-libc.patch | 11 +- ...slibdir-and-libdir-as-default-pathes-to-l.patch | 15 +- poky/meta/recipes-core/musl/musl_git.bb | 2 +- poky/meta/recipes-core/ovmf/ovmf_git.bb | 3 +- poky/meta/recipes-core/systemd/systemd_245.6.bb | 1 - poky/meta/recipes-devtools/bison/bison_3.6.3.bb | 44 - poky/meta/recipes-devtools/bison/bison_3.6.4.bb | 44 + poky/meta/recipes-devtools/ccache/ccache.inc | 28 - poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb | 32 + poky/meta/recipes-devtools/ccache/ccache_3.7.9.bb | 7 - 
poky/meta/recipes-devtools/dnf/dnf_4.2.21.bb | 90 - poky/meta/recipes-devtools/dnf/dnf_4.2.23.bb | 90 + .../recipes-devtools/elfutils/elfutils_0.179.bb | 157 -- .../recipes-devtools/elfutils/elfutils_0.180.bb | 144 ++ .../elfutils/files/0001-musl-obstack-fts.patch | 11 +- ...ile.am-compile-test_nlist-with-standard-C.patch | 27 + .../elfutils/files/0002-musl-libs.patch | 17 +- .../elfutils/files/0003-musl-utils.patch | 7 +- .../elfutils/files/0004-Fix-error-on-musl.patch | 7 +- .../0015-config-eu.am-do-not-use-Werror.patch | 36 + ...ferences-between-mips-machine-identifiers.patch | 34 - ...1-fix-compile-failure-with-debian-patches.patch | 48 - ...-support-for-mips64-abis-in-mips_retval.c.patch | 171 -- .../0003-Add-mips-n64-relocation-format-hack.patch | 229 -- .../elfutils/files/debian/arm_backend.diff | 624 ----- .../elfutils/files/debian/disable_werror.patch | 20 - .../elfutils/files/debian/hppa_backend.diff | 828 ------- .../elfutils/files/debian/hurd_path.patch | 17 - .../elfutils/files/debian/ignore_strmerge.diff | 14 - .../elfutils/files/debian/kfreebsd_path.patch | 20 - .../elfutils/files/debian/mips_backend.diff | 724 ------ .../elfutils/files/debian/mips_cfi.patch | 129 - .../elfutils/files/debian/mips_readelf_w.patch | 31 - .../files/debian/testsuite-ignore-elflint.diff | 52 - .../recipes-devtools/gnu-config/gnu-config_git.bb | 4 +- .../recipes-devtools/go/go-binary-native_1.14.4.bb | 3 + poky/meta/recipes-devtools/jquery/jquery_3.5.1.bb | 16 +- .../json-c/json-c/CVE-2020-12762.patch | 160 ++ poky/meta/recipes-devtools/json-c/json-c_0.14.bb | 5 +- .../libdnf/libdnf/0001-Add-WITH_TESTS-option.patch | 10 +- ...quotes-around-string-literals-used-in-SQL.patch | 36 - poky/meta/recipes-devtools/libdnf/libdnf_0.47.0.bb | 34 - poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb | 33 + poky/meta/recipes-devtools/meson/meson.inc | 3 +- .../0001-Make-CPU-family-warnings-fatal.patch | 2 +- ...ways-sort-shared-before-static-fixes-7171.patch | 35 - ...02-Support-building-allarch-recipes-again.patch | 2 +- poky/meta/recipes-devtools/meson/meson_0.54.2.bb | 4 - poky/meta/recipes-devtools/meson/meson_0.54.3.bb | 4 + .../meson/nativesdk-meson_0.54.2.bb | 65 - .../meson/nativesdk-meson_0.54.3.bb | 65 + .../opkg-utils/fix-reproducibility.patch | 32 - .../opkg-utils/opkg-utils_0.4.2.bb | 66 - .../opkg-utils/opkg-utils_0.4.3.bb | 65 + poky/meta/recipes-devtools/opkg/opkg_0.4.2.bb | 74 - poky/meta/recipes-devtools/opkg/opkg_0.4.3.bb | 74 + ...CH-perl-134117-Close-DATA-in-loc_tools.pl.patch | 30 - ...environment-variable-to-suppress-comments.patch | 30 - .../recipes-devtools/perl/files/perl-rdepends.txt | 2493 +------------------- poky/meta/recipes-devtools/perl/perl_5.30.2.bb | 387 --- poky/meta/recipes-devtools/perl/perl_5.32.0.bb | 383 +++ .../pseudo/files/0001-Add-statx.patch | 106 - .../0001-maketables-wrappers-use-Python-3.patch | 34 - ...-DB-fixup-remove-files-that-do-not-exist-.patch | 49 - .../files/0001-pseudo_ipc.h-Fix-enum-typedef.patch | 31 - .../0001-realpath.c-Remove-trailing-slashes.patch | 57 - ...0006-xattr-adjust-for-attr-2.4.48-release.patch | 48 - .../pseudo/files/moreretries.patch | 19 - .../recipes-devtools/pseudo/files/seccomp.patch | 137 -- .../pseudo/files/toomanyfiles.patch | 71 - .../pseudo/files/xattr_version.patch | 54 - poky/meta/recipes-devtools/pseudo/pseudo_git.bb | 14 +- ...h-usr-and-so-on-for-libraries-by-default-.patch | 11 +- ...01-convert-shebang-from-python-to-python3.patch | 555 ----- .../recipes-devtools/python-numpy/python-numpy.inc | 7 +- 
.../python-numpy/python3-numpy_1.18.5.bb | 3 - .../python-numpy/python3-numpy_1.19.0.bb | 3 + ...ionally-do-not-fetch-code-by-easy_install.patch | 6 +- .../meta/recipes-devtools/python/python-cython.inc | 2 +- .../recipes-devtools/python/python-setuptools.inc | 7 +- .../python/python3-cython_0.29.19.bb | 18 - .../python/python3-cython_0.29.20.bb | 18 + .../python/python3-libarchive-c_2.9.bb | 6 +- ...ficient-usr-bin-wrappers-signoff-included.patch | 62 - .../python/python3-setuptools_47.1.1.bb | 6 - .../python/python3-setuptools_47.3.1.bb | 6 + poky/meta/recipes-devtools/python/python3_3.8.3.bb | 1 + poky/meta/recipes-devtools/qemu/qemu.inc | 3 +- .../qemu/qemu/CVE-2020-10761.patch | 151 ++ .../rsync/files/CVE-2016-9840.patch | 75 - .../rsync/files/CVE-2016-9841.patch | 228 -- .../rsync/files/CVE-2016-9842.patch | 33 - .../rsync/files/CVE-2016-9843.patch | 53 - .../rsync/files/makefile-no-rebuild.patch | 25 +- poky/meta/recipes-devtools/rsync/rsync_3.1.3.bb | 57 - poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb | 58 + poky/meta/recipes-extended/bzip2/bzip2/Makefile.am | 1 + poky/meta/recipes-extended/bzip2/bzip2_1.0.8.bb | 31 +- poky/meta/recipes-extended/cpio/cpio_2.13.bb | 6 +- .../stress-ng/stress-ng/no_daddr_t.patch | 16 +- .../stress-ng/stress-ng_0.11.12.bb | 26 - .../stress-ng/stress-ng_0.11.14.bb | 26 + ...1-Include-sys-types.h-for-id_t-definition.patch | 34 - poky/meta/recipes-extended/sudo/sudo_1.9.0.bb | 48 - poky/meta/recipes-extended/sudo/sudo_1.9.1.bb | 46 + .../gnome/adwaita-icon-theme_3.36.1.bb | 4 +- poky/meta/recipes-gnome/gtk+/gtk+3.inc | 8 + poky/meta/recipes-gnome/gtk+/gtk+3_3.24.20.bb | 19 - poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb | 19 + ...1-configure.ac-don-t-require-eglmesaext.h.patch | 92 + poky/meta/recipes-graphics/cogl/cogl-1.0_1.22.8.bb | 1 + .../recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb | 59 - .../recipes-graphics/jpeg/libjpeg-turbo_2.0.5.bb | 59 + .../0002-meson.build-make-TLS-ELF-optional.patch | 12 +- ...max_t-for-formatted-output-of-timespec-me.patch | 16 +- poky/meta/recipes-graphics/mesa/mesa-gl_20.0.7.bb | 15 - poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb | 15 + poky/meta/recipes-graphics/mesa/mesa.inc | 3 +- poky/meta/recipes-graphics/mesa/mesa_20.0.7.bb | 1 - poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb | 2 + poky/meta/recipes-graphics/piglit/piglit_git.bb | 2 +- poky/meta/recipes-graphics/xorg-app/xinit_1.4.1.bb | 3 + .../linux-firmware/linux-firmware_20200519.bb | 946 -------- .../linux-firmware/linux-firmware_20200619.bb | 946 ++++++++ poky/meta/recipes-kernel/linux/linux-yocto-dev.bb | 2 +- .../recipes-kernel/linux/linux-yocto-rt_5.4.bb | 6 +- .../recipes-kernel/linux/linux-yocto-tiny_5.4.bb | 8 +- poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb | 22 +- .../recipes-kernel/lttng/lttng-modules_2.12.1.bb | 6 +- .../modutils-initscripts/modutils-initscripts.bb | 2 +- .../recipes-kernel/systemtap/systemtap_git.inc | 4 +- .../meta/recipes-multimedia/alsa/alsa-lib_1.2.2.bb | 43 - .../recipes-multimedia/alsa/alsa-lib_1.2.3.1.bb | 42 + .../alsa/alsa-topology-conf_1.2.2.bb | 19 - .../alsa/alsa-topology-conf_1.2.3.bb | 19 + .../recipes-multimedia/alsa/alsa-ucm-conf_1.2.2.bb | 20 - .../recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb | 20 + .../alsa/alsa-utils-scripts_1.2.2.bb | 25 - .../alsa/alsa-utils-scripts_1.2.3.bb | 25 + poky/meta/recipes-multimedia/alsa/alsa-utils.inc | 3 +- .../recipes-multimedia/alsa/alsa-utils_1.2.2.bb | 1 - .../recipes-multimedia/alsa/alsa-utils_1.2.3.bb | 2 + poky/meta/recipes-multimedia/ffmpeg/ffmpeg_4.3.bb | 1 + 
.../gstreamer/gst-examples_1.16.0.bb | 1 + poky/meta/recipes-sato/puzzles/puzzles_git.bb | 2 +- .../recipes-sato/rxvt-unicode/rxvt-unicode.inc | 2 + poky/meta/recipes-support/curl/curl_7.71.0.bb | 83 - poky/meta/recipes-support/curl/curl_7.71.1.bb | 83 + .../recipes-support/diffoscope/diffoscope_147.bb | 17 - .../recipes-support/diffoscope/diffoscope_150.bb | 17 + .../recipes-support/iso-codes/iso-codes_4.5.0.bb | 2 +- .../recipes-support/libcheck/libcheck_0.14.0.bb | 29 - .../recipes-support/libcheck/libcheck_0.15.0.bb | 28 + ...cross-compiling-into-a-separate-build-dir.patch | 43 + .../0005-src-gen-lock-obj.sh-add-a-file.patch | 134 ++ .../libgpg-error/libgpg-error_1.37.bb | 72 - .../libgpg-error/libgpg-error_1.38.bb | 42 + poky/meta/recipes-support/libnl/libnl_3.5.0.bb | 2 + poky/scripts/lib/checklayer/__init__.py | 7 +- poky/scripts/lib/devtool/upgrade.py | 4 +- poky/scripts/runqemu | 4 +- 262 files changed, 4520 insertions(+), 11227 deletions(-) create mode 100644 poky/bitbake/contrib/vim/LICENSE.txt mode change 100755 => 100644 poky/bitbake/contrib/vim/plugin/newbb.vim create mode 100644 poky/bitbake/contrib/vim/plugin/newbbappend.vim create mode 100644 poky/meta/classes/baremetal-image.bbclass delete mode 100644 poky/meta/classes/spdx.bbclass create mode 100644 poky/meta/files/common-licenses/BSD-2-Clause-Patent delete mode 100644 poky/meta/files/common-licenses/BSD-2-Clause-Patent.txt delete mode 100644 poky/meta/recipes-bsp/opensbi/opensbi_0.6.bb create mode 100644 poky/meta/recipes-bsp/opensbi/opensbi_0.8.bb delete mode 100644 poky/meta/recipes-connectivity/libuv/libuv_1.34.2.bb create mode 100644 poky/meta/recipes-connectivity/libuv/libuv_1.38.0.bb delete mode 100644 poky/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-cacheio-use-intmax_t-for-formatted-IO.patch delete mode 100644 poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.3.bb create mode 100644 poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.5.1.bb create mode 100644 poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch create mode 100644 poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch create mode 100644 poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb create mode 100644 poky/meta/recipes-core/busybox/busybox-inittab_1.32.0.bb delete mode 100644 poky/meta/recipes-core/busybox/busybox/0001-Remove-stime-function-calls.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox/0001-Remove-syscall-wrappers-around-clock_gettime-closes-.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox/0001-date-Use-64-prefix-syscall-if-we-have-to.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox/0001-time-Use-64-prefix-syscall-if-we-have-to.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox/0003-runsv-Use-64-prefix-syscall-if-we-have-to.patch delete mode 100644 poky/meta/recipes-core/busybox/busybox_1.31.1.bb create mode 100644 poky/meta/recipes-core/busybox/busybox_1.32.0.bb delete mode 100644 poky/meta/recipes-core/dropbear/dropbear_2019.78.bb create mode 100644 poky/meta/recipes-core/dropbear/dropbear_2020.80.bb delete mode 100644 poky/meta/recipes-devtools/bison/bison_3.6.3.bb create mode 100644 poky/meta/recipes-devtools/bison/bison_3.6.4.bb delete mode 100644 
poky/meta/recipes-devtools/ccache/ccache.inc create mode 100644 poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb delete mode 100644 poky/meta/recipes-devtools/ccache/ccache_3.7.9.bb delete mode 100644 poky/meta/recipes-devtools/dnf/dnf_4.2.21.bb create mode 100644 poky/meta/recipes-devtools/dnf/dnf_4.2.23.bb delete mode 100644 poky/meta/recipes-devtools/elfutils/elfutils_0.179.bb create mode 100644 poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb create mode 100644 poky/meta/recipes-devtools/elfutils/files/0001-tests-Makefile.am-compile-test_nlist-with-standard-C.patch create mode 100644 poky/meta/recipes-devtools/elfutils/files/0015-config-eu.am-do-not-use-Werror.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/0001-Ignore-differences-between-mips-machine-identifiers.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/0001-fix-compile-failure-with-debian-patches.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/0003-Add-mips-n64-relocation-format-hack.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/arm_backend.diff delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/disable_werror.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/hppa_backend.diff delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/hurd_path.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/ignore_strmerge.diff delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/kfreebsd_path.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/mips_backend.diff delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/mips_cfi.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/mips_readelf_w.patch delete mode 100644 poky/meta/recipes-devtools/elfutils/files/debian/testsuite-ignore-elflint.diff create mode 100644 poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch delete mode 100644 poky/meta/recipes-devtools/libdnf/libdnf/0001-Use-single-quotes-around-string-literals-used-in-SQL.patch delete mode 100644 poky/meta/recipes-devtools/libdnf/libdnf_0.47.0.bb create mode 100644 poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb delete mode 100644 poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch delete mode 100644 poky/meta/recipes-devtools/meson/meson_0.54.2.bb create mode 100644 poky/meta/recipes-devtools/meson/meson_0.54.3.bb delete mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.2.bb create mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.3.bb delete mode 100644 poky/meta/recipes-devtools/opkg-utils/opkg-utils/fix-reproducibility.patch delete mode 100644 poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb create mode 100644 poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.3.bb delete mode 100644 poky/meta/recipes-devtools/opkg/opkg_0.4.2.bb create mode 100644 poky/meta/recipes-devtools/opkg/opkg_0.4.3.bb delete mode 100644 poky/meta/recipes-devtools/perl/files/0001-PATCH-perl-134117-Close-DATA-in-loc_tools.pl.patch delete mode 100644 poky/meta/recipes-devtools/perl/files/0001-enc2xs-Add-environment-variable-to-suppress-comments.patch delete mode 100644 poky/meta/recipes-devtools/perl/perl_5.30.2.bb create mode 100644 poky/meta/recipes-devtools/perl/perl_5.32.0.bb 
delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/moreretries.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/seccomp.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/toomanyfiles.patch delete mode 100644 poky/meta/recipes-devtools/pseudo/files/xattr_version.patch delete mode 100644 poky/meta/recipes-devtools/python-numpy/files/0001-convert-shebang-from-python-to-python3.patch delete mode 100644 poky/meta/recipes-devtools/python-numpy/python3-numpy_1.18.5.bb create mode 100644 poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb delete mode 100644 poky/meta/recipes-devtools/python/python3-cython_0.29.19.bb create mode 100644 poky/meta/recipes-devtools/python/python3-cython_0.29.20.bb delete mode 100644 poky/meta/recipes-devtools/python/python3-setuptools/0001-ScriptWriter-create-more-efficient-usr-bin-wrappers-signoff-included.patch delete mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_47.1.1.bb create mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb create mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch delete mode 100644 poky/meta/recipes-devtools/rsync/files/CVE-2016-9840.patch delete mode 100644 poky/meta/recipes-devtools/rsync/files/CVE-2016-9841.patch delete mode 100644 poky/meta/recipes-devtools/rsync/files/CVE-2016-9842.patch delete mode 100644 poky/meta/recipes-devtools/rsync/files/CVE-2016-9843.patch delete mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.1.3.bb create mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb delete mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.12.bb create mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.14.bb delete mode 100644 poky/meta/recipes-extended/sudo/sudo/0001-Include-sys-types.h-for-id_t-definition.patch delete mode 100644 poky/meta/recipes-extended/sudo/sudo_1.9.0.bb create mode 100644 poky/meta/recipes-extended/sudo/sudo_1.9.1.bb delete mode 100644 poky/meta/recipes-gnome/gtk+/gtk+3_3.24.20.bb create mode 100644 poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb create mode 100644 poky/meta/recipes-graphics/cogl/cogl-1.0/0001-configure.ac-don-t-require-eglmesaext.h.patch delete mode 100644 poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb create mode 100644 poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.5.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.0.7.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.0.7.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb delete mode 100644 poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200519.bb create mode 100644 poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb delete mode 100644 poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.2.bb create mode 100644 
poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.3.1.bb delete mode 100644 poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.2.bb create mode 100644 poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb delete mode 100644 poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.2.bb create mode 100644 poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb delete mode 100644 poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.2.bb create mode 100644 poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.3.bb delete mode 100644 poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.2.bb create mode 100644 poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.3.bb delete mode 100644 poky/meta/recipes-support/curl/curl_7.71.0.bb create mode 100644 poky/meta/recipes-support/curl/curl_7.71.1.bb delete mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_147.bb create mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_150.bb delete mode 100644 poky/meta/recipes-support/libcheck/libcheck_0.14.0.bb create mode 100644 poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb create mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch create mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch delete mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error_1.37.bb create mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error_1.38.bb (limited to 'poky/meta/recipes-devtools/meson/meson.inc') diff --git a/poky/bitbake/contrib/vim/LICENSE.txt b/poky/bitbake/contrib/vim/LICENSE.txt new file mode 100644 index 000000000..c7d915024 --- /dev/null +++ b/poky/bitbake/contrib/vim/LICENSE.txt @@ -0,0 +1,18 @@ +The MIT License (MIT) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/poky/bitbake/contrib/vim/ftdetect/bitbake.vim b/poky/bitbake/contrib/vim/ftdetect/bitbake.vim index 200f8ae49..09fc4dc74 100644 --- a/poky/bitbake/contrib/vim/ftdetect/bitbake.vim +++ b/poky/bitbake/contrib/vim/ftdetect/bitbake.vim @@ -6,12 +6,12 @@ " " This sets up the syntax highlighting for BitBake files, like .bb, .bbclass and .inc -if &compatible || version < 600 +if &compatible || version < 600 || exists("b:loaded_bitbake_plugin") finish endif " .bb, .bbappend and .bbclass -au BufNewFile,BufRead *.{bb,bbappend,bbclass} set filetype=bitbake +au BufNewFile,BufRead *.{bb,bbappend,bbclass} set filetype=bitbake " .inc au BufNewFile,BufRead *.inc set filetype=bitbake diff --git a/poky/bitbake/contrib/vim/ftplugin/bitbake.vim b/poky/bitbake/contrib/vim/ftplugin/bitbake.vim index db0d75319..9e8d3e13c 100644 --- a/poky/bitbake/contrib/vim/ftplugin/bitbake.vim +++ b/poky/bitbake/contrib/vim/ftplugin/bitbake.vim @@ -1,2 +1,13 @@ -set sts=4 sw=4 et -set cms=#%s +" Only do this when not done yet for this buffer +if exists("b:did_ftplugin") + finish +endif + +" Don't load another plugin for this buffer +let b:did_ftplugin = 1 + +let b:undo_ftplugin = "setl cms< sts< sw< et< sua<" + +setlocal commentstring=#\ %s +setlocal softtabstop=4 shiftwidth=4 expandtab +setlocal suffixesadd+=.bb,.bbclass diff --git a/poky/bitbake/contrib/vim/plugin/newbb.vim b/poky/bitbake/contrib/vim/plugin/newbb.vim old mode 100755 new mode 100644 index 874e33805..3a4202736 --- a/poky/bitbake/contrib/vim/plugin/newbb.vim +++ b/poky/bitbake/contrib/vim/plugin/newbb.vim @@ -10,7 +10,7 @@ " " Will try to use git to find the user name and email -if &compatible || v:version < 600 +if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") finish endif @@ -25,7 +25,7 @@ endfun fun! GetUserEmail() let l:user_email = system("git config --get user.email") if v:shell_error - return "unknow@user.org" + return "unknown@user.org" else return substitute(l:user_email, "\n", "", "") endfun @@ -41,6 +41,10 @@ fun! BBHeader() endfun fun! NewBBTemplate() + if line2byte(line('$') + 1) != -1 + return + endif + let l:paste = &paste set nopaste @@ -48,7 +52,7 @@ fun! NewBBTemplate() call BBHeader() " New the bb template - put ='DESCRIPTION = \"\"' + put ='SUMMARY = \"\"' put ='HOMEPAGE = \"\"' put ='LICENSE = \"\"' put ='SECTION = \"\"' @@ -58,7 +62,7 @@ fun! NewBBTemplate() " Go to the first place to edit 0 - /^DESCRIPTION =/ + /^SUMMARY =/ exec "normal 2f\"" if paste == 1 @@ -76,7 +80,7 @@ if v:progname =~ "vimdiff" endif augroup NewBB - au BufNewFile *.bb + au BufNewFile,BufReadPost *.bb \ if g:bb_create_on_empty | \ call NewBBTemplate() | \ endif diff --git a/poky/bitbake/contrib/vim/plugin/newbbappend.vim b/poky/bitbake/contrib/vim/plugin/newbbappend.vim new file mode 100644 index 000000000..e04174cf6 --- /dev/null +++ b/poky/bitbake/contrib/vim/plugin/newbbappend.vim @@ -0,0 +1,46 @@ +" Vim plugin file +" Purpose: Create a template for new bbappend file +" Author: Joshua Watt +" Copyright: Copyright (C) 2017 Joshua Watt +" +" This file is licensed under the MIT license, see COPYING.MIT in +" this source distribution for the terms. +" + +if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") + finish +endif + +fun! 
NewBBAppendTemplate() + if line2byte(line('$') + 1) != -1 + return + endif + + let l:paste = &paste + set nopaste + + " New bbappend template + 0 put ='FILESEXTRAPATHS_prepend := \"${THISDIR}/${PN}:\"' + 2 + + if paste == 1 + set paste + endif +endfun + +if !exists("g:bb_create_on_empty") + let g:bb_create_on_empty = 1 +endif + +" disable in case of vimdiff +if v:progname =~ "vimdiff" + let g:bb_create_on_empty = 0 +endif + +augroup NewBBAppend + au BufNewFile,BufReadPost *.bbappend + \ if g:bb_create_on_empty | + \ call NewBBAppendTemplate() | + \ endif +augroup END + diff --git a/poky/bitbake/contrib/vim/syntax/bitbake.vim b/poky/bitbake/contrib/vim/syntax/bitbake.vim index fb55f9102..f964621ae 100644 --- a/poky/bitbake/contrib/vim/syntax/bitbake.vim +++ b/poky/bitbake/contrib/vim/syntax/bitbake.vim @@ -12,7 +12,7 @@ " " It's an entirely new type, just has specific syntax in shell and python code -if &compatible || v:version < 600 +if &compatible || v:version < 600 || exists("b:loaded_bitbake_plugin") finish endif if exists("b:current_syntax") @@ -58,8 +58,8 @@ syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref,bbV syn region bbVarPyValue start=+${@+ skip=+\\$+ end=+}+ contained contains=@python " Vars metadata flags -syn match bbVarFlagDef "^\([a-zA-Z0-9\-_\.]\+\)\(\[[a-zA-Z0-9\-_\.]\+\]\)\@=" contains=bbIdentifier nextgroup=bbVarFlagFlag -syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*\(=\|+=\|=+\|?=\)\@=" contained contains=bbIdentifier nextgroup=bbVarEq +syn match bbVarFlagDef "^\([a-zA-Z0-9\-_\.]\+\)\(\[[a-zA-Z0-9\-_\.+]\+\]\)\@=" contains=bbIdentifier nextgroup=bbVarFlagFlag +syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*\(:=\|=\|.=\|=.|+=\|=+\|?=\)\@=" contained contains=bbIdentifier nextgroup=bbVarEq " Includes and requires syn keyword bbInclude inherit include require contained @@ -67,15 +67,15 @@ syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest " Add taks and similar -syn keyword bbStatement addtask addhandler after before EXPORT_FUNCTIONS contained +syn keyword bbStatement addtask deltask addhandler after before EXPORT_FUNCTIONS contained syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement -syn match bbStatementLine "^\(addtask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest +syn match bbStatementLine "^\(addtask\|deltask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest " OE Important Functions syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained " Generic Functions -syn match bbFunction "\h[0-9A-Za-z_-]*" display contained contains=bbOEFunctions +syn match bbFunction "\h[0-9A-Za-z_\-\.]*" display contained contains=bbOEFunctions " BitBake shell metadata syn include @shell syntax/sh.vim @@ -83,7 +83,7 @@ if exists("b:current_syntax") unlet b:current_syntax endif syn keyword bbShFakeRootFlag fakeroot contained -syn match bbShFuncDef "^\(fakeroot\s*\)\?\([0-9A-Za-z_${}-]\+\)\(python\)\@"): try: event = pickle.loads(self.queue[7:index]) - except ValueError as e: + except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: + if isinstance(e, pickle.UnpicklingError) and "truncated" in str(e): + # The pickled data could contain "" so search for the next occurance + # unpickling again, this 
should be the only way an unpickle error could occur + index = self.queue.find(b"", index + 1) + continue bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[7:index])) bb.event.fire_from_worker(event, self.d) if isinstance(event, taskUniHashUpdate): @@ -2979,7 +2984,7 @@ class runQueuePipe(): while index != -1 and self.queue.startswith(b""): try: task, status = pickle.loads(self.queue[10:index]) - except ValueError as e: + except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e: bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index])) self.rqexec.runqueue_process_waitpid(task, status) found = True diff --git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py index 69aae626e..83385baf6 100644 --- a/poky/bitbake/lib/bb/server/process.py +++ b/poky/bitbake/lib/bb/server/process.py @@ -331,7 +331,9 @@ class ServerCommunicator(): def runCommand(self, command): self.connection.send(command) if not self.recv.poll(30): - raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server") + logger.note("No reply from server in 30s") + if not self.recv.poll(30): + raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)") return self.recv.get() def updateFeatureSet(self, featureset): diff --git a/poky/bitbake/lib/bb/siggen.py b/poky/bitbake/lib/bb/siggen.py index 872333d7f..4c63b0baa 100644 --- a/poky/bitbake/lib/bb/siggen.py +++ b/poky/bitbake/lib/bb/siggen.py @@ -14,6 +14,7 @@ import simplediff from bb.checksum import FileChecksumCache from bb import runqueue import hashserv +import hashserv.client logger = logging.getLogger('BitBake.SigGen') hashequiv_logger = logging.getLogger('BitBake.SigGen.HashEquiv') diff --git a/poky/bitbake/lib/bb/taskdata.py b/poky/bitbake/lib/bb/taskdata.py index d13a12498..ffbaf362e 100644 --- a/poky/bitbake/lib/bb/taskdata.py +++ b/poky/bitbake/lib/bb/taskdata.py @@ -21,8 +21,13 @@ def re_match_strings(target, strings): Whether or not the string 'target' matches any one string of the strings which can be regular expression string """ - return any(name == target or re.match(name, target) - for name in strings) + for name in strings: + if name.startswith("^") or name.endswith("$"): + if re.match(name, target): + return True + elif name == target: + return True + return False class TaskEntry: def __init__(self): diff --git a/poky/bitbake/lib/bb/tests/fetch.py b/poky/bitbake/lib/bb/tests/fetch.py index 4697ef59b..29c96b2b4 100644 --- a/poky/bitbake/lib/bb/tests/fetch.py +++ b/poky/bitbake/lib/bb/tests/fetch.py @@ -1031,7 +1031,7 @@ class SVNTest(FetcherTest): bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir) # Github will emulate SVN. Use this to check if we're downloding... 
- bb.process.run("svn propset svn:externals 'bitbake http://github.com/openembedded/bitbake' .", + bb.process.run("svn propset svn:externals 'bitbake svn://vcs.pcre.org/pcre2/code' .", cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk')) bb.process.run("svn commit --non-interactive -m 'Add external'", cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk')) diff --git a/poky/bitbake/lib/bb/ui/taskexp.py b/poky/bitbake/lib/bb/ui/taskexp.py index 8fff24423..05e32338c 100644 --- a/poky/bitbake/lib/bb/ui/taskexp.py +++ b/poky/bitbake/lib/bb/ui/taskexp.py @@ -8,9 +8,16 @@ # import sys -import gi -gi.require_version('Gtk', '3.0') -from gi.repository import Gtk, Gdk, GObject + +try: + import gi + gi.require_version('Gtk', '3.0') + from gi.repository import Gtk, Gdk, GObject +except ValueError: + sys.exit("FATAL: Gtk version needs to be 3.0") +except ImportError: + sys.exit("FATAL: Gtk ui could not load the required gi python module") + import threading from xmlrpc import client import bb diff --git a/poky/bitbake/lib/bb/ui/teamcity.py b/poky/bitbake/lib/bb/ui/teamcity.py index 1854292fa..fca46c287 100644 --- a/poky/bitbake/lib/bb/ui/teamcity.py +++ b/poky/bitbake/lib/bb/ui/teamcity.py @@ -167,8 +167,6 @@ def main(server, eventHandler, params): forcelevel = bb.msg.BBLogFormatter.ERROR else: forcelevel = bb.msg.BBLogFormatter.WARNING - bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, forcelevel) - bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr) console.setFormatter(format) errconsole.setFormatter(format) if not bb.msg.has_console_handler(logger): diff --git a/poky/bitbake/lib/hashserv/__init__.py b/poky/bitbake/lib/hashserv/__init__.py index c3318620f..f95e8f43f 100644 --- a/poky/bitbake/lib/hashserv/__init__.py +++ b/poky/bitbake/lib/hashserv/__init__.py @@ -6,12 +6,20 @@ from contextlib import closing import re import sqlite3 +import itertools +import json UNIX_PREFIX = "unix://" ADDR_TYPE_UNIX = 0 ADDR_TYPE_TCP = 1 +# The Python async server defaults to a 64K receive buffer, so we hardcode our +# maximum chunk size. It would be better if the client and server reported to +# each other what the maximum chunk sizes were, but that will slow down the +# connection setup with a round trip delay so I'd rather not do that unless it +# is necessary +DEFAULT_MAX_CHUNK = 32 * 1024 def setup_database(database, sync=True): db = sqlite3.connect(database) @@ -66,6 +74,20 @@ def parse_address(addr): return (ADDR_TYPE_TCP, (host, int(port))) +def chunkify(msg, max_chunk): + if len(msg) < max_chunk - 1: + yield ''.join((msg, "\n")) + else: + yield ''.join((json.dumps({ + 'chunk-stream': None + }), "\n")) + + args = [iter(msg)] * (max_chunk - 1) + for m in map(''.join, itertools.zip_longest(*args, fillvalue='')): + yield ''.join(itertools.chain(m, "\n")) + yield "\n" + + def create_server(addr, dbname, *, sync=True): from . import server db = setup_database(dbname, sync=sync) diff --git a/poky/bitbake/lib/hashserv/client.py b/poky/bitbake/lib/hashserv/client.py index 46085d641..a29af836d 100644 --- a/poky/bitbake/lib/hashserv/client.py +++ b/poky/bitbake/lib/hashserv/client.py @@ -7,6 +7,7 @@ import json import logging import socket import os +from . 
import chunkify, DEFAULT_MAX_CHUNK logger = logging.getLogger('hashserv.client') @@ -25,6 +26,7 @@ class Client(object): self.reader = None self.writer = None self.mode = self.MODE_NORMAL + self.max_chunk = DEFAULT_MAX_CHUNK def connect_tcp(self, address, port): def connect_sock(): @@ -58,7 +60,7 @@ class Client(object): self.reader = self._socket.makefile('r', encoding='utf-8') self.writer = self._socket.makefile('w', encoding='utf-8') - self.writer.write('OEHASHEQUIV 1.0\n\n') + self.writer.write('OEHASHEQUIV 1.1\n\n') self.writer.flush() # Restore mode if the socket is being re-created @@ -91,18 +93,35 @@ class Client(object): count += 1 def send_message(self, msg): + def get_line(): + line = self.reader.readline() + if not line: + raise HashConnectionError('Connection closed') + + if not line.endswith('\n'): + raise HashConnectionError('Bad message %r' % message) + + return line + def proc(): - self.writer.write('%s\n' % json.dumps(msg)) + for c in chunkify(json.dumps(msg), self.max_chunk): + self.writer.write(c) self.writer.flush() - l = self.reader.readline() - if not l: - raise HashConnectionError('Connection closed') + l = get_line() - if not l.endswith('\n'): - raise HashConnectionError('Bad message %r' % message) + m = json.loads(l) + if 'chunk-stream' in m: + lines = [] + while True: + l = get_line().rstrip('\n') + if not l: + break + lines.append(l) - return json.loads(l) + m = json.loads(''.join(lines)) + + return m return self._send_wrapper(proc) @@ -155,6 +174,14 @@ class Client(object): m['unihash'] = unihash return self.send_message({'report-equiv': m}) + def get_taskhash(self, method, taskhash, all_properties=False): + self._set_mode(self.MODE_NORMAL) + return self.send_message({'get': { + 'taskhash': taskhash, + 'method': method, + 'all': all_properties + }}) + def get_stats(self): self._set_mode(self.MODE_NORMAL) return self.send_message({'get-stats': None}) diff --git a/poky/bitbake/lib/hashserv/server.py b/poky/bitbake/lib/hashserv/server.py index cc7e48233..81050715e 100644 --- a/poky/bitbake/lib/hashserv/server.py +++ b/poky/bitbake/lib/hashserv/server.py @@ -13,6 +13,7 @@ import os import signal import socket import time +from . 
import chunkify, DEFAULT_MAX_CHUNK logger = logging.getLogger('hashserv.server') @@ -107,12 +108,29 @@ class Stats(object): return {k: getattr(self, k) for k in ('num', 'total_time', 'max_time', 'average', 'stdev')} +class ClientError(Exception): + pass + class ServerClient(object): + FAST_QUERY = 'SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1' + ALL_QUERY = 'SELECT * FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1' + def __init__(self, reader, writer, db, request_stats): self.reader = reader self.writer = writer self.db = db self.request_stats = request_stats + self.max_chunk = DEFAULT_MAX_CHUNK + + self.handlers = { + 'get': self.handle_get, + 'report': self.handle_report, + 'report-equiv': self.handle_equivreport, + 'get-stream': self.handle_get_stream, + 'get-stats': self.handle_get_stats, + 'reset-stats': self.handle_reset_stats, + 'chunk-stream': self.handle_chunk, + } async def process_requests(self): try: @@ -125,7 +143,11 @@ class ServerClient(object): return (proto_name, proto_version) = protocol.decode('utf-8').rstrip().split() - if proto_name != 'OEHASHEQUIV' or proto_version != '1.0': + if proto_name != 'OEHASHEQUIV': + return + + proto_version = tuple(int(v) for v in proto_version.split('.')) + if proto_version < (1, 0) or proto_version > (1, 1): return # Read headers. Currently, no headers are implemented, so look for @@ -140,40 +162,34 @@ class ServerClient(object): break # Handle messages - handlers = { - 'get': self.handle_get, - 'report': self.handle_report, - 'report-equiv': self.handle_equivreport, - 'get-stream': self.handle_get_stream, - 'get-stats': self.handle_get_stats, - 'reset-stats': self.handle_reset_stats, - } - while True: d = await self.read_message() if d is None: break - - for k in handlers.keys(): - if k in d: - logger.debug('Handling %s' % k) - if 'stream' in k: - await handlers[k](d[k]) - else: - with self.request_stats.start_sample() as self.request_sample, \ - self.request_sample.measure(): - await handlers[k](d[k]) - break - else: - logger.warning("Unrecognized command %r" % d) - break - + await self.dispatch_message(d) await self.writer.drain() + except ClientError as e: + logger.error(str(e)) finally: self.writer.close() + async def dispatch_message(self, msg): + for k in self.handlers.keys(): + if k in msg: + logger.debug('Handling %s' % k) + if 'stream' in k: + await self.handlers[k](msg[k]) + else: + with self.request_stats.start_sample() as self.request_sample, \ + self.request_sample.measure(): + await self.handlers[k](msg[k]) + return + + raise ClientError("Unrecognized command %r" % msg) + def write_message(self, msg): - self.writer.write(('%s\n' % json.dumps(msg)).encode('utf-8')) + for c in chunkify(json.dumps(msg), self.max_chunk): + self.writer.write(c.encode('utf-8')) async def read_message(self): l = await self.reader.readline() @@ -191,14 +207,38 @@ class ServerClient(object): logger.error('Bad message from client: %r' % message) raise e + async def handle_chunk(self, request): + lines = [] + try: + while True: + l = await self.reader.readline() + l = l.rstrip(b"\n").decode("utf-8") + if not l: + break + lines.append(l) + + msg = json.loads(''.join(lines)) + except (json.JSONDecodeError, UnicodeDecodeError) as e: + logger.error('Bad message from client: %r' % message) + raise e + + if 'chunk-stream' in msg: + raise ClientError("Nested chunks are not allowed") + + await self.dispatch_message(msg) + async def 
handle_get(self, request): method = request['method'] taskhash = request['taskhash'] - row = self.query_equivalent(method, taskhash) + if request.get('all', False): + row = self.query_equivalent(method, taskhash, self.ALL_QUERY) + else: + row = self.query_equivalent(method, taskhash, self.FAST_QUERY) + if row is not None: logger.debug('Found equivalent task %s -> %s', (row['taskhash'], row['unihash'])) - d = {k: row[k] for k in ('taskhash', 'method', 'unihash')} + d = {k: row[k] for k in row.keys()} self.write_message(d) else: @@ -228,7 +268,7 @@ class ServerClient(object): (method, taskhash) = l.split() #logger.debug('Looking up %s %s' % (method, taskhash)) - row = self.query_equivalent(method, taskhash) + row = self.query_equivalent(method, taskhash, self.FAST_QUERY) if row is not None: msg = ('%s\n' % row['unihash']).encode('utf-8') #logger.debug('Found equivalent task %s -> %s', (row['taskhash'], row['unihash'])) @@ -328,7 +368,7 @@ class ServerClient(object): # Fetch the unihash that will be reported for the taskhash. If the # unihash matches, it means this row was inserted (or the mapping # was already valid) - row = self.query_equivalent(data['method'], data['taskhash']) + row = self.query_equivalent(data['method'], data['taskhash'], self.FAST_QUERY) if row['unihash'] == data['unihash']: logger.info('Adding taskhash equivalence for %s with unihash %s', @@ -354,12 +394,11 @@ class ServerClient(object): self.request_stats.reset() self.write_message(d) - def query_equivalent(self, method, taskhash): + def query_equivalent(self, method, taskhash, query): # This is part of the inner loop and must be as fast as possible try: cursor = self.db.cursor() - cursor.execute('SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1', - {'method': method, 'taskhash': taskhash}) + cursor.execute(query, {'method': method, 'taskhash': taskhash}) return cursor.fetchone() except: cursor.close() diff --git a/poky/bitbake/lib/hashserv/tests.py b/poky/bitbake/lib/hashserv/tests.py index a5472a996..6e8629507 100644 --- a/poky/bitbake/lib/hashserv/tests.py +++ b/poky/bitbake/lib/hashserv/tests.py @@ -99,6 +99,29 @@ class TestHashEquivalenceServer(object): result = self.client.get_unihash(self.METHOD, taskhash) self.assertEqual(result, unihash) + def test_huge_message(self): + # Simple test that hashes can be created + taskhash = 'c665584ee6817aa99edfc77a44dd853828279370' + outhash = '3c979c3db45c569f51ab7626a4651074be3a9d11a84b1db076f5b14f7d39db44' + unihash = '90e9bc1d1f094c51824adca7f8ea79a048d68824' + + result = self.client.get_unihash(self.METHOD, taskhash) + self.assertIsNone(result, msg='Found unexpected task, %r' % result) + + siginfo = "0" * (self.client.max_chunk * 4) + + result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash, { + 'outhash_siginfo': siginfo + }) + self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash') + + result = self.client.get_taskhash(self.METHOD, taskhash, True) + self.assertEqual(result['taskhash'], taskhash) + self.assertEqual(result['unihash'], unihash) + self.assertEqual(result['method'], self.METHOD) + self.assertEqual(result['outhash'], outhash) + self.assertEqual(result['outhash_siginfo'], siginfo) + def test_stress(self): def query_server(failures): client = Client(self.server.address) diff --git a/poky/documentation/mega-manual/mega-manual-customization.xsl b/poky/documentation/mega-manual/mega-manual-customization.xsl index b52b5b2aa..33a6e1632 100644 --- 
a/poky/documentation/mega-manual/mega-manual-customization.xsl +++ b/poky/documentation/mega-manual/mega-manual-customization.xsl @@ -1,4 +1,5 @@ + diff --git a/poky/documentation/mega-manual/mega-manual.xml b/poky/documentation/mega-manual/mega-manual.xml index e730f7259..d9912fa44 100755 --- a/poky/documentation/mega-manual/mega-manual.xml +++ b/poky/documentation/mega-manual/mega-manual.xml @@ -1,7 +1,7 @@ %poky; ] > - + %poky; ] > + Yocto Project Concepts diff --git a/poky/documentation/overview-manual/overview-manual-customization.xsl b/poky/documentation/overview-manual/overview-manual-customization.xsl index 22360e7ba..1dd91bde8 100644 --- a/poky/documentation/overview-manual/overview-manual-customization.xsl +++ b/poky/documentation/overview-manual/overview-manual-customization.xsl @@ -1,4 +1,6 @@ + + diff --git a/poky/documentation/overview-manual/overview-manual-development-environment.xml b/poky/documentation/overview-manual/overview-manual-development-environment.xml index 36ebf8a32..8415d1dd7 100644 --- a/poky/documentation/overview-manual/overview-manual-development-environment.xml +++ b/poky/documentation/overview-manual/overview-manual-development-environment.xml @@ -1,6 +1,7 @@ %poky; ] > + The Yocto Project Development Environment diff --git a/poky/documentation/overview-manual/overview-manual-intro.xml b/poky/documentation/overview-manual/overview-manual-intro.xml index 39433aa41..0e0bfed6e 100644 --- a/poky/documentation/overview-manual/overview-manual-intro.xml +++ b/poky/documentation/overview-manual/overview-manual-intro.xml @@ -1,6 +1,7 @@ %poky; ] > + diff --git a/poky/documentation/overview-manual/overview-manual-style.css b/poky/documentation/overview-manual/overview-manual-style.css index 97a364b12..eec934161 100644 --- a/poky/documentation/overview-manual/overview-manual-style.css +++ b/poky/documentation/overview-manual/overview-manual-style.css @@ -1,4 +1,6 @@ /* + SPDX-License-Identifier: CC-BY-2.0-UK + Generic XHTML / DocBook XHTML CSS Stylesheet. 
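
The hashserv changes above bump the protocol to 1.1 and frame every JSON message against DEFAULT_MAX_CHUNK: a short message goes out as one newline-terminated line, while a long one is announced with a {"chunk-stream": null} marker, streamed as fixed-width lines and closed with an empty line. Below is a rough standalone sketch pairing the chunkify() generator from hashserv/__init__.py with a simplified reassembly loop; the real client and server do this over their socket streams with additional error handling:

    import itertools
    import json

    DEFAULT_MAX_CHUNK = 32 * 1024

    def chunkify(msg, max_chunk):
        # Short messages are sent as a single newline-terminated line.
        if len(msg) < max_chunk - 1:
            yield ''.join((msg, "\n"))
        else:
            # Announce a chunked transfer, emit fixed-size lines, then an
            # empty line to terminate the stream.
            yield ''.join((json.dumps({'chunk-stream': None}), "\n"))
            args = [iter(msg)] * (max_chunk - 1)
            for m in map(''.join, itertools.zip_longest(*args, fillvalue='')):
                yield ''.join(itertools.chain(m, "\n"))
            yield "\n"

    def reassemble(lines):
        # 'lines' is an iterator of newline-terminated strings as read from
        # the peer; returns the decoded JSON message.
        msg = json.loads(next(lines))
        if isinstance(msg, dict) and 'chunk-stream' in msg:
            parts = []
            for line in lines:
                line = line.rstrip('\n')
                if not line:
                    break
                parts.append(line)
            msg = json.loads(''.join(parts))
        return msg

    # Round-trip a message larger than the chunk size, as test_huge_message does.
    payload = {'report': {'outhash_siginfo': '0' * (4 * DEFAULT_MAX_CHUNK)}}
    wire = list(chunkify(json.dumps(payload), DEFAULT_MAX_CHUNK))
    assert reassemble(iter(wire)) == payload
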
Browser wrangling and typographic design by diff --git a/poky/documentation/overview-manual/overview-manual-yp-intro.xml b/poky/documentation/overview-manual/overview-manual-yp-intro.xml index 1b60a3030..2097ed36e 100644 --- a/poky/documentation/overview-manual/overview-manual-yp-intro.xml +++ b/poky/documentation/overview-manual/overview-manual-yp-intro.xml @@ -1,6 +1,7 @@ %poky; ] > + Introducing the Yocto Project diff --git a/poky/documentation/overview-manual/overview-manual.xml b/poky/documentation/overview-manual/overview-manual.xml index 210d644b3..8021a2e95 100755 --- a/poky/documentation/overview-manual/overview-manual.xml +++ b/poky/documentation/overview-manual/overview-manual.xml @@ -1,6 +1,7 @@ %poky; ] > + %poky; ] > + FAQ diff --git a/poky/documentation/ref-manual/migration.xml b/poky/documentation/ref-manual/migration.xml index affc8b90a..d3d5b16bd 100644 --- a/poky/documentation/ref-manual/migration.xml +++ b/poky/documentation/ref-manual/migration.xml @@ -1,6 +1,7 @@ %poky; ] > + Migrating to a Newer Yocto Project Release diff --git a/poky/documentation/ref-manual/ref-classes.xml b/poky/documentation/ref-manual/ref-classes.xml index f8920d8c1..ab12373ae 100644 --- a/poky/documentation/ref-manual/ref-classes.xml +++ b/poky/documentation/ref-manual/ref-classes.xml @@ -1,6 +1,7 @@ %poky; ] > + Classes diff --git a/poky/documentation/ref-manual/ref-devtool-reference.xml b/poky/documentation/ref-manual/ref-devtool-reference.xml index 11f7399c5..6c3ccc303 100644 --- a/poky/documentation/ref-manual/ref-devtool-reference.xml +++ b/poky/documentation/ref-manual/ref-devtool-reference.xml @@ -1,6 +1,7 @@ %poky; ] > + <filename>devtool</filename> Quick Reference diff --git a/poky/documentation/ref-manual/ref-features.xml b/poky/documentation/ref-manual/ref-features.xml index 294b297c2..8cab5ec3a 100644 --- a/poky/documentation/ref-manual/ref-features.xml +++ b/poky/documentation/ref-manual/ref-features.xml @@ -1,6 +1,7 @@ %poky; ] > + Features diff --git a/poky/documentation/ref-manual/ref-images.xml b/poky/documentation/ref-manual/ref-images.xml index 1f96186c6..aaeda5522 100644 --- a/poky/documentation/ref-manual/ref-images.xml +++ b/poky/documentation/ref-manual/ref-images.xml @@ -1,6 +1,7 @@ %poky; ] > + Images diff --git a/poky/documentation/ref-manual/ref-kickstart.xml b/poky/documentation/ref-manual/ref-kickstart.xml index 1128bd50d..45db1c0ff 100644 --- a/poky/documentation/ref-manual/ref-kickstart.xml +++ b/poky/documentation/ref-manual/ref-kickstart.xml @@ -1,6 +1,7 @@ %poky; ] > + OpenEmbedded Kickstart (<filename>.wks</filename>) Reference diff --git a/poky/documentation/ref-manual/ref-manual-customization.xsl b/poky/documentation/ref-manual/ref-manual-customization.xsl index c58dd905b..3181f618e 100644 --- a/poky/documentation/ref-manual/ref-manual-customization.xsl +++ b/poky/documentation/ref-manual/ref-manual-customization.xsl @@ -1,4 +1,6 @@ + + diff --git a/poky/documentation/ref-manual/ref-qa-checks.xml b/poky/documentation/ref-manual/ref-qa-checks.xml index 424a19c59..0071e4a55 100644 --- a/poky/documentation/ref-manual/ref-qa-checks.xml +++ b/poky/documentation/ref-manual/ref-qa-checks.xml @@ -1,6 +1,7 @@ %poky; ] > + QA Error and Warning Messages diff --git a/poky/documentation/ref-manual/ref-release-process.xml b/poky/documentation/ref-manual/ref-release-process.xml index 5efe17417..87f530806 100644 --- a/poky/documentation/ref-manual/ref-release-process.xml +++ b/poky/documentation/ref-manual/ref-release-process.xml @@ -1,6 +1,7 @@ %poky; ] > + Yocto Project 
Releases and the Stable Release Process diff --git a/poky/documentation/ref-manual/ref-structure.xml b/poky/documentation/ref-manual/ref-structure.xml index 27f17dd91..8588e9c2d 100644 --- a/poky/documentation/ref-manual/ref-structure.xml +++ b/poky/documentation/ref-manual/ref-structure.xml @@ -1,6 +1,7 @@ %poky; ] > + diff --git a/poky/documentation/ref-manual/ref-style.css b/poky/documentation/ref-manual/ref-style.css index 7077e4b70..622ceb8f7 100644 --- a/poky/documentation/ref-manual/ref-style.css +++ b/poky/documentation/ref-manual/ref-style.css @@ -1,4 +1,7 @@ /* + + SPDX-License-Identifier: CC-BY-2.0-UK + Generic XHTML / DocBook XHTML CSS Stylesheet. Browser wrangling and typographic design by diff --git a/poky/documentation/ref-manual/ref-system-requirements.xml b/poky/documentation/ref-manual/ref-system-requirements.xml index 7a11ec2cf..43e217c6d 100644 --- a/poky/documentation/ref-manual/ref-system-requirements.xml +++ b/poky/documentation/ref-manual/ref-system-requirements.xml @@ -1,6 +1,7 @@ %poky; ] > + System Requirements diff --git a/poky/documentation/ref-manual/ref-tasks.xml b/poky/documentation/ref-manual/ref-tasks.xml index 011e0d749..5b09b3f2e 100644 --- a/poky/documentation/ref-manual/ref-tasks.xml +++ b/poky/documentation/ref-manual/ref-tasks.xml @@ -1,6 +1,7 @@ %poky; ] > + Tasks diff --git a/poky/documentation/ref-manual/ref-terms.xml b/poky/documentation/ref-manual/ref-terms.xml index 722fa7ee2..d2605c62a 100644 --- a/poky/documentation/ref-manual/ref-terms.xml +++ b/poky/documentation/ref-manual/ref-terms.xml @@ -1,6 +1,7 @@ %poky; ] > + Yocto Project Terms diff --git a/poky/documentation/ref-manual/ref-variables.xml b/poky/documentation/ref-manual/ref-variables.xml index 657f6cf3d..9fe744aff 100644 --- a/poky/documentation/ref-manual/ref-variables.xml +++ b/poky/documentation/ref-manual/ref-variables.xml @@ -1,6 +1,7 @@ %poky; ] > + diff --git a/poky/documentation/ref-manual/ref-varlocality.xml b/poky/documentation/ref-manual/ref-varlocality.xml index 54524d5b6..a2436fb31 100644 --- a/poky/documentation/ref-manual/ref-varlocality.xml +++ b/poky/documentation/ref-manual/ref-varlocality.xml @@ -1,6 +1,7 @@ %poky; ] > + Variable Context diff --git a/poky/documentation/ref-manual/resources.xml b/poky/documentation/ref-manual/resources.xml index afe8e288d..4899b2e59 100644 --- a/poky/documentation/ref-manual/resources.xml +++ b/poky/documentation/ref-manual/resources.xml @@ -1,6 +1,7 @@ %poky; ] > + Contributions and Additional Information diff --git a/poky/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb b/poky/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb index e84a90f28..3acc523a8 100644 --- a/poky/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb +++ b/poky/meta-skeleton/recipes-baremetal/baremetal-examples/baremetal-helloworld_git.bb @@ -10,10 +10,19 @@ SRC_URI = "git://github.com/aehs29/baremetal-helloqemu.git;protocol=https;branch S = "${WORKDIR}/git/" -# These examples are not meant to be built when using either musl or glibc -COMPATIBLE_HOST_libc-musl_class-target = "null" -COMPATIBLE_HOST_libc-glibc_class-target = "null" +# The following variables should be set to accomodate each application +BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}" +IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}" +IMAGE_NAME_SUFFIX ?= "" + +# Baremetal-Image creates the proper wiring, assumes the output is provided in +# binary and ELF format, installed on 
${base_libdir}/firmware/ , we want a +# package to be created since we might have some way of updating the baremetal +# firmware from Linux +inherit baremetal-image + +# These parameters are app specific for this example # This will be translated automatically to the architecture and # machine that QEMU uses on OE, e.g. -machine virt -cpu cortex-a57 # but the examples can also be run on other architectures/machines @@ -25,82 +34,17 @@ BAREMETAL_QEMUARCH_qemuarmv5 = "versatile" BAREMETAL_QEMUARCH_qemuarm = "arm" BAREMETAL_QEMUARCH_qemuarm64 = "aarch64" - EXTRA_OEMAKE_append = " QEMUARCH=${BAREMETAL_QEMUARCH} V=1" -do_install(){ - install -d ${D}/${datadir} - install -m 755 ${B}build/hello_baremetal_${BAREMETAL_QEMUARCH}.bin ${D}/${datadir}/hello_baremetal_${MACHINE}.bin - install -m 755 ${B}build/hello_baremetal_${BAREMETAL_QEMUARCH}.elf ${D}/${datadir}/hello_baremetal_${MACHINE}.elf -} -# Borrowed from meta-freertos -inherit rootfs-postcommands -inherit deploy -IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete" -do_deploy[dirs] = "${DEPLOYDIR} ${DEPLOY_DIR_IMAGE}" -do_rootfs[dirs] = "${DEPLOYDIR} ${DEPLOY_DIR_IMAGE}" -DEPLOYDIR = "${IMGDEPLOYDIR}" -IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}" -IMAGE_NAME_SUFFIX ?= "" - -do_deploy(){ - install ${D}/${datadir}/hello_baremetal_${MACHINE}.bin ${DEPLOYDIR}/${IMAGE_LINK_NAME}.bin - install ${D}/${datadir}/hello_baremetal_${MACHINE}.elf ${DEPLOYDIR}/${IMAGE_LINK_NAME}.elf -} - -do_image(){ - : +# Install binaries on the proper location for baremetal-image to fetch and deploy +do_install(){ + install -d ${D}/${base_libdir}/firmware + install -m 755 ${B}build/hello_baremetal_${BAREMETAL_QEMUARCH}.bin ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin + install -m 755 ${B}build/hello_baremetal_${BAREMETAL_QEMUARCH}.elf ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf } FILES_${PN} += " \ - ${datadir}/hello_baremetal_${MACHINE}.bin \ - ${datadir}/hello_baremetal_${MACHINE}.elf \ + ${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin \ + ${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf \ " - -python do_rootfs(){ - from oe.utils import execute_pre_post_process - from pathlib import Path - - # Write empty manifest testdate file - deploy_dir = d.getVar('DEPLOYDIR') - link_name = d.getVar('IMAGE_LINK_NAME') - manifest_name = d.getVar('IMAGE_MANIFEST') - - Path(manifest_name).touch() - if os.path.exists(manifest_name) and link_name: - manifest_link = deploy_dir + "/" + link_name + ".manifest" - if os.path.lexists(manifest_link): - os.remove(manifest_link) - os.symlink(os.path.basename(manifest_name), manifest_link) - execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND')) -} - -# QEMU generic FreeRTOS parameters -QB_DEFAULT_KERNEL = "${IMAGE_LINK_NAME}.bin" -QB_MEM = "-m 256" -QB_OPT_APPEND = "-nographic" -QB_DEFAULT_FSTYPE = "bin" -QB_DTB = "" - -# This next part is necessary to trick the build system into thinking -# its building an image recipe so it generates the qemuboot.conf -addtask do_deploy after do_write_qemuboot_conf before do_build -addtask do_rootfs before do_deploy after do_install -addtask do_image after do_rootfs before do_build -inherit qemuboot - -# Based on image.bbclass to make sure we build qemu -python(){ - # do_addto_recipe_sysroot doesnt exist for all recipes, but we need it to have - # /usr/bin on recipe-sysroot (qemu) populated - def extraimage_getdepends(task): - deps = "" - for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split(): - # Make sure we only add it for qemu - if 'qemu' in dep: - 
deps += " %s:%s" % (dep, task) - return deps - d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot')) - d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot')) -} diff --git a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend index 720744200..626748541 100644 --- a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend +++ b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend @@ -7,17 +7,17 @@ KMACHINE_genericx86 ?= "common-pc" KMACHINE_genericx86-64 ?= "common-pc-64" KMACHINE_beaglebone-yocto ?= "beaglebone" -SRCREV_machine_genericx86 ?= "29f44c85c379c38f15e544828e7e77b3c008f378" -SRCREV_machine_genericx86-64 ?= "29f44c85c379c38f15e544828e7e77b3c008f378" -SRCREV_machine_edgerouter ?= "f4d7dbafb103e4f782323017c239c548871c1567" -SRCREV_machine_beaglebone-yocto ?= "f4d7dbafb103e4f782323017c239c548871c1567" +SRCREV_machine_genericx86 ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" +SRCREV_machine_genericx86-64 ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" +SRCREV_machine_edgerouter ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" +SRCREV_machine_beaglebone-yocto ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" COMPATIBLE_MACHINE_genericx86 = "genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto" -LINUX_VERSION_genericx86 = "5.4.40" -LINUX_VERSION_genericx86-64 = "5.4.40" -LINUX_VERSION_edgerouter = "5.4.20" -LINUX_VERSION_beaglebone-yocto = "5.4.20" +LINUX_VERSION_genericx86 = "5.4.49" +LINUX_VERSION_genericx86-64 = "5.4.49" +LINUX_VERSION_edgerouter = "5.4.49" +LINUX_VERSION_beaglebone-yocto = "5.4.49" diff --git a/poky/meta/classes/archiver.bbclass b/poky/meta/classes/archiver.bbclass index e221fff69..aff1f9dbb 100644 --- a/poky/meta/classes/archiver.bbclass +++ b/poky/meta/classes/archiver.bbclass @@ -582,8 +582,8 @@ do_deploy_archives[sstate-outputdirs] = "${DEPLOY_DIR_SRC}" addtask do_deploy_archives_setscene addtask do_ar_original after do_unpack -addtask do_unpack_and_patch after do_patch -addtask do_ar_patched after do_unpack_and_patch before do_preconfigure do_configure +addtask do_unpack_and_patch after do_patch do_preconfigure +addtask do_ar_patched after do_unpack_and_patch addtask do_ar_configured after do_unpack_and_patch addtask do_ar_mirror after do_fetch addtask do_dumpdata diff --git a/poky/meta/classes/baremetal-image.bbclass b/poky/meta/classes/baremetal-image.bbclass new file mode 100644 index 000000000..90d58f261 --- /dev/null +++ b/poky/meta/classes/baremetal-image.bbclass @@ -0,0 +1,99 @@ +# Baremetal image class +# +# This class is meant to be inherited by recipes for baremetal/RTOS applications +# It contains code that would be used by all of them, every recipe just needs to +# override certain variables. +# +# For scalability purposes, code within this class focuses on the "image" wiring +# to satisfy the OpenEmbedded image creation and testing infrastructure. +# +# See meta-skeleton for a working example. + + +# Toolchain should be baremetal or newlib based. 
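
The do_rootfs logic moving out of the helloworld recipe above (and re-homed in the new baremetal-image class) keeps the image test infrastructure happy by touching an empty manifest and pointing a "<IMAGE_LINK_NAME>.manifest" symlink at it. A small sketch of just that piece, with hypothetical directory and file names standing in for IMGDEPLOYDIR, IMAGE_LINK_NAME and IMAGE_MANIFEST:

    import os
    import tempfile
    from pathlib import Path

    def write_empty_manifest(deploy_dir, link_name, manifest_name):
        # Touch an empty manifest and (re)point <link_name>.manifest at it,
        # which is all the testing infrastructure needs from a baremetal image.
        Path(manifest_name).touch()
        if os.path.exists(manifest_name) and link_name:
            manifest_link = os.path.join(deploy_dir, link_name + ".manifest")
            if os.path.lexists(manifest_link):
                os.remove(manifest_link)
            os.symlink(os.path.basename(manifest_name), manifest_link)

    # Hypothetical paths for illustration only.
    deploy_dir = tempfile.mkdtemp()
    write_empty_manifest(deploy_dir,
                         "baremetal-helloworld-image-qemuarm",
                         os.path.join(deploy_dir, "baremetal-helloworld-image-qemuarm-20200605.manifest"))
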
+# TCLIBC="baremetal" or TCLIBC="newlib" +COMPATIBLE_HOST_libc-musl_class-target = "null" +COMPATIBLE_HOST_libc-glibc_class-target = "null" + + +inherit rootfs-postcommands + +# Set some defaults, but these should be overriden by each recipe if required +IMGDEPLOYDIR ?= "${WORKDIR}/deploy-${PN}-image-complete" +BAREMETAL_BINNAME ?= "hello_baremetal_${MACHINE}" +IMAGE_LINK_NAME ?= "baremetal-helloworld-image-${MACHINE}" +IMAGE_NAME_SUFFIX ?= "" + +do_rootfs[dirs] = "${IMGDEPLOYDIR} ${DEPLOY_DIR_IMAGE}" + +do_image(){ + install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.bin ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.bin + install ${D}/${base_libdir}/firmware/${BAREMETAL_BINNAME}.elf ${IMGDEPLOYDIR}/${IMAGE_LINK_NAME}.elf +} + +do_image_complete(){ + : +} + +python do_rootfs(){ + from oe.utils import execute_pre_post_process + from pathlib import Path + + # Write empty manifest file to satisfy test infrastructure + deploy_dir = d.getVar('IMGDEPLOYDIR') + link_name = d.getVar('IMAGE_LINK_NAME') + manifest_name = d.getVar('IMAGE_MANIFEST') + + Path(manifest_name).touch() + if os.path.exists(manifest_name) and link_name: + manifest_link = deploy_dir + "/" + link_name + ".manifest" + if os.path.lexists(manifest_link): + os.remove(manifest_link) + os.symlink(os.path.basename(manifest_name), manifest_link) + execute_pre_post_process(d, d.getVar('ROOTFS_POSTPROCESS_COMMAND')) +} + + +# Assure binaries, manifest and qemubootconf are populated on DEPLOY_DIR_IMAGE +do_image_complete[dirs] = "${TOPDIR}" +do_image_complete[umask] = "022" +SSTATETASKS += "do_image_complete" +SSTATE_SKIP_CREATION_task-image-complete = '1' +do_image_complete[sstate-inputdirs] = "${IMGDEPLOYDIR}" +do_image_complete[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" +do_image_complete[stamp-extra-info] = "${MACHINE_ARCH}" +addtask do_image_complete after do_image before do_build + +python do_image_complete_setscene () { + sstate_setscene(d) +} +addtask do_image_complete_setscene + +# QEMU generic Baremetal/RTOS parameters +QB_DEFAULT_KERNEL ?= "${IMAGE_LINK_NAME}.bin" +QB_MEM ?= "-m 256" +QB_DEFAULT_FSTYPE ?= "bin" +QB_DTB ?= "" +QB_OPT_APPEND = "-nographic" + +# This next part is necessary to trick the build system into thinking +# its building an image recipe so it generates the qemuboot.conf +addtask do_rootfs before do_image after do_install +addtask do_image after do_rootfs before do_image_complete +addtask do_image_complete after do_image before do_build +inherit qemuboot + +# Based on image.bbclass to make sure we build qemu +python(){ + # do_addto_recipe_sysroot doesnt exist for all recipes, but we need it to have + # /usr/bin on recipe-sysroot (qemu) populated + def extraimage_getdepends(task): + deps = "" + for dep in (d.getVar('EXTRA_IMAGEDEPENDS') or "").split(): + # Make sure we only add it for qemu + if 'qemu' in dep: + deps += " %s:%s" % (dep, task) + return deps + d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_addto_recipe_sysroot')) + d.appendVarFlag('do_image', 'depends', extraimage_getdepends('do_populate_sysroot')) +} diff --git a/poky/meta/classes/deploy.bbclass b/poky/meta/classes/deploy.bbclass index 6d5290878..737c26122 100644 --- a/poky/meta/classes/deploy.bbclass +++ b/poky/meta/classes/deploy.bbclass @@ -8,4 +8,5 @@ python do_deploy_setscene () { } addtask do_deploy_setscene do_deploy[dirs] = "${DEPLOYDIR} ${B}" +do_deploy[cleandirs] = "${DEPLOYDIR}" do_deploy[stamp-extra-info] = "${MACHINE_ARCH}" diff --git a/poky/meta/classes/gtk-icon-cache.bbclass 
b/poky/meta/classes/gtk-icon-cache.bbclass index 91cb4ad40..dd394af27 100644 --- a/poky/meta/classes/gtk-icon-cache.bbclass +++ b/poky/meta/classes/gtk-icon-cache.bbclass @@ -1,6 +1,10 @@ FILES_${PN} += "${datadir}/icons/hicolor" -DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} gtk+3-native" +DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \ + ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \ + ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \ + gtk+3-native \ +" PACKAGE_WRITE_DEPS += "gtk+3-native gdk-pixbuf-native" @@ -48,9 +52,18 @@ python populate_packages_append () { bb.note("adding hicolor-icon-theme dependency to %s" % pkg) rdepends = ' ' + d.getVar('MLPREFIX', False) + "hicolor-icon-theme" d.appendVar('RDEPENDS_%s' % pkg, rdepends) - + + #gtk_icon_cache_postinst depend on gdk-pixbuf and gtk+3 + bb.note("adding gdk-pixbuf dependency to %s" % pkg) + rdepends = ' ' + d.getVar('MLPREFIX', False) + "gdk-pixbuf" + d.appendVar('RDEPENDS_%s' % pkg, rdepends) + + bb.note("adding gtk+3 dependency to %s" % pkg) + rdepends = ' ' + d.getVar('MLPREFIX', False) + "gtk+3" + d.appendVar('RDEPENDS_%s' % pkg, rdepends) + bb.note("adding gtk-icon-cache postinst and postrm scripts to %s" % pkg) - + postinst = d.getVar('pkg_postinst_%s' % pkg) if not postinst: postinst = '#!/bin/sh\n' diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass index 5bc627066..54a1a1627 100644 --- a/poky/meta/classes/kernel-yocto.bbclass +++ b/poky/meta/classes/kernel-yocto.bbclass @@ -131,7 +131,7 @@ do_kernel_metadata() { else cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig fi - sccs="${WORKDIR}/defconfig" + in_tree_defconfig="${WORKDIR}/defconfig" else bbfatal "A KBUILD_DEFCONFIG '${KBUILD_DEFCONFIG}' was specified, but not present in the source tree" fi @@ -153,14 +153,25 @@ do_kernel_metadata() { patches="${@" ".join(find_patches(d,''))}" feat_dirs="${@" ".join(find_kernel_feature_dirs(d))}" - # a quick check to make sure we don't have duplicate defconfigs - # If there's a defconfig in the SRC_URI, did we also have one from - # the KBUILD_DEFCONFIG processing above ? - if [ -n "$sccs" ]; then - # we did have a defconfig from above. remove any that might be in the src_uri - sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '{ if ($0!="defconfig") { print $0 } }' RS=' ') + # a quick check to make sure we don't have duplicate defconfigs If + # there's a defconfig in the SRC_URI, did we also have one from the + # KBUILD_DEFCONFIG processing above ? + src_uri_defconfig=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") != 0) { print $0 }' RS=' ') + # drop and defconfig's from the src_uri variable, we captured it just above here if it existed + sccs_from_src_uri=$(echo $sccs_from_src_uri | awk '(match($0, "defconfig") == 0) { print $0 }' RS=' ') + + if [ -n "$in_tree_defconfig" ]; then + sccs_defconfig=$in_tree_defconfig + if [ -n "$src_uri_defconfig" ]; then + bbwarn "[NOTE]: defconfig was supplied both via KBUILD_DEFCONFIG and SRC_URI. Dropping SRC_URI defconfig" + fi + else + # if we didn't have an in-tree one, make our defconfig the one + # from the src_uri. Note: there may not have been one from the + # src_uri, so this can be an empty variable. + sccs_defconfig=$src_uri_defconfig fi - sccs="$sccs $sccs_from_src_uri" + sccs="$sccs_from_src_uri" # check for feature directories/repos/branches that were part of the # SRC_URI. 
If they were supplied, we convert them into include directives @@ -187,11 +198,10 @@ do_kernel_metadata() { # expand kernel features into their full path equivalents bsp_definition=$(spp ${includes} --find -DKMACHINE=${KMACHINE} -DKTYPE=${LINUX_KERNEL_TYPE}) if [ -z "$bsp_definition" ]; then - echo "$sccs" | grep -q defconfig - if [ $? -ne 0 ]; then + if [ -z "$sccs_defconfig" ]; then bbfatal_log "Could not locate BSP definition for ${KMACHINE}/${LINUX_KERNEL_TYPE} and no defconfig was provided" fi - + else # if the bsp definition has "define KMETA_EXTERNAL_BSP t", # then we need to set a flag that will instruct the next # steps to use the BSP as both configuration and patches. @@ -206,7 +216,7 @@ do_kernel_metadata() { elements="`echo -n ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES}`" if [ -n "${elements}" ]; then echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition - scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} ${bsp_definition} ${sccs} ${patches} ${KERNEL_FEATURES} + scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches ${KERNEL_FEATURES} if [ $? -ne 0 ]; then bbfatal_log "Could not generate configuration queue for ${KMACHINE}." fi diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass index 20a0135fc..cf43a5d60 100644 --- a/poky/meta/classes/kernel.bbclass +++ b/poky/meta/classes/kernel.bbclass @@ -4,10 +4,12 @@ KERNEL_PACKAGE_NAME ??= "kernel" KERNEL_DEPLOYSUBDIR ??= "${@ "" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else d.getVar("KERNEL_PACKAGE_NAME") }" PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }" -DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native lzop-native bison-native" +DEPENDS += "virtual/${TARGET_PREFIX}binutils virtual/${TARGET_PREFIX}gcc kmod-native bc-native bison-native" +DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lzo", "lzop-native", "", d)}" +DEPENDS += "${@bb.utils.contains("INITRAMFS_FSTYPES", "cpio.lz4", "lz4-native", "", d)}" PACKAGE_WRITE_DEPS += "depmodwrapper-cross" -do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot" +do_deploy[depends] += "depmodwrapper-cross:do_populate_sysroot gzip-native:do_populate_sysroot" do_clean[depends] += "make-mod-scripts:do_clean" CVE_PRODUCT ?= "linux_kernel" @@ -94,6 +96,25 @@ python __anonymous () { d.appendVar('RDEPENDS_%s-image' % kname, ' %s-image-%s' % (kname, typelower)) d.setVar('PKG_%s-image-%s' % (kname,typelower), '%s-image-%s-${KERNEL_VERSION_PKG_NAME}' % (kname, typelower)) d.setVar('ALLOW_EMPTY_%s-image-%s' % (kname, typelower), '1') + d.setVar('pkg_postinst_%s-image-%s' % (kname,typelower), """set +e +if [ -n "$D" ]; then + ln -sf %s-${KERNEL_VERSION} $D/${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 +else + ln -sf %s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 + if [ $? -ne 0 ]; then + echo "Filesystem on ${KERNEL_IMAGEDEST}/ doesn't support symlinks, falling back to copied image (%s)." 
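
The kernel.bbclass hunk above stops installing the unversioned image symlink at do_install time and instead generates a pkg_postinst that links, or on filesystems such as a FAT /boot that cannot hold symlinks copies, the versioned image into ${KERNEL_IMAGEDEST}. The generated script is shell; the following is a loose Python rendering of the same symlink-or-copy fallback, exercised against a throwaway directory with a hypothetical kernel version:

    import os
    import shutil
    import tempfile

    def install_image_link(imagedest, image_type, kernel_version):
        """Point <imagedest>/<image_type> at the versioned kernel image,
        copying the file instead when the filesystem cannot hold symlinks."""
        target = "%s-%s" % (image_type, kernel_version)
        link = os.path.join(imagedest, image_type)
        if os.path.lexists(link):
            os.remove(link)
        try:
            os.symlink(target, link)
        except OSError:
            # Mirrors the shell fallback: keep a plain copy of the image.
            shutil.copyfile(os.path.join(imagedest, target), link)

    # Demonstrate against a temporary directory standing in for ${KERNEL_IMAGEDEST}.
    imagedest = tempfile.mkdtemp()
    open(os.path.join(imagedest, "zImage-5.4.49-yocto-standard"), "wb").close()
    install_image_link(imagedest, "zImage", "5.4.49-yocto-standard")
    print(os.readlink(os.path.join(imagedest, "zImage")))  # -> zImage-5.4.49-yocto-standard
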
+ install -m 0644 ${KERNEL_IMAGEDEST}/%s-${KERNEL_VERSION} ${KERNEL_IMAGEDEST}/%s + fi +fi +set -e +""" % (type, type, type, type, type, type, type)) + d.setVar('pkg_postrm_%s-image-%s' % (kname,typelower), """set +e +if [ -f "${KERNEL_IMAGEDEST}/%s" -o -L "${KERNEL_IMAGEDEST}/%s" ]; then + rm -f ${KERNEL_IMAGEDEST}/%s > /dev/null 2>&1 +fi +set -e +""" % (type, type, type)) + image = d.getVar('INITRAMFS_IMAGE') # If the INTIRAMFS_IMAGE is set but the INITRAMFS_IMAGE_BUNDLE is set to 0, @@ -210,7 +231,7 @@ copy_initramfs() { ;; *lz4) echo "lz4 decompressing image" - lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img + lz4 -df ${B}/usr/${INITRAMFS_IMAGE_NAME}.$img ${B}/usr/${INITRAMFS_IMAGE_NAME}.cpio break ;; *lzo) @@ -385,9 +406,6 @@ kernel_do_install() { install -d ${D}/boot for imageType in ${KERNEL_IMAGETYPES} ; do install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION} - if [ "${KERNEL_PACKAGE_NAME}" = "kernel" ]; then - ln -sf ${imageType}-${KERNEL_VERSION} ${D}/${KERNEL_IMAGEDEST}/${imageType} - fi done install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION} install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION} @@ -463,7 +481,7 @@ do_shared_workdir () { # Copy files required for module builds cp System.map $kerneldir/System.map-${KERNEL_VERSION} - cp Module.symvers $kerneldir/ + [ -e Module.symvers ] && cp Module.symvers $kerneldir/ cp .config $kerneldir/ mkdir -p $kerneldir/include/config cp include/config/kernel.release $kerneldir/include/config/kernel.release @@ -729,8 +747,6 @@ kernel_do_deploy() { done fi } -do_deploy[cleandirs] = "${DEPLOYDIR}" -do_deploy[dirs] = "${DEPLOYDIR} ${B}" do_deploy[prefuncs] += "package_get_auto_pr" addtask deploy after do_populate_sysroot do_packagedata diff --git a/poky/meta/classes/populate_sdk_ext.bbclass b/poky/meta/classes/populate_sdk_ext.bbclass index 9f26cfc13..fd0da16e7 100644 --- a/poky/meta/classes/populate_sdk_ext.bbclass +++ b/poky/meta/classes/populate_sdk_ext.bbclass @@ -676,7 +676,7 @@ sdk_ext_postinst() { # current working directory when first ran, nor will it set $1 when # sourcing a script. That is why this has to look so ugly. LOGFILE="$target_sdk_dir/preparing_build_system.log" - sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . $target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; } + sh -c ". buildtools/environment-setup* > $LOGFILE && cd $target_sdk_dir/`dirname ${oe_init_build_env_path}` && set $target_sdk_dir && . 
$target_sdk_dir/${oe_init_build_env_path} $target_sdk_dir >> $LOGFILE && python3 $target_sdk_dir/ext-sdk-prepare.py $LOGFILE '${SDK_INSTALL_TARGETS}'" || { echo "printf 'ERROR: this SDK was not fully installed and needs reinstalling\n'" >> $env_setup_script ; exit 1 ; } fi if [ -e $target_sdk_dir/ext-sdk-prepare.py ]; then rm $target_sdk_dir/ext-sdk-prepare.py diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass index 2f171836f..c43b9a982 100644 --- a/poky/meta/classes/rootfs-postcommands.bbclass +++ b/poky/meta/classes/rootfs-postcommands.bbclass @@ -308,12 +308,16 @@ rootfs_check_host_user_contaminated () { HOST_USER_UID="$(PSEUDO_UNLOAD=1 id -u)" HOST_USER_GID="$(PSEUDO_UNLOAD=1 id -g)" - find "${IMAGE_ROOTFS}" -wholename "${IMAGE_ROOTFS}/home" -prune \ - -user "$HOST_USER_UID" -o -group "$HOST_USER_GID" >"$contaminated" + find "${IMAGE_ROOTFS}" -path "${IMAGE_ROOTFS}/home" -prune -o \ + -user "$HOST_USER_UID" -print -o -group "$HOST_USER_GID" -print >"$contaminated" + + sed -e "s,${IMAGE_ROOTFS},," $contaminated | while read line; do + bbwarn "Path in the rootfs is owned by the same user or group as the user running bitbake:" $line `ls -lan ${IMAGE_ROOTFS}/$line` + done if [ -s "$contaminated" ]; then - echo "WARNING: Paths in the rootfs are owned by the same user or group as the user running bitbake. See the logfile for the specific paths." - cat "$contaminated" | sed "s,^, ," + bbwarn "/etc/passwd:" `cat ${IMAGE_ROOTFS}/etc/passwd` + bbwarn "/etc/group:" `cat ${IMAGE_ROOTFS}/etc/group` fi } diff --git a/poky/meta/classes/spdx.bbclass b/poky/meta/classes/spdx.bbclass deleted file mode 100644 index fb78e274a..000000000 --- a/poky/meta/classes/spdx.bbclass +++ /dev/null @@ -1,360 +0,0 @@ -# This class integrates real-time license scanning, generation of SPDX standard -# output and verifiying license info during the building process. -# It is a combination of efforts from the OE-Core, SPDX and Fossology projects. -# -# For more information on FOSSology: -# http://www.fossology.org -# -# For more information on FOSSologySPDX commandline: -# https://github.com/spdx-tools/fossology-spdx/wiki/Fossology-SPDX-Web-API -# -# For more information on SPDX: -# http://www.spdx.org -# - -# SPDX file will be output to the path which is defined as[SPDX_MANIFEST_DIR] -# in ./meta/conf/licenses.conf. - -SPDXSSTATEDIR = "${WORKDIR}/spdx_sstate_dir" - -# If ${S} isn't actually the top-level source directory, set SPDX_S to point at -# the real top-level directory. -SPDX_S ?= "${S}" - -python do_spdx () { - import os, sys - import json, shutil - - info = {} - info['workdir'] = d.getVar('WORKDIR') - info['sourcedir'] = d.getVar('SPDX_S') - info['pn'] = d.getVar('PN') - info['pv'] = d.getVar('PV') - info['spdx_version'] = d.getVar('SPDX_VERSION') - info['data_license'] = d.getVar('DATA_LICENSE') - - sstatedir = d.getVar('SPDXSSTATEDIR') - sstatefile = os.path.join(sstatedir, info['pn'] + info['pv'] + ".spdx") - - manifest_dir = d.getVar('SPDX_MANIFEST_DIR') - info['outfile'] = os.path.join(manifest_dir, info['pn'] + ".spdx" ) - - info['spdx_temp_dir'] = d.getVar('SPDX_TEMP_DIR') - info['tar_file'] = os.path.join(info['workdir'], info['pn'] + ".tar.gz" ) - - # Make sure important dirs exist - try: - bb.utils.mkdirhier(manifest_dir) - bb.utils.mkdirhier(sstatedir) - bb.utils.mkdirhier(info['spdx_temp_dir']) - except OSError as e: - bb.error("SPDX: Could not set up required directories: " + str(e)) - return - - ## get everything from cache. 
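
rootfs_check_host_user_contaminated above now prunes ${IMAGE_ROOTFS}/home and reports each offending path with a bbwarn instead of dumping the raw find output. The class does this with find under pseudo; a minimal Python sketch of the same ownership scan, assuming an already unpacked rootfs directory (the path in the usage line is hypothetical):

    import os

    def find_host_contaminated(rootfs, host_uid, host_gid, skip=("home",)):
        """Yield paths under 'rootfs' (skipping e.g. /home, as the class does)
        that are owned by the user or group running the build."""
        skip_dirs = {os.path.join(rootfs, s) for s in skip}
        for dirpath, dirnames, filenames in os.walk(rootfs):
            # Prune the skipped top-level directories in place.
            dirnames[:] = [d for d in dirnames
                           if os.path.join(dirpath, d) not in skip_dirs]
            for name in dirnames + filenames:
                path = os.path.join(dirpath, name)
                st = os.lstat(path)
                if st.st_uid == host_uid or st.st_gid == host_gid:
                    yield os.path.relpath(path, rootfs)

    for path in find_host_contaminated("/tmp/rootfs", os.getuid(), os.getgid()):
        print("Path in the rootfs is owned by the build user or group:", path)
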
use it to decide if - ## something needs to be rerun - cur_ver_code = get_ver_code(info['sourcedir']) - cache_cur = False - if os.path.exists(sstatefile): - ## cache for this package exists. read it in - cached_spdx = get_cached_spdx(sstatefile) - - if cached_spdx['PackageVerificationCode'] == cur_ver_code: - bb.warn("SPDX: Verification code for " + info['pn'] - + "is same as cache's. do nothing") - cache_cur = True - else: - local_file_info = setup_foss_scan(info, True, cached_spdx['Files']) - else: - local_file_info = setup_foss_scan(info, False, None) - - if cache_cur: - spdx_file_info = cached_spdx['Files'] - foss_package_info = cached_spdx['Package'] - foss_license_info = cached_spdx['Licenses'] - else: - ## setup fossology command - foss_server = d.getVar('FOSS_SERVER') - foss_flags = d.getVar('FOSS_WGET_FLAGS') - foss_full_spdx = d.getVar('FOSS_FULL_SPDX') == "true" or False - foss_command = "wget %s --post-file=%s %s"\ - % (foss_flags, info['tar_file'], foss_server) - - foss_result = run_fossology(foss_command, foss_full_spdx) - if foss_result is not None: - (foss_package_info, foss_file_info, foss_license_info) = foss_result - spdx_file_info = create_spdx_doc(local_file_info, foss_file_info) - ## write to cache - write_cached_spdx(sstatefile, cur_ver_code, foss_package_info, - spdx_file_info, foss_license_info) - else: - bb.error("SPDX: Could not communicate with FOSSology server. Command was: " + foss_command) - return - - ## Get document and package level information - spdx_header_info = get_header_info(info, cur_ver_code, foss_package_info) - - ## CREATE MANIFEST - create_manifest(info, spdx_header_info, spdx_file_info, foss_license_info) - - ## clean up the temp stuff - shutil.rmtree(info['spdx_temp_dir'], ignore_errors=True) - if os.path.exists(info['tar_file']): - remove_file(info['tar_file']) -} -addtask spdx after do_patch before do_configure - -def create_manifest(info, header, files, licenses): - import codecs - with codecs.open(info['outfile'], mode='w', encoding='utf-8') as f: - # Write header - f.write(header + '\n') - - # Write file data - for chksum, block in files.iteritems(): - f.write("FileName: " + block['FileName'] + '\n') - for key, value in block.iteritems(): - if not key == 'FileName': - f.write(key + ": " + value + '\n') - f.write('\n') - - # Write license data - for id, block in licenses.iteritems(): - f.write("LicenseID: " + id + '\n') - for key, value in block.iteritems(): - f.write(key + ": " + value + '\n') - f.write('\n') - -def get_cached_spdx(sstatefile): - import json - import codecs - cached_spdx_info = {} - with codecs.open(sstatefile, mode='r', encoding='utf-8') as f: - try: - cached_spdx_info = json.load(f) - except ValueError as e: - cached_spdx_info = None - return cached_spdx_info - -def write_cached_spdx(sstatefile, ver_code, package_info, files, license_info): - import json - import codecs - spdx_doc = {} - spdx_doc['PackageVerificationCode'] = ver_code - spdx_doc['Files'] = {} - spdx_doc['Files'] = files - spdx_doc['Package'] = {} - spdx_doc['Package'] = package_info - spdx_doc['Licenses'] = {} - spdx_doc['Licenses'] = license_info - with codecs.open(sstatefile, mode='w', encoding='utf-8') as f: - f.write(json.dumps(spdx_doc)) - -def setup_foss_scan(info, cache, cached_files): - import errno, shutil - import tarfile - file_info = {} - cache_dict = {} - - for f_dir, f in list_files(info['sourcedir']): - full_path = os.path.join(f_dir, f) - abs_path = os.path.join(info['sourcedir'], full_path) - dest_dir = 
os.path.join(info['spdx_temp_dir'], f_dir) - dest_path = os.path.join(info['spdx_temp_dir'], full_path) - - checksum = hash_file(abs_path) - if not checksum is None: - file_info[checksum] = {} - ## retain cache information if it exists - if cache and checksum in cached_files: - file_info[checksum] = cached_files[checksum] - ## have the file included in what's sent to the FOSSology server - else: - file_info[checksum]['FileName'] = full_path - try: - bb.utils.mkdirhier(dest_dir) - shutil.copyfile(abs_path, dest_path) - except OSError as e: - bb.warn("SPDX: mkdirhier failed: " + str(e)) - except shutil.Error as e: - bb.warn("SPDX: copyfile failed: " + str(e)) - except IOError as e: - bb.warn("SPDX: copyfile failed: " + str(e)) - else: - bb.warn("SPDX: Could not get checksum for file: " + f) - - with tarfile.open(info['tar_file'], "w:gz") as tar: - tar.add(info['spdx_temp_dir'], arcname=os.path.basename(info['spdx_temp_dir'])) - - return file_info - -def remove_file(file_name): - try: - os.remove(file_name) - except OSError as e: - pass - -def list_files(dir): - for root, subFolders, files in os.walk(dir): - for f in files: - rel_root = os.path.relpath(root, dir) - yield rel_root, f - return - -def hash_file(file_name): - from bb.utils import sha1_file - return sha1_file(file_name) - -def hash_string(data): - import hashlib - sha1 = hashlib.sha1() - sha1.update(data.encode('utf-8')) - return sha1.hexdigest() - -def run_fossology(foss_command, full_spdx): - import string, re - import subprocess - - try: - foss_output = subprocess.check_output(foss_command.split(), - stderr=subprocess.STDOUT).decode('utf-8') - except subprocess.CalledProcessError as e: - return None - - foss_output = foss_output.replace('\r', '') - - # Package info - package_info = {} - if full_spdx: - # All mandatory, only one occurrence - package_info['PackageCopyrightText'] = re.findall('PackageCopyrightText: (.*?)', foss_output, re.S)[0] - package_info['PackageLicenseDeclared'] = re.findall('PackageLicenseDeclared: (.*)', foss_output)[0] - package_info['PackageLicenseConcluded'] = re.findall('PackageLicenseConcluded: (.*)', foss_output)[0] - # These may be more than one - package_info['PackageLicenseInfoFromFiles'] = re.findall('PackageLicenseInfoFromFiles: (.*)', foss_output) - else: - DEFAULT = "NOASSERTION" - package_info['PackageCopyrightText'] = "" + DEFAULT + "" - package_info['PackageLicenseDeclared'] = DEFAULT - package_info['PackageLicenseConcluded'] = DEFAULT - package_info['PackageLicenseInfoFromFiles'] = [] - - # File info - file_info = {} - records = [] - # FileName is also in PackageFileName, so we match on FileType as well. 
- records = re.findall('FileName:.*?FileType:.*?', foss_output, re.S) - for rec in records: - chksum = re.findall('FileChecksum: SHA1: (.*)\n', rec)[0] - file_info[chksum] = {} - file_info[chksum]['FileCopyrightText'] = re.findall('FileCopyrightText: ' - + '(.*?)', rec, re.S )[0] - fields = ['FileName', 'FileType', 'LicenseConcluded', 'LicenseInfoInFile'] - for field in fields: - file_info[chksum][field] = re.findall(field + ': (.*)', rec)[0] - - # Licenses - license_info = {} - licenses = [] - licenses = re.findall('LicenseID:.*?LicenseName:.*?\n', foss_output, re.S) - for lic in licenses: - license_id = re.findall('LicenseID: (.*)\n', lic)[0] - license_info[license_id] = {} - license_info[license_id]['ExtractedText'] = re.findall('ExtractedText: (.*?)', lic, re.S)[0] - license_info[license_id]['LicenseName'] = re.findall('LicenseName: (.*)', lic)[0] - - return (package_info, file_info, license_info) - -def create_spdx_doc(file_info, scanned_files): - import json - ## push foss changes back into cache - for chksum, lic_info in scanned_files.iteritems(): - if chksum in file_info: - file_info[chksum]['FileType'] = lic_info['FileType'] - file_info[chksum]['FileChecksum: SHA1'] = chksum - file_info[chksum]['LicenseInfoInFile'] = lic_info['LicenseInfoInFile'] - file_info[chksum]['LicenseConcluded'] = lic_info['LicenseConcluded'] - file_info[chksum]['FileCopyrightText'] = lic_info['FileCopyrightText'] - else: - bb.warn("SPDX: " + lic_info['FileName'] + " : " + chksum - + " : is not in the local file info: " - + json.dumps(lic_info, indent=1)) - return file_info - -def get_ver_code(dirname): - chksums = [] - for f_dir, f in list_files(dirname): - path = os.path.join(dirname, f_dir, f) - hash = hash_file(path) - if not hash is None: - chksums.append(hash) - else: - bb.warn("SPDX: Could not hash file: " + path) - ver_code_string = ''.join(chksums).lower() - ver_code = hash_string(ver_code_string) - return ver_code - -def get_header_info(info, spdx_verification_code, package_info): - """ - Put together the header SPDX information. - Eventually this needs to become a lot less - of a hardcoded thing. - """ - from datetime import datetime - import os - head = [] - DEFAULT = "NOASSERTION" - - package_checksum = hash_file(info['tar_file']) - if package_checksum is None: - package_checksum = DEFAULT - - ## document level information - head.append("## SPDX Document Information") - head.append("SPDXVersion: " + info['spdx_version']) - head.append("DataLicense: " + info['data_license']) - head.append("DocumentComment: SPDX for " - + info['pn'] + " version " + info['pv'] + "") - head.append("") - - ## Creator information - ## Note that this does not give time in UTC. - now = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ') - head.append("## Creation Information") - ## Tools are supposed to have a version, but FOSSology+SPDX provides none. 
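
The do_spdx machinery being removed above drove FOSSology over wget and scraped its tag/value output with regular expressions. Purely to illustrate that parsing style, here is a tiny sketch against made-up output in the same shape; the tag names come from the removed code, the values are invented:

    import re

    foss_output = """PackageLicenseDeclared: MIT
    PackageLicenseConcluded: MIT
    PackageLicenseInfoFromFiles: MIT
    PackageLicenseInfoFromFiles: BSD-3-Clause
    """

    package_info = {
        # Single-occurrence tags: take the first match.
        'PackageLicenseDeclared': re.findall(r'PackageLicenseDeclared: (.*)', foss_output)[0],
        'PackageLicenseConcluded': re.findall(r'PackageLicenseConcluded: (.*)', foss_output)[0],
        # Repeatable tag: keep every match.
        'PackageLicenseInfoFromFiles': re.findall(r'PackageLicenseInfoFromFiles: (.*)', foss_output),
    }
    print(package_info)
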
- head.append("Creator: Tool: FOSSology+SPDX") - head.append("Created: " + now) - head.append("CreatorComment: UNO") - head.append("") - - ## package level information - head.append("## Package Information") - head.append("PackageName: " + info['pn']) - head.append("PackageVersion: " + info['pv']) - head.append("PackageFileName: " + os.path.basename(info['tar_file'])) - head.append("PackageSupplier: Person:" + DEFAULT) - head.append("PackageDownloadLocation: " + DEFAULT) - head.append("PackageSummary: ") - head.append("PackageOriginator: Person:" + DEFAULT) - head.append("PackageChecksum: SHA1: " + package_checksum) - head.append("PackageVerificationCode: " + spdx_verification_code) - head.append("PackageDescription: " + info['pn'] - + " version " + info['pv'] + "") - head.append("") - head.append("PackageCopyrightText: " - + package_info['PackageCopyrightText']) - head.append("") - head.append("PackageLicenseDeclared: " - + package_info['PackageLicenseDeclared']) - head.append("PackageLicenseConcluded: " - + package_info['PackageLicenseConcluded']) - - for licref in package_info['PackageLicenseInfoFromFiles']: - head.append("PackageLicenseInfoFromFiles: " + licref) - head.append("") - - ## header for file level - head.append("## File Information") - head.append("") - - return '\n'.join(head) diff --git a/poky/meta/classes/uboot-sign.bbclass b/poky/meta/classes/uboot-sign.bbclass index 982ed46d0..713196df4 100644 --- a/poky/meta/classes/uboot-sign.bbclass +++ b/poky/meta/classes/uboot-sign.bbclass @@ -117,15 +117,16 @@ do_install_append() { fi } +do_deploy_prepend_pn-${UBOOT_PN}() { + if [ "${UBOOT_SIGN_ENABLE}" = "1" -a -n "${UBOOT_DTB_BINARY}" ]; then + concat_dtb + fi +} + python () { if d.getVar('UBOOT_SIGN_ENABLE') == '1' and d.getVar('PN') == d.getVar('UBOOT_PN') and d.getVar('UBOOT_DTB_BINARY'): kernel_pn = d.getVar('PREFERRED_PROVIDER_virtual/kernel') # Make "bitbake u-boot -cdeploy" deploys the signed u-boot.dtb d.appendVarFlag('do_deploy', 'depends', ' %s:do_deploy' % kernel_pn) - - # kernerl's do_deploy is a litle special, so we can't use - # do_deploy_append, otherwise it would override - # kernel_do_deploy. - d.appendVarFlag('do_deploy', 'prefuncs', ' concat_dtb') } diff --git a/poky/meta/conf/bitbake.conf b/poky/meta/conf/bitbake.conf index 89717a713..353caacef 100644 --- a/poky/meta/conf/bitbake.conf +++ b/poky/meta/conf/bitbake.conf @@ -252,7 +252,7 @@ This package contains symbolic links, header files, and \ related items necessary for software development." SUMMARY_${PN}-staticdev ?= "${SUMMARY} - Development files (Static Libraries)" -DESCRIPTION_${PN}-staticdev?= "${DESCRIPTION} \ +DESCRIPTION_${PN}-staticdev ?= "${DESCRIPTION} \ This package contains static libraries for software development." SUMMARY_${PN}-doc ?= "${SUMMARY} - Documentation files" @@ -267,7 +267,7 @@ HOMEPAGE = "" # Ensure that -dev packages recommend the corresponding -dev packages of their # deps, and the same for -dbg. 
-DEPCHAIN_PRE = "" +DEPCHAIN_PRE = "" DEPCHAIN_POST = "-dev -dbg" DEPENDS = "" diff --git a/poky/meta/conf/machine/include/riscv/qemuriscv.inc b/poky/meta/conf/machine/include/riscv/qemuriscv.inc index a42346f36..759c8a196 100644 --- a/poky/meta/conf/machine/include/riscv/qemuriscv.inc +++ b/poky/meta/conf/machine/include/riscv/qemuriscv.inc @@ -18,7 +18,7 @@ WKS_FILE ?= "qemuriscv.wks" MACHINE_EXTRA_RRECOMMENDS += " kernel-modules" EXTRA_IMAGEDEPENDS += "opensbi" -RISCV_SBI_PLAT ?= "qemu/virt" +RISCV_SBI_PLAT ?= "generic" RISCV_SBI_PAYLOAD ?= "${KERNEL_IMAGETYPE}-${MACHINE}.bin" UBOOT_ENTRYPOINT_riscv32 = "0x80400000" diff --git a/poky/meta/files/common-licenses/BSD-2-Clause-Patent b/poky/meta/files/common-licenses/BSD-2-Clause-Patent new file mode 100644 index 000000000..1184c0295 --- /dev/null +++ b/poky/meta/files/common-licenses/BSD-2-Clause-Patent @@ -0,0 +1,47 @@ +Copyright (c) + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +Subject to the terms and conditions of this license, each copyright holder +and contributor hereby grants to those receiving rights under this license +a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable +(except for failure to satisfy the conditions of this license) patent license +to make, have made, use, offer to sell, sell, import, and otherwise transfer +this software, where such license applies only to those patent claims, already +acquired or hereafter acquired, licensable by such copyright holder or contributor +that are necessarily infringed by: + +(a) their Contribution(s) (the licensed copyrights of copyright holders and +non-copyrightable additions of contributors, in source or binary form) alone; +or + +(b) combination of their Contribution(s) with the work of authorship to which +such Contribution(s) was added by such copyright holder or contributor, if, +at the time the Contribution is added, such addition causes such combination +to be necessarily infringed. The patent license shall not apply to any other +combinations which include the Contribution. + +Except as expressly stated above, no rights or licenses from any copyright +holder or contributor is granted under this license, whether expressly, by +implication, estoppel or otherwise. + +DISCLAIMER + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE +USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/poky/meta/files/common-licenses/BSD-2-Clause-Patent.txt b/poky/meta/files/common-licenses/BSD-2-Clause-Patent.txt deleted file mode 100644 index 1184c0295..000000000 --- a/poky/meta/files/common-licenses/BSD-2-Clause-Patent.txt +++ /dev/null @@ -1,47 +0,0 @@ -Copyright (c) - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -Subject to the terms and conditions of this license, each copyright holder -and contributor hereby grants to those receiving rights under this license -a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable -(except for failure to satisfy the conditions of this license) patent license -to make, have made, use, offer to sell, sell, import, and otherwise transfer -this software, where such license applies only to those patent claims, already -acquired or hereafter acquired, licensable by such copyright holder or contributor -that are necessarily infringed by: - -(a) their Contribution(s) (the licensed copyrights of copyright holders and -non-copyrightable additions of contributors, in source or binary form) alone; -or - -(b) combination of their Contribution(s) with the work of authorship to which -such Contribution(s) was added by such copyright holder or contributor, if, -at the time the Contribution is added, such addition causes such combination -to be necessarily infringed. The patent license shall not apply to any other -combinations which include the Contribution. - -Except as expressly stated above, no rights or licenses from any copyright -holder or contributor is granted under this license, whether expressly, by -implication, estoppel or otherwise. - -DISCLAIMER - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/poky/meta/lib/oe/rootfs.py b/poky/meta/lib/oe/rootfs.py index a0ac33ada..0e05f1f75 100644 --- a/poky/meta/lib/oe/rootfs.py +++ b/poky/meta/lib/oe/rootfs.py @@ -297,9 +297,9 @@ class Rootfs(object, metaclass=ABCMeta): def _run_ldconfig(self): if self.d.getVar('LDCONFIGDEPEND'): - bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v") + bb.note("Executing: ldconfig -r " + self.image_rootfs + " -c new -v -X") self._exec_shell_cmd(['ldconfig', '-r', self.image_rootfs, '-c', - 'new', '-v']) + 'new', '-v', '-X']) def _check_for_kernel_modules(self, modules_dir): for root, dirs, files in os.walk(modules_dir, topdown=True): diff --git a/poky/meta/lib/oeqa/core/loader.py b/poky/meta/lib/oeqa/core/loader.py index 0d7970d49..11978213b 100644 --- a/poky/meta/lib/oeqa/core/loader.py +++ b/poky/meta/lib/oeqa/core/loader.py @@ -46,7 +46,7 @@ def _built_modules_dict(modules): for module in modules: # Assumption: package and module names do not contain upper case # characters, whereas class names do - m = re.match(r'^(\w+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII) + m = re.match(r'^([0-9a-z_.]+)(?:\.(\w[^.]*)(?:\.([^.]+))?)?$', module, flags=re.ASCII) if not m: continue diff --git a/poky/meta/lib/oeqa/core/utils/concurrencytest.py b/poky/meta/lib/oeqa/core/utils/concurrencytest.py index 01c39830f..b2eb68fb0 100644 --- a/poky/meta/lib/oeqa/core/utils/concurrencytest.py +++ b/poky/meta/lib/oeqa/core/utils/concurrencytest.py @@ -183,10 +183,11 @@ class dummybuf(object): # class ConcurrentTestSuite(unittest.TestSuite): - def __init__(self, suite, processes, setupfunc): + def __init__(self, suite, processes, setupfunc, removefunc): super(ConcurrentTestSuite, self).__init__([suite]) self.processes = processes self.setupfunc = setupfunc + self.removefunc = removefunc def run(self, result): tests, totaltests = fork_for_tests(self.processes, self) @@ -237,22 +238,6 @@ class ConcurrentTestSuite(unittest.TestSuite): finally: queue.put(test) -def removebuilddir(d): - delay = 5 - while delay and os.path.exists(d + "/bitbake.lock"): - time.sleep(1) - delay = delay - 1 - # Deleting these directories takes a lot of time, use autobuilder - # clobberdir if its available - clobberdir = os.path.expanduser("~/yocto-autobuilder-helper/janitor/clobberdir") - if os.path.exists(clobberdir): - try: - subprocess.check_call([clobberdir, d]) - return - except subprocess.CalledProcessError: - pass - bb.utils.prunedir(d, ionice=True) - def fork_for_tests(concurrency_num, suite): result = [] if 'BUILDDIR' in os.environ: @@ -297,7 +282,7 @@ def fork_for_tests(concurrency_num, suite): if ourpid != os.getpid(): os._exit(0) if newbuilddir and unittest_result.wasSuccessful(): - removebuilddir(newbuilddir) + suite.removefunc(newbuilddir) except: # Don't do anything with process children if ourpid != os.getpid(): @@ -313,7 +298,7 @@ def fork_for_tests(concurrency_num, suite): sys.stderr.write(traceback.format_exc()) finally: if newbuilddir: - removebuilddir(newbuilddir) + suite.removefunc(newbuilddir) stream.flush() os._exit(1) stream.flush() diff --git a/poky/meta/lib/oeqa/runtime/cases/logrotate.py b/poky/meta/lib/oeqa/runtime/cases/logrotate.py index 3938e9199..a4efcd07c 100644 --- a/poky/meta/lib/oeqa/runtime/cases/logrotate.py +++ b/poky/meta/lib/oeqa/runtime/cases/logrotate.py @@ -37,10 +37,6 @@ class LogrotateTest(OERuntimeTestCase): msg = ('Could not write to /tmp/logrotate-test.conf') self.assertEqual(status, 0, msg = msg) - status, output = self.target.run('echo "/var/log/logrotate_test {\\n 
missingok \\n monthly \\n rotate 1" > /etc/logrotate.d/logrotate_test') - msg = ('Could not write to /etc/logrotate.d/logrotate_test') - self.assertEqual(status, 0, msg = msg) - # If logrotate fails to rotate the log, view the verbose output of logrotate to see what prevented it _, logrotate_output = self.target.run('logrotate -vf /tmp/logrotate-test.conf') status, _ = self.target.run('find $HOME/logrotate_dir -type f | grep wtmp.1') diff --git a/poky/meta/lib/oeqa/selftest/cases/devtool.py b/poky/meta/lib/oeqa/selftest/cases/devtool.py index 7d8f89504..b383ed9c5 100644 --- a/poky/meta/lib/oeqa/selftest/cases/devtool.py +++ b/poky/meta/lib/oeqa/selftest/cases/devtool.py @@ -1500,7 +1500,11 @@ class DevtoolUpgradeTests(DevtoolBase): dstdir = os.path.join(dstdir, p) if not os.path.exists(dstdir): os.makedirs(dstdir) - self.track_for_cleanup(dstdir) + if p == "lib": + # Can race with other tests + self.add_command_to_tearDown('rmdir --ignore-fail-on-non-empty %s' % dstdir) + else: + self.track_for_cleanup(dstdir) dstfile = os.path.join(dstdir, os.path.basename(srcfile)) if srcfile != dstfile: shutil.copy(srcfile, dstfile) diff --git a/poky/meta/lib/oeqa/selftest/cases/oescripts.py b/poky/meta/lib/oeqa/selftest/cases/oescripts.py index 2f18d8f29..726daff7c 100644 --- a/poky/meta/lib/oeqa/selftest/cases/oescripts.py +++ b/poky/meta/lib/oeqa/selftest/cases/oescripts.py @@ -133,7 +133,7 @@ class OEListPackageconfigTests(OEScriptTests): def check_endlines(self, results, expected_endlines): for line in results.output.splitlines(): for el in expected_endlines: - if line == el: + if line.split() == el.split(): expected_endlines.remove(el) break @@ -177,7 +177,7 @@ class OEListPackageconfigTests(OEScriptTests): self.check_endlines(results, expected_endlines) - def test_packageconfig_flags_optiins_preferred_only(self): + def test_packageconfig_flags_options_preferred_only(self): results = runCmd('%s/contrib/list-packageconfig-flags.py -p' % self.scripts_dir) expected_endlines = [] expected_endlines.append("RECIPE NAME PACKAGECONFIG FLAGS") diff --git a/poky/meta/lib/oeqa/selftest/cases/recipetool.py b/poky/meta/lib/oeqa/selftest/cases/recipetool.py index 6bfe8f177..c2ade2543 100644 --- a/poky/meta/lib/oeqa/selftest/cases/recipetool.py +++ b/poky/meta/lib/oeqa/selftest/cases/recipetool.py @@ -534,7 +534,11 @@ class RecipetoolTests(RecipetoolBase): dstdir = os.path.join(dstdir, p) if not os.path.exists(dstdir): os.makedirs(dstdir) - self.track_for_cleanup(dstdir) + if p == "lib": + # Can race with other tests + self.add_command_to_tearDown('rmdir --ignore-fail-on-non-empty %s' % dstdir) + else: + self.track_for_cleanup(dstdir) dstfile = os.path.join(dstdir, os.path.basename(srcfile)) if srcfile != dstfile: shutil.copy(srcfile, dstfile) diff --git a/poky/meta/lib/oeqa/selftest/context.py b/poky/meta/lib/oeqa/selftest/context.py index 494e9dbd1..23f7d71bd 100644 --- a/poky/meta/lib/oeqa/selftest/context.py +++ b/poky/meta/lib/oeqa/selftest/context.py @@ -22,6 +22,37 @@ from oeqa.core.exception import OEQAPreRun, OEQATestNotFound from oeqa.utils.commands import runCmd, get_bb_vars, get_test_layer +class NonConcurrentTestSuite(unittest.TestSuite): + def __init__(self, suite, processes, setupfunc, removefunc): + super().__init__([suite]) + self.processes = processes + self.suite = suite + self.setupfunc = setupfunc + self.removefunc = removefunc + + def run(self, result): + (builddir, newbuilddir) = self.setupfunc("-st", None, self.suite) + ret = super().run(result) + os.chdir(builddir) + if newbuilddir and 
ret.wasSuccessful(): + self.removefunc(newbuilddir) + +def removebuilddir(d): + delay = 5 + while delay and os.path.exists(d + "/bitbake.lock"): + time.sleep(1) + delay = delay - 1 + # Deleting these directories takes a lot of time, use autobuilder + # clobberdir if its available + clobberdir = os.path.expanduser("~/yocto-autobuilder-helper/janitor/clobberdir") + if os.path.exists(clobberdir): + try: + subprocess.check_call([clobberdir, d]) + return + except subprocess.CalledProcessError: + pass + bb.utils.prunedir(d, ionice=True) + class OESelftestTestContext(OETestContext): def __init__(self, td=None, logger=None, machines=None, config_paths=None, newbuilddir=None): super(OESelftestTestContext, self).__init__(td, logger) @@ -86,10 +117,9 @@ class OESelftestTestContext(OETestContext): if processes: from oeqa.core.utils.concurrencytest import ConcurrentTestSuite - return ConcurrentTestSuite(suites, processes, self.setup_builddir) + return ConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir) else: - self.setup_builddir("-st", None, suites) - return suites + return NonConcurrentTestSuite(suites, processes, self.setup_builddir, removebuilddir) def runTests(self, processes=None, machine=None, skips=[]): if machine: diff --git a/poky/meta/lib/oeqa/targetcontrol.py b/poky/meta/lib/oeqa/targetcontrol.py index 7bbba6016..19f5a4ea7 100644 --- a/poky/meta/lib/oeqa/targetcontrol.py +++ b/poky/meta/lib/oeqa/targetcontrol.py @@ -187,6 +187,7 @@ class QemuTarget(BaseTarget): except: pass self.logger.removeHandler(self.loggerhandler) + self.loggerhandler.close() self.connection = None self.ip = None self.server_ip = None diff --git a/poky/meta/recipes-bsp/opensbi/opensbi_0.6.bb b/poky/meta/recipes-bsp/opensbi/opensbi_0.6.bb deleted file mode 100644 index 56f2d4b91..000000000 --- a/poky/meta/recipes-bsp/opensbi/opensbi_0.6.bb +++ /dev/null @@ -1,48 +0,0 @@ -SUMMARY = "RISC-V Open Source Supervisor Binary Interface (OpenSBI)" -DESCRIPTION = "OpenSBI aims to provide an open-source and extensible implementation of the RISC-V SBI specification for a platform specific firmware (M-mode) and a general purpose OS, hypervisor or bootloader (S-mode or HS-mode). OpenSBI implementation can be easily extended by RISC-V platform or System-on-Chip vendors to fit a particular hadware configuration." -LICENSE = "BSD-2-Clause" -LIC_FILES_CHKSUM = "file://COPYING.BSD;md5=42dd9555eb177f35150cf9aa240b61e5" - -require opensbi-payloads.inc - -inherit autotools-brokensep deploy - -SRCREV = "ac5e821d50be631f26274765a59bc1b444ffd862" -SRC_URI = "git://github.com/riscv/opensbi.git \ - file://0001-Makefile-Don-t-specify-mabi-or-march.patch \ - " - -S = "${WORKDIR}/git" - -EXTRA_OEMAKE += "PLATFORM=${RISCV_SBI_PLAT} I=${D}" -# If RISCV_SBI_PAYLOAD is set then include it as a payload -EXTRA_OEMAKE_append = " ${@riscv_get_extra_oemake_image(d)}" - -# Required if specifying a custom payload -do_compile[depends] += "${@riscv_get_do_compile_depends(d)}" - -do_install_append() { - # In the future these might be required as a dependency for other packages. 
- # At the moment just delete them to avoid warnings - rm -r ${D}/include - rm -r ${D}/platform/${RISCV_SBI_PLAT}/lib - rm -r ${D}/platform/${RISCV_SBI_PLAT}/firmware/payloads - rm -r ${D}/lib -} - -do_deploy () { - install -m 755 ${D}/platform/${RISCV_SBI_PLAT}/firmware/fw_payload.* ${DEPLOYDIR}/ - install -m 755 ${D}/platform/${RISCV_SBI_PLAT}/firmware/fw_jump.* ${DEPLOYDIR}/ - install -m 755 ${D}/platform/${RISCV_SBI_PLAT}/firmware/fw_dynamic.* ${DEPLOYDIR}/ -} - -addtask deploy before do_build after do_install - -FILES_${PN} += "/platform/${RISCV_SBI_PLAT}/firmware/fw_jump.*" -FILES_${PN} += "/platform/${RISCV_SBI_PLAT}/firmware/fw_payload.*" -FILES_${PN} += "/platform/${RISCV_SBI_PLAT}/firmware/fw_dynamic.*" - -COMPATIBLE_HOST = "(riscv64|riscv32).*" -INHIBIT_PACKAGE_STRIP = "1" - -SECURITY_CFLAGS = "" diff --git a/poky/meta/recipes-bsp/opensbi/opensbi_0.8.bb b/poky/meta/recipes-bsp/opensbi/opensbi_0.8.bb new file mode 100644 index 000000000..818efac73 --- /dev/null +++ b/poky/meta/recipes-bsp/opensbi/opensbi_0.8.bb @@ -0,0 +1,47 @@ +SUMMARY = "RISC-V Open Source Supervisor Binary Interface (OpenSBI)" +DESCRIPTION = "OpenSBI aims to provide an open-source and extensible implementation of the RISC-V SBI specification for a platform specific firmware (M-mode) and a general purpose OS, hypervisor or bootloader (S-mode or HS-mode). OpenSBI implementation can be easily extended by RISC-V platform or System-on-Chip vendors to fit a particular hadware configuration." +LICENSE = "BSD-2-Clause" +LIC_FILES_CHKSUM = "file://COPYING.BSD;md5=42dd9555eb177f35150cf9aa240b61e5" + +require opensbi-payloads.inc + +inherit autotools-brokensep deploy + +SRCREV = "a98258d0b537a295f517bbc8d813007336731fa9" +SRC_URI = "git://github.com/riscv/opensbi.git;branch=master \ + file://0001-Makefile-Don-t-specify-mabi-or-march.patch \ + " + +S = "${WORKDIR}/git" + +EXTRA_OEMAKE += "PLATFORM=${RISCV_SBI_PLAT} I=${D}" +# If RISCV_SBI_PAYLOAD is set then include it as a payload +EXTRA_OEMAKE_append = " ${@riscv_get_extra_oemake_image(d)}" + +# Required if specifying a custom payload +do_compile[depends] += "${@riscv_get_do_compile_depends(d)}" + +do_install_append() { + # In the future these might be required as a dependency for other packages. 
+ # At the moment just delete them to avoid warnings + rm -r ${D}/include + rm -r ${D}/lib* + rm -r ${D}/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/payloads +} + +do_deploy () { + install -m 755 ${D}/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_payload.* ${DEPLOYDIR}/ + install -m 755 ${D}/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_jump.* ${DEPLOYDIR}/ + install -m 755 ${D}/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_dynamic.* ${DEPLOYDIR}/ +} + +addtask deploy before do_build after do_install + +FILES_${PN} += "/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_jump.*" +FILES_${PN} += "/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_payload.*" +FILES_${PN} += "/share/opensbi/*/${RISCV_SBI_PLAT}/firmware/fw_dynamic.*" + +COMPATIBLE_HOST = "(riscv64|riscv32).*" +INHIBIT_PACKAGE_STRIP = "1" + +SECURITY_CFLAGS = "" diff --git a/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb b/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb index ea29b668e..3820ba262 100644 --- a/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb +++ b/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb @@ -12,7 +12,7 @@ SECTION = "libs" PV = "0.2+git${SRCPV}" SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https" -SRCREV = "f4b9cde3815abe84a98079cedd515283ea08c16b" +SRCREV = "86bd30a14e153a18f670b25708795253d8736f0f" S = "${WORKDIR}/git" diff --git a/poky/meta/recipes-connectivity/libuv/libuv_1.34.2.bb b/poky/meta/recipes-connectivity/libuv/libuv_1.34.2.bb deleted file mode 100644 index 234cec37b..000000000 --- a/poky/meta/recipes-connectivity/libuv/libuv_1.34.2.bb +++ /dev/null @@ -1,19 +0,0 @@ -SUMMARY = "A multi-platform support library with a focus on asynchronous I/O" -HOMEPAGE = "https://github.com/libuv/libuv" -BUGTRACKER = "https://github.com/libuv/libuv/issues" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://LICENSE;md5=a68902a430e32200263d182d44924d47" - -SRCREV = "f868c9ab0c307525a16fff99fd21e32a6ebc3837" -SRC_URI = "git://github.com/libuv/libuv;branch=v1.x" - -S = "${WORKDIR}/git" - -inherit autotools - -do_configure() { - ${S}/autogen.sh || bbnote "${PN} failed to autogen.sh" - oe_runconf -} - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/libuv/libuv_1.38.0.bb b/poky/meta/recipes-connectivity/libuv/libuv_1.38.0.bb new file mode 100644 index 000000000..afc9b2f2e --- /dev/null +++ b/poky/meta/recipes-connectivity/libuv/libuv_1.38.0.bb @@ -0,0 +1,19 @@ +SUMMARY = "A multi-platform support library with a focus on asynchronous I/O" +HOMEPAGE = "https://github.com/libuv/libuv" +BUGTRACKER = "https://github.com/libuv/libuv/issues" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;md5=a68902a430e32200263d182d44924d47" + +SRCREV = "1ab9ea3790378f9f25c4e78e9e2b511c75f9c9ed" +SRC_URI = "git://github.com/libuv/libuv;branch=v1.x" + +S = "${WORKDIR}/git" + +inherit autotools + +do_configure() { + ${S}/autogen.sh || bbnote "${PN} failed to autogen.sh" + oe_runconf +} + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-cacheio-use-intmax_t-for-formatted-IO.patch b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-cacheio-use-intmax_t-for-formatted-IO.patch deleted file mode 100644 index bafff5b9c..000000000 --- a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/0001-cacheio-use-intmax_t-for-formatted-IO.patch +++ /dev/null @@ -1,38 +0,0 @@ -From ac32b813f5d6f9a2de944015cf9bb98d68e0203a Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Sat, 1 Dec 2018 10:02:12 -0800 -Subject: [PATCH] cacheio: use intmax_t for formatted IO - -time_t is not same size on 
x32 ABI (ILP32) - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - support/nfs/cacheio.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/support/nfs/cacheio.c b/support/nfs/cacheio.c -index 9dc4cf1..2086a95 100644 ---- a/support/nfs/cacheio.c -+++ b/support/nfs/cacheio.c -@@ -17,6 +17,7 @@ - - #include - #include -+#include - #include - #include - #include -@@ -234,7 +235,7 @@ cache_flush(int force) - stb.st_mtime > now) - stb.st_mtime = time(0); - -- sprintf(stime, "%ld\n", stb.st_mtime); -+ sprintf(stime, "%jd\n", (intmax_t)stb.st_mtime); - for (c=0; cachelist[c]; c++) { - int fd; - sprintf(path, "/proc/net/rpc/%s/flush", cachelist[c]); --- -2.19.2 - diff --git a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.3.bb b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.3.bb deleted file mode 100644 index 9bdb6f4ae..000000000 --- a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.4.3.bb +++ /dev/null @@ -1,147 +0,0 @@ -SUMMARY = "userspace utilities for kernel nfs" -DESCRIPTION = "The nfs-utils package provides a daemon for the kernel \ -NFS server and related tools." -HOMEPAGE = "http://nfs.sourceforge.net/" -SECTION = "console/network" - -LICENSE = "MIT & GPLv2+ & BSD" -LIC_FILES_CHKSUM = "file://COPYING;md5=95f3a93a5c3c7888de623b46ea085a84" - -# util-linux for libblkid -DEPENDS = "libcap libevent util-linux sqlite3 libtirpc" -RDEPENDS_${PN} = "${PN}-client" -RRECOMMENDS_${PN} = "kernel-module-nfsd" - -inherit useradd - -USERADD_PACKAGES = "${PN}-client" -USERADD_PARAM_${PN}-client = "--system --home-dir /var/lib/nfs \ - --shell /bin/false --user-group rpcuser" - -SRC_URI = "${KERNELORG_MIRROR}/linux/utils/nfs-utils/${PV}/nfs-utils-${PV}.tar.xz \ - file://nfsserver \ - file://nfscommon \ - file://nfs-utils.conf \ - file://nfs-server.service \ - file://nfs-mountd.service \ - file://nfs-statd.service \ - file://proc-fs-nfsd.mount \ - file://nfs-utils-debianize-start-statd.patch \ - file://bugfix-adjust-statd-service-name.patch \ - file://0001-cacheio-use-intmax_t-for-formatted-IO.patch \ - file://0001-Makefile.am-fix-undefined-function-for-libnsm.a.patch \ - file://clang-warnings.patch \ - " -SRC_URI[md5sum] = "06020c76f531ed97f3145514901e0e7c" -SRC_URI[sha256sum] = "af65fce5dd8370cff9ead67baac5a6cd69c376dcadfef264dc2c78c904f26599" - -# Only kernel-module-nfsd is required here (but can be built-in) - the nfsd module will -# pull in the remainder of the dependencies. 
- -INITSCRIPT_PACKAGES = "${PN} ${PN}-client" -INITSCRIPT_NAME = "nfsserver" -INITSCRIPT_PARAMS = "defaults" -INITSCRIPT_NAME_${PN}-client = "nfscommon" -INITSCRIPT_PARAMS_${PN}-client = "defaults 19 21" - -inherit autotools-brokensep update-rc.d systemd pkgconfig - -SYSTEMD_PACKAGES = "${PN} ${PN}-client" -SYSTEMD_SERVICE_${PN} = "nfs-server.service nfs-mountd.service" -SYSTEMD_SERVICE_${PN}-client = "nfs-statd.service" - -# --enable-uuid is need for cross-compiling -EXTRA_OECONF = "--with-statduser=rpcuser \ - --enable-mountconfig \ - --enable-libmount-mount \ - --enable-uuid \ - --disable-gss \ - --disable-nfsdcltrack \ - --with-statdpath=/var/lib/nfs/statd \ - " - -PACKAGECONFIG ??= "tcp-wrappers \ - ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ -" -PACKAGECONFIG_remove_libc-musl = "tcp-wrappers" -PACKAGECONFIG[tcp-wrappers] = "--with-tcp-wrappers,--without-tcp-wrappers,tcp-wrappers" -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," -# libdevmapper is available in meta-oe -PACKAGECONFIG[nfsv41] = "--enable-nfsv41,--disable-nfsv41,libdevmapper,libdevmapper" -# keyutils is available in meta-oe -PACKAGECONFIG[nfsv4] = "--enable-nfsv4,--disable-nfsv4,keyutils,python3-core" - -PACKAGES =+ "${PN}-client ${PN}-mount ${PN}-stats" - -CONFFILES_${PN}-client += "${localstatedir}/lib/nfs/etab \ - ${localstatedir}/lib/nfs/rmtab \ - ${localstatedir}/lib/nfs/xtab \ - ${localstatedir}/lib/nfs/statd/state \ - ${sysconfdir}/nfsmount.conf" - -FILES_${PN}-client = "${sbindir}/*statd \ - ${sbindir}/rpc.idmapd ${sbindir}/sm-notify \ - ${sbindir}/showmount ${sbindir}/nfsstat \ - ${localstatedir}/lib/nfs \ - ${sysconfdir}/nfs-utils.conf \ - ${sysconfdir}/nfsmount.conf \ - ${sysconfdir}/init.d/nfscommon \ - ${systemd_unitdir}/system/nfs-statd.service" -RDEPENDS_${PN}-client = "${PN}-mount rpcbind" - -FILES_${PN}-mount = "${base_sbindir}/*mount.nfs*" - -FILES_${PN}-stats = "${sbindir}/mountstats ${sbindir}/nfsiostat" -RDEPENDS_${PN}-stats = "python3-core" - -FILES_${PN}-staticdev += "${libdir}/libnfsidmap/*.a" - -FILES_${PN} += "${systemd_unitdir} ${libdir}/libnfsidmap/" - -do_configure_prepend() { - sed -i -e 's,sbindir = /sbin,sbindir = ${base_sbindir},g' \ - ${S}/utils/mount/Makefile.am -} - -# Make clean needed because the package comes with -# precompiled 64-bit objects that break the build -do_compile_prepend() { - make clean -} - -# Works on systemd only -HIGH_RLIMIT_NOFILE ??= "4096" - -do_install_append () { - install -d ${D}${sysconfdir}/init.d - install -m 0755 ${WORKDIR}/nfsserver ${D}${sysconfdir}/init.d/nfsserver - install -m 0755 ${WORKDIR}/nfscommon ${D}${sysconfdir}/init.d/nfscommon - - install -m 0755 ${WORKDIR}/nfs-utils.conf ${D}${sysconfdir} - install -m 0755 ${S}/utils/mount/nfsmount.conf ${D}${sysconfdir} - - install -d ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/nfs-server.service ${D}${systemd_unitdir}/system/ - install -m 0644 ${WORKDIR}/nfs-mountd.service ${D}${systemd_unitdir}/system/ - install -m 0644 ${WORKDIR}/nfs-statd.service ${D}${systemd_unitdir}/system/ - sed -i -e 's,@SBINDIR@,${sbindir},g' \ - -e 's,@SYSCONFDIR@,${sysconfdir},g' \ - -e 's,@HIGH_RLIMIT_NOFILE@,${HIGH_RLIMIT_NOFILE},g' \ - ${D}${systemd_unitdir}/system/*.service - if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then - install -m 0644 ${WORKDIR}/proc-fs-nfsd.mount ${D}${systemd_unitdir}/system/ - install -d ${D}${systemd_unitdir}/system/sysinit.target.wants/ - ln -sf ../proc-fs-nfsd.mount ${D}${systemd_unitdir}/system/sysinit.target.wants/proc-fs-nfsd.mount - 
fi - - # kernel code as of 3.8 hard-codes this path as a default - install -d ${D}/var/lib/nfs/v4recovery - - # chown the directories and files - chown -R rpcuser:rpcuser ${D}${localstatedir}/lib/nfs/statd - chmod 0644 ${D}${localstatedir}/lib/nfs/statd/state - - # Make python tools use python 3 - sed -i -e '1s,#!.*python.*,#!${bindir}/python3,' ${D}${sbindir}/mountstats ${D}${sbindir}/nfsiostat - -} diff --git a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.5.1.bb b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.5.1.bb new file mode 100644 index 000000000..188a8893b --- /dev/null +++ b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils_2.5.1.bb @@ -0,0 +1,144 @@ +SUMMARY = "userspace utilities for kernel nfs" +DESCRIPTION = "The nfs-utils package provides a daemon for the kernel \ +NFS server and related tools." +HOMEPAGE = "http://nfs.sourceforge.net/" +SECTION = "console/network" + +LICENSE = "MIT & GPLv2+ & BSD" +LIC_FILES_CHKSUM = "file://COPYING;md5=95f3a93a5c3c7888de623b46ea085a84" + +# util-linux for libblkid +DEPENDS = "libcap libevent util-linux sqlite3 libtirpc" +RDEPENDS_${PN} = "${PN}-client" +RRECOMMENDS_${PN} = "kernel-module-nfsd" + +inherit useradd + +USERADD_PACKAGES = "${PN}-client" +USERADD_PARAM_${PN}-client = "--system --home-dir /var/lib/nfs \ + --shell /bin/false --user-group rpcuser" + +SRC_URI = "${KERNELORG_MIRROR}/linux/utils/nfs-utils/${PV}/nfs-utils-${PV}.tar.xz \ + file://nfsserver \ + file://nfscommon \ + file://nfs-utils.conf \ + file://nfs-server.service \ + file://nfs-mountd.service \ + file://nfs-statd.service \ + file://proc-fs-nfsd.mount \ + file://nfs-utils-debianize-start-statd.patch \ + file://bugfix-adjust-statd-service-name.patch \ + file://0001-Makefile.am-fix-undefined-function-for-libnsm.a.patch \ + file://clang-warnings.patch \ + " +SRC_URI[sha256sum] = "0f1c8170e16a07d9836bbf0836d48d0c842b6f0e0e8b18748f099751851d30c4" + +# Only kernel-module-nfsd is required here (but can be built-in) - the nfsd module will +# pull in the remainder of the dependencies. 
+ +INITSCRIPT_PACKAGES = "${PN} ${PN}-client" +INITSCRIPT_NAME = "nfsserver" +INITSCRIPT_PARAMS = "defaults" +INITSCRIPT_NAME_${PN}-client = "nfscommon" +INITSCRIPT_PARAMS_${PN}-client = "defaults 19 21" + +inherit autotools-brokensep update-rc.d systemd pkgconfig + +SYSTEMD_PACKAGES = "${PN} ${PN}-client" +SYSTEMD_SERVICE_${PN} = "nfs-server.service nfs-mountd.service" +SYSTEMD_SERVICE_${PN}-client = "nfs-statd.service" + +# --enable-uuid is need for cross-compiling +EXTRA_OECONF = "--with-statduser=rpcuser \ + --enable-mountconfig \ + --enable-libmount-mount \ + --enable-uuid \ + --disable-gss \ + --disable-nfsdcltrack \ + --with-statdpath=/var/lib/nfs/statd \ + " + +PACKAGECONFIG ??= "tcp-wrappers \ + ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ +" +PACKAGECONFIG_remove_libc-musl = "tcp-wrappers" +PACKAGECONFIG[tcp-wrappers] = "--with-tcp-wrappers,--without-tcp-wrappers,tcp-wrappers" +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +# libdevmapper is available in meta-oe +PACKAGECONFIG[nfsv41] = "--enable-nfsv41,--disable-nfsv41,libdevmapper,libdevmapper" +# keyutils is available in meta-oe +PACKAGECONFIG[nfsv4] = "--enable-nfsv4,--disable-nfsv4,keyutils,python3-core" + +PACKAGES =+ "${PN}-client ${PN}-mount ${PN}-stats" + +CONFFILES_${PN}-client += "${localstatedir}/lib/nfs/etab \ + ${localstatedir}/lib/nfs/rmtab \ + ${localstatedir}/lib/nfs/xtab \ + ${localstatedir}/lib/nfs/statd/state \ + ${sysconfdir}/nfsmount.conf" + +FILES_${PN}-client = "${sbindir}/*statd \ + ${sbindir}/rpc.idmapd ${sbindir}/sm-notify \ + ${sbindir}/showmount ${sbindir}/nfsstat \ + ${localstatedir}/lib/nfs \ + ${sysconfdir}/nfs-utils.conf \ + ${sysconfdir}/nfsmount.conf \ + ${sysconfdir}/init.d/nfscommon \ + ${systemd_unitdir}/system/nfs-statd.service" +RDEPENDS_${PN}-client = "${PN}-mount rpcbind" + +FILES_${PN}-mount = "${base_sbindir}/*mount.nfs*" + +FILES_${PN}-stats = "${sbindir}/mountstats ${sbindir}/nfsiostat ${sbindir}/nfsdclnts" +RDEPENDS_${PN}-stats = "python3-core" + +FILES_${PN}-staticdev += "${libdir}/libnfsidmap/*.a" + +FILES_${PN} += "${systemd_unitdir} ${libdir}/libnfsidmap/" + +do_configure_prepend() { + sed -i -e 's,sbindir = /sbin,sbindir = ${base_sbindir},g' \ + ${S}/utils/mount/Makefile.am +} + +# Make clean needed because the package comes with +# precompiled 64-bit objects that break the build +do_compile_prepend() { + make clean +} + +# Works on systemd only +HIGH_RLIMIT_NOFILE ??= "4096" + +do_install_append () { + install -d ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/nfsserver ${D}${sysconfdir}/init.d/nfsserver + install -m 0755 ${WORKDIR}/nfscommon ${D}${sysconfdir}/init.d/nfscommon + + install -m 0755 ${WORKDIR}/nfs-utils.conf ${D}${sysconfdir} + install -m 0755 ${S}/utils/mount/nfsmount.conf ${D}${sysconfdir} + + install -d ${D}${systemd_unitdir}/system + install -m 0644 ${WORKDIR}/nfs-server.service ${D}${systemd_unitdir}/system/ + install -m 0644 ${WORKDIR}/nfs-mountd.service ${D}${systemd_unitdir}/system/ + install -m 0644 ${WORKDIR}/nfs-statd.service ${D}${systemd_unitdir}/system/ + sed -i -e 's,@SBINDIR@,${sbindir},g' \ + -e 's,@SYSCONFDIR@,${sysconfdir},g' \ + -e 's,@HIGH_RLIMIT_NOFILE@,${HIGH_RLIMIT_NOFILE},g' \ + ${D}${systemd_unitdir}/system/*.service + if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then + install -m 0644 ${WORKDIR}/proc-fs-nfsd.mount ${D}${systemd_unitdir}/system/ + install -d ${D}${systemd_unitdir}/system/sysinit.target.wants/ + ln -sf ../proc-fs-nfsd.mount 
${D}${systemd_unitdir}/system/sysinit.target.wants/proc-fs-nfsd.mount + fi + + # kernel code as of 3.8 hard-codes this path as a default + install -d ${D}/var/lib/nfs/v4recovery + + # chown the directories and files + chown -R rpcuser:rpcuser ${D}${localstatedir}/lib/nfs/statd + chmod 0644 ${D}${localstatedir}/lib/nfs/statd/state + + # Make python tools use python 3 + sed -i -e '1s,#!.*python.*,#!${bindir}/python3,' ${D}${sbindir}/mountstats ${D}${sbindir}/nfsiostat +} diff --git a/poky/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb b/poky/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb index 66fa8f7d0..47ed6b764 100644 --- a/poky/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb +++ b/poky/meta/recipes-connectivity/openssl/openssl_1.1.1g.bb @@ -191,7 +191,9 @@ PACKAGES =+ "libcrypto libssl openssl-conf ${PN}-engines ${PN}-misc" FILES_libcrypto = "${libdir}/libcrypto${SOLIBS}" FILES_libssl = "${libdir}/libssl${SOLIBS}" -FILES_openssl-conf = "${sysconfdir}/ssl/openssl.cnf" +FILES_openssl-conf = "${sysconfdir}/ssl/openssl.cnf \ + ${libdir}/ssl-1.1/openssl.cnf* \ + " FILES_${PN}-engines = "${libdir}/engines-1.1" FILES_${PN}-misc = "${libdir}/ssl-1.1/misc" FILES_${PN} =+ "${libdir}/ssl-1.1/*" diff --git a/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch new file mode 100644 index 000000000..53ad5d028 --- /dev/null +++ b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch @@ -0,0 +1,151 @@ +From 5b78c8f961f25f4dc22d6f2b77ddd06d712cec63 Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Wed, 3 Jun 2020 23:17:35 +0300 +Subject: [PATCH 1/3] WPS UPnP: Do not allow event subscriptions with URLs to + other networks + +The UPnP Device Architecture 2.0 specification errata ("UDA errata +16-04-2020.docx") addresses a problem with notifications being allowed +to go out to other domains by disallowing such cases. Do such filtering +for the notification callback URLs to avoid undesired connections to +external networks based on subscriptions that any device in the local +network could request when WPS support for external registrars is +enabled (the upnp_iface parameter in hostapd configuration). + +Upstream-Status: Backport +CVE: CVE-2020-12695 patch #1 +Signed-off-by: Jouni Malinen +Signed-off-by: Armin Kuster + +--- + src/wps/wps_er.c | 2 +- + src/wps/wps_upnp.c | 38 ++++++++++++++++++++++++++++++++++++-- + src/wps/wps_upnp_i.h | 3 ++- + 3 files changed, 39 insertions(+), 4 deletions(-) + +Index: wpa_supplicant-2.9/src/wps/wps_er.c +=================================================================== +--- wpa_supplicant-2.9.orig/src/wps/wps_er.c ++++ wpa_supplicant-2.9/src/wps/wps_er.c +@@ -1298,7 +1298,7 @@ wps_er_init(struct wps_context *wps, con + "with %s", filter); + } + if (get_netif_info(er->ifname, &er->ip_addr, &er->ip_addr_text, +- er->mac_addr)) { ++ NULL, er->mac_addr)) { + wpa_printf(MSG_INFO, "WPS UPnP: Could not get IP/MAC address " + "for %s. 
Does it have IP address?", er->ifname); + wps_er_deinit(er, NULL, NULL); +Index: wpa_supplicant-2.9/src/wps/wps_upnp.c +=================================================================== +--- wpa_supplicant-2.9.orig/src/wps/wps_upnp.c ++++ wpa_supplicant-2.9/src/wps/wps_upnp.c +@@ -303,6 +303,14 @@ static void subscr_addr_free_all(struct + } + + ++static int local_network_addr(struct upnp_wps_device_sm *sm, ++ struct sockaddr_in *addr) ++{ ++ return (addr->sin_addr.s_addr & sm->netmask.s_addr) == ++ (sm->ip_addr & sm->netmask.s_addr); ++} ++ ++ + /* subscr_addr_add_url -- add address(es) for one url to subscription */ + static void subscr_addr_add_url(struct subscription *s, const char *url, + size_t url_len) +@@ -381,6 +389,7 @@ static void subscr_addr_add_url(struct s + + for (rp = result; rp; rp = rp->ai_next) { + struct subscr_addr *a; ++ struct sockaddr_in *addr = (struct sockaddr_in *) rp->ai_addr; + + /* Limit no. of address to avoid denial of service attack */ + if (dl_list_len(&s->addr_list) >= MAX_ADDR_PER_SUBSCRIPTION) { +@@ -389,6 +398,13 @@ static void subscr_addr_add_url(struct s + break; + } + ++ if (!local_network_addr(s->sm, addr)) { ++ wpa_printf(MSG_INFO, ++ "WPS UPnP: Ignore a delivery URL that points to another network %s", ++ inet_ntoa(addr->sin_addr)); ++ continue; ++ } ++ + a = os_zalloc(sizeof(*a) + alloc_len); + if (a == NULL) + break; +@@ -889,11 +905,12 @@ static int eth_get(const char *device, u + * @net_if: Selected network interface name + * @ip_addr: Buffer for returning IP address in network byte order + * @ip_addr_text: Buffer for returning a pointer to allocated IP address text ++ * @netmask: Buffer for returning netmask or %NULL if not needed + * @mac: Buffer for returning MAC address + * Returns: 0 on success, -1 on failure + */ + int get_netif_info(const char *net_if, unsigned *ip_addr, char **ip_addr_text, +- u8 mac[ETH_ALEN]) ++ struct in_addr *netmask, u8 mac[ETH_ALEN]) + { + struct ifreq req; + int sock = -1; +@@ -919,6 +936,19 @@ int get_netif_info(const char *net_if, u + in_addr.s_addr = *ip_addr; + os_snprintf(*ip_addr_text, 16, "%s", inet_ntoa(in_addr)); + ++ if (netmask) { ++ os_memset(&req, 0, sizeof(req)); ++ os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name)); ++ if (ioctl(sock, SIOCGIFNETMASK, &req) < 0) { ++ wpa_printf(MSG_ERROR, ++ "WPS UPnP: SIOCGIFNETMASK failed: %d (%s)", ++ errno, strerror(errno)); ++ goto fail; ++ } ++ addr = (struct sockaddr_in *) &req.ifr_netmask; ++ netmask->s_addr = addr->sin_addr.s_addr; ++ } ++ + #ifdef __linux__ + os_strlcpy(req.ifr_name, net_if, sizeof(req.ifr_name)); + if (ioctl(sock, SIOCGIFHWADDR, &req) < 0) { +@@ -1025,11 +1055,15 @@ static int upnp_wps_device_start(struct + + /* Determine which IP and mac address we're using */ + if (get_netif_info(net_if, &sm->ip_addr, &sm->ip_addr_text, +- sm->mac_addr)) { ++ &sm->netmask, sm->mac_addr)) { + wpa_printf(MSG_INFO, "WPS UPnP: Could not get IP/MAC address " + "for %s. Does it have IP address?", net_if); + goto fail; + } ++ wpa_printf(MSG_DEBUG, "WPS UPnP: Local IP address %s netmask %s hwaddr " ++ MACSTR, ++ sm->ip_addr_text, inet_ntoa(sm->netmask), ++ MAC2STR(sm->mac_addr)); + + /* Listen for incoming TCP connections so that others + * can fetch our "xml files" from us. 
+Index: wpa_supplicant-2.9/src/wps/wps_upnp_i.h +=================================================================== +--- wpa_supplicant-2.9.orig/src/wps/wps_upnp_i.h ++++ wpa_supplicant-2.9/src/wps/wps_upnp_i.h +@@ -128,6 +128,7 @@ struct upnp_wps_device_sm { + u8 mac_addr[ETH_ALEN]; /* mac addr of network i.f. we use */ + char *ip_addr_text; /* IP address of network i.f. we use */ + unsigned ip_addr; /* IP address of network i.f. we use (host order) */ ++ struct in_addr netmask; + int multicast_sd; /* send multicast messages over this socket */ + int ssdp_sd; /* receive discovery UPD packets on socket */ + int ssdp_sd_registered; /* nonzero if we must unregister */ +@@ -158,7 +159,7 @@ struct subscription * subscription_find( + const u8 uuid[UUID_LEN]); + void subscr_addr_delete(struct subscr_addr *a); + int get_netif_info(const char *net_if, unsigned *ip_addr, char **ip_addr_text, +- u8 mac[ETH_ALEN]); ++ struct in_addr *netmask, u8 mac[ETH_ALEN]); + + /* wps_upnp_ssdp.c */ + void msearchreply_state_machine_stop(struct advertisement_state_machine *a); diff --git a/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch new file mode 100644 index 000000000..59640859d --- /dev/null +++ b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch @@ -0,0 +1,62 @@ +From f7d268864a2660b7239b9a8ff5ad37faeeb751ba Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Wed, 3 Jun 2020 22:41:02 +0300 +Subject: [PATCH 2/3] WPS UPnP: Fix event message generation using a long URL + path + +More than about 700 character URL ended up overflowing the wpabuf used +for building the event notification and this resulted in the wpabuf +buffer overflow checks terminating the hostapd process. Fix this by +allocating the buffer to be large enough to contain the full URL path. +However, since that around 700 character limit has been the practical +limit for more than ten years, start explicitly enforcing that as the +limit or the callback URLs since any longer ones had not worked before +and there is no need to enable them now either. + +Upstream-Status: Backport +CVE: CVE-2020-12695 patch #2 +Signed-off-by: Jouni Malinen +Signed-off-by: Armin Kuster + +--- + src/wps/wps_upnp.c | 9 +++++++-- + src/wps/wps_upnp_event.c | 3 ++- + 2 files changed, 9 insertions(+), 3 deletions(-) + +diff --git a/src/wps/wps_upnp.c b/src/wps/wps_upnp.c +index 7d4b7439940e..ab685d52ecab 100644 +--- a/src/wps/wps_upnp.c ++++ b/src/wps/wps_upnp.c +@@ -328,9 +328,14 @@ static void subscr_addr_add_url(struct subscription *s, const char *url, + int rerr; + size_t host_len, path_len; + +- /* url MUST begin with http: */ +- if (url_len < 7 || os_strncasecmp(url, "http://", 7)) ++ /* URL MUST begin with HTTP scheme. In addition, limit the length of ++ * the URL to 700 characters which is around the limit that was ++ * implicitly enforced for more than 10 years due to a bug in ++ * generating the event messages. 
*/ ++ if (url_len < 7 || os_strncasecmp(url, "http://", 7) || url_len > 700) { ++ wpa_printf(MSG_DEBUG, "WPS UPnP: Reject an unacceptable URL"); + goto fail; ++ } + url += 7; + url_len -= 7; + +diff --git a/src/wps/wps_upnp_event.c b/src/wps/wps_upnp_event.c +index d7e6edcc6503..08a23612f338 100644 +--- a/src/wps/wps_upnp_event.c ++++ b/src/wps/wps_upnp_event.c +@@ -147,7 +147,8 @@ static struct wpabuf * event_build_message(struct wps_event_ *e) + struct wpabuf *buf; + char *b; + +- buf = wpabuf_alloc(1000 + wpabuf_len(e->data)); ++ buf = wpabuf_alloc(1000 + os_strlen(e->addr->path) + ++ wpabuf_len(e->data)); + if (buf == NULL) + return NULL; + wpabuf_printf(buf, "NOTIFY %s HTTP/1.1\r\n", e->addr->path); +-- +2.20.1 diff --git a/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch new file mode 100644 index 000000000..8a014ef28 --- /dev/null +++ b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant/0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch @@ -0,0 +1,50 @@ +From 85aac526af8612c21b3117dadc8ef5944985b476 Mon Sep 17 00:00:00 2001 +From: Jouni Malinen +Date: Thu, 4 Jun 2020 21:24:04 +0300 +Subject: [PATCH 3/3] WPS UPnP: Handle HTTP initiation failures for events more + properly + +While it is appropriate to try to retransmit the event to another +callback URL on a failure to initiate the HTTP client connection, there +is no point in trying the exact same operation multiple times in a row. +Replve the event_retry() calls with event_addr_failure() for these cases +to avoid busy loops trying to repeat the same failing operation. + +These potential busy loops would go through eloop callbacks, so the +process is not completely stuck on handling them, but unnecessary CPU +would be used to process the continues retries that will keep failing +for the same reason. 
+ +Upstream-Status: Backport +CVE: CVE-2020-12695 patch #2 +Signed-off-by: Jouni Malinen +Signed-off-by: Armin Kuster + +--- + src/wps/wps_upnp_event.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/wps/wps_upnp_event.c b/src/wps/wps_upnp_event.c +index 08a23612f338..c0d9e41d9a38 100644 +--- a/src/wps/wps_upnp_event.c ++++ b/src/wps/wps_upnp_event.c +@@ -294,7 +294,7 @@ static int event_send_start(struct subscription *s) + + buf = event_build_message(e); + if (buf == NULL) { +- event_retry(e, 0); ++ event_addr_failure(e); + return -1; + } + +@@ -302,7 +302,7 @@ static int event_send_start(struct subscription *s) + event_http_cb, e); + if (e->http_event == NULL) { + wpabuf_free(buf); +- event_retry(e, 0); ++ event_addr_failure(e); + return -1; + } + +-- +2.20.1 diff --git a/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb index 2936e89ee..7cc03fef7 100644 --- a/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb +++ b/poky/meta/recipes-connectivity/wpa-supplicant/wpa-supplicant_2.9.bb @@ -25,7 +25,10 @@ SRC_URI = "http://w1.fi/releases/wpa_supplicant-${PV}.tar.gz \ file://wpa_supplicant.conf-sane \ file://99_wpa_supplicant \ file://0001-replace-systemd-install-Alias-with-WantedBy.patch \ - file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \ + file://0001-AP-Silently-ignore-management-frame-from-unexpected-.patch \ + file://0001-WPS-UPnP-Do-not-allow-event-subscriptions-with-URLs-.patch \ + file://0002-WPS-UPnP-Fix-event-message-generation-using-a-long-U.patch \ + file://0003-WPS-UPnP-Handle-HTTP-initiation-failures-for-events-.patch \ " SRC_URI[md5sum] = "2d2958c782576dc9901092fbfecb4190" SRC_URI[sha256sum] = "fcbdee7b4a64bea8177973299c8c824419c413ec2e3a95db63dd6a5dc3541f17" diff --git a/poky/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb b/poky/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb deleted file mode 100644 index 61fb8cbad..000000000 --- a/poky/meta/recipes-core/busybox/busybox-inittab_1.31.0.bb +++ /dev/null @@ -1,35 +0,0 @@ -SUMMARY = "inittab configuration for BusyBox" -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6" - -SRC_URI = "file://inittab" - -S = "${WORKDIR}" - -INHIBIT_DEFAULT_DEPS = "1" - -do_compile() { - : -} - -do_install() { - install -d ${D}${sysconfdir} - install -D -m 0644 ${WORKDIR}/inittab ${D}${sysconfdir}/inittab - tmp="${SERIAL_CONSOLES}" - [ -n "$tmp" ] && echo >> ${D}${sysconfdir}/inittab - for i in $tmp - do - j=`echo ${i} | sed s/\;/\ /g` - id=`echo ${i} | sed -e 's/^.*;//' -e 's/;.*//'` - echo "$id::respawn:${base_sbindir}/getty ${j}" >> ${D}${sysconfdir}/inittab - done -} - -# SERIAL_CONSOLES is generally defined by the MACHINE .conf. -# Set PACKAGE_ARCH appropriately. 
-PACKAGE_ARCH = "${MACHINE_ARCH}" - -FILES_${PN} = "${sysconfdir}/inittab" -CONFFILES_${PN} = "${sysconfdir}/inittab" - -RCONFLICTS_${PN} = "sysvinit-inittab" diff --git a/poky/meta/recipes-core/busybox/busybox-inittab_1.32.0.bb b/poky/meta/recipes-core/busybox/busybox-inittab_1.32.0.bb new file mode 100644 index 000000000..61fb8cbad --- /dev/null +++ b/poky/meta/recipes-core/busybox/busybox-inittab_1.32.0.bb @@ -0,0 +1,35 @@ +SUMMARY = "inittab configuration for BusyBox" +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://${COREBASE}/meta/files/common-licenses/GPL-2.0;md5=801f80980d171dd6425610833a22dbe6" + +SRC_URI = "file://inittab" + +S = "${WORKDIR}" + +INHIBIT_DEFAULT_DEPS = "1" + +do_compile() { + : +} + +do_install() { + install -d ${D}${sysconfdir} + install -D -m 0644 ${WORKDIR}/inittab ${D}${sysconfdir}/inittab + tmp="${SERIAL_CONSOLES}" + [ -n "$tmp" ] && echo >> ${D}${sysconfdir}/inittab + for i in $tmp + do + j=`echo ${i} | sed s/\;/\ /g` + id=`echo ${i} | sed -e 's/^.*;//' -e 's/;.*//'` + echo "$id::respawn:${base_sbindir}/getty ${j}" >> ${D}${sysconfdir}/inittab + done +} + +# SERIAL_CONSOLES is generally defined by the MACHINE .conf. +# Set PACKAGE_ARCH appropriately. +PACKAGE_ARCH = "${MACHINE_ARCH}" + +FILES_${PN} = "${sysconfdir}/inittab" +CONFFILES_${PN} = "${sysconfdir}/inittab" + +RCONFLICTS_${PN} = "sysvinit-inittab" diff --git a/poky/meta/recipes-core/busybox/busybox/0001-Remove-stime-function-calls.patch b/poky/meta/recipes-core/busybox/busybox/0001-Remove-stime-function-calls.patch deleted file mode 100644 index 9b9432844..000000000 --- a/poky/meta/recipes-core/busybox/busybox/0001-Remove-stime-function-calls.patch +++ /dev/null @@ -1,85 +0,0 @@ -From d3539be8f27b8cbfdfee460fe08299158f08bcd9 Mon Sep 17 00:00:00 2001 -From: Alistair Francis -Date: Tue, 19 Nov 2019 13:06:40 +0100 -Subject: [PATCH] Remove stime() function calls - -stime() has been deprecated in glibc 2.31 and replaced with -clock_settime(). Let's replace the stime() function calls with -clock_settime() in preperation. 
- -function old new delta -rdate_main 197 224 +27 -clock_settime - 27 +27 -date_main 926 941 +15 -stime 37 - -37 ------------------------------------------------------------------------------- -(add/remove: 2/2 grow/shrink: 2/0 up/down: 69/-37) Total: 32 bytes - -Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=d3539be8f27b8cbfdfee460fe08299158f08bcd9] -Signed-off-by: Alistair Francis -Signed-off-by: Denys Vlasenko ---- - coreutils/date.c | 6 +++++- - libbb/missing_syscalls.c | 8 -------- - util-linux/rdate.c | 8 ++++++-- - 3 files changed, 11 insertions(+), 11 deletions(-) - ---- a/coreutils/date.c -+++ b/coreutils/date.c -@@ -279,6 +279,9 @@ int date_main(int argc UNUSED_PARAM, cha - time(&ts.tv_sec); - #endif - } -+#if !ENABLE_FEATURE_DATE_NANO -+ ts.tv_nsec = 0; -+#endif - localtime_r(&ts.tv_sec, &tm_time); - - /* If date string is given, update tm_time, and maybe set date */ -@@ -301,9 +304,10 @@ int date_main(int argc UNUSED_PARAM, cha - if (date_str[0] != '@') - tm_time.tm_isdst = -1; - ts.tv_sec = validate_tm_time(date_str, &tm_time); -+ ts.tv_nsec = 0; - - /* if setting time, set it */ -- if ((opt & OPT_SET) && stime(&ts.tv_sec) < 0) { -+ if ((opt & OPT_SET) && clock_settime(CLOCK_REALTIME, &ts) < 0) { - bb_perror_msg("can't set date"); - } - } ---- a/libbb/missing_syscalls.c -+++ b/libbb/missing_syscalls.c -@@ -15,14 +15,6 @@ pid_t getsid(pid_t pid) - return syscall(__NR_getsid, pid); - } - --int stime(const time_t *t) --{ -- struct timeval tv; -- tv.tv_sec = *t; -- tv.tv_usec = 0; -- return settimeofday(&tv, NULL); --} -- - int sethostname(const char *name, size_t len) - { - return syscall(__NR_sethostname, name, len); ---- a/util-linux/rdate.c -+++ b/util-linux/rdate.c -@@ -95,9 +95,13 @@ int rdate_main(int argc UNUSED_PARAM, ch - if (!(flags & 2)) { /* no -p (-s may be present) */ - if (time(NULL) == remote_time) - bb_error_msg("current time matches remote time"); -- else -- if (stime(&remote_time) < 0) -+ else { -+ struct timespec ts; -+ ts.tv_sec = remote_time; -+ ts.tv_nsec = 0; -+ if (clock_settime(CLOCK_REALTIME, &ts) < 0) - bb_perror_msg_and_die("can't set time of day"); -+ } - } - - if (flags != 1) /* not lone -s */ diff --git a/poky/meta/recipes-core/busybox/busybox/0001-Remove-syscall-wrappers-around-clock_gettime-closes-.patch b/poky/meta/recipes-core/busybox/busybox/0001-Remove-syscall-wrappers-around-clock_gettime-closes-.patch deleted file mode 100644 index 0c7f9b813..000000000 --- a/poky/meta/recipes-core/busybox/busybox/0001-Remove-syscall-wrappers-around-clock_gettime-closes-.patch +++ /dev/null @@ -1,120 +0,0 @@ -From be5a505d771a77c640acc35ceaa470c80e62f954 Mon Sep 17 00:00:00 2001 -From: Denys Vlasenko -Date: Thu, 24 Oct 2019 16:26:55 +0200 -Subject: [PATCH] Remove syscall wrappers around clock_gettime, closes 12091 - -12091 "Direct use of __NR_clock_gettime is not time64-safe". 
- -function old new delta -runsv_main 1698 1712 +14 -startservice 378 383 +5 -get_mono 31 25 -6 -date_main 932 926 -6 -gettimeofday_ns 17 - -17 ------------------------------------------------------------------------------- -(add/remove: 0/1 grow/shrink: 2/2 up/down: 19/-29) Total: -10 bytes - -Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=be5a505d771a77c640acc35ceaa470c80e62f954] -Signed-off-by: Denys Vlasenko ---- - Makefile.flags | 6 ++++-- - coreutils/date.c | 16 +++------------- - libbb/time.c | 11 +---------- - runit/runsv.c | 11 +---------- - 4 files changed, 9 insertions(+), 35 deletions(-) - ---- a/Makefile.flags -+++ b/Makefile.flags -@@ -129,10 +129,12 @@ endif - # fall back to using a temp file: - CRYPT_AVAILABLE := $(shell echo 'int main(void){return 0;}' >crypttest.c; $(CC) $(CFLAGS) -lcrypt -o /dev/null crypttest.c >/dev/null 2>&1 && echo "y"; rm crypttest.c) - ifeq ($(CRYPT_AVAILABLE),y) --LDLIBS += m crypt -+LDLIBS += m rt crypt - else --LDLIBS += m -+LDLIBS += m rt - endif -+# libm may be needed for dc, awk, ntpd -+# librt may be needed for clock_gettime() - - # libpam may use libpthread, libdl and/or libaudit. - # On some platforms that requires an explicit -lpthread, -ldl, -laudit. ---- a/coreutils/date.c -+++ b/coreutils/date.c -@@ -33,10 +33,9 @@ - //config: Enable option (-I) to output an ISO-8601 compliant - //config: date/time string. - //config: --//config:# defaults to "no": stat's nanosecond field is a bit non-portable - //config:config FEATURE_DATE_NANO - //config: bool "Support %[num]N nanosecond format specifier" --//config: default n # syscall(__NR_clock_gettime) or syscall(__NR_clock_gettime64) -+//config: default n # stat's nanosecond field is a bit non-portable - //config: depends on DATE - //config: select PLATFORM_LINUX - //config: help -@@ -271,17 +270,8 @@ int date_main(int argc UNUSED_PARAM, cha - */ - #endif - } else { --#if ENABLE_FEATURE_DATE_NANO && defined(__NR_clock_gettime) -- /* libc has incredibly messy way of doing this, -- * typically requiring -lrt. We just skip all this mess */ -- syscall(__NR_clock_gettime, CLOCK_REALTIME, &ts); --#elif ENABLE_FEATURE_DATE_NANO && __TIMESIZE == 64 -- /* Let's only support the 64 suffix syscalls for 64-bit time_t. -- * This simplifies the code for us as we don't need to convert -- * between 64-bit and 32-bit. We also don't have a way to -- * report overflow errors here. -- */ -- syscall(__NR_clock_gettime64, CLOCK_REALTIME, &ts); -+#if ENABLE_FEATURE_DATE_NANO -+ clock_gettime(CLOCK_REALTIME, &ts); - #else - time(&ts.tv_sec); - #endif ---- a/libbb/time.c -+++ b/libbb/time.c -@@ -253,18 +253,9 @@ char* FAST_FUNC strftime_YYYYMMDDHHMMSS( - #define CLOCK_MONOTONIC 1 - #endif - --/* libc has incredibly messy way of doing this, -- * typically requiring -lrt. 
We just skip all this mess */ - static void get_mono(struct timespec *ts) - { --#if defined(__NR_clock_gettime) -- if (syscall(__NR_clock_gettime, CLOCK_MONOTONIC, ts)) --#elif __TIMESIZE == 64 -- if (syscall(__NR_clock_gettime64, CLOCK_MONOTONIC, ts)) --#else --# error "We currently don't support architectures without " \ -- "the __NR_clock_gettime syscall and 32-bit time_t" --#endif -+ if (clock_gettime(CLOCK_MONOTONIC, ts)) - bb_error_msg_and_die("clock_gettime(MONOTONIC) failed"); - } - unsigned long long FAST_FUNC monotonic_ns(void) ---- a/runit/runsv.c -+++ b/runit/runsv.c -@@ -51,18 +51,9 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAG - #if ENABLE_MONOTONIC_SYSCALL - #include - --/* libc has incredibly messy way of doing this, -- * typically requiring -lrt. We just skip all this mess */ - static void gettimeofday_ns(struct timespec *ts) - { --#if defined(__NR_clock_gettime) -- syscall(__NR_clock_gettime, CLOCK_REALTIME, ts); --#elif __TIMESIZE == 64 -- syscall(__NR_clock_gettime64, CLOCK_REALTIME, ts); --#else --# error "We currently don't support architectures without " \ -- "the __NR_clock_gettime syscall and 32-bit time_t" --#endif -+ clock_gettime(CLOCK_REALTIME, ts); - } - #else - static void gettimeofday_ns(struct timespec *ts) diff --git a/poky/meta/recipes-core/busybox/busybox/0001-date-Use-64-prefix-syscall-if-we-have-to.patch b/poky/meta/recipes-core/busybox/busybox/0001-date-Use-64-prefix-syscall-if-we-have-to.patch deleted file mode 100644 index 944526b7c..000000000 --- a/poky/meta/recipes-core/busybox/busybox/0001-date-Use-64-prefix-syscall-if-we-have-to.patch +++ /dev/null @@ -1,53 +0,0 @@ -From b7b7452f292f03eefafa6fd1da9bcfc933dee15a Mon Sep 17 00:00:00 2001 -From: Alistair Francis -Date: Wed, 18 Sep 2019 09:28:49 -0700 -Subject: [PATCH] date: Use 64 prefix syscall if we have to - -Some 32-bit architectures no longer have the 32-bit time_t syscalls. -Instead they have suffixed syscalls that returns a 64-bit time_t. If -the architecture doesn't have the non-suffixed syscall and is using a -64-bit time_t let's use the suffixed syscall instead. - -This fixes build issues when building for RISC-V 32-bit with 5.1+ kernel -headers. - -If an architecture only supports the suffixed syscalls, but is still -using a 32-bit time_t fall back to the libc call. - -Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=b7b7452f292f03eefafa6fd1da9bcfc933dee15a] -Signed-off-by: Alistair Francis -Signed-off-by: Denys Vlasenko ---- - coreutils/date.c | 11 +++++++++-- - 1 file changed, 9 insertions(+), 2 deletions(-) - ---- a/coreutils/date.c -+++ b/coreutils/date.c -@@ -36,7 +36,7 @@ - //config:# defaults to "no": stat's nanosecond field is a bit non-portable - //config:config FEATURE_DATE_NANO - //config: bool "Support %[num]N nanosecond format specifier" --//config: default n # syscall(__NR_clock_gettime) -+//config: default n # syscall(__NR_clock_gettime) or syscall(__NR_clock_gettime64) - //config: depends on DATE - //config: select PLATFORM_LINUX - //config: help -@@ -271,10 +271,17 @@ int date_main(int argc UNUSED_PARAM, cha - */ - #endif - } else { --#if ENABLE_FEATURE_DATE_NANO -+#if ENABLE_FEATURE_DATE_NANO && defined(__NR_clock_gettime) - /* libc has incredibly messy way of doing this, - * typically requiring -lrt. We just skip all this mess */ - syscall(__NR_clock_gettime, CLOCK_REALTIME, &ts); -+#elif ENABLE_FEATURE_DATE_NANO && __TIMESIZE == 64 -+ /* Let's only support the 64 suffix syscalls for 64-bit time_t. 
-+ * This simplifies the code for us as we don't need to convert -+ * between 64-bit and 32-bit. We also don't have a way to -+ * report overflow errors here. -+ */ -+ syscall(__NR_clock_gettime64, CLOCK_REALTIME, &ts); - #else - time(&ts.tv_sec); - #endif diff --git a/poky/meta/recipes-core/busybox/busybox/0001-time-Use-64-prefix-syscall-if-we-have-to.patch b/poky/meta/recipes-core/busybox/busybox/0001-time-Use-64-prefix-syscall-if-we-have-to.patch deleted file mode 100644 index 58a6c0d8a..000000000 --- a/poky/meta/recipes-core/busybox/busybox/0001-time-Use-64-prefix-syscall-if-we-have-to.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 902d3992922fc8db8495d5fb30a4581711b60c62 Mon Sep 17 00:00:00 2001 -From: Alistair Francis -Date: Wed, 18 Sep 2019 09:28:50 -0700 -Subject: [PATCH] time: Use 64 prefix syscall if we have to - -Some 32-bit architectures no longer have the 32-bit time_t syscalls. -Instead they have suffixed syscalls that returns a 64-bit time_t. If -the architecture doesn't have the non-suffixed syscall and is using a -64-bit time_t let's use the suffixed syscall instead. - -This fixes build issues when building for RISC-V 32-bit with 5.1+ kernel -headers. - -If an architecture only supports the suffixed syscalls, but is still -using a 32-bit time_t report a compilation error. This avoids us have to -deal with converting between 64-bit and 32-bit values. There are -currently no architectures where this is the case. - -Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=902d3992922fc8db8495d5fb30a4581711b60c62] -Signed-off-by: Alistair Francis -Signed-off-by: Denys Vlasenko ---- - libbb/time.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/libbb/time.c -+++ b/libbb/time.c -@@ -257,7 +257,14 @@ char* FAST_FUNC strftime_YYYYMMDDHHMMSS( - * typically requiring -lrt. We just skip all this mess */ - static void get_mono(struct timespec *ts) - { -- if (syscall(__NR_clock_gettime, CLOCK_MONOTONIC, ts)) -+#if defined(__NR_clock_gettime) -+ if (syscall(__NR_clock_gettime, CLOCK_MONOTONIC, ts)) -+#elif __TIMESIZE == 64 -+ if (syscall(__NR_clock_gettime64, CLOCK_MONOTONIC, ts)) -+#else -+# error "We currently don't support architectures without " \ -+ "the __NR_clock_gettime syscall and 32-bit time_t" -+#endif - bb_error_msg_and_die("clock_gettime(MONOTONIC) failed"); - } - unsigned long long FAST_FUNC monotonic_ns(void) diff --git a/poky/meta/recipes-core/busybox/busybox/0003-runsv-Use-64-prefix-syscall-if-we-have-to.patch b/poky/meta/recipes-core/busybox/busybox/0003-runsv-Use-64-prefix-syscall-if-we-have-to.patch deleted file mode 100644 index 476057044..000000000 --- a/poky/meta/recipes-core/busybox/busybox/0003-runsv-Use-64-prefix-syscall-if-we-have-to.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 8c7419649d6e6fda8fa7d0e863084c78ac728628 Mon Sep 17 00:00:00 2001 -From: Alistair Francis -Date: Wed, 28 Aug 2019 10:54:15 -0700 -Subject: [PATCH 3/3] runsv: Use 64 prefix syscall if we have to - -Some 32-bit architectures no longer have the 32-bit time_t syscalls. -Instead they have suffixed syscalls that returns a 64-bit time_t. If -the architecture doesn't have the non-suffixed syscall and is using a -64-bit time_t let's use the suffixed syscall instead. - -This fixes build issues when building for RISC-V 32-bit with 5.1+ kernel -headers. - -If an architecture only supports the suffixed syscalls, but is still -using a 32-bit time_t report a compilation error. This avoids us have to -deal with converting between 64-bit and 32-bit values. 
There are -currently no architectures where this is the case. - -Signed-off-by: Alistair Francis -Upstream-Status: Backport [https://git.busybox.net/busybox/commit/?id=ad27d44ebe950335616f37e36863469dc181b455] ---- - runit/runsv.c | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/runit/runsv.c b/runit/runsv.c -index ccc762d78..737909b0e 100644 ---- a/runit/runsv.c -+++ b/runit/runsv.c -@@ -55,7 +55,14 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * typically requiring -lrt. We just skip all this mess */ - static void gettimeofday_ns(struct timespec *ts) - { -+#if defined(__NR_clock_gettime) - syscall(__NR_clock_gettime, CLOCK_REALTIME, ts); -+#elif __TIMESIZE == 64 -+ syscall(__NR_clock_gettime64, CLOCK_REALTIME, ts); -+#else -+# error "We currently don't support architectures without " \ -+ "the __NR_clock_gettime syscall and 32-bit time_t" -+#endif - } - #else - static void gettimeofday_ns(struct timespec *ts) --- -2.22.0 - diff --git a/poky/meta/recipes-core/busybox/busybox/busybox-udhcpc-no_deconfig.patch b/poky/meta/recipes-core/busybox/busybox/busybox-udhcpc-no_deconfig.patch index 2a0a40c51..35e981d6a 100644 --- a/poky/meta/recipes-core/busybox/busybox/busybox-udhcpc-no_deconfig.patch +++ b/poky/meta/recipes-core/busybox/busybox/busybox-udhcpc-no_deconfig.patch @@ -31,11 +31,11 @@ Signed-off-by: Andreas Oberritter networking/udhcp/dhcpc.c | 29 ++++++++++++++++------ 1 file changed, 21 insertions(+), 8 deletions(-) -Index: busybox-1.31.0/networking/udhcp/dhcpc.c +Index: busybox-1.32.0/networking/udhcp/dhcpc.c =================================================================== ---- busybox-1.31.0.orig/networking/udhcp/dhcpc.c -+++ busybox-1.31.0/networking/udhcp/dhcpc.c -@@ -48,6 +48,8 @@ +--- busybox-1.32.0.orig/networking/udhcp/dhcpc.c ++++ busybox-1.32.0/networking/udhcp/dhcpc.c +@@ -48,6 +48,8 @@ struct tpacket_auxdata { }; #endif @@ -55,7 +55,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c USE_FOR_MMU( OPTBIT_b,) IF_FEATURE_UDHCPC_ARPING(OPTBIT_a,) IF_FEATURE_UDHCP_PORT( OPTBIT_P,) -@@ -1124,7 +1128,8 @@ +@@ -1084,7 +1088,8 @@ client_data.state = RENEW_REQUESTED; break; case RENEW_REQUESTED: /* impatient are we? fine, square 1 */ @@ -65,7 +65,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c case REQUESTING: case RELEASED: change_listen_mode(LISTEN_RAW); -@@ -1160,7 +1165,8 @@ +@@ -1120,7 +1125,8 @@ static void perform_release(uint32_t server_addr, uint32_t requested_ip) * Users requested to be notified in all cases, even if not in one * of the states above. 
*/ @@ -75,7 +75,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c change_listen_mode(LISTEN_NONE); client_data.state = RELEASED; -@@ -1278,7 +1284,7 @@ +@@ -1238,7 +1244,7 @@ /* Parse command line */ opt = getopt32long(argv, "^" /* O,x: list; -T,-t,-A take numeric param */ @@ -84,7 +84,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c USE_FOR_MMU("b") IF_FEATURE_UDHCPC_ARPING("a::") IF_FEATURE_UDHCP_PORT("P:") -@@ -1389,6 +1395,10 @@ +@@ -1349,6 +1355,10 @@ logmode |= LOGMODE_SYSLOG; } @@ -95,7 +95,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c /* Create pidfile */ write_pidfile(client_data.pidfile); /* Goes to stdout (unless NOMMU) and possibly syslog */ -@@ -1397,7 +1407,8 @@ +@@ -1357,7 +1367,8 @@ srand(monotonic_us()); client_data.state = INIT_SELECTING; @@ -105,17 +105,17 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c change_listen_mode(LISTEN_RAW); packet_num = 0; timeout = 0; -@@ -1570,7 +1581,8 @@ +@@ -1530,7 +1541,8 @@ } /* Timed out, enter init state */ - bb_info_msg("lease lost, entering init state"); + bb_simple_info_msg("lease lost, entering init state"); - udhcp_run_script(NULL, "deconfig"); + if (allow_deconfig) + udhcp_run_script(NULL, "deconfig"); client_data.state = INIT_SELECTING; client_data.first_secs = 0; /* make secs field count from 0 */ /*timeout = 0; - already is */ -@@ -1762,8 +1774,10 @@ +@@ -1722,8 +1734,10 @@ "(got ARP reply), declining"); send_decline(/*xid,*/ server_addr, packet.yiaddr); @@ -128,7 +128,7 @@ Index: busybox-1.31.0/networking/udhcp/dhcpc.c change_listen_mode(LISTEN_RAW); client_data.state = INIT_SELECTING; client_data.first_secs = 0; /* make secs field count from 0 */ -@@ -1832,8 +1846,10 @@ +@@ -1792,8 +1806,10 @@ /* return to init state */ bb_info_msg("received %s", "DHCP NAK"); udhcp_run_script(&packet, "nak"); diff --git a/poky/meta/recipes-core/busybox/busybox_1.31.1.bb b/poky/meta/recipes-core/busybox/busybox_1.31.1.bb deleted file mode 100644 index 2bb1d59ba..000000000 --- a/poky/meta/recipes-core/busybox/busybox_1.31.1.bb +++ /dev/null @@ -1,55 +0,0 @@ -require busybox.inc - -SRC_URI = "https://busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \ - file://busybox-udhcpc-no_deconfig.patch \ - file://find-touchscreen.sh \ - file://busybox-cron \ - file://busybox-httpd \ - file://busybox-udhcpd \ - file://default.script \ - file://simple.script \ - file://hwclock.sh \ - file://syslog \ - file://syslog-startup.conf \ - file://syslog.conf \ - file://busybox-syslog.default \ - file://mdev \ - file://mdev.conf \ - file://mdev-mount.sh \ - file://defconfig \ - file://busybox-syslog.service.in \ - file://busybox-klogd.service.in \ - file://fail_on_no_media.patch \ - file://run-ptest \ - file://inetd.conf \ - file://inetd \ - file://login-utilities.cfg \ - file://recognize_connmand.patch \ - file://busybox-cross-menuconfig.patch \ - file://0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch \ - file://mount-via-label.cfg \ - file://sha1sum.cfg \ - file://sha256sum.cfg \ - file://getopts.cfg \ - file://resize.cfg \ - ${@["", "file://init.cfg"][(d.getVar('VIRTUAL-RUNTIME_init_manager') == 'busybox')]} \ - ${@["", "file://mdev.cfg"][(d.getVar('VIRTUAL-RUNTIME_dev_manager') == 'busybox-mdev')]} \ - file://syslog.cfg \ - file://unicode.cfg \ - file://rcS \ - file://rcK \ - file://makefile-libbb-race.patch \ - file://0001-testsuite-check-uudecode-before-using-it.patch \ - file://0001-testsuite-use-www.example.org-for-wget-test-cases.patch \ - file://0001-du-l-works-fix-to-use-145-instead-of-144.patch \ - 
file://0001-date-Use-64-prefix-syscall-if-we-have-to.patch \ - file://0001-time-Use-64-prefix-syscall-if-we-have-to.patch \ - file://0003-runsv-Use-64-prefix-syscall-if-we-have-to.patch \ - file://0001-Remove-syscall-wrappers-around-clock_gettime-closes-.patch \ - file://0001-Remove-stime-function-calls.patch \ - file://0001-sysctl-ignore-EIO-of-stable_secret-below-proc-sys-ne.patch \ -" -SRC_URI_append_libc-musl = " file://musl.cfg " - -SRC_URI[tarball.md5sum] = "70913edaf2263a157393af07565c17f0" -SRC_URI[tarball.sha256sum] = "d0f940a72f648943c1f2211e0e3117387c31d765137d92bd8284a3fb9752a998" diff --git a/poky/meta/recipes-core/busybox/busybox_1.32.0.bb b/poky/meta/recipes-core/busybox/busybox_1.32.0.bb new file mode 100644 index 000000000..0dfdfdb54 --- /dev/null +++ b/poky/meta/recipes-core/busybox/busybox_1.32.0.bb @@ -0,0 +1,50 @@ +require busybox.inc + +SRC_URI = "https://busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \ + file://busybox-udhcpc-no_deconfig.patch \ + file://find-touchscreen.sh \ + file://busybox-cron \ + file://busybox-httpd \ + file://busybox-udhcpd \ + file://default.script \ + file://simple.script \ + file://hwclock.sh \ + file://syslog \ + file://syslog-startup.conf \ + file://syslog.conf \ + file://busybox-syslog.default \ + file://mdev \ + file://mdev.conf \ + file://mdev-mount.sh \ + file://defconfig \ + file://busybox-syslog.service.in \ + file://busybox-klogd.service.in \ + file://fail_on_no_media.patch \ + file://run-ptest \ + file://inetd.conf \ + file://inetd \ + file://login-utilities.cfg \ + file://recognize_connmand.patch \ + file://busybox-cross-menuconfig.patch \ + file://0001-Use-CC-when-linking-instead-of-LD-and-use-CFLAGS-and.patch \ + file://mount-via-label.cfg \ + file://sha1sum.cfg \ + file://sha256sum.cfg \ + file://getopts.cfg \ + file://resize.cfg \ + ${@["", "file://init.cfg"][(d.getVar('VIRTUAL-RUNTIME_init_manager') == 'busybox')]} \ + ${@["", "file://mdev.cfg"][(d.getVar('VIRTUAL-RUNTIME_dev_manager') == 'busybox-mdev')]} \ + file://syslog.cfg \ + file://unicode.cfg \ + file://rcS \ + file://rcK \ + file://makefile-libbb-race.patch \ + file://0001-testsuite-check-uudecode-before-using-it.patch \ + file://0001-testsuite-use-www.example.org-for-wget-test-cases.patch \ + file://0001-du-l-works-fix-to-use-145-instead-of-144.patch \ + file://0001-sysctl-ignore-EIO-of-stable_secret-below-proc-sys-ne.patch \ +" +SRC_URI_append_libc-musl = " file://musl.cfg " + +SRC_URI[tarball.md5sum] = "9576986f1a960da471d03b72a62f13c7" +SRC_URI[tarball.sha256sum] = "c35d87f1d04b2b153d33c275c2632e40d388a88f19a9e71727e0bbbff51fe689" diff --git a/poky/meta/recipes-core/coreutils/coreutils_8.32.bb b/poky/meta/recipes-core/coreutils/coreutils_8.32.bb index 2422f8634..f8ae3fd94 100644 --- a/poky/meta/recipes-core/coreutils/coreutils_8.32.bb +++ b/poky/meta/recipes-core/coreutils/coreutils_8.32.bb @@ -59,9 +59,20 @@ base_bindir_progs = "cat chgrp chmod chown cp date dd echo false hostname kill l sbindir_progs= "chroot" -PACKAGE_BEFORE_PN_class-target += "coreutils-stdbuf" +# Split stdbuf into its own package, so one can include +# coreutils-stdbuf without getting the rest of coreutils, but make +# coreutils itself pull in stdbuf, so IMAGE_INSTALL += "coreutils" +# always provides all coreutils +PACKAGE_BEFORE_PN_class-target += "${@bb.utils.contains('PACKAGECONFIG', 'single-binary', '', 'coreutils-stdbuf', d)}" FILES_coreutils-stdbuf = "${bindir}/stdbuf ${libdir}/coreutils/libstdbuf.so" -RDEPENDS_coreutils_class-target += "coreutils-stdbuf" 
+RDEPENDS_coreutils_class-target += "${@bb.utils.contains('PACKAGECONFIG', 'single-binary', '', 'coreutils-stdbuf', d)}" + +# However, when the single-binary PACKAGECONFIG is used, stdbuf +# functionality is built into the single coreutils binary, so there's +# no point splitting /usr/bin/stdbuf to its own package. Instead, add +# an RPROVIDE so that rdepending on coreutils-stdbuf will work +# regardless of whether single-binary is in effect. +RPROVIDES_coreutils += "${@bb.utils.contains('PACKAGECONFIG', 'single-binary', 'coreutils-stdbuf', '', d)}" # Let aclocal use the relative path for the m4 file rather than the # absolute since coreutils has a lot of m4 files, otherwise there might diff --git a/poky/meta/recipes-core/dropbear/dropbear.inc b/poky/meta/recipes-core/dropbear/dropbear.inc index 7269888a4..080ee26b2 100644 --- a/poky/meta/recipes-core/dropbear/dropbear.inc +++ b/poky/meta/recipes-core/dropbear/dropbear.inc @@ -5,7 +5,7 @@ SECTION = "console/network" # some files are from other projects and have others license terms: # public domain, OpenSSH 3.5p1, OpenSSH3.6.1p2, PuTTY LICENSE = "MIT & BSD-3-Clause & BSD-2-Clause & PD" -LIC_FILES_CHKSUM = "file://LICENSE;md5=a5ec40cafba26fc4396d0b550f824e01" +LIC_FILES_CHKSUM = "file://LICENSE;md5=25cf44512b7bc8966a48b6b1a9b7605f" DEPENDS = "zlib virtual/crypt" RPROVIDES_${PN} = "ssh sshd" diff --git a/poky/meta/recipes-core/dropbear/dropbear/dropbear-disable-weak-ciphers.patch b/poky/meta/recipes-core/dropbear/dropbear/dropbear-disable-weak-ciphers.patch index e48a34bac..b54581f17 100644 --- a/poky/meta/recipes-core/dropbear/dropbear/dropbear-disable-weak-ciphers.patch +++ b/poky/meta/recipes-core/dropbear/dropbear/dropbear-disable-weak-ciphers.patch @@ -1,33 +1,24 @@ -This feature disables all CBC, SHA1, and diffie-hellman group1 ciphers +From c347ece05a7fdbf50d76cb136b9ed45caed333f6 Mon Sep 17 00:00:00 2001 +From: Joseph Reynolds +Date: Thu, 20 Jun 2019 16:29:15 -0500 +Subject: [PATCH] dropbear: new feature: disable-weak-ciphers + +This feature disables all CBC, SHA1, and diffie-hellman group1 ciphers in the dropbear ssh server and client since they're considered weak ciphers and we want to support the stong algorithms. Upstream-Status: Inappropriate [configuration] Signed-off-by: Joseph Reynolds -Index: dropbear-2019.78/default_options.h -=================================================================== ---- dropbear-2019.78.orig/default_options.h -+++ dropbear-2019.78/default_options.h -@@ -91,7 +91,7 @@ IMPORTANT: Some options will require "ma - - /* Enable CBC mode for ciphers. This has security issues though - * is the most compatible with older SSH implementations */ --#define DROPBEAR_ENABLE_CBC_MODE 1 -+#define DROPBEAR_ENABLE_CBC_MODE 0 - - /* Enable "Counter Mode" for ciphers. This is more secure than - * CBC mode against certain attacks. It is recommended for security -@@ -101,7 +101,7 @@ IMPORTANT: Some options will require "ma - /* Message integrity. 
sha2-256 is recommended as a default, - sha1 for compatibility */ - #define DROPBEAR_SHA1_HMAC 1 --#define DROPBEAR_SHA1_96_HMAC 1 -+#define DROPBEAR_SHA1_96_HMAC 0 - #define DROPBEAR_SHA2_256_HMAC 1 - - /* Hostkey/public key algorithms - at least one required, these are used -@@ -149,12 +149,12 @@ IMPORTANT: Some options will require "ma +--- + default_options.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/default_options.h b/default_options.h +index 1aa2297..7ff1394 100644 +--- a/default_options.h ++++ b/default_options.h +@@ -163,12 +163,12 @@ IMPORTANT: Some options will require "make clean" after changes */ * Small systems should generally include either curve25519 or ecdh for performance. * curve25519 is less widely supported but is faster */ diff --git a/poky/meta/recipes-core/dropbear/dropbear_2019.78.bb b/poky/meta/recipes-core/dropbear/dropbear_2019.78.bb deleted file mode 100644 index d2cd8161b..000000000 --- a/poky/meta/recipes-core/dropbear/dropbear_2019.78.bb +++ /dev/null @@ -1,4 +0,0 @@ -require dropbear.inc - -SRC_URI[md5sum] = "a972c85ed678ad0fdcb7844e1294fb54" -SRC_URI[sha256sum] = "525965971272270995364a0eb01f35180d793182e63dd0b0c3eb0292291644a4" diff --git a/poky/meta/recipes-core/dropbear/dropbear_2020.80.bb b/poky/meta/recipes-core/dropbear/dropbear_2020.80.bb new file mode 100644 index 000000000..088c71cb9 --- /dev/null +++ b/poky/meta/recipes-core/dropbear/dropbear_2020.80.bb @@ -0,0 +1,3 @@ +require dropbear.inc + +SRC_URI[sha256sum] = "d927941b91f2da150b2033f1a88b6a47999bf0afb1493a73e9216cffdb5d7949" diff --git a/poky/meta/recipes-core/initscripts/initscripts_1.0.bb b/poky/meta/recipes-core/initscripts/initscripts_1.0.bb index 4080c4a49..32c527799 100644 --- a/poky/meta/recipes-core/initscripts/initscripts_1.0.bb +++ b/poky/meta/recipes-core/initscripts/initscripts_1.0.bb @@ -170,7 +170,7 @@ MASKED_SCRIPTS = " \ urandom" pkg_postinst_${PN} () { - if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then + if type systemctl >/dev/null 2>/dev/null; then if [ -n "$D" ]; then OPTS="--root=$D" fi diff --git a/poky/meta/recipes-core/meta/signing-keys.bb b/poky/meta/recipes-core/meta/signing-keys.bb index 1e1c7e345..5bab94aa3 100644 --- a/poky/meta/recipes-core/meta/signing-keys.bb +++ b/poky/meta/recipes-core/meta/signing-keys.bb @@ -67,8 +67,6 @@ do_deploy () { fi } do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_RPM}" -# cleandirs should possibly be in deploy.bbclass but we need it -do_deploy[cleandirs] = "${DEPLOYDIR}" # clear stamp-extra-info since MACHINE_ARCH is normally put there by # deploy.bbclass do_deploy[stamp-extra-info] = "" diff --git a/poky/meta/recipes-core/musl/musl/0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch b/poky/meta/recipes-core/musl/musl/0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch index 462d338b9..ba00efe7b 100644 --- a/poky/meta/recipes-core/musl/musl/0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch +++ b/poky/meta/recipes-core/musl/musl/0001-Make-dynamic-linker-a-relative-symlink-to-libc.patch @@ -22,11 +22,9 @@ Upstream-Status: Pending tools/install.sh | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) -diff --git a/Makefile b/Makefile -index 8246b78..d1dbe39 100644 --- a/Makefile +++ b/Makefile -@@ -215,7 +215,7 @@ $(DESTDIR)$(includedir)/%: $(srcdir)/include/% +@@ -210,7 +210,7 @@ $(DESTDIR)$(includedir)/%: $(srcdir)/inc $(INSTALL) -D -m 644 $< $@ $(DESTDIR)$(LDSO_PATHNAME): $(DESTDIR)$(libdir)/libc.so @@ -35,8 +33,6 @@ index 8246b78..d1dbe39 
100644 install-libs: $(ALL_LIBS:lib/%=$(DESTDIR)$(libdir)/%) $(if $(SHARED_LIBS),$(DESTDIR)$(LDSO_PATHNAME),) -diff --git a/tools/install.sh b/tools/install.sh -index d913b60..b6a7f79 100755 --- a/tools/install.sh +++ b/tools/install.sh @@ -6,18 +6,20 @@ @@ -62,7 +58,7 @@ index d913b60..b6a7f79 100755 m) mode=$OPTARG ;; ?) usage ;; esac -@@ -48,7 +50,7 @@ trap 'rm -f "$tmp"' EXIT INT QUIT TERM HUP +@@ -48,7 +50,7 @@ trap 'rm -f "$tmp"' EXIT INT QUIT TERM H umask 077 if test "$symlink" ; then @@ -71,6 +67,3 @@ index d913b60..b6a7f79 100755 else cat < "$1" > "$tmp" chmod "$mode" "$tmp" --- -2.7.4 - diff --git a/poky/meta/recipes-core/musl/musl/0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch b/poky/meta/recipes-core/musl/musl/0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch index 6a875a717..f57aae5f3 100644 --- a/poky/meta/recipes-core/musl/musl/0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch +++ b/poky/meta/recipes-core/musl/musl/0002-ldso-Use-syslibdir-and-libdir-as-default-pathes-to-l.patch @@ -20,11 +20,9 @@ Signed-off-by: Serhey Popovych ldso/dynlink.c | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) -diff --git a/Makefile b/Makefile -index b46f8ca4..c07e4ae8 100644 --- a/Makefile +++ b/Makefile -@@ -46,7 +46,8 @@ CFLAGS_AUTO = -Os -pipe +@@ -47,7 +47,8 @@ CFLAGS_AUTO = -Os -pipe CFLAGS_C99FSE = -std=c99 -ffreestanding -nostdinc CFLAGS_ALL = $(CFLAGS_C99FSE) @@ -34,20 +32,18 @@ index b46f8ca4..c07e4ae8 100644 CFLAGS_ALL += $(CPPFLAGS) $(CFLAGS_AUTO) $(CFLAGS) LDFLAGS_ALL = $(LDFLAGS_AUTO) $(LDFLAGS) -diff --git a/ldso/dynlink.c b/ldso/dynlink.c -index ec921dfd..7c119c55 100644 --- a/ldso/dynlink.c +++ b/ldso/dynlink.c -@@ -22,6 +22,8 @@ +@@ -24,6 +24,8 @@ + #include "libc.h" #include "dynlink.h" - #include "malloc_impl.h" +#define SYS_PATH_DFLT SYSLIBDIR ":" LIBDIR + static void error(const char *, ...); #define MAXP2(a,b) (-(-(a)&-(b))) -@@ -1038,7 +1040,7 @@ static struct dso *load_library(const char *name, struct dso *needed_by) +@@ -1071,7 +1073,7 @@ static struct dso *load_library(const ch sys_path = ""; } } @@ -56,6 +52,3 @@ index ec921dfd..7c119c55 100644 fd = path_open(name, sys_path, buf, sizeof buf); } pathname = buf; --- -2.7.4 - diff --git a/poky/meta/recipes-core/musl/musl_git.bb b/poky/meta/recipes-core/musl/musl_git.bb index 6aa69985d..0913b09aa 100644 --- a/poky/meta/recipes-core/musl/musl_git.bb +++ b/poky/meta/recipes-core/musl/musl_git.bb @@ -4,7 +4,7 @@ require musl.inc inherit linuxloader -SRCREV = "1b4e84c56df0f8ca30f6bc05962a860f869e71df" +SRCREV = "fca7428c096066482d8c3f52450810288e27515c" BASEVER = "1.2.0" diff --git a/poky/meta/recipes-core/ovmf/ovmf_git.bb b/poky/meta/recipes-core/ovmf/ovmf_git.bb index 91c1b6ba1..84547630e 100644 --- a/poky/meta/recipes-core/ovmf/ovmf_git.bb +++ b/poky/meta/recipes-core/ovmf/ovmf_git.bb @@ -37,7 +37,7 @@ EDK_TOOLS_DIR="edk2_basetools" BUILD_OPTIMIZATION="-pipe" # OVMF supports IA only, although it could conceivably support ARM someday. -COMPATIBLE_HOST='(i.86|x86_64).*' +COMPATIBLE_HOST_class-target='(i.86|x86_64).*' # Additional build flags for OVMF with Secure Boot. # Fedora also uses "-D SMM_REQUIRE -D EXCLUDE_SHELL_FROM_FD". @@ -220,7 +220,6 @@ do_deploy[depends] += "${DEPLOYDEP}" do_deploy() { } -do_deploy[cleandirs] = "${DEPLOYDIR}" do_deploy_class-target() { # For use with "runqemu ovmf". 
for i in \ diff --git a/poky/meta/recipes-core/systemd/systemd_245.6.bb b/poky/meta/recipes-core/systemd/systemd_245.6.bb index b6681b206..cdafb9824 100644 --- a/poky/meta/recipes-core/systemd/systemd_245.6.bb +++ b/poky/meta/recipes-core/systemd/systemd_245.6.bb @@ -544,7 +544,6 @@ FILES_${PN} = " ${base_bindir}/* \ ${bindir}/resolvectl \ ${bindir}/timedatectl \ ${bindir}/bootctl \ - ${bindir}/kernel-install \ ${exec_prefix}/lib/tmpfiles.d/*.conf \ ${exec_prefix}/lib/systemd \ ${exec_prefix}/lib/modules-load.d \ diff --git a/poky/meta/recipes-devtools/bison/bison_3.6.3.bb b/poky/meta/recipes-devtools/bison/bison_3.6.3.bb deleted file mode 100644 index 1d0f5995c..000000000 --- a/poky/meta/recipes-devtools/bison/bison_3.6.3.bb +++ /dev/null @@ -1,44 +0,0 @@ -SUMMARY = "GNU Project parser generator (yacc replacement)" -DESCRIPTION = "Bison is a general-purpose parser generator that converts an annotated context-free grammar into \ -an LALR(1) or GLR parser for that grammar. Bison is upward compatible with Yacc: all properly-written Yacc \ -grammars ought to work with Bison with no change. Anyone familiar with Yacc should be able to use Bison with \ -little trouble." -HOMEPAGE = "http://www.gnu.org/software/bison/" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" -SECTION = "devel" -DEPENDS = "bison-native flex-native" - -SRC_URI = "${GNU_MIRROR}/bison/bison-${PV}.tar.xz \ - file://add-with-bisonlocaledir.patch \ - file://0001-bison-fix-the-parallel-build.patch \ - " -SRC_URI[sha256sum] = "06db793651de9dd5f0a85a6fe4bdbca413c0806bf2432377523da96ca0b4b73d" - -# No point in hardcoding path to m4, just use PATH -EXTRA_OECONF += "M4=m4" - -# Reset any loadavg set via environment, it breaks parallel build -# | ../bison-3.5.2/lib/uniwidth/width.c:21:10: fatal error: uniwidth.h: No such file or directory -# | #include "uniwidth.h" -# | ^~~~~~~~~~~~ -EXTRA_OEMAKE_append = " -l" - -inherit autotools gettext texinfo - -# The automatic m4 path detection gets confused, so force the right value -acpaths = "-I ${S}/m4" - -do_compile_prepend() { - for i in mfcalc calc++ rpcalc; do mkdir -p ${B}/examples/$i; done -} - -do_install_append_class-native() { - create_wrapper ${D}/${bindir}/bison \ - BISON_PKGDATADIR=${STAGING_DATADIR_NATIVE}/bison -} -do_install_append_class-nativesdk() { - create_wrapper ${D}/${bindir}/bison \ - BISON_PKGDATADIR=${datadir}/bison -} -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/bison/bison_3.6.4.bb b/poky/meta/recipes-devtools/bison/bison_3.6.4.bb new file mode 100644 index 000000000..0c877c3c5 --- /dev/null +++ b/poky/meta/recipes-devtools/bison/bison_3.6.4.bb @@ -0,0 +1,44 @@ +SUMMARY = "GNU Project parser generator (yacc replacement)" +DESCRIPTION = "Bison is a general-purpose parser generator that converts an annotated context-free grammar into \ +an LALR(1) or GLR parser for that grammar. Bison is upward compatible with Yacc: all properly-written Yacc \ +grammars ought to work with Bison with no change. Anyone familiar with Yacc should be able to use Bison with \ +little trouble." 
+HOMEPAGE = "http://www.gnu.org/software/bison/" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" +SECTION = "devel" +DEPENDS = "bison-native flex-native" + +SRC_URI = "${GNU_MIRROR}/bison/bison-${PV}.tar.xz \ + file://add-with-bisonlocaledir.patch \ + file://0001-bison-fix-the-parallel-build.patch \ + " +SRC_URI[sha256sum] = "8b13473b31ca7fcf65e5e8a74224368ffd5df19275602a9c9567ba393f18577d" + +# No point in hardcoding path to m4, just use PATH +EXTRA_OECONF += "M4=m4" + +# Reset any loadavg set via environment, it breaks parallel build +# | ../bison-3.5.2/lib/uniwidth/width.c:21:10: fatal error: uniwidth.h: No such file or directory +# | #include "uniwidth.h" +# | ^~~~~~~~~~~~ +EXTRA_OEMAKE_append = " -l" + +inherit autotools gettext texinfo + +# The automatic m4 path detection gets confused, so force the right value +acpaths = "-I ${S}/m4" + +do_compile_prepend() { + for i in mfcalc calc++ rpcalc; do mkdir -p ${B}/examples/$i; done +} + +do_install_append_class-native() { + create_wrapper ${D}/${bindir}/bison \ + BISON_PKGDATADIR=${STAGING_DATADIR_NATIVE}/bison +} +do_install_append_class-nativesdk() { + create_wrapper ${D}/${bindir}/bison \ + BISON_PKGDATADIR=${datadir}/bison +} +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/ccache/ccache.inc b/poky/meta/recipes-devtools/ccache/ccache.inc deleted file mode 100644 index dcbbf0ca7..000000000 --- a/poky/meta/recipes-devtools/ccache/ccache.inc +++ /dev/null @@ -1,28 +0,0 @@ -SUMMARY = "a fast C/C++ compiler cache" -DESCRIPTION = "ccache is a compiler cache. It speeds up recompilation \ -by caching the result of previous compilations and detecting when the \ -same compilation is being done again. Supported languages are C, C\+\+, \ -Objective-C and Objective-C++." -HOMEPAGE = "http://ccache.samba.org" -SECTION = "devel" -LICENSE = "GPLv3+" - -DEPENDS = "zlib" - -SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz" -UPSTREAM_CHECK_URI = "https://github.com/ccache/ccache/releases/" - -inherit autotools - -# Remove ccache-native's dependencies, so that it can be used widely by -# other native recipes. -DEPENDS_class-native = "" -EXTRA_OECONF_class-native = "--with-bundled-zlib" -INHIBIT_AUTOTOOLS_DEPS_class-native = "1" -PATCHTOOL = "patch" - -BBCLASSEXTEND = "native" - -do_configure_class-native() { - oe_runconf -} diff --git a/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb b/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb new file mode 100644 index 000000000..852583dfd --- /dev/null +++ b/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb @@ -0,0 +1,32 @@ +SUMMARY = "a fast C/C++ compiler cache" +DESCRIPTION = "ccache is a compiler cache. It speeds up recompilation \ +by caching the result of previous compilations and detecting when the \ +same compilation is being done again. Supported languages are C, C\+\+, \ +Objective-C and Objective-C++." +HOMEPAGE = "http://ccache.samba.org" +SECTION = "devel" + +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://LICENSE.adoc;md5=22d514dbc01fdf9a9784334b6b59417a" + +DEPENDS = "zlib" + +SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz" +SRC_URI[sha256sum] = "447ddf21a5f0ffa6b6d26839ae876a6d17d0d7e3533926cdf78ecd11dad793f8" + +UPSTREAM_CHECK_URI = "https://github.com/ccache/ccache/releases/" + +inherit autotools + +# Remove ccache-native's dependencies, so that it can be used widely by +# other native recipes. 
+DEPENDS_class-native = "" +EXTRA_OECONF_class-native = "--with-bundled-zlib" +INHIBIT_AUTOTOOLS_DEPS_class-native = "1" +PATCHTOOL = "patch" + +BBCLASSEXTEND = "native" + +do_configure_class-native() { + oe_runconf +} diff --git a/poky/meta/recipes-devtools/ccache/ccache_3.7.9.bb b/poky/meta/recipes-devtools/ccache/ccache_3.7.9.bb deleted file mode 100644 index a4de6c9fe..000000000 --- a/poky/meta/recipes-devtools/ccache/ccache_3.7.9.bb +++ /dev/null @@ -1,7 +0,0 @@ -require ccache.inc - -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://LICENSE.adoc;md5=22d514dbc01fdf9a9784334b6b59417a" - -SRC_URI[md5sum] = "a4a38afc62ed189904357739fd8f3fb8" -SRC_URI[sha256sum] = "92838e2133c9e704fdab9ee2608dad86c99021278b9ac47d065aa8ff2ea8ce36" diff --git a/poky/meta/recipes-devtools/dnf/dnf_4.2.21.bb b/poky/meta/recipes-devtools/dnf/dnf_4.2.21.bb deleted file mode 100644 index 44dab1d8a..000000000 --- a/poky/meta/recipes-devtools/dnf/dnf_4.2.21.bb +++ /dev/null @@ -1,90 +0,0 @@ -SUMMARY = "Package manager forked from Yum, using libsolv as a dependency resolver" -DESCRIPTION = "Software package manager that installs, updates, and removes \ -packages on RPM-based Linux distributions. It automatically computes \ -dependencies and determines the actions required to install packages." -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://PACKAGE-LICENSING;md5=4a0548e303dbc77f067335b4d688e745 \ - " - -SRC_URI = "git://github.com/rpm-software-management/dnf.git \ - file://0001-Corretly-install-tmpfiles.d-configuration.patch \ - file://0001-Do-not-hardcode-etc-and-systemd-unit-directories.patch \ - file://0005-Do-not-prepend-installroot-to-logdir.patch \ - file://0029-Do-not-set-PYTHON_INSTALL_DIR-by-running-python.patch \ - file://0030-Run-python-scripts-using-env.patch \ - file://0001-set-python-path-for-completion_helper.patch \ - file://0001-dnf-write-the-log-lock-to-root.patch \ - " - -SRCREV = "864c381baabf024c299dca75abfda96139b8f583" -UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+)" - -S = "${WORKDIR}/git" - -inherit cmake gettext bash-completion distutils3-base systemd - -DEPENDS += "libdnf librepo libcomps python3-iniparse" - -# manpages generation requires http://www.sphinx-doc.org/ -EXTRA_OECMAKE = " -DWITH_MAN=0 -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} -DPYTHON_DESIRED=3" - -BBCLASSEXTEND = "native nativesdk" - -RDEPENDS_${PN} += " \ - python3-core \ - python3-codecs \ - python3-netclient \ - python3-email \ - python3-threading \ - python3-distutils \ - python3-logging \ - python3-fcntl \ - librepo \ - python3-shell \ - libcomps \ - libdnf \ - python3-sqlite3 \ - python3-compression \ - python3-rpm \ - python3-iniparse \ - python3-json \ - python3-curses \ - python3-misc \ - python3-gpg \ - " - -RDEPENDS_${PN}_class-native = "" - -RRECOMMENDS_${PN}_class-target += "gnupg" - -# Create a symlink called 'dnf' as 'make install' does not do it, but -# .spec file in dnf source tree does (and then Fedora and dnf documentation -# says that dnf binary is plain 'dnf'). 
-do_install_append() { - lnr ${D}/${bindir}/dnf-3 ${D}/${bindir}/dnf - lnr ${D}/${bindir}/dnf-automatic-3 ${D}/${bindir}/dnf-automatic -} - -# Direct dnf-native to read rpm configuration from our sysroot, not the one it was compiled in -do_install_append_class-native() { - create_wrapper ${D}/${bindir}/dnf \ - RPM_CONFIGDIR=${STAGING_LIBDIR_NATIVE}/rpm \ - RPM_NO_CHROOT_FOR_SCRIPTS=1 -} - -do_install_append_class-nativesdk() { - create_wrapper ${D}/${bindir}/dnf \ - RPM_CONFIGDIR=${SDKPATHNATIVE}${libdir_nativesdk}/rpm \ - RPM_NO_CHROOT_FOR_SCRIPTS=1 -} - -SYSTEMD_SERVICE_${PN} = "dnf-makecache.service dnf-makecache.timer \ - dnf-automatic.service dnf-automatic.timer \ - dnf-automatic-download.service dnf-automatic-download.timer \ - dnf-automatic-install.service dnf-automatic-install.timer \ - dnf-automatic-notifyonly.service dnf-automatic-notifyonly.timer \ -" -SYSTEMD_AUTO_ENABLE ?= "disable" - -PNBLACKLIST[dnf] ?= "${@bb.utils.contains('PACKAGE_CLASSES', 'package_rpm', '', 'does not build without package_rpm in PACKAGE_CLASSES due disabled rpm support in libsolv', d)}" diff --git a/poky/meta/recipes-devtools/dnf/dnf_4.2.23.bb b/poky/meta/recipes-devtools/dnf/dnf_4.2.23.bb new file mode 100644 index 000000000..8cfd4d0c8 --- /dev/null +++ b/poky/meta/recipes-devtools/dnf/dnf_4.2.23.bb @@ -0,0 +1,90 @@ +SUMMARY = "Package manager forked from Yum, using libsolv as a dependency resolver" +DESCRIPTION = "Software package manager that installs, updates, and removes \ +packages on RPM-based Linux distributions. It automatically computes \ +dependencies and determines the actions required to install packages." +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://PACKAGE-LICENSING;md5=4a0548e303dbc77f067335b4d688e745 \ + " + +SRC_URI = "git://github.com/rpm-software-management/dnf.git \ + file://0001-Corretly-install-tmpfiles.d-configuration.patch \ + file://0001-Do-not-hardcode-etc-and-systemd-unit-directories.patch \ + file://0005-Do-not-prepend-installroot-to-logdir.patch \ + file://0029-Do-not-set-PYTHON_INSTALL_DIR-by-running-python.patch \ + file://0030-Run-python-scripts-using-env.patch \ + file://0001-set-python-path-for-completion_helper.patch \ + file://0001-dnf-write-the-log-lock-to-root.patch \ + " + +SRCREV = "126d861812f827285960592359615ed7cb195aac" +UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+)" + +S = "${WORKDIR}/git" + +inherit cmake gettext bash-completion distutils3-base systemd + +DEPENDS += "libdnf librepo libcomps python3-iniparse" + +# manpages generation requires http://www.sphinx-doc.org/ +EXTRA_OECMAKE = " -DWITH_MAN=0 -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} -DPYTHON_DESIRED=3" + +BBCLASSEXTEND = "native nativesdk" + +RDEPENDS_${PN} += " \ + python3-core \ + python3-codecs \ + python3-netclient \ + python3-email \ + python3-threading \ + python3-distutils \ + python3-logging \ + python3-fcntl \ + librepo \ + python3-shell \ + libcomps \ + libdnf \ + python3-sqlite3 \ + python3-compression \ + python3-rpm \ + python3-iniparse \ + python3-json \ + python3-curses \ + python3-misc \ + python3-gpg \ + " + +RDEPENDS_${PN}_class-native = "" + +RRECOMMENDS_${PN}_class-target += "gnupg" + +# Create a symlink called 'dnf' as 'make install' does not do it, but +# .spec file in dnf source tree does (and then Fedora and dnf documentation +# says that dnf binary is plain 'dnf'). 
+do_install_append() { + lnr ${D}/${bindir}/dnf-3 ${D}/${bindir}/dnf + lnr ${D}/${bindir}/dnf-automatic-3 ${D}/${bindir}/dnf-automatic +} + +# Direct dnf-native to read rpm configuration from our sysroot, not the one it was compiled in +do_install_append_class-native() { + create_wrapper ${D}/${bindir}/dnf \ + RPM_CONFIGDIR=${STAGING_LIBDIR_NATIVE}/rpm \ + RPM_NO_CHROOT_FOR_SCRIPTS=1 +} + +do_install_append_class-nativesdk() { + create_wrapper ${D}/${bindir}/dnf \ + RPM_CONFIGDIR=${SDKPATHNATIVE}${libdir_nativesdk}/rpm \ + RPM_NO_CHROOT_FOR_SCRIPTS=1 +} + +SYSTEMD_SERVICE_${PN} = "dnf-makecache.service dnf-makecache.timer \ + dnf-automatic.service dnf-automatic.timer \ + dnf-automatic-download.service dnf-automatic-download.timer \ + dnf-automatic-install.service dnf-automatic-install.timer \ + dnf-automatic-notifyonly.service dnf-automatic-notifyonly.timer \ +" +SYSTEMD_AUTO_ENABLE ?= "disable" + +PNBLACKLIST[dnf] ?= "${@bb.utils.contains('PACKAGE_CLASSES', 'package_rpm', '', 'does not build without package_rpm in PACKAGE_CLASSES due disabled rpm support in libsolv', d)}" diff --git a/poky/meta/recipes-devtools/elfutils/elfutils_0.179.bb b/poky/meta/recipes-devtools/elfutils/elfutils_0.179.bb deleted file mode 100644 index 1da95ec1d..000000000 --- a/poky/meta/recipes-devtools/elfutils/elfutils_0.179.bb +++ /dev/null @@ -1,157 +0,0 @@ -SUMMARY = "Utilities and libraries for handling compiled object files" -HOMEPAGE = "https://sourceware.org/elfutils" -SECTION = "base" -LICENSE = "GPLv2 & LGPLv3+ & GPLv3+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" -DEPENDS = "zlib virtual/libintl" -DEPENDS_append_libc-musl = " argp-standalone fts musl-obstack " -# The Debian patches below are from: -# http://ftp.de.debian.org/debian/pool/main/e/elfutils/elfutils_0.176-1.debian.tar.xz -SRC_URI = "https://sourceware.org/elfutils/ftp/${PV}/${BP}.tar.bz2 \ - file://0001-dso-link-change.patch \ - file://0002-Fix-elf_cvt_gunhash-if-dest-and-src-are-same.patch \ - file://0003-fixheadercheck.patch \ - file://0004-Disable-the-test-to-convert-euc-jp.patch \ - file://0006-Fix-build-on-aarch64-musl.patch \ - file://0001-libasm-may-link-with-libbz2-if-found.patch \ - file://0001-libelf-elf_end.c-check-data_list.data.d.d_buf-before.patch \ - file://debian/hppa_backend.diff \ - file://debian/arm_backend.diff \ - file://debian/mips_backend.diff \ - file://debian/mips_readelf_w.patch \ - file://debian/kfreebsd_path.patch \ - file://debian/0001-Ignore-differences-between-mips-machine-identifiers.patch \ - file://debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch \ - file://debian/0003-Add-mips-n64-relocation-format-hack.patch \ - file://debian/hurd_path.patch \ - file://debian/ignore_strmerge.diff \ - file://debian/disable_werror.patch \ - file://debian/testsuite-ignore-elflint.diff \ - file://debian/mips_cfi.patch \ - file://debian/0001-fix-compile-failure-with-debian-patches.patch \ - file://0001-skip-the-test-when-gcc-not-deployed.patch \ - file://0001-ppc_initreg.c-Incliude-asm-ptrace.h-for-pt_regs-defi.patch \ - file://run-ptest \ - file://ptest.patch \ - " -SRC_URI_append_libc-musl = " \ - file://0001-musl-obstack-fts.patch \ - file://0002-musl-libs.patch \ - file://0003-musl-utils.patch \ - file://0004-Fix-error-on-musl.patch \ - " -SRC_URI[md5sum] = "8ee56b371b5a7ea081284c44e5164600" -SRC_URI[sha256sum] = "25a545566cbacaa37ae6222e58f1c48ea4570f53ba991886e2f5ce96e22a23a2" - -inherit autotools gettext ptest pkgconfig - -EXTRA_OECONF = "--program-prefix=eu- 
--disable-debuginfod" - -DEPENDS_BZIP2 = "bzip2-replacement-native" -DEPENDS_BZIP2_class-target = "bzip2" - -PACKAGECONFIG ??= "" -PACKAGECONFIG[bzip2] = "--with-bzlib,--without-bzlib,${DEPENDS_BZIP2}" -PACKAGECONFIG[xz] = "--with-lzma,--without-lzma,xz" - -RDEPENDS_${PN}-ptest += "libasm libelf bash make coreutils ${PN}-binutils" - -EXTRA_OECONF_append_class-target = " --disable-tests-rpath" - -RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" - -do_compile_ptest() { - cd ${B}/tests - oe_runmake buildtest-TESTS oecheck -} - -do_install_ptest() { - if [ ${PTEST_ENABLED} = "1" ]; then - # copy the files which needed by the cases - TEST_FILES="strip strip.o addr2line elfcmp objdump readelf size.o nm.o nm elflint elfcompress elfclassify stack unstrip" - install -d -m 755 ${D}${PTEST_PATH}/src - install -d -m 755 ${D}${PTEST_PATH}/libelf - install -d -m 755 ${D}${PTEST_PATH}/libdw - install -d -m 755 ${D}${PTEST_PATH}/libasm - for test_file in ${TEST_FILES}; do - if [ -f ${B}/src/${test_file} ]; then - cp -r ${B}/src/${test_file} ${D}${PTEST_PATH}/src - fi - done - cp ${D}${libdir}/libelf-${PV}.so ${D}${PTEST_PATH}/libelf/libelf.so - cp ${D}${libdir}/libdw-${PV}.so ${D}${PTEST_PATH}/libdw/libdw.so - cp ${D}${libdir}/libasm-${PV}.so ${D}${PTEST_PATH}/libasm/libasm.so - cp -r ${S}/tests/ ${D}${PTEST_PATH} - cp -r ${B}/tests/* ${D}${PTEST_PATH}/tests - cp -r ${B}/config.h ${D}${PTEST_PATH} - cp -r ${B}/backends ${D}${PTEST_PATH} - sed -i '/^Makefile:/c Makefile:' ${D}${PTEST_PATH}/tests/Makefile - find ${D}${PTEST_PATH} -type f -name *.[hoc] | xargs -i rm {} - fi -} - -EXTRA_OEMAKE_class-native = "" -EXTRA_OEMAKE_class-nativesdk = "" - -BBCLASSEXTEND = "native nativesdk" - -# Package utilities separately -PACKAGES =+ "${PN}-binutils libelf libasm libdw" - -# shared libraries are licensed GPLv2 or GPLv3+, binaries GPLv3+ -# according to NEWS file: -# "The license is now GPLv2/LGPLv3+ for the libraries and GPLv3+ for stand-alone -# programs. There is now also a formal CONTRIBUTING document describing how to -# submit patches." -LICENSE_${PN}-binutils = "GPLv3+" -LICENSE_${PN} = "GPLv3+" -LICENSE_libelf = "GPLv2 | LGPLv3+" -LICENSE_libasm = "GPLv2 | LGPLv3+" -LICENSE_libdw = "GPLv2 | LGPLv3+" - -FILES_${PN}-binutils = "\ - ${bindir}/eu-addr2line \ - ${bindir}/eu-ld \ - ${bindir}/eu-nm \ - ${bindir}/eu-readelf \ - ${bindir}/eu-size \ - ${bindir}/eu-strip" - -FILES_libelf = "${libdir}/libelf-${PV}.so ${libdir}/libelf.so.*" -FILES_libasm = "${libdir}/libasm-${PV}.so ${libdir}/libasm.so.*" -FILES_libdw = "${libdir}/libdw-${PV}.so ${libdir}/libdw.so.* ${libdir}/elfutils/lib*" -# Some packages have the version preceeding the .so instead properly -# versioned .so., so we need to reorder and repackage. 
-#FILES_${PN} += "${libdir}/*-${PV}.so ${base_libdir}/*-${PV}.so" -#FILES_SOLIBSDEV = "${libdir}/libasm.so ${libdir}/libdw.so ${libdir}/libelf.so" - -# The package contains symlinks that trip up insane -INSANE_SKIP_${MLPREFIX}libdw = "dev-so" - -# avoid stripping some generated binaries otherwise some of the tests such as test-nlist, -# run-strip-reloc.sh, run-strip-strmerge.sh and so on will fail -INHIBIT_PACKAGE_STRIP_FILES = "\ - ${PKGD}${PTEST_PATH}/tests/test-nlist \ - ${PKGD}${PTEST_PATH}/tests/elfstrmerge \ - ${PKGD}${PTEST_PATH}/tests/backtrace-child \ - ${PKGD}${PTEST_PATH}/tests/backtrace-data \ - ${PKGD}${PTEST_PATH}/tests/backtrace-dwarf \ - ${PKGD}${PTEST_PATH}/tests/deleted \ - ${PKGD}${PTEST_PATH}/src/strip \ - ${PKGD}${PTEST_PATH}/src/addr2line \ - ${PKGD}${PTEST_PATH}/src/elfcmp \ - ${PKGD}${PTEST_PATH}/src/objdump \ - ${PKGD}${PTEST_PATH}/src/readelf \ - ${PKGD}${PTEST_PATH}/src/nm \ - ${PKGD}${PTEST_PATH}/src/elflint \ - ${PKGD}${PTEST_PATH}/src/elfclassify \ - ${PKGD}${PTEST_PATH}/src/stack \ - ${PKGD}${PTEST_PATH}/src/unstrip \ - ${PKGD}${PTEST_PATH}/libelf/libelf.so \ - ${PKGD}${PTEST_PATH}/libdw/libdw.so \ - ${PKGD}${PTEST_PATH}/libasm/libasm.so \ - ${PKGD}${PTEST_PATH}/backends/libebl_i386.so \ - ${PKGD}${PTEST_PATH}/backends/libebl_x86_64.so \ -" - -PRIVATE_LIBS_${PN}-ptest = "libdw.so.1 libelf.so.1 libasm.so.1" diff --git a/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb b/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb new file mode 100644 index 000000000..9f8bfc24f --- /dev/null +++ b/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb @@ -0,0 +1,144 @@ +SUMMARY = "Utilities and libraries for handling compiled object files" +HOMEPAGE = "https://sourceware.org/elfutils" +SECTION = "base" +LICENSE = "GPLv2 & LGPLv3+ & GPLv3+" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" +DEPENDS = "zlib virtual/libintl" +DEPENDS_append_libc-musl = " argp-standalone fts musl-obstack " +# The Debian patches below are from: +# http://ftp.de.debian.org/debian/pool/main/e/elfutils/elfutils_0.176-1.debian.tar.xz +SRC_URI = "https://sourceware.org/elfutils/ftp/${PV}/${BP}.tar.bz2 \ + file://0001-dso-link-change.patch \ + file://0002-Fix-elf_cvt_gunhash-if-dest-and-src-are-same.patch \ + file://0003-fixheadercheck.patch \ + file://0004-Disable-the-test-to-convert-euc-jp.patch \ + file://0006-Fix-build-on-aarch64-musl.patch \ + file://0001-libasm-may-link-with-libbz2-if-found.patch \ + file://0001-libelf-elf_end.c-check-data_list.data.d.d_buf-before.patch \ + file://0001-skip-the-test-when-gcc-not-deployed.patch \ + file://0001-ppc_initreg.c-Incliude-asm-ptrace.h-for-pt_regs-defi.patch \ + file://run-ptest \ + file://ptest.patch \ + file://0001-tests-Makefile.am-compile-test_nlist-with-standard-C.patch \ + " +SRC_URI_append_libc-musl = " \ + file://0001-musl-obstack-fts.patch \ + file://0002-musl-libs.patch \ + file://0003-musl-utils.patch \ + file://0004-Fix-error-on-musl.patch \ + file://0015-config-eu.am-do-not-use-Werror.patch \ + " +SRC_URI[sha256sum] = "b827b6e35c59d188ba97d7cf148fa8dc6f5c68eb6c5981888dfdbb758c0b569d" + +inherit autotools gettext ptest pkgconfig + +EXTRA_OECONF = "--program-prefix=eu- --disable-debuginfod" + +DEPENDS_BZIP2 = "bzip2-replacement-native" +DEPENDS_BZIP2_class-target = "bzip2" + +PACKAGECONFIG ??= "" +PACKAGECONFIG[bzip2] = "--with-bzlib,--without-bzlib,${DEPENDS_BZIP2}" +PACKAGECONFIG[xz] = "--with-lzma,--without-lzma,xz" + +RDEPENDS_${PN}-ptest += "libasm libelf bash make coreutils ${PN}-binutils" + 
+EXTRA_OECONF_append_class-target = " --disable-tests-rpath" + +RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" + +do_compile_ptest() { + cd ${B}/tests + oe_runmake buildtest-TESTS oecheck +} + +do_install_ptest() { + if [ ${PTEST_ENABLED} = "1" ]; then + # copy the files which needed by the cases + TEST_FILES="strip strip.o addr2line elfcmp objdump readelf size.o nm.o nm elflint elfcompress elfclassify stack unstrip" + install -d -m 755 ${D}${PTEST_PATH}/src + install -d -m 755 ${D}${PTEST_PATH}/libelf + install -d -m 755 ${D}${PTEST_PATH}/libdw + install -d -m 755 ${D}${PTEST_PATH}/libasm + for test_file in ${TEST_FILES}; do + if [ -f ${B}/src/${test_file} ]; then + cp -r ${B}/src/${test_file} ${D}${PTEST_PATH}/src + fi + done + cp ${D}${libdir}/libelf-${PV}.so ${D}${PTEST_PATH}/libelf/libelf.so + cp ${D}${libdir}/libdw-${PV}.so ${D}${PTEST_PATH}/libdw/libdw.so + cp ${D}${libdir}/libasm-${PV}.so ${D}${PTEST_PATH}/libasm/libasm.so + cp -r ${S}/tests/ ${D}${PTEST_PATH} + cp -r ${B}/tests/* ${D}${PTEST_PATH}/tests + cp -r ${B}/config.h ${D}${PTEST_PATH} + cp -r ${B}/backends ${D}${PTEST_PATH} + sed -i '/^Makefile:/c Makefile:' ${D}${PTEST_PATH}/tests/Makefile + find ${D}${PTEST_PATH} -type f -name *.[hoc] | xargs -i rm {} + fi +} + +EXTRA_OEMAKE_class-native = "" +EXTRA_OEMAKE_class-nativesdk = "" + +BBCLASSEXTEND = "native nativesdk" + +# Package utilities separately +PACKAGES =+ "${PN}-binutils libelf libasm libdw" + +# shared libraries are licensed GPLv2 or GPLv3+, binaries GPLv3+ +# according to NEWS file: +# "The license is now GPLv2/LGPLv3+ for the libraries and GPLv3+ for stand-alone +# programs. There is now also a formal CONTRIBUTING document describing how to +# submit patches." +LICENSE_${PN}-binutils = "GPLv3+" +LICENSE_${PN} = "GPLv3+" +LICENSE_libelf = "GPLv2 | LGPLv3+" +LICENSE_libasm = "GPLv2 | LGPLv3+" +LICENSE_libdw = "GPLv2 | LGPLv3+" + +FILES_${PN}-binutils = "\ + ${bindir}/eu-addr2line \ + ${bindir}/eu-ld \ + ${bindir}/eu-nm \ + ${bindir}/eu-readelf \ + ${bindir}/eu-size \ + ${bindir}/eu-strip" + +FILES_libelf = "${libdir}/libelf-${PV}.so ${libdir}/libelf.so.*" +FILES_libasm = "${libdir}/libasm-${PV}.so ${libdir}/libasm.so.*" +FILES_libdw = "${libdir}/libdw-${PV}.so ${libdir}/libdw.so.* ${libdir}/elfutils/lib*" +# Some packages have the version preceeding the .so instead properly +# versioned .so., so we need to reorder and repackage. 
+#FILES_${PN} += "${libdir}/*-${PV}.so ${base_libdir}/*-${PV}.so" +#FILES_SOLIBSDEV = "${libdir}/libasm.so ${libdir}/libdw.so ${libdir}/libelf.so" + +# The package contains symlinks that trip up insane +INSANE_SKIP_${MLPREFIX}libdw = "dev-so" + +# avoid stripping some generated binaries otherwise some of the tests such as test-nlist, +# run-strip-reloc.sh, run-strip-strmerge.sh and so on will fail +INHIBIT_PACKAGE_STRIP_FILES = "\ + ${PKGD}${PTEST_PATH}/tests/test-nlist \ + ${PKGD}${PTEST_PATH}/tests/elfstrmerge \ + ${PKGD}${PTEST_PATH}/tests/backtrace-child \ + ${PKGD}${PTEST_PATH}/tests/backtrace-data \ + ${PKGD}${PTEST_PATH}/tests/backtrace-dwarf \ + ${PKGD}${PTEST_PATH}/tests/deleted \ + ${PKGD}${PTEST_PATH}/src/strip \ + ${PKGD}${PTEST_PATH}/src/addr2line \ + ${PKGD}${PTEST_PATH}/src/elfcmp \ + ${PKGD}${PTEST_PATH}/src/objdump \ + ${PKGD}${PTEST_PATH}/src/readelf \ + ${PKGD}${PTEST_PATH}/src/nm \ + ${PKGD}${PTEST_PATH}/src/elflint \ + ${PKGD}${PTEST_PATH}/src/elfclassify \ + ${PKGD}${PTEST_PATH}/src/stack \ + ${PKGD}${PTEST_PATH}/src/unstrip \ + ${PKGD}${PTEST_PATH}/libelf/libelf.so \ + ${PKGD}${PTEST_PATH}/libdw/libdw.so \ + ${PKGD}${PTEST_PATH}/libasm/libasm.so \ + ${PKGD}${PTEST_PATH}/backends/libebl_i386.so \ + ${PKGD}${PTEST_PATH}/backends/libebl_x86_64.so \ +" + +PRIVATE_LIBS_${PN}-ptest = "libdw.so.1 libelf.so.1 libasm.so.1" diff --git a/poky/meta/recipes-devtools/elfutils/files/0001-musl-obstack-fts.patch b/poky/meta/recipes-devtools/elfutils/files/0001-musl-obstack-fts.patch index f751a2ecd..2450b0d6b 100644 --- a/poky/meta/recipes-devtools/elfutils/files/0001-musl-obstack-fts.patch +++ b/poky/meta/recipes-devtools/elfutils/files/0001-musl-obstack-fts.patch @@ -1,7 +1,7 @@ -From 8ccf3d92b8766b53f203df7e01b489604ef851f3 Mon Sep 17 00:00:00 2001 +From 1ec7b2208803e0fbdcbe6c07b849e7dc4d9fa8a2 Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Fri, 23 Aug 2019 10:17:25 +0800 -Subject: [PATCH 1/4] musl-obstack-fts +Subject: [PATCH] musl-obstack-fts Look for libfts and libobstack during configure, these libraries are external to libc when using musl, whereas @@ -20,10 +20,10 @@ Signed-off-by: Hongxu Jia 3 files changed, 58 insertions(+), 4 deletions(-) diff --git a/configure.ac b/configure.ac -index cad7b2b..6d4229b 100644 +index e9649c2..03643c8 100644 --- a/configure.ac +++ b/configure.ac -@@ -529,6 +529,60 @@ else +@@ -538,6 +538,60 @@ else fi AC_SUBST([argp_LDADD]) @@ -122,6 +122,3 @@ index 13d9bda..d5a4f7d 100644 unstrip_LDADD = $(libebl) $(libelf) $(libdw) $(libeu) $(argp_LDADD) stack_LDADD = $(libebl) $(libelf) $(libdw) $(libeu) $(argp_LDADD) $(demanglelib) elfcompress_LDADD = $(libebl) $(libelf) $(libdw) $(libeu) $(argp_LDADD) --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/elfutils/files/0001-tests-Makefile.am-compile-test_nlist-with-standard-C.patch b/poky/meta/recipes-devtools/elfutils/files/0001-tests-Makefile.am-compile-test_nlist-with-standard-C.patch new file mode 100644 index 000000000..481b91238 --- /dev/null +++ b/poky/meta/recipes-devtools/elfutils/files/0001-tests-Makefile.am-compile-test_nlist-with-standard-C.patch @@ -0,0 +1,27 @@ +From 1ca86294ee5454592c9ad855e13080509d8a92d3 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Tue, 23 Jun 2020 07:49:35 +0000 +Subject: [PATCH] tests/Makefile.am: compile test_nlist with standard CFLAGS + +Otherwise, it will contain build paths in it and wont +be reproducible. 
+ +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Alexander Kanavin +--- + tests/Makefile.am | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/tests/Makefile.am b/tests/Makefile.am +index 8048b69..d2b28a4 100644 +--- a/tests/Makefile.am ++++ b/tests/Makefile.am +@@ -86,7 +86,7 @@ backtrace-child-biarch$(EXEEXT): backtrace-child.c + test-nlist$(EXEEXT): test-nlist.c + $(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) \ +- $(test_nlist_CFLAGS) $(test_nlist_LDADD) -o $@ $< ++ $(CFLAGS) $(test_nlist_LDADD) -o $@ $< + + TESTS = run-arextract.sh run-arsymtest.sh run-ar.sh newfile test-nlist \ + update1 update2 update3 update4 \ diff --git a/poky/meta/recipes-devtools/elfutils/files/0002-musl-libs.patch b/poky/meta/recipes-devtools/elfutils/files/0002-musl-libs.patch index cf1468386..d5ec0ec9e 100644 --- a/poky/meta/recipes-devtools/elfutils/files/0002-musl-libs.patch +++ b/poky/meta/recipes-devtools/elfutils/files/0002-musl-libs.patch @@ -1,7 +1,7 @@ -From ea908c7009de5a208383abf4bec4c6b3d9519ca3 Mon Sep 17 00:00:00 2001 +From 9a57efb0f826a70ae360aa55504ee2de656b92b6 Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Fri, 23 Aug 2019 10:18:47 +0800 -Subject: [PATCH 2/4] musl-libs +Subject: [PATCH] musl-libs Collection of fixes needed to compile libelf and other libraries provided by elfutils for musl targets @@ -104,7 +104,7 @@ index 7bcf61c..11dcc8b 100644 return elf_errmsg (error & 0xffff); case OTHER_ERROR (LIBDW): diff --git a/libdwfl/linux-kernel-modules.c b/libdwfl/linux-kernel-modules.c -index d46ab5a..1c3faee 100644 +index 0434f1e..5afaee8 100644 --- a/libdwfl/linux-kernel-modules.c +++ b/libdwfl/linux-kernel-modules.c @@ -50,6 +50,7 @@ @@ -116,7 +116,7 @@ index d46ab5a..1c3faee 100644 /* If fts.h is included before config.h, its indirect inclusions may not give us the right LFS aliases of these functions, so map them manually. */ diff --git a/libelf/elf.h b/libelf/elf.h -index bed273d..be228e6 100644 +index 197b557..8e5b94c 100644 --- a/libelf/elf.h +++ b/libelf/elf.h @@ -21,7 +21,9 @@ @@ -130,9 +130,9 @@ index bed273d..be228e6 100644 /* Standard ELF types. */ -@@ -4029,6 +4031,7 @@ enum - #define R_NDS32_TLS_TPOFF 102 - #define R_NDS32_TLS_DESC 119 +@@ -4103,6 +4105,7 @@ enum + #define R_ARC_TLS_LE_S9 0x4a + #define R_ARC_TLS_LE_32 0x4b -__END_DECLS - @@ -140,6 +140,3 @@ index bed273d..be228e6 100644 +} +#endif #endif /* elf.h */ --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/elfutils/files/0003-musl-utils.patch b/poky/meta/recipes-devtools/elfutils/files/0003-musl-utils.patch index 79c9d9ad5..4f28d9d02 100644 --- a/poky/meta/recipes-devtools/elfutils/files/0003-musl-utils.patch +++ b/poky/meta/recipes-devtools/elfutils/files/0003-musl-utils.patch @@ -1,7 +1,7 @@ -From 94028b16e56c8eef1aa02dcc4da268a0e471b4ea Mon Sep 17 00:00:00 2001 +From 6923400b777d4ba6f040c4006413bf997326460f Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Fri, 23 Aug 2019 10:19:48 +0800 -Subject: [PATCH 3/4] musl-utils +Subject: [PATCH] musl-utils Provide missing defines which otherwise are available on glibc system headers @@ -161,6 +161,3 @@ index 9b8c09a..1fb5063 100644 /* Name and version of program. 
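Note: the musl patches in this region all address the same class of problem, identifiers that glibc's headers provide but musl's do not. As a purely illustrative example of the pattern (not a quote of any of these patches), the ALLPERMS macro handled by the 0004-Fix-error-on-musl patch further down can be supplied with a guarded fallback; the value shown is the conventional glibc one.

    /* Illustrative compatibility fallback, not taken from the patches:
     * glibc's <sys/stat.h> defines ALLPERMS, musl's does not. */
    #include <sys/stat.h>

    #ifndef ALLPERMS
    # define ALLPERMS (S_ISUID | S_ISGID | S_ISVTX | S_IRWXU | S_IRWXG | S_IRWXO) /* 07777 */
    #endif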
*/ ARGP_PROGRAM_VERSION_HOOK_DEF = print_version; --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/elfutils/files/0004-Fix-error-on-musl.patch b/poky/meta/recipes-devtools/elfutils/files/0004-Fix-error-on-musl.patch index 1b2f9ed98..481e4b38e 100644 --- a/poky/meta/recipes-devtools/elfutils/files/0004-Fix-error-on-musl.patch +++ b/poky/meta/recipes-devtools/elfutils/files/0004-Fix-error-on-musl.patch @@ -1,7 +1,7 @@ -From 7ff8cbecde7455b530fa7894a78d2326799f2556 Mon Sep 17 00:00:00 2001 +From 48b769ab692c8f02c1ae467229fe3404f662098a Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Wed, 1 May 2019 22:15:03 +0100 -Subject: [PATCH 4/4] Fix error on musl: +Subject: [PATCH] Fix error on musl: | ../../elfutils-0.176/tests/elfstrmerge.c: In function 'main': | ../../elfutils-0.176/tests/elfstrmerge.c:370:60: error: 'ALLPERMS' undeclared (first use in this function); did you mean 'EPERM'? @@ -34,6 +34,3 @@ index ba0d68d..1d2447f 100644 /* The original ELF file. */ static int fd = -1; static Elf *elf = NULL; --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/elfutils/files/0015-config-eu.am-do-not-use-Werror.patch b/poky/meta/recipes-devtools/elfutils/files/0015-config-eu.am-do-not-use-Werror.patch new file mode 100644 index 000000000..c3ae35726 --- /dev/null +++ b/poky/meta/recipes-devtools/elfutils/files/0015-config-eu.am-do-not-use-Werror.patch @@ -0,0 +1,36 @@ +From dce2187dd8f592316357b200ebbe8dbed9ee65cb Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Mon, 22 Jun 2020 21:35:16 +0000 +Subject: [PATCH] config/eu.am: do not use -Werror + +Due to re-definition of error() on musl, gcc starts throwing +errors where none happen with glibc. Since upstream is not +likely to be interested in musl builds, lets just disable +Werror. + +Upstream-Status: Inappropriate [oe core specific] +Signed-off-by: Alexander Kanavin +--- + config/eu.am | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/config/eu.am b/config/eu.am +index 6c3c444..3bc0dc9 100644 +--- a/config/eu.am ++++ b/config/eu.am +@@ -73,7 +73,6 @@ AM_CFLAGS = -std=gnu99 -Wall -Wshadow -Wformat=2 \ + -Wold-style-definition -Wstrict-prototypes -Wtrampolines \ + $(LOGICAL_OP_WARNING) $(DUPLICATED_COND_WARNING) \ + $(NULL_DEREFERENCE_WARNING) $(IMPLICIT_FALLTHROUGH_WARNING) \ +- $(if $($(*F)_no_Werror),,-Werror) \ + $(if $($(*F)_no_Wunused),,-Wunused -Wextra) \ + $(if $($(*F)_no_Wstack_usage),,$(STACK_USAGE_WARNING)) \ + $(if $($(*F)_no_Wpacked_not_aligned),-Wno-packed-not-aligned,) \ +@@ -83,7 +82,6 @@ AM_CXXFLAGS = -std=c++11 -Wall -Wshadow \ + -Wtrampolines \ + $(LOGICAL_OP_WARNING) $(DUPLICATED_COND_WARNING) \ + $(NULL_DEREFERENCE_WARNING) $(IMPLICIT_FALLTHROUGH_WARNING) \ +- $(if $($(*F)_no_Werror),,-Werror) \ + $(if $($(*F)_no_Wunused),,-Wunused -Wextra) \ + $(if $($(*F)_no_Wstack_usage),,$(STACK_USAGE_WARNING)) \ + $(if $($(*F)_no_Wpacked_not_aligned),-Wno-packed-not-aligned,) \ diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/0001-Ignore-differences-between-mips-machine-identifiers.patch b/poky/meta/recipes-devtools/elfutils/files/debian/0001-Ignore-differences-between-mips-machine-identifiers.patch deleted file mode 100644 index a2983c577..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/0001-Ignore-differences-between-mips-machine-identifiers.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 77cb4a53c270d5854d3af24f19547bc3de825233 Mon Sep 17 00:00:00 2001 -From: James Cowgill -Date: Mon, 5 Jan 2015 15:16:58 +0000 -Subject: [PATCH 1/3] Ignore differences between mips machine identifiers - -Little 
endian binaries actually use EM_MIPS so you can't tell the endianness -from the elf machine id. Also, the EM_MIPS_RS3_LE machine is dead anyway (the -kernel will not load binaries containing it). - -Signed-off-by: James Cowgill - -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia ---- - backends/mips_init.c | 6 +----- - 1 file changed, 1 insertion(+), 5 deletions(-) - -Index: b/backends/mips_init.c -=================================================================== ---- a/backends/mips_init.c -+++ b/backends/mips_init.c -@@ -45,11 +45,7 @@ mips_init (Elf *elf __attribute__ ((unus - return NULL; - - /* We handle it. */ -- if (machine == EM_MIPS) -- eh->name = "MIPS R3000 big-endian"; -- else if (machine == EM_MIPS_RS3_LE) -- eh->name = "MIPS R3000 little-endian"; -- -+ eh->name = "MIPS"; - mips_init_reloc (eh); - HOOK (eh, reloc_simple_type); - HOOK (eh, return_value_location); diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/0001-fix-compile-failure-with-debian-patches.patch b/poky/meta/recipes-devtools/elfutils/files/debian/0001-fix-compile-failure-with-debian-patches.patch deleted file mode 100644 index 19a201506..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/0001-fix-compile-failure-with-debian-patches.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 3f3e7b16934ec58ab47d2bdc9982f54a55b07534 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 16:25:33 +0800 -Subject: [PATCH] fix compile failure with debian patches - -While applying debian patches, there is a compile failure -... -elfutils-0.177/backends/mips_init.c:48:5: error: 'Ebl' {aka 'struct ebl'} has no member named 'name' -... - -Since upstream applied commit [b323391 libdwelf: Add dwelf_elf_e_machine_string -and use it in readelf], it remove 'name' from 'struct ebl' - -Upstream-Status: Pending - -Signed-off-by: Hongxu Jia ---- - backends/mips_init.c | 1 - - backends/parisc_init.c | 1 - - 2 files changed, 2 deletions(-) - -diff --git a/backends/mips_init.c b/backends/mips_init.c -index bce5abe..e1c65c8 100644 ---- a/backends/mips_init.c -+++ b/backends/mips_init.c -@@ -45,7 +45,6 @@ mips_init (Elf *elf __attribute__ ((unused)), - return NULL; - - /* We handle it. */ -- eh->name = "MIPS"; - mips_init_reloc (eh); - HOOK (eh, reloc_simple_type); - HOOK (eh, return_value_location); -diff --git a/backends/parisc_init.c b/backends/parisc_init.c -index f1e401c..97b4a8c 100644 ---- a/backends/parisc_init.c -+++ b/backends/parisc_init.c -@@ -56,7 +56,6 @@ parisc_init (Elf *elf __attribute__ ((unused)), - pa64 = 1; - } - /* We handle it. 
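Note: the deleted debian patch above drops the endianness distinction from the MIPS backend name because, as its description says, the machine identifier alone cannot tell the byte order apart. For context only, a minimal sketch (not elfutils code) of where the byte order actually lives, the EI_DATA byte of the ELF identification, is:

    /* Sketch only: byte order of a MIPS ELF comes from e_ident[EI_DATA],
     * not from e_machine, which is EM_MIPS for both byte orders. */
    #include <elf.h>

    static const char *mips_byte_order(const unsigned char *e_ident)
    {
        switch (e_ident[EI_DATA]) {
        case ELFDATA2LSB: return "MIPS little-endian";
        case ELFDATA2MSB: return "MIPS big-endian";
        default:          return "MIPS (unknown byte order)";
        }
    }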
*/ -- eh->name = "PA-RISC"; - parisc_init_reloc (eh); - HOOK (eh, reloc_simple_type); - HOOK (eh, machine_flag_check); --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch b/poky/meta/recipes-devtools/elfutils/files/debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch deleted file mode 100644 index aee00cba1..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/0002-Add-support-for-mips64-abis-in-mips_retval.c.patch +++ /dev/null @@ -1,171 +0,0 @@ -From fdaab18a65ed2529656baa64cb6169f34d7e507b Mon Sep 17 00:00:00 2001 -From: James Cowgill -Date: Mon, 5 Jan 2015 15:17:01 +0000 -Subject: [PATCH 2/3] Add support for mips64 abis in mips_retval.c - -Signed-off-by: James Cowgill - -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia ---- - backends/mips_retval.c | 104 ++++++++++++++++++++++++++++++++++++++++++++----- - 1 file changed, 94 insertions(+), 10 deletions(-) - -diff --git a/backends/mips_retval.c b/backends/mips_retval.c -index 33f12a7..d5c6ef0 100644 ---- a/backends/mips_retval.c -+++ b/backends/mips_retval.c -@@ -91,6 +91,8 @@ enum mips_abi find_mips_abi(Elf *elf) - default: - if ((elf_flags & EF_MIPS_ABI2)) - return MIPS_ABI_N32; -+ else if ((ehdr->e_ident[EI_CLASS] == ELFCLASS64)) -+ return MIPS_ABI_N64; - } - - /* GCC creates a pseudo-section whose name describes the ABI. */ -@@ -195,6 +197,57 @@ static const Dwarf_Op loc_aggregate[] = - }; - #define nloc_aggregate 1 - -+/* Test if a struct member is a float */ -+static int is_float_child(Dwarf_Die *childdie) -+{ -+ /* Test if this is actually a struct member */ -+ if (dwarf_tag(childdie) != DW_TAG_member) -+ return 0; -+ -+ /* Get type of member */ -+ Dwarf_Attribute attr_mem; -+ Dwarf_Die child_type_mem; -+ Dwarf_Die *child_typedie = -+ dwarf_formref_die(dwarf_attr_integrate(childdie, -+ DW_AT_type, -+ &attr_mem), &child_type_mem); -+ -+ if (dwarf_tag(child_typedie) != DW_TAG_base_type) -+ return 0; -+ -+ /* Get base subtype */ -+ Dwarf_Word encoding; -+ if (dwarf_formudata (dwarf_attr_integrate (child_typedie, -+ DW_AT_encoding, -+ &attr_mem), &encoding) != 0) -+ return 0; -+ -+ return encoding == DW_ATE_float; -+} -+ -+/* Returns the number of fpregs which can be returned in the given struct */ -+static int get_struct_fpregs(Dwarf_Die *structtypedie) -+{ -+ Dwarf_Die child_mem; -+ int fpregs = 0; -+ -+ /* Get first structure member */ -+ if (dwarf_child(structtypedie, &child_mem) != 0) -+ return 0; -+ -+ do -+ { -+ /* Ensure this register is a float */ -+ if (!is_float_child(&child_mem)) -+ return 0; -+ -+ fpregs++; -+ } -+ while (dwarf_siblingof (&child_mem, &child_mem) == 0); -+ -+ return fpregs; -+} -+ - int - mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - { -@@ -240,6 +293,7 @@ mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - tag = dwarf_tag (typedie); - } - -+ Dwarf_Word size; - switch (tag) - { - case -1: -@@ -258,8 +312,6 @@ mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - case DW_TAG_enumeration_type: - case DW_TAG_pointer_type: - case DW_TAG_ptr_to_member_type: -- { -- Dwarf_Word size; - if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_byte_size, - &attr_mem), &size) != 0) - { -@@ -289,7 +341,7 @@ mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - if (size <= 4*regsize && abi == MIPS_ABI_O32) - return nloc_fpregquad; - -- goto aggregate; -+ goto large; - } - } - *locp = 
ABI_LOC(loc_intreg, regsize); -@@ -298,18 +350,50 @@ mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - if (size <= 2*regsize) - return nloc_intregpair; - -- /* Else fall through. Shouldn't happen though (at least with gcc) */ -- } -+ /* Else pass in memory. Shouldn't happen though (at least with gcc) */ -+ goto large; - - case DW_TAG_structure_type: - case DW_TAG_class_type: - case DW_TAG_union_type: -- case DW_TAG_array_type: -- aggregate: -- /* XXX TODO: Can't handle structure return with other ABI's yet :-/ */ -- if ((abi != MIPS_ABI_O32) && (abi != MIPS_ABI_O64)) -- return -2; -+ /* Handle special cases for structures <= 128 bytes in newer ABIs */ -+ if (abi == MIPS_ABI_EABI32 || abi == MIPS_ABI_EABI64 || -+ abi == MIPS_ABI_N32 || abi == MIPS_ABI_N64) -+ { -+ if (dwarf_aggregate_size (typedie, &size) == 0 && size <= 16) -+ { -+ /* -+ * Special case in N64 / N32 - -+ * structures containing only floats are returned in fp regs. -+ * Everything else is returned in integer regs. -+ */ -+ if (tag != DW_TAG_union_type && -+ (abi == MIPS_ABI_N32 || abi == MIPS_ABI_N64)) -+ { -+ int num_fpregs = get_struct_fpregs(typedie); -+ if (num_fpregs == 1 || num_fpregs == 2) -+ { -+ *locp = loc_fpreg; -+ if (num_fpregs == 1) -+ return nloc_fpreg; -+ else -+ return nloc_fpregpair; -+ } -+ } -+ -+ *locp = loc_intreg; -+ if (size <= 8) -+ return nloc_intreg; -+ else -+ return nloc_intregpair; -+ } -+ } -+ -+ /* Fallthrough to handle large types */ - -+ case DW_TAG_array_type: -+ large: -+ /* Return large structures in memory */ - *locp = loc_aggregate; - return nloc_aggregate; - } --- -2.1.4 - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/0003-Add-mips-n64-relocation-format-hack.patch b/poky/meta/recipes-devtools/elfutils/files/debian/0003-Add-mips-n64-relocation-format-hack.patch deleted file mode 100644 index c949e9653..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/0003-Add-mips-n64-relocation-format-hack.patch +++ /dev/null @@ -1,229 +0,0 @@ -From 59d4b8c48e5040af7e02b34eb26ea602ec82a38e Mon Sep 17 00:00:00 2001 -From: James Cowgill -Date: Mon, 5 Jan 2015 15:17:02 +0000 -Subject: [PATCH 3/3] Add mips n64 relocation format hack - -MIPSEL N64 ELF files use a slightly different format for storing relocation -entries which is incompatible with the normal R_SYM / R_INFO macros. -To workaround this, we rearrange the bytes in the relocation's r_info field -when reading and writing the relocations. - -This patch also ensures that strip.c sets the correct value of e_machine -before manipulating relocations so that these changes take effect. 
- -Signed-off-by: James Cowgill - -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia ---- - libelf/gelf_getrel.c | 25 +++++++++++++++++++++++-- - libelf/gelf_getrela.c | 25 +++++++++++++++++++++++-- - libelf/gelf_update_rel.c | 20 +++++++++++++++++++- - libelf/gelf_update_rela.c | 20 +++++++++++++++++++- - src/strip.c | 17 +++++++++++++++++ - 5 files changed, 101 insertions(+), 6 deletions(-) - -Index: elfutils-0.175/libelf/gelf_getrel.c -=================================================================== ---- elfutils-0.175.orig/libelf/gelf_getrel.c -+++ elfutils-0.175/libelf/gelf_getrel.c -@@ -36,6 +36,7 @@ - - #include "libelfP.h" - -+#define EF_MIPS_ABI 0x0000F000 - - GElf_Rel * - gelf_getrel (Elf_Data *data, int ndx, GElf_Rel *dst) -@@ -89,8 +90,28 @@ gelf_getrel (Elf_Data *data, int ndx, GE - result = NULL; - } - else -- result = memcpy (dst, &((Elf64_Rel *) data_scn->d.d_buf)[ndx], -- sizeof (Elf64_Rel)); -+ { -+ GElf_Ehdr hdr; -+ result = memcpy (dst, &((Elf64_Rel *) data_scn->d.d_buf)[ndx], -+ sizeof (Elf64_Rel)); -+ -+ if (gelf_getehdr(scn->elf, &hdr) != NULL && -+ hdr.e_ident[EI_DATA] == ELFDATA2LSB && -+ hdr.e_machine == EM_MIPS && -+ (hdr.e_flags & EF_MIPS_ABI) == 0) -+ { -+ /* -+ * The relocation format is mangled on MIPSEL N64 -+ * We'll adjust it so at least R_SYM will work on it -+ */ -+ GElf_Xword r_info = dst->r_info; -+ dst->r_info = (r_info << 32) | -+ ((r_info >> 8) & 0xFF000000) | -+ ((r_info >> 24) & 0x00FF0000) | -+ ((r_info >> 40) & 0x0000FF00) | -+ ((r_info >> 56) & 0x000000FF); -+ } -+ } - } - - rwlock_unlock (scn->elf->lock); -Index: elfutils-0.175/libelf/gelf_getrela.c -=================================================================== ---- elfutils-0.175.orig/libelf/gelf_getrela.c -+++ elfutils-0.175/libelf/gelf_getrela.c -@@ -36,6 +36,7 @@ - - #include "libelfP.h" - -+#define EF_MIPS_ABI 0x0000F000 - - GElf_Rela * - gelf_getrela (Elf_Data *data, int ndx, GElf_Rela *dst) -@@ -90,8 +91,28 @@ gelf_getrela (Elf_Data *data, int ndx, G - result = NULL; - } - else -- result = memcpy (dst, &((Elf64_Rela *) data_scn->d.d_buf)[ndx], -- sizeof (Elf64_Rela)); -+ { -+ GElf_Ehdr hdr; -+ result = memcpy (dst, &((Elf64_Rela *) data_scn->d.d_buf)[ndx], -+ sizeof (Elf64_Rela)); -+ -+ if (gelf_getehdr(scn->elf, &hdr) != NULL && -+ hdr.e_ident[EI_DATA] == ELFDATA2LSB && -+ hdr.e_machine == EM_MIPS && -+ (hdr.e_flags & EF_MIPS_ABI) == 0) -+ { -+ /* -+ * The relocation format is mangled on MIPSEL N64 -+ * We'll adjust it so at least R_SYM will work on it -+ */ -+ GElf_Xword r_info = dst->r_info; -+ dst->r_info = (r_info << 32) | -+ ((r_info >> 8) & 0xFF000000) | -+ ((r_info >> 24) & 0x00FF0000) | -+ ((r_info >> 40) & 0x0000FF00) | -+ ((r_info >> 56) & 0x000000FF); -+ } -+ } - } - - rwlock_unlock (scn->elf->lock); -Index: elfutils-0.175/libelf/gelf_update_rel.c -=================================================================== ---- elfutils-0.175.orig/libelf/gelf_update_rel.c -+++ elfutils-0.175/libelf/gelf_update_rel.c -@@ -36,6 +36,7 @@ - - #include "libelfP.h" - -+#define EF_MIPS_ABI 0x0000F000 - - int - gelf_update_rel (Elf_Data *dst, int ndx, GElf_Rel *src) -@@ -86,6 +87,9 @@ gelf_update_rel (Elf_Data *dst, int ndx, - } - else - { -+ GElf_Ehdr hdr; -+ GElf_Rel value = *src; -+ - /* Check whether we have to resize the data buffer. 
*/ - if (INVALID_NDX (ndx, Elf64_Rel, &data_scn->d)) - { -@@ -93,7 +97,21 @@ gelf_update_rel (Elf_Data *dst, int ndx, - goto out; - } - -- ((Elf64_Rel *) data_scn->d.d_buf)[ndx] = *src; -+ if (gelf_getehdr(scn->elf, &hdr) != NULL && -+ hdr.e_ident[EI_DATA] == ELFDATA2LSB && -+ hdr.e_machine == EM_MIPS && -+ (hdr.e_flags & EF_MIPS_ABI) == 0) -+ { -+ /* Undo the MIPSEL N64 hack from gelf_getrel */ -+ GElf_Xword r_info = value.r_info; -+ value.r_info = (r_info >> 32) | -+ ((r_info << 8) & 0x000000FF00000000) | -+ ((r_info << 24) & 0x0000FF0000000000) | -+ ((r_info << 40) & 0x00FF000000000000) | -+ ((r_info << 56) & 0xFF00000000000000); -+ } -+ -+ ((Elf64_Rel *) data_scn->d.d_buf)[ndx] = value; - } - - result = 1; -Index: elfutils-0.175/libelf/gelf_update_rela.c -=================================================================== ---- elfutils-0.175.orig/libelf/gelf_update_rela.c -+++ elfutils-0.175/libelf/gelf_update_rela.c -@@ -36,6 +36,7 @@ - - #include "libelfP.h" - -+#define EF_MIPS_ABI 0x0000F000 - - int - gelf_update_rela (Elf_Data *dst, int ndx, GElf_Rela *src) -@@ -89,6 +90,9 @@ gelf_update_rela (Elf_Data *dst, int ndx - } - else - { -+ GElf_Ehdr hdr; -+ GElf_Rela value = *src; -+ - /* Check whether we have to resize the data buffer. */ - if (INVALID_NDX (ndx, Elf64_Rela, &data_scn->d)) - { -@@ -96,7 +100,21 @@ gelf_update_rela (Elf_Data *dst, int ndx - goto out; - } - -- ((Elf64_Rela *) data_scn->d.d_buf)[ndx] = *src; -+ if (gelf_getehdr(scn->elf, &hdr) != NULL && -+ hdr.e_ident[EI_DATA] == ELFDATA2LSB && -+ hdr.e_machine == EM_MIPS && -+ (hdr.e_flags & EF_MIPS_ABI) == 0) -+ { -+ /* Undo the MIPSEL N64 hack from gelf_getrel */ -+ GElf_Xword r_info = value.r_info; -+ value.r_info = (r_info >> 32) | -+ ((r_info << 8) & 0x000000FF00000000) | -+ ((r_info << 24) & 0x0000FF0000000000) | -+ ((r_info << 40) & 0x00FF000000000000) | -+ ((r_info << 56) & 0xFF00000000000000); -+ } -+ -+ ((Elf64_Rela *) data_scn->d.d_buf)[ndx] = value; - } - - result = 1; -Index: elfutils-0.175/src/strip.c -=================================================================== ---- elfutils-0.175.orig/src/strip.c -+++ elfutils-0.175/src/strip.c -@@ -1062,6 +1062,23 @@ handle_elf (int fd, Elf *elf, const char - goto fail; - } - -+ /* Copy identity part of the ELF header now */ -+ newehdr = gelf_getehdr (newelf, &newehdr_mem); -+ if (newehdr == NULL) -+ INTERNAL_ERROR (fname); -+ -+ memcpy (newehdr->e_ident, ehdr->e_ident, EI_NIDENT); -+ newehdr->e_type = ehdr->e_type; -+ newehdr->e_machine = ehdr->e_machine; -+ newehdr->e_version = ehdr->e_version; -+ -+ if (gelf_update_ehdr (newelf, newehdr) == 0) -+ { -+ error (0, 0, gettext ("%s: error while creating ELF header: %s"), -+ fname, elf_errmsg (-1)); -+ return 1; -+ } -+ - /* Copy over the old program header if needed. 
*/ - if (phnum > 0) - { diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/arm_backend.diff b/poky/meta/recipes-devtools/elfutils/files/debian/arm_backend.diff deleted file mode 100644 index ba0ce33bd..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/arm_backend.diff +++ /dev/null @@ -1,624 +0,0 @@ -From 4e6fededb3d8c90694c44214c862ac216a69ecae Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 16:50:33 +0800 -Subject: [PATCH] arm_backend - -Upstream-Status: Pending [from debian] -Rebase to 0.177 -Signed-off-by: Hongxu Jia ---- - backends/arm_init.c | 18 ++++- - backends/arm_regs.c | 132 ++++++++++++++++++++++++++++++++++++ - backends/arm_retval.c | 43 +++++++++++- - backends/libebl_arm.h | 9 +++ - libelf/elf.h | 11 +++ - tests/run-addrcfi.sh | 93 ++++++++++++++++++++++++- - tests/run-allregs.sh | 95 +++++++++++++++++++++++++- - tests/run-readelf-mixed-corenote.sh | 11 ++- - 8 files changed, 400 insertions(+), 12 deletions(-) - create mode 100644 backends/libebl_arm.h - -diff --git a/backends/arm_init.c b/backends/arm_init.c -index af023f0..ea2bcb7 100644 ---- a/backends/arm_init.c -+++ b/backends/arm_init.c -@@ -35,20 +35,31 @@ - #define RELOC_PREFIX R_ARM_ - #include "libebl_CPU.h" - -+#include "libebl_arm.h" -+ - /* This defines the common reloc hooks based on arm_reloc.def. */ - #include "common-reloc.c" - - - const char * --arm_init (Elf *elf __attribute__ ((unused)), -+arm_init (Elf *elf, - GElf_Half machine __attribute__ ((unused)), - Ebl *eh, - size_t ehlen) - { -+ int soft_float = 0; -+ - /* Check whether the Elf_BH object has a sufficent size. */ - if (ehlen < sizeof (Ebl)) - return NULL; - -+ if (elf) { -+ GElf_Ehdr ehdr_mem; -+ GElf_Ehdr *ehdr = gelf_getehdr (elf, &ehdr_mem); -+ if (ehdr && (ehdr->e_flags & EF_ARM_SOFT_FLOAT)) -+ soft_float = 1; -+ } -+ - /* We handle it. */ - arm_init_reloc (eh); - HOOK (eh, segment_type_name); -@@ -59,7 +70,10 @@ arm_init (Elf *elf __attribute__ ((unused)), - HOOK (eh, core_note); - HOOK (eh, auxv_info); - HOOK (eh, check_object_attribute); -- HOOK (eh, return_value_location); -+ if (soft_float) -+ eh->return_value_location = arm_return_value_location_soft; -+ else -+ eh->return_value_location = arm_return_value_location_hard; - HOOK (eh, abi_cfi); - HOOK (eh, check_reloc_target_type); - HOOK (eh, symbol_type_name); -diff --git a/backends/arm_regs.c b/backends/arm_regs.c -index a46a4c9..418c931 100644 ---- a/backends/arm_regs.c -+++ b/backends/arm_regs.c -@@ -31,6 +31,7 @@ - #endif - - #include -+#include - #include - - #define BACKEND arm_ -@@ -76,6 +77,9 @@ arm_register_info (Ebl *ebl __attribute__ ((unused)), - break; - - case 16 + 0 ... 16 + 7: -+ /* AADWARF says that there are no registers in that range, -+ * but gcc maps FPA registers here -+ */ - regno += 96 - 16; - FALLTHROUGH; - case 96 + 0 ... 96 + 7: -@@ -87,11 +91,139 @@ arm_register_info (Ebl *ebl __attribute__ ((unused)), - namelen = 2; - break; - -+ case 64 + 0 ... 64 + 9: -+ *setname = "VFP"; -+ *bits = 32; -+ *type = DW_ATE_float; -+ name[0] = 's'; -+ name[1] = regno - 64 + '0'; -+ namelen = 2; -+ break; -+ -+ case 64 + 10 ... 64 + 31: -+ *setname = "VFP"; -+ *bits = 32; -+ *type = DW_ATE_float; -+ name[0] = 's'; -+ name[1] = (regno - 64) / 10 + '0'; -+ name[2] = (regno - 64) % 10 + '0'; -+ namelen = 3; -+ break; -+ -+ case 104 + 0 ... 104 + 7: -+ /* XXX TODO: -+ * This can be either intel wireless MMX general purpose/control -+ * registers or xscale accumulator, which have different usage. 
-+ * We only have the intel wireless MMX here now. -+ * The name needs to be changed for the xscale accumulator too. */ -+ *setname = "MMX"; -+ *type = DW_ATE_unsigned; -+ *bits = 32; -+ memcpy(name, "wcgr", 4); -+ name[4] = regno - 104 + '0'; -+ namelen = 5; -+ break; -+ -+ case 112 + 0 ... 112 + 9: -+ *setname = "MMX"; -+ *type = DW_ATE_unsigned; -+ *bits = 64; -+ name[0] = 'w'; -+ name[1] = 'r'; -+ name[2] = regno - 112 + '0'; -+ namelen = 3; -+ break; -+ -+ case 112 + 10 ... 112 + 15: -+ *setname = "MMX"; -+ *type = DW_ATE_unsigned; -+ *bits = 64; -+ name[0] = 'w'; -+ name[1] = 'r'; -+ name[2] = '1'; -+ name[3] = regno - 112 - 10 + '0'; -+ namelen = 4; -+ break; -+ - case 128: -+ *setname = "state"; - *type = DW_ATE_unsigned; - return stpcpy (name, "spsr") + 1 - name; - -+ case 129: -+ *setname = "state"; -+ *type = DW_ATE_unsigned; -+ return stpcpy(name, "spsr_fiq") + 1 - name; -+ -+ case 130: -+ *setname = "state"; -+ *type = DW_ATE_unsigned; -+ return stpcpy(name, "spsr_irq") + 1 - name; -+ -+ case 131: -+ *setname = "state"; -+ *type = DW_ATE_unsigned; -+ return stpcpy(name, "spsr_abt") + 1 - name; -+ -+ case 132: -+ *setname = "state"; -+ *type = DW_ATE_unsigned; -+ return stpcpy(name, "spsr_und") + 1 - name; -+ -+ case 133: -+ *setname = "state"; -+ *type = DW_ATE_unsigned; -+ return stpcpy(name, "spsr_svc") + 1 - name; -+ -+ case 144 ... 150: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_usr", regno - 144 + 8) + 1; -+ -+ case 151 ... 157: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_fiq", regno - 151 + 8) + 1; -+ -+ case 158 ... 159: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_irq", regno - 158 + 13) + 1; -+ -+ case 160 ... 161: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_abt", regno - 160 + 13) + 1; -+ -+ case 162 ... 163: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_und", regno - 162 + 13) + 1; -+ -+ case 164 ... 165: -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ return sprintf(name, "r%d_svc", regno - 164 + 13) + 1; -+ -+ case 192 ... 199: -+ *setname = "MMX"; -+ *bits = 32; -+ *type = DW_ATE_unsigned; -+ name[0] = 'w'; -+ name[1] = 'c'; -+ name[2] = regno - 192 + '0'; -+ namelen = 3; -+ break; -+ - case 256 + 0 ... 256 + 9: -+ /* XXX TODO: Neon also uses those registers and can contain -+ * both float and integers */ - *setname = "VFP"; - *type = DW_ATE_float; - *bits = 64; -diff --git a/backends/arm_retval.c b/backends/arm_retval.c -index 1c28f01..313e4eb 100644 ---- a/backends/arm_retval.c -+++ b/backends/arm_retval.c -@@ -48,6 +48,13 @@ static const Dwarf_Op loc_intreg[] = - #define nloc_intreg 1 - #define nloc_intregs(n) (2 * (n)) - -+/* f1 */ /* XXX TODO: f0 can also have number 96 if program was compiled with -mabi=aapcs */ -+static const Dwarf_Op loc_fpreg[] = -+ { -+ { .atom = DW_OP_reg16 }, -+ }; -+#define nloc_fpreg 1 -+ - /* The return value is a structure and is actually stored in stack space - passed in a hidden argument by the caller. But, the compiler - helpfully returns the address of that space in r0. 
*/ -@@ -58,8 +65,9 @@ static const Dwarf_Op loc_aggregate[] = - #define nloc_aggregate 1 - - --int --arm_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+static int -+arm_return_value_location_ (Dwarf_Die *functypedie, const Dwarf_Op **locp, -+ int soft_float) - { - /* Start with the function's type, and get the DW_AT_type attribute, - which is the type of the return value. */ -@@ -98,6 +106,21 @@ arm_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - else - return -1; - } -+ if (tag == DW_TAG_base_type) -+ { -+ Dwarf_Word encoding; -+ if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_encoding, -+ &attr_mem), &encoding) != 0) -+ return -1; -+ -+ if ((encoding == DW_ATE_float) && !soft_float) -+ { -+ *locp = loc_fpreg; -+ if (size <= 8) -+ return nloc_fpreg; -+ goto aggregate; -+ } -+ } - if (size <= 16) - { - intreg: -@@ -106,6 +129,7 @@ arm_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - } - - aggregate: -+ /* XXX TODO sometimes aggregates are returned in r0 (-mabi=aapcs) */ - *locp = loc_aggregate; - return nloc_aggregate; - } -@@ -125,3 +149,18 @@ arm_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) - DWARF and might be valid. */ - return -2; - } -+ -+/* return location for -mabi=apcs-gnu -msoft-float */ -+int -+arm_return_value_location_soft (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+{ -+ return arm_return_value_location_ (functypedie, locp, 1); -+} -+ -+/* return location for -mabi=apcs-gnu -mhard-float (current default) */ -+int -+arm_return_value_location_hard (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+{ -+ return arm_return_value_location_ (functypedie, locp, 0); -+} -+ -diff --git a/backends/libebl_arm.h b/backends/libebl_arm.h -new file mode 100644 -index 0000000..c00770c ---- /dev/null -+++ b/backends/libebl_arm.h -@@ -0,0 +1,9 @@ -+#ifndef _LIBEBL_ARM_H -+#define _LIBEBL_ARM_H 1 -+ -+#include -+ -+extern int arm_return_value_location_soft(Dwarf_Die *, const Dwarf_Op **locp); -+extern int arm_return_value_location_hard(Dwarf_Die *, const Dwarf_Op **locp); -+ -+#endif -diff --git a/libelf/elf.h b/libelf/elf.h -index 01648bd..05b7e7e 100644 ---- a/libelf/elf.h -+++ b/libelf/elf.h -@@ -2690,6 +2690,9 @@ enum - #define EF_ARM_EABI_VER4 0x04000000 - #define EF_ARM_EABI_VER5 0x05000000 - -+/* EI_OSABI values */ -+#define ELFOSABI_ARM_AEABI 64 /* Contains symbol versioning. */ -+ - /* Additional symbol types for Thumb. */ - #define STT_ARM_TFUNC STT_LOPROC /* A Thumb function. */ - #define STT_ARM_16BIT STT_HIPROC /* A Thumb label. */ -@@ -2707,12 +2710,19 @@ enum - - /* Processor specific values for the Phdr p_type field. */ - #define PT_ARM_EXIDX (PT_LOPROC + 1) /* ARM unwind segment. */ -+#define PT_ARM_UNWIND PT_ARM_EXIDX - - /* Processor specific values for the Shdr sh_type field. */ - #define SHT_ARM_EXIDX (SHT_LOPROC + 1) /* ARM unwind section. */ - #define SHT_ARM_PREEMPTMAP (SHT_LOPROC + 2) /* Preemption details. */ - #define SHT_ARM_ATTRIBUTES (SHT_LOPROC + 3) /* ARM attributes section. */ - -+/* Processor specific values for the Dyn d_tag field. */ -+#define DT_ARM_RESERVED1 (DT_LOPROC + 0) -+#define DT_ARM_SYMTABSZ (DT_LOPROC + 1) -+#define DT_ARM_PREEMTMAB (DT_LOPROC + 2) -+#define DT_ARM_RESERVED2 (DT_LOPROC + 3) -+#define DT_ARM_NUM 4 - - /* AArch64 relocs. */ - -@@ -3005,6 +3015,7 @@ enum - TLS block (LDR, STR). */ - #define R_ARM_TLS_IE12GP 111 /* 12 bit GOT entry relative - to GOT origin (LDR). 
*/ -+/* 112 - 127 private range */ - #define R_ARM_ME_TOO 128 /* Obsolete. */ - #define R_ARM_THM_TLS_DESCSEQ 129 - #define R_ARM_THM_TLS_DESCSEQ16 129 -diff --git a/tests/run-addrcfi.sh b/tests/run-addrcfi.sh -index 64fa24d..1c2aa4d 100755 ---- a/tests/run-addrcfi.sh -+++ b/tests/run-addrcfi.sh -@@ -3554,6 +3554,38 @@ dwarf_cfi_addrframe (.eh_frame): no matching address range - FPA reg21 (f5): undefined - FPA reg22 (f6): undefined - FPA reg23 (f7): undefined -+ VFP reg64 (s0): undefined -+ VFP reg65 (s1): undefined -+ VFP reg66 (s2): undefined -+ VFP reg67 (s3): undefined -+ VFP reg68 (s4): undefined -+ VFP reg69 (s5): undefined -+ VFP reg70 (s6): undefined -+ VFP reg71 (s7): undefined -+ VFP reg72 (s8): undefined -+ VFP reg73 (s9): undefined -+ VFP reg74 (s10): undefined -+ VFP reg75 (s11): undefined -+ VFP reg76 (s12): undefined -+ VFP reg77 (s13): undefined -+ VFP reg78 (s14): undefined -+ VFP reg79 (s15): undefined -+ VFP reg80 (s16): undefined -+ VFP reg81 (s17): undefined -+ VFP reg82 (s18): undefined -+ VFP reg83 (s19): undefined -+ VFP reg84 (s20): undefined -+ VFP reg85 (s21): undefined -+ VFP reg86 (s22): undefined -+ VFP reg87 (s23): undefined -+ VFP reg88 (s24): undefined -+ VFP reg89 (s25): undefined -+ VFP reg90 (s26): undefined -+ VFP reg91 (s27): undefined -+ VFP reg92 (s28): undefined -+ VFP reg93 (s29): undefined -+ VFP reg94 (s30): undefined -+ VFP reg95 (s31): undefined - FPA reg96 (f0): undefined - FPA reg97 (f1): undefined - FPA reg98 (f2): undefined -@@ -3562,7 +3594,66 @@ dwarf_cfi_addrframe (.eh_frame): no matching address range - FPA reg101 (f5): undefined - FPA reg102 (f6): undefined - FPA reg103 (f7): undefined -- integer reg128 (spsr): undefined -+ MMX reg104 (wcgr0): undefined -+ MMX reg105 (wcgr1): undefined -+ MMX reg106 (wcgr2): undefined -+ MMX reg107 (wcgr3): undefined -+ MMX reg108 (wcgr4): undefined -+ MMX reg109 (wcgr5): undefined -+ MMX reg110 (wcgr6): undefined -+ MMX reg111 (wcgr7): undefined -+ MMX reg112 (wr0): undefined -+ MMX reg113 (wr1): undefined -+ MMX reg114 (wr2): undefined -+ MMX reg115 (wr3): undefined -+ MMX reg116 (wr4): undefined -+ MMX reg117 (wr5): undefined -+ MMX reg118 (wr6): undefined -+ MMX reg119 (wr7): undefined -+ MMX reg120 (wr8): undefined -+ MMX reg121 (wr9): undefined -+ MMX reg122 (wr10): undefined -+ MMX reg123 (wr11): undefined -+ MMX reg124 (wr12): undefined -+ MMX reg125 (wr13): undefined -+ MMX reg126 (wr14): undefined -+ MMX reg127 (wr15): undefined -+ state reg128 (spsr): undefined -+ state reg129 (spsr_fiq): undefined -+ state reg130 (spsr_irq): undefined -+ state reg131 (spsr_abt): undefined -+ state reg132 (spsr_und): undefined -+ state reg133 (spsr_svc): undefined -+ integer reg144 (r8_usr): undefined -+ integer reg145 (r9_usr): undefined -+ integer reg146 (r10_usr): undefined -+ integer reg147 (r11_usr): undefined -+ integer reg148 (r12_usr): undefined -+ integer reg149 (r13_usr): undefined -+ integer reg150 (r14_usr): undefined -+ integer reg151 (r8_fiq): undefined -+ integer reg152 (r9_fiq): undefined -+ integer reg153 (r10_fiq): undefined -+ integer reg154 (r11_fiq): undefined -+ integer reg155 (r12_fiq): undefined -+ integer reg156 (r13_fiq): undefined -+ integer reg157 (r14_fiq): undefined -+ integer reg158 (r13_irq): undefined -+ integer reg159 (r14_irq): undefined -+ integer reg160 (r13_abt): undefined -+ integer reg161 (r14_abt): undefined -+ integer reg162 (r13_und): undefined -+ integer reg163 (r14_und): undefined -+ integer reg164 (r13_svc): undefined -+ integer reg165 (r14_svc): undefined -+ MMX 
reg192 (wc0): undefined -+ MMX reg193 (wc1): undefined -+ MMX reg194 (wc2): undefined -+ MMX reg195 (wc3): undefined -+ MMX reg196 (wc4): undefined -+ MMX reg197 (wc5): undefined -+ MMX reg198 (wc6): undefined -+ MMX reg199 (wc7): undefined - VFP reg256 (d0): undefined - VFP reg257 (d1): undefined - VFP reg258 (d2): undefined -diff --git a/tests/run-allregs.sh b/tests/run-allregs.sh -index 1422bd6..dc0fc99 100755 ---- a/tests/run-allregs.sh -+++ b/tests/run-allregs.sh -@@ -2672,7 +2672,28 @@ integer registers: - 13: sp (sp), address 32 bits - 14: lr (lr), address 32 bits - 15: pc (pc), address 32 bits -- 128: spsr (spsr), unsigned 32 bits -+ 144: r8_usr (r8_usr), signed 32 bits -+ 145: r9_usr (r9_usr), signed 32 bits -+ 146: r10_usr (r10_usr), signed 32 bits -+ 147: r11_usr (r11_usr), signed 32 bits -+ 148: r12_usr (r12_usr), signed 32 bits -+ 149: r13_usr (r13_usr), signed 32 bits -+ 150: r14_usr (r14_usr), signed 32 bits -+ 151: r8_fiq (r8_fiq), signed 32 bits -+ 152: r9_fiq (r9_fiq), signed 32 bits -+ 153: r10_fiq (r10_fiq), signed 32 bits -+ 154: r11_fiq (r11_fiq), signed 32 bits -+ 155: r12_fiq (r12_fiq), signed 32 bits -+ 156: r13_fiq (r13_fiq), signed 32 bits -+ 157: r14_fiq (r14_fiq), signed 32 bits -+ 158: r13_irq (r13_irq), signed 32 bits -+ 159: r14_irq (r14_irq), signed 32 bits -+ 160: r13_abt (r13_abt), signed 32 bits -+ 161: r14_abt (r14_abt), signed 32 bits -+ 162: r13_und (r13_und), signed 32 bits -+ 163: r14_und (r14_und), signed 32 bits -+ 164: r13_svc (r13_svc), signed 32 bits -+ 165: r14_svc (r14_svc), signed 32 bits - FPA registers: - 16: f0 (f0), float 96 bits - 17: f1 (f1), float 96 bits -@@ -2690,7 +2711,72 @@ FPA registers: - 101: f5 (f5), float 96 bits - 102: f6 (f6), float 96 bits - 103: f7 (f7), float 96 bits -+MMX registers: -+ 104: wcgr0 (wcgr0), unsigned 32 bits -+ 105: wcgr1 (wcgr1), unsigned 32 bits -+ 106: wcgr2 (wcgr2), unsigned 32 bits -+ 107: wcgr3 (wcgr3), unsigned 32 bits -+ 108: wcgr4 (wcgr4), unsigned 32 bits -+ 109: wcgr5 (wcgr5), unsigned 32 bits -+ 110: wcgr6 (wcgr6), unsigned 32 bits -+ 111: wcgr7 (wcgr7), unsigned 32 bits -+ 112: wr0 (wr0), unsigned 64 bits -+ 113: wr1 (wr1), unsigned 64 bits -+ 114: wr2 (wr2), unsigned 64 bits -+ 115: wr3 (wr3), unsigned 64 bits -+ 116: wr4 (wr4), unsigned 64 bits -+ 117: wr5 (wr5), unsigned 64 bits -+ 118: wr6 (wr6), unsigned 64 bits -+ 119: wr7 (wr7), unsigned 64 bits -+ 120: wr8 (wr8), unsigned 64 bits -+ 121: wr9 (wr9), unsigned 64 bits -+ 122: wr10 (wr10), unsigned 64 bits -+ 123: wr11 (wr11), unsigned 64 bits -+ 124: wr12 (wr12), unsigned 64 bits -+ 125: wr13 (wr13), unsigned 64 bits -+ 126: wr14 (wr14), unsigned 64 bits -+ 127: wr15 (wr15), unsigned 64 bits -+ 192: wc0 (wc0), unsigned 32 bits -+ 193: wc1 (wc1), unsigned 32 bits -+ 194: wc2 (wc2), unsigned 32 bits -+ 195: wc3 (wc3), unsigned 32 bits -+ 196: wc4 (wc4), unsigned 32 bits -+ 197: wc5 (wc5), unsigned 32 bits -+ 198: wc6 (wc6), unsigned 32 bits -+ 199: wc7 (wc7), unsigned 32 bits - VFP registers: -+ 64: s0 (s0), float 32 bits -+ 65: s1 (s1), float 32 bits -+ 66: s2 (s2), float 32 bits -+ 67: s3 (s3), float 32 bits -+ 68: s4 (s4), float 32 bits -+ 69: s5 (s5), float 32 bits -+ 70: s6 (s6), float 32 bits -+ 71: s7 (s7), float 32 bits -+ 72: s8 (s8), float 32 bits -+ 73: s9 (s9), float 32 bits -+ 74: s10 (s10), float 32 bits -+ 75: s11 (s11), float 32 bits -+ 76: s12 (s12), float 32 bits -+ 77: s13 (s13), float 32 bits -+ 78: s14 (s14), float 32 bits -+ 79: s15 (s15), float 32 bits -+ 80: s16 (s16), float 32 bits -+ 81: s17 (s17), float 32 bits 
-+ 82: s18 (s18), float 32 bits -+ 83: s19 (s19), float 32 bits -+ 84: s20 (s20), float 32 bits -+ 85: s21 (s21), float 32 bits -+ 86: s22 (s22), float 32 bits -+ 87: s23 (s23), float 32 bits -+ 88: s24 (s24), float 32 bits -+ 89: s25 (s25), float 32 bits -+ 90: s26 (s26), float 32 bits -+ 91: s27 (s27), float 32 bits -+ 92: s28 (s28), float 32 bits -+ 93: s29 (s29), float 32 bits -+ 94: s30 (s30), float 32 bits -+ 95: s31 (s31), float 32 bits - 256: d0 (d0), float 64 bits - 257: d1 (d1), float 64 bits - 258: d2 (d2), float 64 bits -@@ -2723,6 +2809,13 @@ VFP registers: - 285: d29 (d29), float 64 bits - 286: d30 (d30), float 64 bits - 287: d31 (d31), float 64 bits -+state registers: -+ 128: spsr (spsr), unsigned 32 bits -+ 129: spsr_fiq (spsr_fiq), unsigned 32 bits -+ 130: spsr_irq (spsr_irq), unsigned 32 bits -+ 131: spsr_abt (spsr_abt), unsigned 32 bits -+ 132: spsr_und (spsr_und), unsigned 32 bits -+ 133: spsr_svc (spsr_svc), unsigned 32 bits - EOF - - # See run-readelf-mixed-corenote.sh for instructions to regenerate -diff --git a/tests/run-readelf-mixed-corenote.sh b/tests/run-readelf-mixed-corenote.sh -index c960f1d..e4bf074 100755 ---- a/tests/run-readelf-mixed-corenote.sh -+++ b/tests/run-readelf-mixed-corenote.sh -@@ -31,12 +31,11 @@ Note segment of 892 bytes at offset 0x274: - pid: 11087, ppid: 11063, pgrp: 11087, sid: 11063 - utime: 0.000000, stime: 0.010000, cutime: 0.000000, cstime: 0.000000 - orig_r0: -1, fpvalid: 1 -- r0: 1 r1: -1091672508 r2: -1091672500 -- r3: 0 r4: 0 r5: 0 -- r6: 33728 r7: 0 r8: 0 -- r9: 0 r10: -1225703496 r11: -1091672844 -- r12: 0 sp: 0xbeee64f4 lr: 0xb6dc3f48 -- pc: 0x00008500 spsr: 0x60000010 -+ r0: 1 r1: -1091672508 r2: -1091672500 r3: 0 -+ r4: 0 r5: 0 r6: 33728 r7: 0 -+ r8: 0 r9: 0 r10: -1225703496 r11: -1091672844 -+ r12: 0 sp: 0xbeee64f4 lr: 0xb6dc3f48 pc: 0x00008500 -+ spsr: 0x60000010 - CORE 124 PRPSINFO - state: 0, sname: R, zomb: 0, nice: 0, flag: 0x00400500 - uid: 0, gid: 0, pid: 11087, ppid: 11063, pgrp: 11087, sid: 11063 --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/disable_werror.patch b/poky/meta/recipes-devtools/elfutils/files/debian/disable_werror.patch deleted file mode 100644 index bd98dae4b..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/disable_werror.patch +++ /dev/null @@ -1,20 +0,0 @@ -From: Helmut Grohne -Subject: disable -Werror as it tends to break with new gcc versions -Bug-Debian: https://bugs.debian.org/886004 -Last-Update: 2018-01-01 - -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia - -Index: elfutils-0.176/config/eu.am -=================================================================== ---- elfutils-0.176.orig/config/eu.am -+++ elfutils-0.176/config/eu.am -@@ -73,7 +73,6 @@ AM_CFLAGS = -std=gnu99 -Wall -Wshadow -W - -Wold-style-definition -Wstrict-prototypes -Wtrampolines \ - $(LOGICAL_OP_WARNING) $(DUPLICATED_COND_WARNING) \ - $(NULL_DEREFERENCE_WARNING) $(IMPLICIT_FALLTHROUGH_WARNING) \ -- $(if $($(*F)_no_Werror),,-Werror) \ - $(if $($(*F)_no_Wunused),,-Wunused -Wextra) \ - $(if $($(*F)_no_Wstack_usage),,$(STACK_USAGE_WARNING)) \ - $(if $($(*F)_no_Wpacked_not_aligned),-Wno-packed-not-aligned,) \ diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/hppa_backend.diff b/poky/meta/recipes-devtools/elfutils/files/debian/hppa_backend.diff deleted file mode 100644 index 53fa2f435..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/hppa_backend.diff +++ /dev/null @@ -1,828 +0,0 @@ -From ffb811e18d7046d5bbe54ede5b1b7e14eaac0146 Mon Sep 17 
00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 15:44:18 +0800 -Subject: [PATCH] hppa backend - -Rebase to 0.177 -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia - ---- - backends/Makefile.am | 7 +- - backends/libebl_parisc.h | 9 ++ - backends/parisc_init.c | 73 +++++++++++++ - backends/parisc_regs.c | 159 ++++++++++++++++++++++++++++ - backends/parisc_reloc.def | 128 +++++++++++++++++++++++ - backends/parisc_retval.c | 213 ++++++++++++++++++++++++++++++++++++++ - backends/parisc_symbol.c | 113 ++++++++++++++++++++ - libelf/elf.h | 11 ++ - 8 files changed, 711 insertions(+), 2 deletions(-) - create mode 100644 backends/libebl_parisc.h - create mode 100644 backends/parisc_init.c - create mode 100644 backends/parisc_regs.c - create mode 100644 backends/parisc_reloc.def - create mode 100644 backends/parisc_retval.c - create mode 100644 backends/parisc_symbol.c - -diff --git a/backends/Makefile.am b/backends/Makefile.am -index f405212..4755f61 100644 ---- a/backends/Makefile.am -+++ b/backends/Makefile.am -@@ -37,7 +37,9 @@ AM_CPPFLAGS += -I$(top_srcdir)/libebl -I$(top_srcdir)/libasm \ - noinst_LIBRARIES = libebl_backends.a libebl_backends_pic.a - - modules = i386 sh x86_64 ia64 alpha arm aarch64 sparc ppc ppc64 s390 \ -- tilegx m68k bpf riscv csky -+ tilegx m68k bpf riscv csky parisc -+ -+parisc_SRCS = parisc_init.c parisc_symbol.c parisc_regs.c parisc_retval.c - - i386_SRCS = i386_init.c i386_symbol.c i386_corenote.c i386_cfi.c \ - i386_retval.c i386_regs.c i386_auxv.c i386_syscall.c \ -@@ -102,7 +104,8 @@ libebl_backends_a_SOURCES = $(i386_SRCS) $(sh_SRCS) $(x86_64_SRCS) \ - $(ia64_SRCS) $(alpha_SRCS) $(arm_SRCS) \ - $(aarch64_SRCS) $(sparc_SRCS) $(ppc_SRCS) \ - $(ppc64_SRCS) $(s390_SRCS) $(tilegx_SRCS) \ -- $(m68k_SRCS) $(bpf_SRCS) $(riscv_SRCS) $(csky_SRCS) -+ $(m68k_SRCS) $(bpf_SRCS) $(riscv_SRCS) $(csky_SRCS) \ -+ $(parisc_SRCS) - - libebl_backends_pic_a_SOURCES = - am_libebl_backends_pic_a_OBJECTS = $(libebl_backends_a_SOURCES:.c=.os) -diff --git a/backends/libebl_parisc.h b/backends/libebl_parisc.h -new file mode 100644 -index 0000000..f473b79 ---- /dev/null -+++ b/backends/libebl_parisc.h -@@ -0,0 +1,9 @@ -+#ifndef _LIBEBL_HPPA_H -+#define _LIBEBL_HPPA_H 1 -+ -+#include -+ -+extern int parisc_return_value_location_32(Dwarf_Die *, const Dwarf_Op **locp); -+extern int parisc_return_value_location_64(Dwarf_Die *, const Dwarf_Op **locp); -+ -+#endif -diff --git a/backends/parisc_init.c b/backends/parisc_init.c -new file mode 100644 -index 0000000..f1e401c ---- /dev/null -+++ b/backends/parisc_init.c -@@ -0,0 +1,73 @@ -+/* Initialization of PA-RISC specific backend library. -+ Copyright (C) 2002, 2005, 2006 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ Written by Ulrich Drepper , 2002. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. 
-+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#define BACKEND parisc_ -+#define RELOC_PREFIX R_PARISC_ -+#include "libebl_CPU.h" -+#include "libebl_parisc.h" -+ -+/* This defines the common reloc hooks based on parisc_reloc.def. */ -+#include "common-reloc.c" -+ -+ -+const char * -+parisc_init (Elf *elf __attribute__ ((unused)), -+ GElf_Half machine __attribute__ ((unused)), -+ Ebl *eh, -+ size_t ehlen) -+{ -+ int pa64 = 0; -+ -+ /* Check whether the Elf_BH object has a sufficent size. */ -+ if (ehlen < sizeof (Ebl)) -+ return NULL; -+ -+ if (elf) { -+ GElf_Ehdr ehdr_mem; -+ GElf_Ehdr *ehdr = gelf_getehdr (elf, &ehdr_mem); -+ if (ehdr && (ehdr->e_flags & EF_PARISC_WIDE)) -+ pa64 = 1; -+ } -+ /* We handle it. */ -+ eh->name = "PA-RISC"; -+ parisc_init_reloc (eh); -+ HOOK (eh, reloc_simple_type); -+ HOOK (eh, machine_flag_check); -+ HOOK (eh, symbol_type_name); -+ HOOK (eh, segment_type_name); -+ HOOK (eh, section_type_name); -+ HOOK (eh, register_info); -+ if (pa64) -+ eh->return_value_location = parisc_return_value_location_64; -+ else -+ eh->return_value_location = parisc_return_value_location_32; -+ -+ return MODVERSION; -+} -diff --git a/backends/parisc_regs.c b/backends/parisc_regs.c -new file mode 100644 -index 0000000..3895f8e ---- /dev/null -+++ b/backends/parisc_regs.c -@@ -0,0 +1,159 @@ -+/* Register names and numbers for PA-RISC DWARF. -+ Copyright (C) 2005, 2006 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+ -+#define BACKEND parisc_ -+#include "libebl_CPU.h" -+ -+ssize_t -+parisc_register_info (Ebl *ebl, int regno, char *name, size_t namelen, -+ const char **prefix, const char **setname, -+ int *bits, int *type) -+{ -+ int pa64 = 0; -+ -+ if (ebl->elf) { -+ GElf_Ehdr ehdr_mem; -+ GElf_Ehdr *ehdr = gelf_getehdr (ebl->elf, &ehdr_mem); -+ if (ehdr->e_flags & EF_PARISC_WIDE) -+ pa64 = 1; -+ } -+ -+ int nregs = pa64 ? 
127 : 128; -+ -+ if (name == NULL) -+ return nregs; -+ -+ if (regno < 0 || regno >= nregs || namelen < 6) -+ return -1; -+ -+ *prefix = "%"; -+ -+ if (regno < 32) -+ { -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ if (pa64) -+ { -+ *bits = 64; -+ } -+ else -+ { -+ *bits = 32; -+ } -+ } -+ else if (regno == 32) -+ { -+ *setname = "special"; -+ if (pa64) -+ { -+ *bits = 6; -+ } -+ else -+ { -+ *bits = 5; -+ } -+ *type = DW_ATE_unsigned; -+ } -+ else -+ { -+ *setname = "FPU"; -+ *type = DW_ATE_float; -+ if (pa64) -+ { -+ *bits = 64; -+ } -+ else -+ { -+ *bits = 32; -+ } -+ } -+ -+ if (regno < 33) { -+ switch (regno) -+ { -+ case 0 ... 9: -+ name[0] = 'r'; -+ name[1] = regno + '0'; -+ namelen = 2; -+ break; -+ case 10 ... 31: -+ name[0] = 'r'; -+ name[1] = regno / 10 + '0'; -+ name[2] = regno % 10 + '0'; -+ namelen = 3; -+ break; -+ case 32: -+ *prefix = NULL; -+ name[0] = 'S'; -+ name[1] = 'A'; -+ name[2] = 'R'; -+ namelen = 3; -+ break; -+ } -+ } -+ else { -+ if (pa64 && ((regno - 72) % 2)) { -+ *setname = NULL; -+ return 0; -+ } -+ -+ switch (regno) -+ { -+ case 72 + 0 ... 72 + 11: -+ name[0] = 'f'; -+ name[1] = 'r'; -+ name[2] = (regno + 8 - 72) / 2 + '0'; -+ namelen = 3; -+ if ((regno + 8 - 72) % 2) { -+ name[3] = 'R'; -+ namelen++; -+ } -+ break; -+ case 72 + 12 ... 72 + 55: -+ name[0] = 'f'; -+ name[1] = 'r'; -+ name[2] = (regno + 8 - 72) / 2 / 10 + '0'; -+ name[3] = (regno + 8 - 72) / 2 % 10 + '0'; -+ namelen = 4; -+ if ((regno + 8 - 72) % 2) { -+ name[4] = 'R'; -+ namelen++; -+ } -+ break; -+ default: -+ *setname = NULL; -+ return 0; -+ } -+ } -+ name[namelen++] = '\0'; -+ return namelen; -+} -diff --git a/backends/parisc_reloc.def b/backends/parisc_reloc.def -new file mode 100644 -index 0000000..1f875ba ---- /dev/null -+++ b/backends/parisc_reloc.def -@@ -0,0 +1,128 @@ -+/* List the relocation types for PA-RISC. -*- C -*- -+ Copyright (C) 2005 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . 
*/ -+ -+/* NAME, REL|EXEC|DYN */ -+ -+RELOC_TYPE (NONE, EXEC|DYN) -+RELOC_TYPE (DIR32, REL|EXEC|DYN) -+RELOC_TYPE (DIR21L, REL|EXEC|DYN) -+RELOC_TYPE (DIR17R, REL) -+RELOC_TYPE (DIR17F, REL) -+RELOC_TYPE (DIR14R, REL|DYN) -+RELOC_TYPE (PCREL32, REL) -+RELOC_TYPE (PCREL21L, REL) -+RELOC_TYPE (PCREL17R, REL) -+RELOC_TYPE (PCREL17F, REL) -+RELOC_TYPE (PCREL14R, REL|EXEC) -+RELOC_TYPE (DPREL21L, REL) -+RELOC_TYPE (DPREL14WR, REL) -+RELOC_TYPE (DPREL14DR, REL) -+RELOC_TYPE (DPREL14R, REL) -+RELOC_TYPE (GPREL21L, 0) -+RELOC_TYPE (GPREL14R, 0) -+RELOC_TYPE (LTOFF21L, REL) -+RELOC_TYPE (LTOFF14R, REL) -+RELOC_TYPE (DLTIND14F, 0) -+RELOC_TYPE (SETBASE, 0) -+RELOC_TYPE (SECREL32, REL) -+RELOC_TYPE (BASEREL21L, 0) -+RELOC_TYPE (BASEREL17R, 0) -+RELOC_TYPE (BASEREL14R, 0) -+RELOC_TYPE (SEGBASE, 0) -+RELOC_TYPE (SEGREL32, REL) -+RELOC_TYPE (PLTOFF21L, 0) -+RELOC_TYPE (PLTOFF14R, 0) -+RELOC_TYPE (PLTOFF14F, 0) -+RELOC_TYPE (LTOFF_FPTR32, 0) -+RELOC_TYPE (LTOFF_FPTR21L, 0) -+RELOC_TYPE (LTOFF_FPTR14R, 0) -+RELOC_TYPE (FPTR64, 0) -+RELOC_TYPE (PLABEL32, REL|DYN) -+RELOC_TYPE (PCREL64, 0) -+RELOC_TYPE (PCREL22C, 0) -+RELOC_TYPE (PCREL22F, 0) -+RELOC_TYPE (PCREL14WR, 0) -+RELOC_TYPE (PCREL14DR, 0) -+RELOC_TYPE (PCREL16F, 0) -+RELOC_TYPE (PCREL16WF, 0) -+RELOC_TYPE (PCREL16DF, 0) -+RELOC_TYPE (DIR64, REL|DYN) -+RELOC_TYPE (DIR14WR, REL) -+RELOC_TYPE (DIR14DR, REL) -+RELOC_TYPE (DIR16F, REL) -+RELOC_TYPE (DIR16WF, REL) -+RELOC_TYPE (DIR16DF, REL) -+RELOC_TYPE (GPREL64, 0) -+RELOC_TYPE (GPREL14WR, 0) -+RELOC_TYPE (GPREL14DR, 0) -+RELOC_TYPE (GPREL16F, 0) -+RELOC_TYPE (GPREL16WF, 0) -+RELOC_TYPE (GPREL16DF, 0) -+RELOC_TYPE (LTOFF64, 0) -+RELOC_TYPE (LTOFF14WR, 0) -+RELOC_TYPE (LTOFF14DR, 0) -+RELOC_TYPE (LTOFF16F, 0) -+RELOC_TYPE (LTOFF16WF, 0) -+RELOC_TYPE (LTOFF16DF, 0) -+RELOC_TYPE (SECREL64, 0) -+RELOC_TYPE (BASEREL14WR, 0) -+RELOC_TYPE (BASEREL14DR, 0) -+RELOC_TYPE (SEGREL64, 0) -+RELOC_TYPE (PLTOFF14WR, 0) -+RELOC_TYPE (PLTOFF14DR, 0) -+RELOC_TYPE (PLTOFF16F, 0) -+RELOC_TYPE (PLTOFF16WF, 0) -+RELOC_TYPE (PLTOFF16DF, 0) -+RELOC_TYPE (LTOFF_FPTR64, 0) -+RELOC_TYPE (LTOFF_FPTR14WR, 0) -+RELOC_TYPE (LTOFF_FPTR14DR, 0) -+RELOC_TYPE (LTOFF_FPTR16F, 0) -+RELOC_TYPE (LTOFF_FPTR16WF, 0) -+RELOC_TYPE (LTOFF_FPTR16DF, 0) -+RELOC_TYPE (COPY, EXEC) -+RELOC_TYPE (IPLT, EXEC|DYN) -+RELOC_TYPE (EPLT, 0) -+RELOC_TYPE (TPREL32, DYN) -+RELOC_TYPE (TPREL21L, 0) -+RELOC_TYPE (TPREL14R, 0) -+RELOC_TYPE (LTOFF_TP21L, 0) -+RELOC_TYPE (LTOFF_TP14R, 0) -+RELOC_TYPE (LTOFF_TP14F, 0) -+RELOC_TYPE (TPREL64, 0) -+RELOC_TYPE (TPREL14WR, 0) -+RELOC_TYPE (TPREL14DR, 0) -+RELOC_TYPE (TPREL16F, 0) -+RELOC_TYPE (TPREL16WF, 0) -+RELOC_TYPE (TPREL16DF, 0) -+RELOC_TYPE (LTOFF_TP64, 0) -+RELOC_TYPE (LTOFF_TP14WR, 0) -+RELOC_TYPE (LTOFF_TP14DR, 0) -+RELOC_TYPE (LTOFF_TP16F, 0) -+RELOC_TYPE (LTOFF_TP16WF, 0) -+RELOC_TYPE (LTOFF_TP16DF, 0) -+RELOC_TYPE (TLS_DTPMOD32, DYN) -+RELOC_TYPE (TLS_DTPMOD64, DYN) -+ -+#define NO_RELATIVE_RELOC 1 -diff --git a/backends/parisc_retval.c b/backends/parisc_retval.c -new file mode 100644 -index 0000000..df7ec3a ---- /dev/null -+++ b/backends/parisc_retval.c -@@ -0,0 +1,213 @@ -+/* Function return value location for Linux/PA-RISC ABI. -+ Copyright (C) 2005 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. 
-+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+ -+#define BACKEND parisc_ -+#include "libebl_CPU.h" -+#include "libebl_parisc.h" -+ -+/* %r28, or pair %r28, %r29. */ -+static const Dwarf_Op loc_intreg32[] = -+ { -+ { .atom = DW_OP_reg28 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_reg29 }, { .atom = DW_OP_piece, .number = 4 }, -+ }; -+ -+static const Dwarf_Op loc_intreg[] = -+ { -+ { .atom = DW_OP_reg28 }, { .atom = DW_OP_piece, .number = 8 }, -+ { .atom = DW_OP_reg29 }, { .atom = DW_OP_piece, .number = 8 }, -+ }; -+#define nloc_intreg 1 -+#define nloc_intregpair 4 -+ -+/* %fr4L, or pair %fr4L, %fr4R on pa-32 */ -+static const Dwarf_Op loc_fpreg32[] = -+ { -+ { .atom = DW_OP_regx, .number = 72 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_regx, .number = 73 }, { .atom = DW_OP_piece, .number = 4 }, -+ }; -+#define nloc_fpreg32 2 -+#define nloc_fpregpair32 4 -+ -+/* $fr4 */ -+static const Dwarf_Op loc_fpreg[] = -+ { -+ { .atom = DW_OP_regx, .number = 72 }, -+ }; -+#define nloc_fpreg 1 -+ -+#if 0 -+/* The return value is a structure and is actually stored in stack space -+ passed in a hidden argument by the caller. Address of the location is stored -+ in %r28 before function call, but it may be changed by function. */ -+static const Dwarf_Op loc_aggregate[] = -+ { -+ { .atom = DW_OP_breg28 }, -+ }; -+#define nloc_aggregate 1 -+#endif -+ -+static int -+parisc_return_value_location_ (Dwarf_Die *functypedie, const Dwarf_Op **locp, int pa64) -+{ -+ Dwarf_Word regsize = pa64 ? 8 : 4; -+ -+ /* Start with the function's type, and get the DW_AT_type attribute, -+ which is the type of the return value. */ -+ -+ Dwarf_Attribute attr_mem; -+ Dwarf_Attribute *attr = dwarf_attr_integrate (functypedie, DW_AT_type, &attr_mem); -+ if (attr == NULL) -+ /* The function has no return value, like a `void' function in C. */ -+ return 0; -+ -+ Dwarf_Die die_mem; -+ Dwarf_Die *typedie = dwarf_formref_die (attr, &die_mem); -+ int tag = dwarf_tag (typedie); -+ -+ /* Follow typedefs and qualifiers to get to the actual type. */ -+ while (tag == DW_TAG_typedef -+ || tag == DW_TAG_const_type || tag == DW_TAG_volatile_type -+ || tag == DW_TAG_restrict_type) -+ { -+ attr = dwarf_attr_integrate (typedie, DW_AT_type, &attr_mem); -+ typedie = dwarf_formref_die (attr, &die_mem); -+ tag = dwarf_tag (typedie); -+ } -+ -+ switch (tag) -+ { -+ case -1: -+ return -1; -+ -+ case DW_TAG_subrange_type: -+ if (! 
dwarf_hasattr_integrate (typedie, DW_AT_byte_size)) -+ { -+ attr = dwarf_attr_integrate (typedie, DW_AT_type, &attr_mem); -+ typedie = dwarf_formref_die (attr, &die_mem); -+ tag = dwarf_tag (typedie); -+ } -+ /* Fall through. */ -+ -+ case DW_TAG_base_type: -+ case DW_TAG_enumeration_type: -+ case DW_TAG_pointer_type: -+ case DW_TAG_ptr_to_member_type: -+ { -+ Dwarf_Word size; -+ if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_byte_size, -+ &attr_mem), &size) != 0) -+ { -+ if (tag == DW_TAG_pointer_type || tag == DW_TAG_ptr_to_member_type) -+ size = 4; -+ else -+ return -1; -+ } -+ if (tag == DW_TAG_base_type) -+ { -+ Dwarf_Word encoding; -+ if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_encoding, -+ &attr_mem), &encoding) != 0) -+ return -1; -+ -+ if (encoding == DW_ATE_float) -+ { -+ if (pa64) { -+ *locp = loc_fpreg; -+ if (size <= 8) -+ return nloc_fpreg; -+ } -+ else { -+ *locp = loc_fpreg32; -+ if (size <= 4) -+ return nloc_fpreg32; -+ else if (size <= 8) -+ return nloc_fpregpair32; -+ } -+ goto aggregate; -+ } -+ } -+ if (pa64) -+ *locp = loc_intreg; -+ else -+ *locp = loc_intreg32; -+ if (size <= regsize) -+ return nloc_intreg; -+ if (size <= 2 * regsize) -+ return nloc_intregpair; -+ -+ /* Else fall through. */ -+ } -+ -+ case DW_TAG_structure_type: -+ case DW_TAG_class_type: -+ case DW_TAG_union_type: -+ case DW_TAG_array_type: -+ aggregate: { -+ Dwarf_Word size; -+ if (dwarf_aggregate_size (typedie, &size) != 0) -+ return -1; -+ if (pa64) -+ *locp = loc_intreg; -+ else -+ *locp = loc_intreg32; -+ if (size <= regsize) -+ return nloc_intreg; -+ if (size <= 2 * regsize) -+ return nloc_intregpair; -+#if 0 -+ /* there should be some way to know this location... But I do not see it. */ -+ *locp = loc_aggregate; -+ return nloc_aggregate; -+#endif -+ /* fall through. */ -+ } -+ } -+ -+ /* XXX We don't have a good way to return specific errors from ebl calls. -+ This value means we do not understand the type, but it is well-formed -+ DWARF and might be valid. */ -+ return -2; -+} -+ -+int -+parisc_return_value_location_32 (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+{ -+ return parisc_return_value_location_ (functypedie, locp, 0); -+} -+ -+int -+parisc_return_value_location_64 (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+{ -+ return parisc_return_value_location_ (functypedie, locp, 1); -+} -+ -diff --git a/backends/parisc_symbol.c b/backends/parisc_symbol.c -new file mode 100644 -index 0000000..5754bd8 ---- /dev/null -+++ b/backends/parisc_symbol.c -@@ -0,0 +1,113 @@ -+/* PA-RISC specific symbolic name handling. -+ Copyright (C) 2002, 2005 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ Written by Ulrich Drepper , 2002. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. 
-+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+ -+#define BACKEND parisc_ -+#include "libebl_CPU.h" -+ -+const char * -+parisc_segment_type_name (int segment, char *buf __attribute__ ((unused)), -+ size_t len __attribute__ ((unused))) -+{ -+ switch (segment) -+ { -+ case PT_PARISC_ARCHEXT: -+ return "PARISC_ARCHEXT"; -+ case PT_PARISC_UNWIND: -+ return "PARISC_UNWIND"; -+ default: -+ break; -+ } -+ return NULL; -+} -+ -+/* Return symbolic representation of symbol type. */ -+const char * -+parisc_symbol_type_name(int symbol, char *buf __attribute__ ((unused)), -+ size_t len __attribute__ ((unused))) -+{ -+ if (symbol == STT_PARISC_MILLICODE) -+ return "PARISC_MILLI"; -+ return NULL; -+} -+ -+/* Return symbolic representation of section type. */ -+const char * -+parisc_section_type_name (int type, -+ char *buf __attribute__ ((unused)), -+ size_t len __attribute__ ((unused))) -+{ -+ switch (type) -+ { -+ case SHT_PARISC_EXT: -+ return "PARISC_EXT"; -+ case SHT_PARISC_UNWIND: -+ return "PARISC_UNWIND"; -+ case SHT_PARISC_DOC: -+ return "PARISC_DOC"; -+ } -+ -+ return NULL; -+} -+ -+/* Check whether machine flags are valid. */ -+bool -+parisc_machine_flag_check (GElf_Word flags) -+{ -+ if (flags &~ (EF_PARISC_TRAPNIL | EF_PARISC_EXT | EF_PARISC_LSB | -+ EF_PARISC_WIDE | EF_PARISC_NO_KABP | -+ EF_PARISC_LAZYSWAP | EF_PARISC_ARCH)) -+ return 0; -+ -+ GElf_Word arch = flags & EF_PARISC_ARCH; -+ -+ return ((arch == EFA_PARISC_1_0) || (arch == EFA_PARISC_1_1) || -+ (arch == EFA_PARISC_2_0)); -+} -+ -+/* Check for the simple reloc types. */ -+Elf_Type -+parisc_reloc_simple_type (Ebl *ebl __attribute__ ((unused)), int type, -+ int *addsub __attribute__ ((unused))) -+{ -+ switch (type) -+ { -+ case R_PARISC_DIR64: -+ case R_PARISC_SECREL64: -+ return ELF_T_XWORD; -+ case R_PARISC_DIR32: -+ case R_PARISC_SECREL32: -+ return ELF_T_WORD; -+ default: -+ return ELF_T_NUM; -+ } -+} -diff --git a/libelf/elf.h b/libelf/elf.h -index 01648bd..218ceb2 100644 ---- a/libelf/elf.h -+++ b/libelf/elf.h -@@ -2162,16 +2162,24 @@ enum - #define R_PARISC_PCREL17F 12 /* 17 bits of rel. address. */ - #define R_PARISC_PCREL14R 14 /* Right 14 bits of rel. address. */ - #define R_PARISC_DPREL21L 18 /* Left 21 bits of rel. address. */ -+#define R_PARISC_DPREL14WR 19 -+#define R_PARISC_DPREL14DR 20 - #define R_PARISC_DPREL14R 22 /* Right 14 bits of rel. address. */ - #define R_PARISC_GPREL21L 26 /* GP-relative, left 21 bits. */ - #define R_PARISC_GPREL14R 30 /* GP-relative, right 14 bits. */ - #define R_PARISC_LTOFF21L 34 /* LT-relative, left 21 bits. */ - #define R_PARISC_LTOFF14R 38 /* LT-relative, right 14 bits. */ -+#define R_PARISC_DLTIND14F 39 -+#define R_PARISC_SETBASE 40 - #define R_PARISC_SECREL32 41 /* 32 bits section rel. address. */ -+#define R_PARISC_BASEREL21L 42 -+#define R_PARISC_BASEREL17R 43 -+#define R_PARISC_BASEREL14R 46 - #define R_PARISC_SEGBASE 48 /* No relocation, set segment base. */ - #define R_PARISC_SEGREL32 49 /* 32 bits segment rel. address. */ - #define R_PARISC_PLTOFF21L 50 /* PLT rel. address, left 21 bits. */ - #define R_PARISC_PLTOFF14R 54 /* PLT rel. address, right 14 bits. 
*/ -+#define R_PARISC_PLTOFF14F 55 - #define R_PARISC_LTOFF_FPTR32 57 /* 32 bits LT-rel. function pointer. */ - #define R_PARISC_LTOFF_FPTR21L 58 /* LT-rel. fct ptr, left 21 bits. */ - #define R_PARISC_LTOFF_FPTR14R 62 /* LT-rel. fct ptr, right 14 bits. */ -@@ -2180,6 +2188,7 @@ enum - #define R_PARISC_PLABEL21L 66 /* Left 21 bits of fdesc address. */ - #define R_PARISC_PLABEL14R 70 /* Right 14 bits of fdesc address. */ - #define R_PARISC_PCREL64 72 /* 64 bits PC-rel. address. */ -+#define R_PARISC_PCREL22C 73 - #define R_PARISC_PCREL22F 74 /* 22 bits PC-rel. address. */ - #define R_PARISC_PCREL14WR 75 /* PC-rel. address, right 14 bits. */ - #define R_PARISC_PCREL14DR 76 /* PC rel. address, right 14 bits. */ -@@ -2205,6 +2214,8 @@ enum - #define R_PARISC_LTOFF16WF 102 /* 16 bits LT-rel. address. */ - #define R_PARISC_LTOFF16DF 103 /* 16 bits LT-rel. address. */ - #define R_PARISC_SECREL64 104 /* 64 bits section rel. address. */ -+#define R_PARISC_BASEREL14WR 107 -+#define R_PARISC_BASEREL14DR 108 - #define R_PARISC_SEGREL64 112 /* 64 bits segment rel. address. */ - #define R_PARISC_PLTOFF14WR 115 /* PLT-rel. address, right 14 bits. */ - #define R_PARISC_PLTOFF14DR 116 /* PLT-rel. address, right 14 bits. */ diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/hurd_path.patch b/poky/meta/recipes-devtools/elfutils/files/debian/hurd_path.patch deleted file mode 100644 index 62a960f3a..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/hurd_path.patch +++ /dev/null @@ -1,17 +0,0 @@ -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia - -Index: elfutils-0.165/tests/run-native-test.sh -=================================================================== ---- elfutils-0.165.orig/tests/run-native-test.sh -+++ elfutils-0.165/tests/run-native-test.sh -@@ -83,6 +83,9 @@ native_test() - # "cannot attach to process: Function not implemented". - [ "$(uname)" = "GNU/kFreeBSD" ] && exit 77 - -+# hurd's /proc/$PID/maps does not give paths yet. 
-+[ "$(uname)" = "GNU" ] && exit 77 -+ - native_test ${abs_builddir}/allregs - native_test ${abs_builddir}/funcretval - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/ignore_strmerge.diff b/poky/meta/recipes-devtools/elfutils/files/debian/ignore_strmerge.diff deleted file mode 100644 index 55513eedd..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/ignore_strmerge.diff +++ /dev/null @@ -1,14 +0,0 @@ -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia - ---- elfutils-0.165.orig/tests/run-strip-strmerge.sh -+++ elfutils-0.165/tests/run-strip-strmerge.sh -@@ -30,7 +30,7 @@ remerged=remerged.elf - tempfiles $merged $stripped $debugfile $remerged - - echo elflint $input --testrun ${abs_top_builddir}/src/elflint --gnu $input -+testrun_on_self_skip ${abs_top_builddir}/src/elflint --gnu $input - echo elfstrmerge - testrun ${abs_top_builddir}/tests/elfstrmerge -o $merged $input - echo elflint $merged diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/kfreebsd_path.patch b/poky/meta/recipes-devtools/elfutils/files/debian/kfreebsd_path.patch deleted file mode 100644 index e85a2fed9..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/kfreebsd_path.patch +++ /dev/null @@ -1,20 +0,0 @@ -Upstream-Status: Pending [from debian] -Signed-off-by: Hongxu Jia - -Index: b/tests/run-native-test.sh -=================================================================== ---- a/tests/run-native-test.sh -+++ b/tests/run-native-test.sh -@@ -77,6 +77,12 @@ native_test() - test $native -eq 0 || testrun "$@" -p $native > /dev/null - } - -+# On the Debian buildds, GNU/kFreeBSD linprocfs /proc/$PID/maps does -+# not give absolute paths due to sbuild's bind mounts (bug #570805) -+# therefore the next two test programs are expected to fail with -+# "cannot attach to process: Function not implemented". 
-+[ "$(uname)" = "GNU/kFreeBSD" ] && exit 77 -+ - native_test ${abs_builddir}/allregs - native_test ${abs_builddir}/funcretval - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/mips_backend.diff b/poky/meta/recipes-devtools/elfutils/files/debian/mips_backend.diff deleted file mode 100644 index 749faa403..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/mips_backend.diff +++ /dev/null @@ -1,724 +0,0 @@ -From 7e0b036d087dfff7f5e306f52fc78745f99454c3 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 15:49:52 +0800 -Subject: [PATCH] mips backend - -Rebase to 0.177 -Upstream-Status: Pending [from debian] - -Signed-off-by: Hongxu Jia - ---- - backends/Makefile.am | 6 +- - backends/mips_init.c | 59 ++++++++ - backends/mips_regs.c | 104 +++++++++++++ - backends/mips_reloc.def | 79 ++++++++++ - backends/mips_retval.c | 321 ++++++++++++++++++++++++++++++++++++++++ - backends/mips_symbol.c | 53 +++++++ - libebl/eblopenbackend.c | 3 + - 7 files changed, 623 insertions(+), 2 deletions(-) - create mode 100644 backends/mips_init.c - create mode 100644 backends/mips_regs.c - create mode 100644 backends/mips_reloc.def - create mode 100644 backends/mips_retval.c - create mode 100644 backends/mips_symbol.c - -diff --git a/backends/Makefile.am b/backends/Makefile.am -index 4755f61..07d45d7 100644 ---- a/backends/Makefile.am -+++ b/backends/Makefile.am -@@ -37,7 +37,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/libebl -I$(top_srcdir)/libasm \ - noinst_LIBRARIES = libebl_backends.a libebl_backends_pic.a - - modules = i386 sh x86_64 ia64 alpha arm aarch64 sparc ppc ppc64 s390 \ -- tilegx m68k bpf riscv csky parisc -+ tilegx m68k bpf riscv csky parisc mips - - parisc_SRCS = parisc_init.c parisc_symbol.c parisc_regs.c parisc_retval.c - -@@ -100,12 +100,14 @@ riscv_SRCS = riscv_init.c riscv_symbol.c riscv_cfi.c riscv_regs.c \ - csky_SRCS = csky_attrs.c csky_init.c csky_symbol.c csky_cfi.c \ - csky_regs.c csky_initreg.c csky_corenote.c - -+mips_SRCS = mips_init.c mips_symbol.c mips_regs.c mips_retval.c -+ - libebl_backends_a_SOURCES = $(i386_SRCS) $(sh_SRCS) $(x86_64_SRCS) \ - $(ia64_SRCS) $(alpha_SRCS) $(arm_SRCS) \ - $(aarch64_SRCS) $(sparc_SRCS) $(ppc_SRCS) \ - $(ppc64_SRCS) $(s390_SRCS) $(tilegx_SRCS) \ - $(m68k_SRCS) $(bpf_SRCS) $(riscv_SRCS) $(csky_SRCS) \ -- $(parisc_SRCS) -+ $(parisc_SRCS) $(mips_SRCS) - - libebl_backends_pic_a_SOURCES = - am_libebl_backends_pic_a_OBJECTS = $(libebl_backends_a_SOURCES:.c=.os) -diff --git a/backends/mips_init.c b/backends/mips_init.c -new file mode 100644 -index 0000000..975c04e ---- /dev/null -+++ b/backends/mips_init.c -@@ -0,0 +1,59 @@ -+/* Initialization of mips specific backend library. -+ Copyright (C) 2006 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. 
-+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#define BACKEND mips_ -+#define RELOC_PREFIX R_MIPS_ -+#include "libebl_CPU.h" -+ -+/* This defines the common reloc hooks based on mips_reloc.def. */ -+#include "common-reloc.c" -+ -+const char * -+mips_init (Elf *elf __attribute__ ((unused)), -+ GElf_Half machine __attribute__ ((unused)), -+ Ebl *eh, -+ size_t ehlen) -+{ -+ /* Check whether the Elf_BH object has a sufficent size. */ -+ if (ehlen < sizeof (Ebl)) -+ return NULL; -+ -+ /* We handle it. */ -+ if (machine == EM_MIPS) -+ eh->name = "MIPS R3000 big-endian"; -+ else if (machine == EM_MIPS_RS3_LE) -+ eh->name = "MIPS R3000 little-endian"; -+ -+ mips_init_reloc (eh); -+ HOOK (eh, reloc_simple_type); -+ HOOK (eh, return_value_location); -+ HOOK (eh, register_info); -+ -+ return MODVERSION; -+} -diff --git a/backends/mips_regs.c b/backends/mips_regs.c -new file mode 100644 -index 0000000..44f86cb ---- /dev/null -+++ b/backends/mips_regs.c -@@ -0,0 +1,104 @@ -+/* Register names and numbers for MIPS DWARF. -+ Copyright (C) 2006 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . 
*/ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+ -+#define BACKEND mips_ -+#include "libebl_CPU.h" -+ -+ssize_t -+mips_register_info (Ebl *ebl __attribute__((unused)), -+ int regno, char *name, size_t namelen, -+ const char **prefix, const char **setname, -+ int *bits, int *type) -+{ -+ if (name == NULL) -+ return 66; -+ -+ if (regno < 0 || regno > 65 || namelen < 4) -+ return -1; -+ -+ *prefix = "$"; -+ -+ if (regno < 32) -+ { -+ *setname = "integer"; -+ *type = DW_ATE_signed; -+ *bits = 32; -+ if (regno < 32 + 10) -+ { -+ name[0] = regno + '0'; -+ namelen = 1; -+ } -+ else -+ { -+ name[0] = (regno / 10) + '0'; -+ name[1] = (regno % 10) + '0'; -+ namelen = 2; -+ } -+ } -+ else if (regno < 64) -+ { -+ *setname = "FPU"; -+ *type = DW_ATE_float; -+ *bits = 32; -+ name[0] = 'f'; -+ if (regno < 32 + 10) -+ { -+ name[1] = (regno - 32) + '0'; -+ namelen = 2; -+ } -+ else -+ { -+ name[1] = (regno - 32) / 10 + '0'; -+ name[2] = (regno - 32) % 10 + '0'; -+ namelen = 3; -+ } -+ } -+ else if (regno == 64) -+ { -+ *type = DW_ATE_signed; -+ *bits = 32; -+ name[0] = 'h'; -+ name[1] = 'i'; -+ namelen = 2; -+ } -+ else -+ { -+ *type = DW_ATE_signed; -+ *bits = 32; -+ name[0] = 'l'; -+ name[1] = 'o'; -+ namelen = 2; -+ } -+ -+ name[namelen++] = '\0'; -+ return namelen; -+} -diff --git a/backends/mips_reloc.def b/backends/mips_reloc.def -new file mode 100644 -index 0000000..4579970 ---- /dev/null -+++ b/backends/mips_reloc.def -@@ -0,0 +1,79 @@ -+/* List the relocation types for mips. -*- C -*- -+ Copyright (C) 2006 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . 
*/ -+ -+/* NAME, REL|EXEC|DYN */ -+ -+RELOC_TYPE (NONE, 0) -+RELOC_TYPE (16, 0) -+RELOC_TYPE (32, 0) -+RELOC_TYPE (REL32, 0) -+RELOC_TYPE (26, 0) -+RELOC_TYPE (HI16, 0) -+RELOC_TYPE (LO16, 0) -+RELOC_TYPE (GPREL16, 0) -+RELOC_TYPE (LITERAL, 0) -+RELOC_TYPE (GOT16, 0) -+RELOC_TYPE (PC16, 0) -+RELOC_TYPE (CALL16, 0) -+RELOC_TYPE (GPREL32, 0) -+ -+RELOC_TYPE (SHIFT5, 0) -+RELOC_TYPE (SHIFT6, 0) -+RELOC_TYPE (64, 0) -+RELOC_TYPE (GOT_DISP, 0) -+RELOC_TYPE (GOT_PAGE, 0) -+RELOC_TYPE (GOT_OFST, 0) -+RELOC_TYPE (GOT_HI16, 0) -+RELOC_TYPE (GOT_LO16, 0) -+RELOC_TYPE (SUB, 0) -+RELOC_TYPE (INSERT_A, 0) -+RELOC_TYPE (INSERT_B, 0) -+RELOC_TYPE (DELETE, 0) -+RELOC_TYPE (HIGHER, 0) -+RELOC_TYPE (HIGHEST, 0) -+RELOC_TYPE (CALL_HI16, 0) -+RELOC_TYPE (CALL_LO16, 0) -+RELOC_TYPE (SCN_DISP, 0) -+RELOC_TYPE (REL16, 0) -+RELOC_TYPE (ADD_IMMEDIATE, 0) -+RELOC_TYPE (PJUMP, 0) -+RELOC_TYPE (RELGOT, 0) -+RELOC_TYPE (JALR, 0) -+RELOC_TYPE (TLS_DTPMOD32, 0) -+RELOC_TYPE (TLS_DTPREL32, 0) -+RELOC_TYPE (TLS_DTPMOD64, 0) -+RELOC_TYPE (TLS_DTPREL64, 0) -+RELOC_TYPE (TLS_GD, 0) -+RELOC_TYPE (TLS_LDM, 0) -+RELOC_TYPE (TLS_DTPREL_HI16, 0) -+RELOC_TYPE (TLS_DTPREL_LO16, 0) -+RELOC_TYPE (TLS_GOTTPREL, 0) -+RELOC_TYPE (TLS_TPREL32, 0) -+RELOC_TYPE (TLS_TPREL64, 0) -+RELOC_TYPE (TLS_TPREL_HI16, 0) -+RELOC_TYPE (TLS_TPREL_LO16, 0) -+ -+#define NO_COPY_RELOC 1 -+#define NO_RELATIVE_RELOC 1 -diff --git a/backends/mips_retval.c b/backends/mips_retval.c -new file mode 100644 -index 0000000..656cd1f ---- /dev/null -+++ b/backends/mips_retval.c -@@ -0,0 +1,321 @@ -+/* Function return value location for Linux/mips ABI. -+ Copyright (C) 2005 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+#include -+#include -+ -+#include "../libebl/libeblP.h" -+#include "../libdw/libdwP.h" -+ -+#define BACKEND mips_ -+#include "libebl_CPU.h" -+ -+/* The ABI of the file. Also see EF_MIPS_ABI2 above. */ -+#define EF_MIPS_ABI 0x0000F000 -+ -+/* The original o32 abi. */ -+#define E_MIPS_ABI_O32 0x00001000 -+ -+/* O32 extended to work on 64 bit architectures */ -+#define E_MIPS_ABI_O64 0x00002000 -+ -+/* EABI in 32 bit mode */ -+#define E_MIPS_ABI_EABI32 0x00003000 -+ -+/* EABI in 64 bit mode */ -+#define E_MIPS_ABI_EABI64 0x00004000 -+ -+/* All the possible MIPS ABIs. 
*/ -+enum mips_abi -+ { -+ MIPS_ABI_UNKNOWN = 0, -+ MIPS_ABI_N32, -+ MIPS_ABI_O32, -+ MIPS_ABI_N64, -+ MIPS_ABI_O64, -+ MIPS_ABI_EABI32, -+ MIPS_ABI_EABI64, -+ MIPS_ABI_LAST -+ }; -+ -+/* Find the mips ABI of the current file */ -+enum mips_abi find_mips_abi(Elf *elf) -+{ -+ GElf_Ehdr ehdr_mem; -+ GElf_Ehdr *ehdr = gelf_getehdr (elf, &ehdr_mem); -+ -+ if (ehdr == NULL) -+ return MIPS_ABI_LAST; -+ -+ GElf_Word elf_flags = ehdr->e_flags; -+ -+ /* Check elf_flags to see if it specifies the ABI being used. */ -+ switch ((elf_flags & EF_MIPS_ABI)) -+ { -+ case E_MIPS_ABI_O32: -+ return MIPS_ABI_O32; -+ case E_MIPS_ABI_O64: -+ return MIPS_ABI_O64; -+ case E_MIPS_ABI_EABI32: -+ return MIPS_ABI_EABI32; -+ case E_MIPS_ABI_EABI64: -+ return MIPS_ABI_EABI64; -+ default: -+ if ((elf_flags & EF_MIPS_ABI2)) -+ return MIPS_ABI_N32; -+ } -+ -+ /* GCC creates a pseudo-section whose name describes the ABI. */ -+ size_t shstrndx; -+ if (elf_getshdrstrndx (elf, &shstrndx) < 0) -+ return MIPS_ABI_LAST; -+ -+ const char *name; -+ Elf_Scn *scn = NULL; -+ while ((scn = elf_nextscn (elf, scn)) != NULL) -+ { -+ GElf_Shdr shdr_mem; -+ GElf_Shdr *shdr = gelf_getshdr (scn, &shdr_mem); -+ if (shdr == NULL) -+ return MIPS_ABI_LAST; -+ -+ name = elf_strptr (elf, shstrndx, shdr->sh_name) ?: ""; -+ if (strncmp (name, ".mdebug.", 8) != 0) -+ continue; -+ -+ if (strcmp (name, ".mdebug.abi32") == 0) -+ return MIPS_ABI_O32; -+ else if (strcmp (name, ".mdebug.abiN32") == 0) -+ return MIPS_ABI_N32; -+ else if (strcmp (name, ".mdebug.abi64") == 0) -+ return MIPS_ABI_N64; -+ else if (strcmp (name, ".mdebug.abiO64") == 0) -+ return MIPS_ABI_O64; -+ else if (strcmp (name, ".mdebug.eabi32") == 0) -+ return MIPS_ABI_EABI32; -+ else if (strcmp (name, ".mdebug.eabi64") == 0) -+ return MIPS_ABI_EABI64; -+ else -+ return MIPS_ABI_UNKNOWN; -+ } -+ -+ return MIPS_ABI_UNKNOWN; -+} -+ -+unsigned int -+mips_abi_regsize (enum mips_abi abi) -+{ -+ switch (abi) -+ { -+ case MIPS_ABI_EABI32: -+ case MIPS_ABI_O32: -+ return 4; -+ case MIPS_ABI_N32: -+ case MIPS_ABI_N64: -+ case MIPS_ABI_O64: -+ case MIPS_ABI_EABI64: -+ return 8; -+ case MIPS_ABI_UNKNOWN: -+ case MIPS_ABI_LAST: -+ default: -+ return 0; -+ } -+} -+ -+ -+/* $v0 or pair $v0, $v1 */ -+static const Dwarf_Op loc_intreg_o32[] = -+ { -+ { .atom = DW_OP_reg2 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_reg3 }, { .atom = DW_OP_piece, .number = 4 }, -+ }; -+ -+static const Dwarf_Op loc_intreg[] = -+ { -+ { .atom = DW_OP_reg2 }, { .atom = DW_OP_piece, .number = 8 }, -+ { .atom = DW_OP_reg3 }, { .atom = DW_OP_piece, .number = 8 }, -+ }; -+#define nloc_intreg 1 -+#define nloc_intregpair 4 -+ -+/* $f0 (float), or pair $f0, $f1 (double). -+ * f2/f3 are used for COMPLEX (= 2 doubles) returns in Fortran */ -+static const Dwarf_Op loc_fpreg_o32[] = -+ { -+ { .atom = DW_OP_regx, .number = 32 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_regx, .number = 33 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_regx, .number = 34 }, { .atom = DW_OP_piece, .number = 4 }, -+ { .atom = DW_OP_regx, .number = 35 }, { .atom = DW_OP_piece, .number = 4 }, -+ }; -+ -+/* $f0, or pair $f0, $f2. 
*/ -+static const Dwarf_Op loc_fpreg[] = -+ { -+ { .atom = DW_OP_regx, .number = 32 }, { .atom = DW_OP_piece, .number = 8 }, -+ { .atom = DW_OP_regx, .number = 34 }, { .atom = DW_OP_piece, .number = 8 }, -+ }; -+#define nloc_fpreg 1 -+#define nloc_fpregpair 4 -+#define nloc_fpregquad 8 -+ -+/* The return value is a structure and is actually stored in stack space -+ passed in a hidden argument by the caller. But, the compiler -+ helpfully returns the address of that space in $v0. */ -+static const Dwarf_Op loc_aggregate[] = -+ { -+ { .atom = DW_OP_breg2, .number = 0 } -+ }; -+#define nloc_aggregate 1 -+ -+int -+mips_return_value_location (Dwarf_Die *functypedie, const Dwarf_Op **locp) -+{ -+ /* First find the ABI used by the elf object */ -+ enum mips_abi abi = find_mips_abi(functypedie->cu->dbg->elf); -+ -+ /* Something went seriously wrong while trying to figure out the ABI */ -+ if (abi == MIPS_ABI_LAST) -+ return -1; -+ -+ /* We couldn't identify the ABI, but the file seems valid */ -+ if (abi == MIPS_ABI_UNKNOWN) -+ return -2; -+ -+ /* Can't handle EABI variants */ -+ if ((abi == MIPS_ABI_EABI32) || (abi == MIPS_ABI_EABI64)) -+ return -2; -+ -+ unsigned int regsize = mips_abi_regsize (abi); -+ if (!regsize) -+ return -2; -+ -+ /* Start with the function's type, and get the DW_AT_type attribute, -+ which is the type of the return value. */ -+ -+ Dwarf_Attribute attr_mem; -+ Dwarf_Attribute *attr = dwarf_attr_integrate (functypedie, DW_AT_type, &attr_mem); -+ if (attr == NULL) -+ /* The function has no return value, like a `void' function in C. */ -+ return 0; -+ -+ Dwarf_Die die_mem; -+ Dwarf_Die *typedie = dwarf_formref_die (attr, &die_mem); -+ int tag = dwarf_tag (typedie); -+ -+ /* Follow typedefs and qualifiers to get to the actual type. */ -+ while (tag == DW_TAG_typedef -+ || tag == DW_TAG_const_type || tag == DW_TAG_volatile_type -+ || tag == DW_TAG_restrict_type) -+ { -+ attr = dwarf_attr_integrate (typedie, DW_AT_type, &attr_mem); -+ typedie = dwarf_formref_die (attr, &die_mem); -+ tag = dwarf_tag (typedie); -+ } -+ -+ switch (tag) -+ { -+ case -1: -+ return -1; -+ -+ case DW_TAG_subrange_type: -+ if (! dwarf_hasattr_integrate (typedie, DW_AT_byte_size)) -+ { -+ attr = dwarf_attr_integrate (typedie, DW_AT_type, &attr_mem); -+ typedie = dwarf_formref_die (attr, &die_mem); -+ tag = dwarf_tag (typedie); -+ } -+ /* Fall through. */ -+ -+ case DW_TAG_base_type: -+ case DW_TAG_enumeration_type: -+ case DW_TAG_pointer_type: -+ case DW_TAG_ptr_to_member_type: -+ { -+ Dwarf_Word size; -+ if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_byte_size, -+ &attr_mem), &size) != 0) -+ { -+ if (tag == DW_TAG_pointer_type || tag == DW_TAG_ptr_to_member_type) -+ size = regsize; -+ else -+ return -1; -+ } -+ if (tag == DW_TAG_base_type) -+ { -+ Dwarf_Word encoding; -+ if (dwarf_formudata (dwarf_attr_integrate (typedie, DW_AT_encoding, -+ &attr_mem), &encoding) != 0) -+ return -1; -+ -+#define ABI_LOC(loc, regsize) ((regsize) == 4 ? (loc ## _o32) : (loc)) -+ -+ if (encoding == DW_ATE_float) -+ { -+ *locp = ABI_LOC(loc_fpreg, regsize); -+ if (size <= regsize) -+ return nloc_fpreg; -+ -+ if (size <= 2*regsize) -+ return nloc_fpregpair; -+ -+ if (size <= 4*regsize && abi == MIPS_ABI_O32) -+ return nloc_fpregquad; -+ -+ goto aggregate; -+ } -+ } -+ *locp = ABI_LOC(loc_intreg, regsize); -+ if (size <= regsize) -+ return nloc_intreg; -+ if (size <= 2*regsize) -+ return nloc_intregpair; -+ -+ /* Else fall through. 
Shouldn't happen though (at least with gcc) */ -+ } -+ -+ case DW_TAG_structure_type: -+ case DW_TAG_class_type: -+ case DW_TAG_union_type: -+ case DW_TAG_array_type: -+ aggregate: -+ /* XXX TODO: Can't handle structure return with other ABI's yet :-/ */ -+ if ((abi != MIPS_ABI_O32) && (abi != MIPS_ABI_O64)) -+ return -2; -+ -+ *locp = loc_aggregate; -+ return nloc_aggregate; -+ } -+ -+ /* XXX We don't have a good way to return specific errors from ebl calls. -+ This value means we do not understand the type, but it is well-formed -+ DWARF and might be valid. */ -+ return -2; -+} -diff --git a/backends/mips_symbol.c b/backends/mips_symbol.c -new file mode 100644 -index 0000000..261b05d ---- /dev/null -+++ b/backends/mips_symbol.c -@@ -0,0 +1,53 @@ -+/* MIPS specific symbolic name handling. -+ Copyright (C) 2002, 2003, 2005 Red Hat, Inc. -+ This file is part of Red Hat elfutils. -+ Written by Jakub Jelinek , 2002. -+ -+ Red Hat elfutils is free software; you can redistribute it and/or modify -+ it under the terms of the GNU General Public License as published by the -+ Free Software Foundation; version 2 of the License. -+ -+ Red Hat elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received a copy of the GNU General Public License along -+ with Red Hat elfutils; if not, write to the Free Software Foundation, -+ Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA. -+ -+ Red Hat elfutils is an included package of the Open Invention Network. -+ An included package of the Open Invention Network is a package for which -+ Open Invention Network licensees cross-license their patents. No patent -+ license is granted, either expressly or impliedly, by designation as an -+ included package. Should you wish to participate in the Open Invention -+ Network licensing program, please visit www.openinventionnetwork.com -+ . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+#include -+ -+#define BACKEND mips_ -+#include "libebl_CPU.h" -+ -+/* Check for the simple reloc types. */ -+Elf_Type -+mips_reloc_simple_type (Ebl *ebl __attribute__ ((unused)), int type, -+ int *addsub __attribute__ ((unused))) -+{ -+ switch (type) -+ { -+ case R_MIPS_16: -+ return ELF_T_HALF; -+ case R_MIPS_32: -+ return ELF_T_WORD; -+ case R_MIPS_64: -+ return ELF_T_XWORD; -+ default: -+ return ELF_T_NUM; -+ } -+} -diff --git a/libebl/eblopenbackend.c b/libebl/eblopenbackend.c -index 210b47e..1feac13 100644 ---- a/libebl/eblopenbackend.c -+++ b/libebl/eblopenbackend.c -@@ -57,6 +57,7 @@ const char *m68k_init (Elf *, GElf_Half, Ebl *, size_t); - const char *bpf_init (Elf *, GElf_Half, Ebl *, size_t); - const char *riscv_init (Elf *, GElf_Half, Ebl *, size_t); - const char *csky_init (Elf *, GElf_Half, Ebl *, size_t); -+const char *mips_init (Elf *, GElf_Half, Ebl *, size_t); - - /* This table should contain the complete list of architectures as far - as the ELF specification is concerned. 
*/ -@@ -87,6 +88,8 @@ static const struct - { sparc_init, "elf_sparc", "sparc", 5, EM_SPARC, 0, 0 }, - { sparc_init, "elf_sparcv8plus", "sparc", 5, EM_SPARC32PLUS, 0, 0 }, - { s390_init, "ebl_s390", "s390", 4, EM_S390, 0, 0 }, -+ { mips_init, "elf_mips", "mips", 4, EM_MIPS, 0, 0 }, -+ { mips_init, "elf_mipsel", "mipsel", 4, EM_MIPS_RS3_LE, 0, 0 }, - - { NULL, "elf_m32", "m32", 3, EM_M32, 0, 0 }, - { m68k_init, "elf_m68k", "m68k", 4, EM_68K, ELFCLASS32, ELFDATA2MSB }, --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/mips_cfi.patch b/poky/meta/recipes-devtools/elfutils/files/debian/mips_cfi.patch deleted file mode 100644 index dd8f88a45..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/mips_cfi.patch +++ /dev/null @@ -1,129 +0,0 @@ -From 5bf6117a6eaf9007ce80adbb8b66a95ca98047a4 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 17:00:30 +0800 -Subject: [PATCH] mips_cfi - -Upstream-Status: Pending [from debian] - -Rebase to 0.177 - -Signed-off-by: Hongxu Jia - ---- - backends/Makefile.am | 2 +- - backends/mips_cfi.c | 80 ++++++++++++++++++++++++++++++++++++++++++++ - backends/mips_init.c | 1 + - 3 files changed, 82 insertions(+), 1 deletion(-) - create mode 100644 backends/mips_cfi.c - -diff --git a/backends/Makefile.am b/backends/Makefile.am -index 07d45d7..dec3080 100644 ---- a/backends/Makefile.am -+++ b/backends/Makefile.am -@@ -100,7 +100,7 @@ riscv_SRCS = riscv_init.c riscv_symbol.c riscv_cfi.c riscv_regs.c \ - csky_SRCS = csky_attrs.c csky_init.c csky_symbol.c csky_cfi.c \ - csky_regs.c csky_initreg.c csky_corenote.c - --mips_SRCS = mips_init.c mips_symbol.c mips_regs.c mips_retval.c -+mips_SRCS = mips_init.c mips_symbol.c mips_regs.c mips_retval.c mips_cfi.c - - libebl_backends_a_SOURCES = $(i386_SRCS) $(sh_SRCS) $(x86_64_SRCS) \ - $(ia64_SRCS) $(alpha_SRCS) $(arm_SRCS) \ -diff --git a/backends/mips_cfi.c b/backends/mips_cfi.c -new file mode 100644 -index 0000000..9ffdab5 ---- /dev/null -+++ b/backends/mips_cfi.c -@@ -0,0 +1,80 @@ -+/* MIPS ABI-specified defaults for DWARF CFI. -+ Copyright (C) 2018 Kurt Roeckx, Inc. -+ This file is part of elfutils. -+ -+ This file is free software; you can redistribute it and/or modify -+ it under the terms of either -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at -+ your option) any later version -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at -+ your option) any later version -+ -+ or both in parallel, as here. -+ -+ elfutils is distributed in the hope that it will be useful, but -+ WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see . */ -+ -+#ifdef HAVE_CONFIG_H -+# include -+#endif -+ -+#include -+ -+#define BACKEND mips_ -+#include "libebl_CPU.h" -+ -+int -+mips_abi_cfi (Ebl *ebl __attribute__ ((unused)), Dwarf_CIE *abi_info) -+{ -+ static const uint8_t abi_cfi[] = -+ { -+ /* Call-saved regs. 
*/ -+ DW_CFA_same_value, ULEB128_7 (16), /* $16 */ -+ DW_CFA_same_value, ULEB128_7 (17), /* $17 */ -+ DW_CFA_same_value, ULEB128_7 (18), /* $18 */ -+ DW_CFA_same_value, ULEB128_7 (19), /* $19 */ -+ DW_CFA_same_value, ULEB128_7 (20), /* $20 */ -+ DW_CFA_same_value, ULEB128_7 (21), /* $21 */ -+ DW_CFA_same_value, ULEB128_7 (22), /* $22 */ -+ DW_CFA_same_value, ULEB128_7 (23), /* $23 */ -+ DW_CFA_same_value, ULEB128_7 (28), /* $28 */ -+ DW_CFA_same_value, ULEB128_7 (29), /* $29 */ -+ DW_CFA_same_value, ULEB128_7 (30), /* $30 */ -+ -+ DW_CFA_same_value, ULEB128_7 (52), /* $f20 */ -+ DW_CFA_same_value, ULEB128_7 (53), /* $f21 */ -+ DW_CFA_same_value, ULEB128_7 (54), /* $f22 */ -+ DW_CFA_same_value, ULEB128_7 (55), /* $f23 */ -+ DW_CFA_same_value, ULEB128_7 (56), /* $f24 */ -+ DW_CFA_same_value, ULEB128_7 (57), /* $f25 */ -+ DW_CFA_same_value, ULEB128_7 (58), /* $f26 */ -+ DW_CFA_same_value, ULEB128_7 (59), /* $f27 */ -+ DW_CFA_same_value, ULEB128_7 (60), /* $f28 */ -+ DW_CFA_same_value, ULEB128_7 (61), /* $f29 */ -+ DW_CFA_same_value, ULEB128_7 (62), /* $f30 */ -+ DW_CFA_same_value, ULEB128_7 (63), /* $f31 */ -+ -+ /* The CFA is the SP. */ -+ DW_CFA_def_cfa, ULEB128_7 (29), ULEB128_7 (0), -+ }; -+ -+ abi_info->initial_instructions = abi_cfi; -+ abi_info->initial_instructions_end = &abi_cfi[sizeof abi_cfi]; -+ abi_info->data_alignment_factor = 4; -+ -+ abi_info->return_address_register = 31; /* $31 */ -+ -+ return 0; -+} -diff --git a/backends/mips_init.c b/backends/mips_init.c -index 8482e7f..bce5abe 100644 ---- a/backends/mips_init.c -+++ b/backends/mips_init.c -@@ -50,6 +50,7 @@ mips_init (Elf *elf __attribute__ ((unused)), - HOOK (eh, reloc_simple_type); - HOOK (eh, return_value_location); - HOOK (eh, register_info); -+ HOOK (eh, abi_cfi); - - return MODVERSION; - } diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/mips_readelf_w.patch b/poky/meta/recipes-devtools/elfutils/files/debian/mips_readelf_w.patch deleted file mode 100644 index c6d42e163..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/mips_readelf_w.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 7bdc83296865cf2b2a5615dbdb7ac0d441fb1849 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 16:55:01 +0800 -Subject: [PATCH] mips_readelf_w - -Upstream-Status: Pending [from debian] - -Rebase to 0.177 - -Signed-off-by: Hongxu Jia ---- - src/readelf.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/readelf.c b/src/readelf.c -index 2084fb1..5c02a9b 100644 ---- a/src/readelf.c -+++ b/src/readelf.c -@@ -11256,7 +11256,8 @@ print_debug (Dwfl_Module *dwflmod, Ebl *ebl, GElf_Ehdr *ehdr) - GElf_Shdr shdr_mem; - GElf_Shdr *shdr = gelf_getshdr (scn, &shdr_mem); - -- if (shdr != NULL && shdr->sh_type == SHT_PROGBITS) -+ if (shdr != NULL && ( -+ (shdr->sh_type == SHT_PROGBITS) || (shdr->sh_type == SHT_MIPS_DWARF))) - { - static const struct - { --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/elfutils/files/debian/testsuite-ignore-elflint.diff b/poky/meta/recipes-devtools/elfutils/files/debian/testsuite-ignore-elflint.diff deleted file mode 100644 index e6d7948c6..000000000 --- a/poky/meta/recipes-devtools/elfutils/files/debian/testsuite-ignore-elflint.diff +++ /dev/null @@ -1,52 +0,0 @@ -From 6393b0e57872b3ffedf0dbd6784cd29694010878 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 21 Aug 2019 16:59:01 +0800 -Subject: [PATCH 1/2] testsuite-ignore-elflint - -Upstream-Status: Pending [from debian] - -Rebase to 0.177 - -Signed-off-by: Hongxu Jia ---- - 
tests/run-elflint-self.sh | 2 +- - tests/test-subr.sh | 15 +++++++++++++++ - 2 files changed, 16 insertions(+), 1 deletion(-) - -diff --git a/tests/run-elflint-self.sh b/tests/run-elflint-self.sh -index 58fa7d0..85d21a5 100755 ---- a/tests/run-elflint-self.sh -+++ b/tests/run-elflint-self.sh -@@ -18,5 +18,5 @@ - - . $srcdir/test-subr.sh - --testrun_on_self ${abs_top_builddir}/src/elflint --quiet --gnu-ld -+testrun_on_self_skip ${abs_top_builddir}/src/elflint --quiet --gnu-ld - testrun_on_self_compressed ${abs_top_builddir}/src/elflint --quiet --gnu-ld -diff --git a/tests/test-subr.sh b/tests/test-subr.sh -index 09f428d..26f61f1 100644 ---- a/tests/test-subr.sh -+++ b/tests/test-subr.sh -@@ -201,3 +201,18 @@ testrun_on_self_quiet() - # Only exit if something failed - if test $exit_status != 0; then exit $exit_status; fi - } -+ -+# Same as testrun_on_self(), but skip on failure. -+testrun_on_self_skip() -+{ -+ exit_status=0 -+ -+ for file in $self_test_files; do -+ testrun $* $file \ -+ || { echo "*** failure in $* $file"; exit_status=77; } -+ done -+ -+ # Only exit if something failed -+ if test $exit_status != 0; then exit $exit_status; fi -+} -+ --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb index 9d1874c7b..54eb5e48a 100644 --- a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb +++ b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb @@ -8,8 +8,8 @@ DEPENDS_class-native = "hostperl-runtime-native" INHIBIT_DEFAULT_DEPS = "1" -SRCREV = "e78c96e5288993aaea3ec44e5c6ee755c668da79" -PV = "20200515+git${SRCPV}" +SRCREV = "696cd4a4eab1ee9fefbb7e38dbab291d741d0c5a" +PV = "20200621+git${SRCPV}" SRC_URI = "git://git.savannah.gnu.org/config.git \ file://gnu-configize.in" diff --git a/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb b/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb index 3c535e14c..72c988944 100644 --- a/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb +++ b/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb @@ -11,6 +11,9 @@ SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz SRC_URI[go_linux_amd64.sha256sum] = "aed845e4185a0b2a3c3d5e1d0a35491702c55889192bb9c30e67a3de6849c067" SRC_URI[go_linux_arm64.sha256sum] = "05dc46ada4e23a1f58e72349f7c366aae2e9c7a7f1e7653095538bc5bba5e077" +UPSTREAM_CHECK_URI = "https://golang.org/dl/" +UPSTREAM_CHECK_REGEX = "go(?P\d+(\.\d+)+)\.linux" + S = "${WORKDIR}/go" inherit goarch native diff --git a/poky/meta/recipes-devtools/jquery/jquery_3.5.1.bb b/poky/meta/recipes-devtools/jquery/jquery_3.5.1.bb index b4d7e80af..62f0dfb0c 100644 --- a/poky/meta/recipes-devtools/jquery/jquery_3.5.1.bb +++ b/poky/meta/recipes-devtools/jquery/jquery_3.5.1.bb @@ -2,12 +2,14 @@ SUMMARY = "jQuery is a fast, small, and feature-rich JavaScript library" HOMEPAGE = "https://jquery.com/" LICENSE = "MIT" SECTION = "devel" -LIC_FILES_CHKSUM = "file://${WORKDIR}/${BP}.js;beginline=8;endline=10;md5=ebd7bc5d23ab165188e526a0c65d24bb" +LIC_FILES_CHKSUM = "file://${S}/${BP}.js;beginline=8;endline=10;md5=ebd7bc5d23ab165188e526a0c65d24bb" +# unpack items to ${S} so the archiver can see them +# SRC_URI = "\ - https://code.jquery.com/${BP}.js;name=js \ - https://code.jquery.com/${BP}.min.js;name=min \ - https://code.jquery.com/${BP}.min.map;name=map \ + https://code.jquery.com/${BP}.js;name=js;subdir=${BP} \ + https://code.jquery.com/${BP}.min.js;name=min;subdir=${BP} \ + 
https://code.jquery.com/${BP}.min.map;name=map;subdir=${BP} \ " SRC_URI[js.sha256sum] = "416a3b2c3bf16d64f6b5b6d0f7b079df2267614dd6847fc2f3271b4409233c37" @@ -20,9 +22,9 @@ inherit allarch do_install() { install -d ${D}${datadir}/javascript/${BPN}/ - install -m 644 ${WORKDIR}/${BP}.js ${D}${datadir}/javascript/${BPN}/${BPN}.js - install -m 644 ${WORKDIR}/${BP}.min.js ${D}${datadir}/javascript/${BPN}/${BPN}.min.js - install -m 644 ${WORKDIR}/${BP}.min.map ${D}${datadir}/javascript/${BPN}/${BPN}.min.map + install -m 644 ${S}/${BP}.js ${D}${datadir}/javascript/${BPN}/${BPN}.js + install -m 644 ${S}/${BP}.min.js ${D}${datadir}/javascript/${BPN}/${BPN}.min.js + install -m 644 ${S}/${BP}.min.map ${D}${datadir}/javascript/${BPN}/${BPN}.min.map } PACKAGES = "${PN}" diff --git a/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch b/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch new file mode 100644 index 000000000..a45cfb61b --- /dev/null +++ b/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch @@ -0,0 +1,160 @@ +From 099016b7e8d70a6d5dd814e788bba08d33d48426 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann +Date: Mon, 4 May 2020 19:41:16 +0200 +Subject: [PATCH 1/3] Protect array_list_del_idx against size_t overflow. + +If the assignment of stop overflows due to idx and count being +larger than SIZE_T_MAX in sum, out of boundary access could happen. + +It takes invalid usage of this function for this to happen, but +I decided to add this check so array_list_del_idx is as safe against +bad usage as the other arraylist functions. + +Upstream-Status: Backport [https://github.com/json-c/json-c/commit/31243e4d1204ef78be34b0fcae73221eee6b83be] +CVE: CVE-2020-12762 +Signed-off-by: Chee Yang Lee + +--- + arraylist.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/arraylist.c b/arraylist.c +index 12ad8af6d3..e5524aca75 100644 +--- a/arraylist.c ++++ b/arraylist.c +@@ -136,6 +136,9 @@ int array_list_del_idx(struct array_list *arr, size_t idx, size_t count) + { + size_t i, stop; + ++ /* Avoid overflow in calculation with large indices. */ ++ if (idx > SIZE_T_MAX - count) ++ return -1; + stop = idx + count; + if (idx >= arr->length || stop > arr->length) + return -1; + +From 77d935b7ae7871a1940cd827e850e6063044ec45 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann +Date: Mon, 4 May 2020 19:46:45 +0200 +Subject: [PATCH 2/3] Prevent division by zero in linkhash. + +If a linkhash with a size of zero is created, then modulo operations +are prone to division by zero operations. + +Purely protective measure against bad usage. +--- + linkhash.c | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/linkhash.c b/linkhash.c +index 7ea58c0abf..f05cc38030 100644 +--- a/linkhash.c ++++ b/linkhash.c +@@ -12,6 +12,7 @@ + + #include "config.h" + ++#include + #include + #include + #include +@@ -499,6 +500,8 @@ struct lh_table *lh_table_new(int size, lh_entry_free_fn *free_fn, lh_hash_fn *h + int i; + struct lh_table *t; + ++ /* Allocate space for elements to avoid divisions by zero. */ ++ assert(size > 0); + t = (struct lh_table *)calloc(1, sizeof(struct lh_table)); + if (!t) + return NULL; + +From d07b91014986900a3a75f306d302e13e005e9d67 Mon Sep 17 00:00:00 2001 +From: Tobias Stoeckmann +Date: Mon, 4 May 2020 19:47:25 +0200 +Subject: [PATCH 3/3] Fix integer overflows. + +The data structures linkhash and printbuf are limited to 2 GB in size +due to a signed integer being used to track their current size. 
+ +If too much data is added, then size variable can overflow, which is +an undefined behaviour in C programming language. + +Assuming that a signed int overflow just leads to a negative value, +like it happens on many sytems (Linux i686/amd64 with gcc), then +printbuf is vulnerable to an out of boundary write on 64 bit systems. +--- + linkhash.c | 7 +++++-- + printbuf.c | 19 ++++++++++++++++--- + 2 files changed, 21 insertions(+), 5 deletions(-) + +diff --git a/linkhash.c b/linkhash.c +index f05cc38030..51e90b13a2 100644 +--- a/linkhash.c ++++ b/linkhash.c +@@ -580,9 +580,12 @@ int lh_table_insert_w_hash(struct lh_table *t, const void *k, const void *v, con + { + unsigned long n; + +- if (t->count >= t->size * LH_LOAD_FACTOR) +- if (lh_table_resize(t, t->size * 2) != 0) ++ if (t->count >= t->size * LH_LOAD_FACTOR) { ++ /* Avoid signed integer overflow with large tables. */ ++ int new_size = INT_MAX / 2 < t->size ? t->size * 2 : INT_MAX; ++ if (t->size == INT_MAX || lh_table_resize(t, new_size) != 0) + return -1; ++ } + + n = h % t->size; + +diff --git a/printbuf.c b/printbuf.c +index 976c12dde5..00822fac4f 100644 +--- a/printbuf.c ++++ b/printbuf.c +@@ -15,6 +15,7 @@ + + #include "config.h" + ++#include + #include + #include + #include +@@ -65,10 +66,16 @@ static int printbuf_extend(struct printbuf *p, int min_size) + + if (p->size >= min_size) + return 0; +- +- new_size = p->size * 2; +- if (new_size < min_size + 8) ++ /* Prevent signed integer overflows with large buffers. */ ++ if (min_size > INT_MAX - 8) ++ return -1; ++ if (p->size > INT_MAX / 2) + new_size = min_size + 8; ++ else { ++ new_size = p->size * 2; ++ if (new_size < min_size + 8) ++ new_size = min_size + 8; ++ } + #ifdef PRINTBUF_DEBUG + MC_DEBUG("printbuf_memappend: realloc " + "bpos=%d min_size=%d old_size=%d new_size=%d\n", +@@ -83,6 +90,9 @@ static int printbuf_extend(struct printbuf *p, int min_size) + + int printbuf_memappend(struct printbuf *p, const char *buf, int size) + { ++ /* Prevent signed integer overflows with large buffers. */ ++ if (size > INT_MAX - p->bpos - 1) ++ return -1; + if (p->size <= p->bpos + size + 1) + { + if (printbuf_extend(p, p->bpos + size + 1) < 0) +@@ -100,6 +110,9 @@ int printbuf_memset(struct printbuf *pb, int offset, int charvalue, int len) + + if (offset == -1) + offset = pb->bpos; ++ /* Prevent signed integer overflows with large buffers. 
*/ ++ if (len > INT_MAX - offset) ++ return -1; + size_needed = offset + len; + if (pb->size < size_needed) + { diff --git a/poky/meta/recipes-devtools/json-c/json-c_0.14.bb b/poky/meta/recipes-devtools/json-c/json-c_0.14.bb index 99fde873b..1d501d129 100644 --- a/poky/meta/recipes-devtools/json-c/json-c_0.14.bb +++ b/poky/meta/recipes-devtools/json-c/json-c_0.14.bb @@ -4,7 +4,10 @@ HOMEPAGE = "https://github.com/json-c/json-c/wiki" LICENSE = "MIT" LIC_FILES_CHKSUM = "file://COPYING;md5=de54b60fbbc35123ba193fea8ee216f2" -SRC_URI = "https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz" +SRC_URI = "https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz \ + file://CVE-2020-12762.patch \ +" + SRC_URI[sha256sum] = "b377de08c9b23ca3b37d9a9828107dff1de5ce208ff4ebb35005a794f30c6870" UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" diff --git a/poky/meta/recipes-devtools/libdnf/libdnf/0001-Add-WITH_TESTS-option.patch b/poky/meta/recipes-devtools/libdnf/libdnf/0001-Add-WITH_TESTS-option.patch index ac9400c48..384add5e1 100644 --- a/poky/meta/recipes-devtools/libdnf/libdnf/0001-Add-WITH_TESTS-option.patch +++ b/poky/meta/recipes-devtools/libdnf/libdnf/0001-Add-WITH_TESTS-option.patch @@ -1,4 +1,4 @@ -From 56fa2bbdbd29377a6ef0d0b7aadbac8b5ea8c95b Mon Sep 17 00:00:00 2001 +From 7d60d62b5c5374156703ca7262fb2f85ec5db119 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Tue, 6 Nov 2018 13:54:43 +0100 Subject: [PATCH] Add WITH_TESTS option @@ -14,7 +14,7 @@ Signed-off-by: Alexander Kanavin 2 files changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt -index 881152a..965c992 100644 +index 9c6e1b2f..8599c540 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -33,6 +33,7 @@ option(WITH_MAN "Enables hawkey man page generation" ON) @@ -24,8 +24,8 @@ index 881152a..965c992 100644 +option(WITH_TESTS "Enables unit tests" ON) - # load pkg-config first; it's required by other modules -@@ -165,8 +166,10 @@ endif() + # build options - debugging +@@ -179,8 +180,10 @@ endif() # build tests @@ -37,7 +37,7 @@ index 881152a..965c992 100644 add_subdirectory(python/hawkey) endif() diff --git a/python/hawkey/CMakeLists.txt b/python/hawkey/CMakeLists.txt -index d964534..84d1720 100644 +index d9645346..84d17204 100644 --- a/python/hawkey/CMakeLists.txt +++ b/python/hawkey/CMakeLists.txt @@ -50,4 +50,6 @@ target_link_libraries(_hawkeymodule ${PYTHON_LIBRARY}) diff --git a/poky/meta/recipes-devtools/libdnf/libdnf/0001-Use-single-quotes-around-string-literals-used-in-SQL.patch b/poky/meta/recipes-devtools/libdnf/libdnf/0001-Use-single-quotes-around-string-literals-used-in-SQL.patch deleted file mode 100644 index 6be484fea..000000000 --- a/poky/meta/recipes-devtools/libdnf/libdnf/0001-Use-single-quotes-around-string-literals-used-in-SQL.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 96ca7d0049461df2293dd0000edcbc69b64255e0 Mon Sep 17 00:00:00 2001 -From: Peter Kjellerstedt -Date: Sun, 3 May 2020 22:40:39 +0200 -Subject: [PATCH] Use single-quotes around string literals used in SQL - statements - -If sqlite is built with -DSQLITE_DQS=0 in accordance with -https://sqlite.org/quirks.html#dblquote, migration to version 1.2 of the -history database would fail with: - - History database cannot be created: /var/lib/dnf/history.sqlite. 
- Error: SQLite error on ":memory:": Executing an SQL statement failed: - no such column: 1.2 - -Upstream-Status: Submitted [https://github.com/rpm-software-management/libdnf/pull/951] -Signed-off-by: Peter Kjellerstedt ---- - libdnf/transaction/sql/migrate_tables_1_2.sql | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/libdnf/transaction/sql/migrate_tables_1_2.sql b/libdnf/transaction/sql/migrate_tables_1_2.sql -index 49b75315..f80ad1c4 100644 ---- a/libdnf/transaction/sql/migrate_tables_1_2.sql -+++ b/libdnf/transaction/sql/migrate_tables_1_2.sql -@@ -1,9 +1,9 @@ - R"**( - BEGIN TRANSACTION; - ALTER TABLE trans -- ADD comment TEXT DEFAULT ""; -+ ADD comment TEXT DEFAULT ''; - UPDATE config -- SET value = "1.2" -+ SET value = '1.2' - WHERE key = 'version'; - COMMIT; - )**" diff --git a/poky/meta/recipes-devtools/libdnf/libdnf_0.47.0.bb b/poky/meta/recipes-devtools/libdnf/libdnf_0.47.0.bb deleted file mode 100644 index 1b06d66b1..000000000 --- a/poky/meta/recipes-devtools/libdnf/libdnf_0.47.0.bb +++ /dev/null @@ -1,34 +0,0 @@ -SUMMARY = "Library providing simplified C and Python API to libsolv" -LICENSE = "LGPLv2.1" -LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" - -SRC_URI = "git://github.com/rpm-software-management/libdnf;branch=dnf-4-master \ - file://0001-FindGtkDoc.cmake-drop-the-requirement-for-GTKDOC_SCA.patch \ - file://0004-Set-libsolv-variables-with-pkg-config-cmake-s-own-mo.patch \ - file://0001-Get-parameters-for-both-libsolv-and-libsolvext-libdn.patch \ - file://0001-Add-WITH_TESTS-option.patch \ - file://0001-Look-fo-sphinx-only-if-documentation-is-actually-ena.patch \ - file://0001-Use-single-quotes-around-string-literals-used-in-SQL.patch \ - " - -SRCREV = "8330eea6985c4e4b53796f858de5b6b38b1ddf5c" -UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+)" - -S = "${WORKDIR}/git" - -DEPENDS = "glib-2.0 libsolv libcheck librepo rpm gtk-doc libmodulemd json-c swig-native" - -inherit gtk-doc gobject-introspection cmake pkgconfig distutils3-base - -EXTRA_OECMAKE = " -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} -DWITH_MAN=OFF -DPYTHON_DESIRED=3 \ - ${@bb.utils.contains('GI_DATA_ENABLED', 'True', '-DWITH_GIR=ON', '-DWITH_GIR=OFF', d)} \ - -DWITH_TESTS=OFF \ - -DWITH_ZCHUNK=OFF \ - -DWITH_HTML=OFF \ - " -EXTRA_OECMAKE_append_class-native = " -DWITH_GIR=OFF" -EXTRA_OECMAKE_append_class-nativesdk = " -DWITH_GIR=OFF" - -BBCLASSEXTEND = "native nativesdk" -PNBLACKLIST[libdnf] ?= "${@bb.utils.contains('PACKAGE_CLASSES', 'package_rpm', '', 'Does not build without package_rpm in PACKAGE_CLASSES due disabled rpm support in libsolv', d)}" - diff --git a/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb b/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb new file mode 100644 index 000000000..947b2f234 --- /dev/null +++ b/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb @@ -0,0 +1,33 @@ +SUMMARY = "Library providing simplified C and Python API to libsolv" +LICENSE = "LGPLv2.1" +LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" + +SRC_URI = "git://github.com/rpm-software-management/libdnf;branch=dnf-4-master \ + file://0001-FindGtkDoc.cmake-drop-the-requirement-for-GTKDOC_SCA.patch \ + file://0004-Set-libsolv-variables-with-pkg-config-cmake-s-own-mo.patch \ + file://0001-Get-parameters-for-both-libsolv-and-libsolvext-libdn.patch \ + file://0001-Add-WITH_TESTS-option.patch \ + file://0001-Look-fo-sphinx-only-if-documentation-is-actually-ena.patch \ + " + +SRCREV = "46a28d0cf09277fffc11392e5e362a2eda0d53a8" 
+UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+)" + +S = "${WORKDIR}/git" + +DEPENDS = "glib-2.0 libsolv libcheck librepo rpm gtk-doc libmodulemd json-c swig-native" + +inherit gtk-doc gobject-introspection cmake pkgconfig distutils3-base + +EXTRA_OECMAKE = " -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} -DWITH_MAN=OFF -DPYTHON_DESIRED=3 \ + ${@bb.utils.contains('GI_DATA_ENABLED', 'True', '-DWITH_GIR=ON', '-DWITH_GIR=OFF', d)} \ + -DWITH_TESTS=OFF \ + -DWITH_ZCHUNK=OFF \ + -DWITH_HTML=OFF \ + " +EXTRA_OECMAKE_append_class-native = " -DWITH_GIR=OFF" +EXTRA_OECMAKE_append_class-nativesdk = " -DWITH_GIR=OFF" + +BBCLASSEXTEND = "native nativesdk" +PNBLACKLIST[libdnf] ?= "${@bb.utils.contains('PACKAGE_CLASSES', 'package_rpm', '', 'Does not build without package_rpm in PACKAGE_CLASSES due disabled rpm support in libsolv', d)}" + diff --git a/poky/meta/recipes-devtools/meson/meson.inc b/poky/meta/recipes-devtools/meson/meson.inc index a0b54f57d..ffa17b306 100644 --- a/poky/meta/recipes-devtools/meson/meson.inc +++ b/poky/meta/recipes-devtools/meson/meson.inc @@ -15,9 +15,8 @@ SRC_URI = "https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${P file://cross-prop-default.patch \ file://0001-modules-python.py-do-not-substitute-python-s-install.patch \ file://0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch \ - file://0001-boost-Always-sort-shared-before-static-fixes-7171.patch \ " -SRC_URI[sha256sum] = "a7716eeae8f8dff002e4147642589ab6496ff839e4376a5aed761f83c1fa0455" +SRC_URI[sha256sum] = "f2bdf4cf0694e696b48261cdd14380fb1d0fe33d24744d8b2df0c12f33ebb662" SRC_URI_append_class-native = " \ file://0001-Make-CPU-family-warnings-fatal.patch \ diff --git a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch index 01a8bb3d5..39b1af52e 100644 --- a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch +++ b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch @@ -1,4 +1,4 @@ -From 830db2c7a136b2446d740c9ca025374352ffd16d Mon Sep 17 00:00:00 2001 +From 62c415eedb62905de76e2e0bbd156a947705cab2 Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Tue, 3 Jul 2018 13:59:09 +0100 Subject: [PATCH] Make CPU family warnings fatal diff --git a/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch b/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch deleted file mode 100644 index 217218180..000000000 --- a/poky/meta/recipes-devtools/meson/meson/0001-boost-Always-sort-shared-before-static-fixes-7171.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 5862ad6965c60caa861dfdcd29e499c34c4d00da Mon Sep 17 00:00:00 2001 -From: Daniel Mensinger -Date: Thu, 21 May 2020 13:35:27 +0200 -Subject: [PATCH] boost: Always sort shared before static (fixes #7171) - -Upstream-Status: Backport [https://github.com/mesonbuild/meson/commit/5862ad6965c60caa861dfdcd29e499c34c4d00da] - -Signed-off-by: Andrew Geissler ---- - mesonbuild/dependencies/boost.py | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/mesonbuild/dependencies/boost.py b/mesonbuild/dependencies/boost.py -index 6e85c534..38497041 100644 ---- a/mesonbuild/dependencies/boost.py -+++ b/mesonbuild/dependencies/boost.py -@@ -189,13 +189,13 @@ class BoostLibraryFile(): - def __lt__(self, other: T.Any) -> bool: - if isinstance(other, BoostLibraryFile): - return ( -- self.mod_name, 
self.version_lib, self.arch, self.static, -+ self.mod_name, self.static, self.version_lib, self.arch, - not self.mt, not self.runtime_static, - not self.debug, self.runtime_debug, self.python_debug, - self.stlport, self.deprecated_iostreams, - self.name, - ) < ( -- other.mod_name, other.version_lib, other.arch, other.static, -+ other.mod_name, other.static, other.version_lib, other.arch, - not other.mt, not other.runtime_static, - not other.debug, other.runtime_debug, other.python_debug, - other.stlport, other.deprecated_iostreams, --- -2.26.2 - diff --git a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch index 246c4d7be..bb06d9924 100644 --- a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch +++ b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch @@ -1,4 +1,4 @@ -From 45df8f0a71c6b60cb98a336f8690af04965dfa9b Mon Sep 17 00:00:00 2001 +From d976d5a8abd6d42edf794d2a4c211fc6697fb14c Mon Sep 17 00:00:00 2001 From: Peter Kjellerstedt Date: Thu, 26 Jul 2018 16:32:49 +0200 Subject: [PATCH] Support building allarch recipes again diff --git a/poky/meta/recipes-devtools/meson/meson_0.54.2.bb b/poky/meta/recipes-devtools/meson/meson_0.54.2.bb deleted file mode 100644 index de9b905c1..000000000 --- a/poky/meta/recipes-devtools/meson/meson_0.54.2.bb +++ /dev/null @@ -1,4 +0,0 @@ -include meson.inc - -BBCLASSEXTEND = "native" - diff --git a/poky/meta/recipes-devtools/meson/meson_0.54.3.bb b/poky/meta/recipes-devtools/meson/meson_0.54.3.bb new file mode 100644 index 000000000..de9b905c1 --- /dev/null +++ b/poky/meta/recipes-devtools/meson/meson_0.54.3.bb @@ -0,0 +1,4 @@ +include meson.inc + +BBCLASSEXTEND = "native" + diff --git a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.2.bb b/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.2.bb deleted file mode 100644 index 67add2c25..000000000 --- a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.2.bb +++ /dev/null @@ -1,65 +0,0 @@ -include meson.inc - -inherit nativesdk -inherit siteinfo - -SRC_URI += "file://meson-setup.py \ - file://meson-wrapper" - -def meson_endian(prefix, d): - arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS") - sitedata = siteinfo_data_for_machine(arch, os, d) - if "endian-little" in sitedata: - return "little" - elif "endian-big" in sitedata: - return "big" - else: - bb.fatal("Cannot determine endianism for %s-%s" % (arch, os)) - -# The cross file logic is similar but not identical to that in meson.bbclass, -# since it's generating for an SDK rather than a cross-compile. Important -# differences are: -# - We can't set vars like CC, CXX, etc. yet because they will be filled in with -# real paths by meson-setup.sh when the SDK is extracted. -# - Some overrides aren't needed, since the SDK injects paths that take care of -# them. -do_install_append() { - install -d ${D}${datadir}/meson - cat >${D}${datadir}/meson/meson.cross.template <${D}${datadir}/meson/meson.cross.template < - -Index: opkg-utils-0.4.2/opkg-build -=================================================================== ---- opkg-utils-0.4.2.orig/opkg-build -+++ opkg-utils-0.4.2/opkg-build -@@ -305,8 +305,10 @@ if [ ! -z "$SOURCE_DATE_EPOCH" ]; then - mtime_args="--mtime=@$build_date --clamp-mtime" - fi - --( cd $pkg_dir/$CONTROL && find . -type f > $tmp_dir/control_list ) --( cd $pkg_dir && find . -path ./$CONTROL -prune -o -path . 
-o -print > $tmp_dir/file_list ) -+export LANG=C -+export LC_ALL=C -+( cd $pkg_dir/$CONTROL && find . -type f | sort > $tmp_dir/control_list ) -+( cd $pkg_dir && find . -path ./$CONTROL -prune -o -path . -o -print | sort > $tmp_dir/file_list ) - ( cd $pkg_dir && tar $ogargs $tsortargs --no-recursion $mtime_args -c $tarformat -T $tmp_dir/file_list | $compressor $compressorargs > $tmp_dir/data.tar.$cext ) - ( cd $pkg_dir/$CONTROL && tar $ogargs $tsortargs --no-recursion --mtime=@$build_date -c $tarformat -T $tmp_dir/control_list | gzip $zipargs > $tmp_dir/control.tar.gz ) - rm $tmp_dir/file_list diff --git a/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb b/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb deleted file mode 100644 index 931524019..000000000 --- a/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.2.bb +++ /dev/null @@ -1,66 +0,0 @@ -SUMMARY = "Additional utilities for the opkg package manager" -SUMMARY_update-alternatives-opkg = "Utility for managing the alternatives system" -SECTION = "base" -HOMEPAGE = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ - file://opkg.py;beginline=2;endline=18;md5=ffa11ff3c15eb31c6a7ceaa00cc9f986" -PROVIDES += "${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'virtual/update-alternatives', '', d)}" - -SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \ - file://fix-reproducibility.patch \ -" -UPSTREAM_CHECK_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils/refs/" - -SRC_URI[md5sum] = "cc210650644fcb9bba06ad5ec95a63ec" -SRC_URI[sha256sum] = "5929ad87d541789e0b82d626db01a1201ac48df6f49f2262fcfb86cf815e5d6c" - -TARGET_CC_ARCH += "${LDFLAGS}" - -RDEPENDS_${PN} += "bash" - -inherit perlnative - -# For native builds we use the host Python -PYTHONRDEPS = "python3 python3-shell python3-io python3-math python3-crypt python3-logging python3-fcntl python3-pickle python3-compression python3-stringold" -PYTHONRDEPS_class-native = "" - -PACKAGECONFIG = "python update-alternatives" -PACKAGECONFIG[python] = ",,,${PYTHONRDEPS}" -PACKAGECONFIG[update-alternatives] = ",,," - -do_install() { - oe_runmake PREFIX=${prefix} DESTDIR=${D} install - if ! ${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'true', 'false', d)}; then - rm -f "${D}${bindir}/update-alternatives" - fi -} - -do_install_append_class-target() { - if ! ${@bb.utils.contains('PACKAGECONFIG', 'python', 'true', 'false', d)}; then - grep -lZ "/usr/bin/env.*python" ${D}${bindir}/* | xargs -0 rm - fi - - if [ -e "${D}${bindir}/update-alternatives" ]; then - sed -i ${D}${bindir}/update-alternatives -e 's,/usr/bin,${bindir},g; s,/usr/lib,${nonarch_libdir},g' - fi -} - -# These are empty and will pull python3-dev into images where it wouldn't -# have been otherwise, so don't generate them. 
-PACKAGES_remove = "${PN}-dev ${PN}-staticdev" - -PACKAGES =+ "update-alternatives-opkg" -FILES_update-alternatives-opkg = "${bindir}/update-alternatives" -RPROVIDES_update-alternatives-opkg = "update-alternatives update-alternatives-cworth" -RREPLACES_update-alternatives-opkg = "update-alternatives-cworth" -RCONFLICTS_update-alternatives-opkg = "update-alternatives-cworth" - -pkg_postrm_update-alternatives-opkg() { - rm -rf $D${nonarch_libdir}/opkg/alternatives - rmdir $D${nonarch_libdir}/opkg || true -} - -BBCLASSEXTEND = "native nativesdk" - -CLEANBROKEN = "1" diff --git a/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.3.bb b/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.3.bb new file mode 100644 index 000000000..f9df58a29 --- /dev/null +++ b/poky/meta/recipes-devtools/opkg-utils/opkg-utils_0.4.3.bb @@ -0,0 +1,65 @@ +SUMMARY = "Additional utilities for the opkg package manager" +SUMMARY_update-alternatives-opkg = "Utility for managing the alternatives system" +SECTION = "base" +HOMEPAGE = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ + file://opkg.py;beginline=2;endline=18;md5=ffa11ff3c15eb31c6a7ceaa00cc9f986" +PROVIDES += "${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'virtual/update-alternatives', '', d)}" + +SRC_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/${BPN}/snapshot/${BPN}-${PV}.tar.gz \ +" +UPSTREAM_CHECK_URI = "http://git.yoctoproject.org/cgit/cgit.cgi/opkg-utils/refs/" + +SRC_URI[md5sum] = "7bbadb3c381f3ea935b21d3bb8cc4671" +SRC_URI[sha256sum] = "046517600fb0aed6c4645edefe02281f4fa2f1c02f71596152d93172452c0b01" + +TARGET_CC_ARCH += "${LDFLAGS}" + +RDEPENDS_${PN} += "bash" + +inherit perlnative + +# For native builds we use the host Python +PYTHONRDEPS = "python3 python3-shell python3-io python3-math python3-crypt python3-logging python3-fcntl python3-pickle python3-compression python3-stringold" +PYTHONRDEPS_class-native = "" + +PACKAGECONFIG = "python update-alternatives" +PACKAGECONFIG[python] = ",,,${PYTHONRDEPS}" +PACKAGECONFIG[update-alternatives] = ",,," + +do_install() { + oe_runmake PREFIX=${prefix} DESTDIR=${D} install + if ! ${@bb.utils.contains('PACKAGECONFIG', 'update-alternatives', 'true', 'false', d)}; then + rm -f "${D}${bindir}/update-alternatives" + fi +} + +do_install_append_class-target() { + if ! ${@bb.utils.contains('PACKAGECONFIG', 'python', 'true', 'false', d)}; then + grep -lZ "/usr/bin/env.*python" ${D}${bindir}/* | xargs -0 rm + fi + + if [ -e "${D}${bindir}/update-alternatives" ]; then + sed -i ${D}${bindir}/update-alternatives -e 's,/usr/bin,${bindir},g; s,/usr/lib,${nonarch_libdir},g' + fi +} + +# These are empty and will pull python3-dev into images where it wouldn't +# have been otherwise, so don't generate them. 
+PACKAGES_remove = "${PN}-dev ${PN}-staticdev" + +PACKAGES =+ "update-alternatives-opkg" +FILES_update-alternatives-opkg = "${bindir}/update-alternatives" +RPROVIDES_update-alternatives-opkg = "update-alternatives update-alternatives-cworth" +RREPLACES_update-alternatives-opkg = "update-alternatives-cworth" +RCONFLICTS_update-alternatives-opkg = "update-alternatives-cworth" + +pkg_postrm_update-alternatives-opkg() { + rm -rf $D${nonarch_libdir}/opkg/alternatives + rmdir $D${nonarch_libdir}/opkg || true +} + +BBCLASSEXTEND = "native nativesdk" + +CLEANBROKEN = "1" diff --git a/poky/meta/recipes-devtools/opkg/opkg_0.4.2.bb b/poky/meta/recipes-devtools/opkg/opkg_0.4.2.bb deleted file mode 100644 index 66a74dc5e..000000000 --- a/poky/meta/recipes-devtools/opkg/opkg_0.4.2.bb +++ /dev/null @@ -1,74 +0,0 @@ -SUMMARY = "Open Package Manager" -SUMMARY_libopkg = "Open Package Manager library" -SECTION = "base" -HOMEPAGE = "http://code.google.com/p/opkg/" -BUGTRACKER = "http://code.google.com/p/opkg/issues/list" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ - file://src/opkg.c;beginline=4;endline=18;md5=d6200b0f2b41dee278aa5fad333eecae" - -DEPENDS = "libarchive" - -PE = "1" - -SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz \ - file://opkg.conf \ - file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \ - file://run-ptest \ -" - -SRC_URI[md5sum] = "bd13e5dfc1c2536f0c7b2e15f795278e" -SRC_URI[sha256sum] = "86887852c43457edfff9d8b6d9520f3f1cdd55f25eb600a6eb31e1c4e151e106" - -# This needs to be before ptest inherit, otherwise all ptest files end packaged -# in libopkg package if OPKGLIBDIR == libdir, because default -# PTEST_PATH ?= "${libdir}/${BPN}/ptest" -PACKAGES =+ "libopkg" - -inherit autotools pkgconfig ptest - -target_localstatedir := "${localstatedir}" -OPKGLIBDIR ??= "${target_localstatedir}/lib" - -PACKAGECONFIG ??= "libsolv" - -PACKAGECONFIG[gpg] = "--enable-gpg,--disable-gpg,\ - gnupg gpgme libgpg-error,\ - ${@ "gnupg" if ("native" in d.getVar("PN")) else "gnupg-gpg"}\ - " -PACKAGECONFIG[curl] = "--enable-curl,--disable-curl,curl" -PACKAGECONFIG[ssl-curl] = "--enable-ssl-curl,--disable-ssl-curl,curl openssl" -PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" -PACKAGECONFIG[sha256] = "--enable-sha256,--disable-sha256" -PACKAGECONFIG[libsolv] = "--with-libsolv,--without-libsolv,libsolv" - -EXTRA_OECONF += " --disable-pathfinder" -EXTRA_OECONF_class-native = "--localstatedir=/${@os.path.relpath('${localstatedir}', '${STAGING_DIR_NATIVE}')} --sysconfdir=/${@os.path.relpath('${sysconfdir}', '${STAGING_DIR_NATIVE}')}" - -do_install_append () { - install -d ${D}${sysconfdir}/opkg - install -m 0644 ${WORKDIR}/opkg.conf ${D}${sysconfdir}/opkg/opkg.conf - echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf - - # We need to create the lock directory - install -d ${D}${OPKGLIBDIR}/opkg -} - -do_install_ptest () { - sed -i -e '/@echo $^/d' ${D}${PTEST_PATH}/tests/Makefile - sed -i -e '/@PYTHONPATH=. $(PYTHON) $^/a\\t@if [ "$$?" 
!= "0" ];then echo "FAIL:"$^;else echo "PASS:"$^;fi' ${D}${PTEST_PATH}/tests/Makefile -} - -RDEPENDS_${PN} = "${VIRTUAL-RUNTIME_update-alternatives} opkg-arch-config libarchive" -RDEPENDS_${PN}_class-native = "" -RDEPENDS_${PN}_class-nativesdk = "" -RDEPENDS_${PN}-ptest += "make binutils python3-core python3-compression" -RREPLACES_${PN} = "opkg-nogpg opkg-collateral" -RCONFLICTS_${PN} = "opkg-collateral" -RPROVIDES_${PN} = "opkg-collateral" - -FILES_libopkg = "${libdir}/*.so.* ${OPKGLIBDIR}/opkg/" - -BBCLASSEXTEND = "native nativesdk" - -CONFFILES_${PN} = "${sysconfdir}/opkg/opkg.conf" diff --git a/poky/meta/recipes-devtools/opkg/opkg_0.4.3.bb b/poky/meta/recipes-devtools/opkg/opkg_0.4.3.bb new file mode 100644 index 000000000..46b7aa252 --- /dev/null +++ b/poky/meta/recipes-devtools/opkg/opkg_0.4.3.bb @@ -0,0 +1,74 @@ +SUMMARY = "Open Package Manager" +SUMMARY_libopkg = "Open Package Manager library" +SECTION = "base" +HOMEPAGE = "http://code.google.com/p/opkg/" +BUGTRACKER = "http://code.google.com/p/opkg/issues/list" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ + file://src/opkg.c;beginline=4;endline=18;md5=d6200b0f2b41dee278aa5fad333eecae" + +DEPENDS = "libarchive" + +PE = "1" + +SRC_URI = "http://downloads.yoctoproject.org/releases/${BPN}/${BPN}-${PV}.tar.gz \ + file://opkg.conf \ + file://0001-opkg_conf-create-opkg.lock-in-run-instead-of-var-run.patch \ + file://run-ptest \ +" + +SRC_URI[md5sum] = "86ec5eee9362aca0990994a402e077e9" +SRC_URI[sha256sum] = "dda452854bc0cd1334f7ba18a66003d1c12a98600c894111b56919b1ea434718" + +# This needs to be before ptest inherit, otherwise all ptest files end packaged +# in libopkg package if OPKGLIBDIR == libdir, because default +# PTEST_PATH ?= "${libdir}/${BPN}/ptest" +PACKAGES =+ "libopkg" + +inherit autotools pkgconfig ptest + +target_localstatedir := "${localstatedir}" +OPKGLIBDIR ??= "${target_localstatedir}/lib" + +PACKAGECONFIG ??= "libsolv" + +PACKAGECONFIG[gpg] = "--enable-gpg,--disable-gpg,\ + gnupg gpgme libgpg-error,\ + ${@ "gnupg" if ("native" in d.getVar("PN")) else "gnupg-gpg"}\ + " +PACKAGECONFIG[curl] = "--enable-curl,--disable-curl,curl" +PACKAGECONFIG[ssl-curl] = "--enable-ssl-curl,--disable-ssl-curl,curl openssl" +PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" +PACKAGECONFIG[sha256] = "--enable-sha256,--disable-sha256" +PACKAGECONFIG[libsolv] = "--with-libsolv,--without-libsolv,libsolv" + +EXTRA_OECONF += " --disable-pathfinder" +EXTRA_OECONF_class-native = "--localstatedir=/${@os.path.relpath('${localstatedir}', '${STAGING_DIR_NATIVE}')} --sysconfdir=/${@os.path.relpath('${sysconfdir}', '${STAGING_DIR_NATIVE}')}" + +do_install_append () { + install -d ${D}${sysconfdir}/opkg + install -m 0644 ${WORKDIR}/opkg.conf ${D}${sysconfdir}/opkg/opkg.conf + echo "option lists_dir ${OPKGLIBDIR}/opkg/lists" >>${D}${sysconfdir}/opkg/opkg.conf + + # We need to create the lock directory + install -d ${D}${OPKGLIBDIR}/opkg +} + +do_install_ptest () { + sed -i -e '/@echo $^/d' ${D}${PTEST_PATH}/tests/Makefile + sed -i -e '/@PYTHONPATH=. $(PYTHON) $^/a\\t@if [ "$$?" 
!= "0" ];then echo "FAIL:"$^;else echo "PASS:"$^;fi' ${D}${PTEST_PATH}/tests/Makefile +} + +RDEPENDS_${PN} = "${VIRTUAL-RUNTIME_update-alternatives} opkg-arch-config libarchive" +RDEPENDS_${PN}_class-native = "" +RDEPENDS_${PN}_class-nativesdk = "" +RDEPENDS_${PN}-ptest += "make binutils python3-core python3-compression" +RREPLACES_${PN} = "opkg-nogpg opkg-collateral" +RCONFLICTS_${PN} = "opkg-collateral" +RPROVIDES_${PN} = "opkg-collateral" + +FILES_libopkg = "${libdir}/*.so.* ${OPKGLIBDIR}/opkg/" + +BBCLASSEXTEND = "native nativesdk" + +CONFFILES_${PN} = "${sysconfdir}/opkg/opkg.conf" diff --git a/poky/meta/recipes-devtools/perl/files/0001-PATCH-perl-134117-Close-DATA-in-loc_tools.pl.patch b/poky/meta/recipes-devtools/perl/files/0001-PATCH-perl-134117-Close-DATA-in-loc_tools.pl.patch deleted file mode 100644 index 79cae0d6f..000000000 --- a/poky/meta/recipes-devtools/perl/files/0001-PATCH-perl-134117-Close-DATA-in-loc_tools.pl.patch +++ /dev/null @@ -1,30 +0,0 @@ -From a04a75f20f03aa08ce8118b3b0b3f93eb3e997c5 Mon Sep 17 00:00:00 2001 -From: Richard Leach -Date: Sun, 19 May 2019 20:16:41 +0000 -Subject: [PATCH] PATCH: [perl #134117] Close DATA in loc_tools.pl - -This prevents unexpected text and fixes test lib/warnings.t - -Upstream-Status: Backport [a04a75f20f03aa08ce8118b3b0b3f93eb3e997c5] - -Signed-off-by: Matthew Zeng - ---- - t/loc_tools.pl | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/t/loc_tools.pl b/t/loc_tools.pl -index c76e29388a..beebd98d88 100644 ---- a/t/loc_tools.pl -+++ b/t/loc_tools.pl -@@ -421,7 +421,7 @@ sub find_locales ($;$) { - } - - # The rest of the locales are in this file. -- push @Data, ; -+ push @Data, ; close DATA; - - foreach my $line (@Data) { - my ($locale_name, $language_codes, $country_codes, $encodings) = --- -2.25.0 diff --git a/poky/meta/recipes-devtools/perl/files/0001-enc2xs-Add-environment-variable-to-suppress-comments.patch b/poky/meta/recipes-devtools/perl/files/0001-enc2xs-Add-environment-variable-to-suppress-comments.patch deleted file mode 100644 index 1a531072a..000000000 --- a/poky/meta/recipes-devtools/perl/files/0001-enc2xs-Add-environment-variable-to-suppress-comments.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 31a2c5555f9ef32f35d7d5ce1fd09a010ba5f5c6 Mon Sep 17 00:00:00 2001 -From: Joshua Watt -Date: Mon, 17 Jun 2019 10:47:15 -0500 -Subject: [PATCH 1/2] enc2xs: Add environment variable to suppress comments - -Comment generation in enc2xs can now be suppressed by setting the -ENC2XS_NO_COMMENTS environment variable. This allows enc2xs to produce -reproducible output by omitting the name of the generating program. 
- -Signed-off-by: Joshua Watt -Upstream-Status: Backport [https://github.com/dankogai/p5-encode/pull/145] ---- - cpan/Encode/bin/enc2xs | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/cpan/Encode/bin/enc2xs b/cpan/Encode/bin/enc2xs -index 619b64b757..bfce9ee735 100644 ---- a/cpan/Encode/bin/enc2xs -+++ b/cpan/Encode/bin/enc2xs -@@ -144,6 +144,7 @@ getopts('CM:SQqOo:f:n:v',\%opt); - $opt{M} and make_makefile_pl($opt{M}, @ARGV); - $opt{C} and make_configlocal_pm($opt{C}, @ARGV); - $opt{v} ||= $ENV{ENC2XS_VERBOSE}; -+$opt{q} ||= $ENV{ENC2XS_NO_COMMENTS}; - - sub verbose { - print STDERR @_ if $opt{v}; --- -2.21.0 - diff --git a/poky/meta/recipes-devtools/perl/files/perl-rdepends.txt b/poky/meta/recipes-devtools/perl/files/perl-rdepends.txt index 9ecfce001..e7cd55198 100644 --- a/poky/meta/recipes-devtools/perl/files/perl-rdepends.txt +++ b/poky/meta/recipes-devtools/perl/files/perl-rdepends.txt @@ -59,7 +59,6 @@ RDEPENDS_perl-module-archive-tar += "perl-module-io-file" RDEPENDS_perl-module-archive-tar += "perl-module-io-zlib" RDEPENDS_perl-module-archive-tar += "perl-module-strict" RDEPENDS_perl-module-archive-tar += "perl-module-vars" -RDEPENDS_perl-module-arybase += "perl-module-xsloader" RDEPENDS_perl-module-attribute-handlers += "perl-module-strict" RDEPENDS_perl-module-attribute-handlers += "perl-module-warnings" RDEPENDS_perl-module-attributes += "perl-module-exporter" @@ -109,9 +108,6 @@ RDEPENDS_perl-module-b-concise += "perl-module-exporter" RDEPENDS_perl-module-b-concise += "perl-module-feature" RDEPENDS_perl-module-b-concise += "perl-module-strict" RDEPENDS_perl-module-b-concise += "perl-module-warnings" -RDEPENDS_perl-module-b-debug += "perl-module-b" -RDEPENDS_perl-module-b-debug += "perl-module-config" -RDEPENDS_perl-module-b-debug += "perl-module-strict" RDEPENDS_perl-module-benchmark += "perl-module-exporter" RDEPENDS_perl-module-benchmark += "perl-module-strict" RDEPENDS_perl-module-bigint += "perl-module-constant" @@ -153,9 +149,6 @@ RDEPENDS_perl-module-b-xref += "perl-module-b" RDEPENDS_perl-module-b-xref += "perl-module-config" RDEPENDS_perl-module-b-xref += "perl-module-strict" RDEPENDS_perl-module-bytes += "perl-module-bytes-heavy" -RDEPENDS_perl-module-carp += "perl-module-exporter" -RDEPENDS_perl-module-carp += "perl-module-strict" -RDEPENDS_perl-module-carp += "perl-module-warnings" RDEPENDS_perl-module--charnames += "perl-module-bytes" RDEPENDS_perl-module-charnames += "perl-module-bytes" RDEPENDS_perl-module-charnames += "perl-module--charnames" @@ -196,2313 +189,6 @@ RDEPENDS_perl-module-compress-zlib += "perl-module-warnings " RDEPENDS_perl-module-config-extensions += "perl-module-config" RDEPENDS_perl-module-config-extensions += "perl-module-exporter" RDEPENDS_perl-module-config-extensions += "perl-module-strict" -RDEPENDS_perl-module-config += "perl-module-strict" -RDEPENDS_perl-module-config += "perl-module-warnings" -RDEPENDS_perl-module-config-perl-v += "perl-module-config" -RDEPENDS_perl-module-config-perl-v += "perl-module-exporter" -RDEPENDS_perl-module-config-perl-v += "perl-module-strict" -RDEPENDS_perl-module-config-perl-v += "perl-module-vars" -RDEPENDS_perl-module-config-perl-v += "perl-module-warnings" -RDEPENDS_perl-module-constant += "perl-module-strict" -RDEPENDS_perl-module-constant += "perl-module-warnings-register" -RDEPENDS_perl-module-corelist += "perl-module-list-util" -RDEPENDS_perl-module-corelist += "perl-module-corelist" -RDEPENDS_perl-module-corelist += "perl-module-strict" -RDEPENDS_perl-module-corelist += "perl-module-version" 
-RDEPENDS_perl-module-corelist += "perl-module-warnings" -RDEPENDS_perl-module-cpan += "perl-module-b" -RDEPENDS_perl-module-cpan += "perl-module-config" -RDEPENDS_perl-module-cpan += "perl-module-cwd" -RDEPENDS_perl-module-cpan += "perl-module-data-dumper" -RDEPENDS_perl-module-cpan += "perl-module-dirhandle" -RDEPENDS_perl-module-cpan += "perl-module-errno" -RDEPENDS_perl-module-cpan += "perl-module-exporter" -RDEPENDS_perl-module-cpan += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-cpan += "perl-module-extutils-manifest" -RDEPENDS_perl-module-cpan += "perl-module-fcntl" -RDEPENDS_perl-module-cpan += "perl-module-file-basename" -RDEPENDS_perl-module-cpan += "perl-module-file-copy" -RDEPENDS_perl-module-cpan += "perl-module-file-find" -RDEPENDS_perl-module-cpan += "perl-module-filehandle" -RDEPENDS_perl-module-cpan += "perl-module-file-path" -RDEPENDS_perl-module-cpan += "perl-module-lib" -RDEPENDS_perl-module-cpan += "perl-module-net-ping" -RDEPENDS_perl-module-cpan += "perl-module-overload" -RDEPENDS_perl-module-cpan += "perl-module-posix" -RDEPENDS_perl-module-cpan += "perl-module-safe" -RDEPENDS_perl-module-cpan += "perl-module-strict" -RDEPENDS_perl-module-cpan += "perl-module-sys-hostname" -RDEPENDS_perl-module-cpan += "perl-module-term-readline" -RDEPENDS_perl-module-cpan += "perl-module-text-parsewords" -RDEPENDS_perl-module-cpan += "perl-module-text-wrap" -RDEPENDS_perl-module-cpan += "perl-module-time-local" -RDEPENDS_perl-module-cpan += "perl-module-vars" -RDEPENDS_perl-module-cpan += "perl-module-warnings" -RDEPENDS_perl-module-cwd += "perl-module-errno" -RDEPENDS_perl-module-cwd += "perl-module-exporter" -RDEPENDS_perl-module-cwd += "perl-module-strict" -RDEPENDS_perl-module-cwd += "perl-module-xsloader" -RDEPENDS_perl-module-data-dumper += "perl-module-bytes" -RDEPENDS_perl-module-data-dumper += "perl-module-config" -RDEPENDS_perl-module-data-dumper += "perl-module-constant" -RDEPENDS_perl-module-data-dumper += "perl-module-exporter" -RDEPENDS_perl-module-data-dumper += "perl-module-xsloader" -RDEPENDS_perl-module-dbm-filter-compress += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter-compress += "perl-module-warnings" -RDEPENDS_perl-module-dbm-filter-encode += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter-encode += "perl-module-warnings" -RDEPENDS_perl-module-dbm-filter-int32 += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter-int32 += "perl-module-warnings" -RDEPENDS_perl-module-dbm-filter-null += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter-null += "perl-module-warnings" -RDEPENDS_perl-module-dbm-filter += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter += "perl-module-warnings" -RDEPENDS_perl-module-dbm-filter-utf8 += "perl-module-strict" -RDEPENDS_perl-module-dbm-filter-utf8 += "perl-module-warnings" -RDEPENDS_perl-module-db += "perl-module-strict" -RDEPENDS_perl-module-deprecate += "perl-module-strict" -RDEPENDS_perl-module-deprecate += "perl-module-warnings" -RDEPENDS_perl-module-devel-peek += "perl-module-exporter" -RDEPENDS_perl-module-devel-peek += "perl-module-xsloader" -RDEPENDS_perl-module-devel-ppport += "perl-module-file-find" -RDEPENDS_perl-module-devel-ppport += "perl-module-getopt-long" -RDEPENDS_perl-module-devel-ppport += "perl-module-strict" -RDEPENDS_perl-module-devel-ppport += "perl-module-vars" -RDEPENDS_perl-module-devel-selfstubber += "perl-module-selfloader" -RDEPENDS_perl-module-diagnostics += "perl-module-config" -RDEPENDS_perl-module-diagnostics += "perl-module-getopt-std" 
-RDEPENDS_perl-module-diagnostics += "perl-module-strict" -RDEPENDS_perl-module-diagnostics += "perl-module-text-tabs" -RDEPENDS_perl-module-digest-base += "perl-module-mime-base64" -RDEPENDS_perl-module-digest-base += "perl-module-strict" -RDEPENDS_perl-module-digest-base += "perl-module-vars" -RDEPENDS_perl-module-digest-file += "perl-module-digest" -RDEPENDS_perl-module-digest-file += "perl-module-exporter" -RDEPENDS_perl-module-digest-file += "perl-module-strict" -RDEPENDS_perl-module-digest-file += "perl-module-vars" -RDEPENDS_perl-module-digest-md5 += "perl-module-digest-base" -RDEPENDS_perl-module-digest-md5 += "perl-module-exporter" -RDEPENDS_perl-module-digest-md5 += "perl-module-strict" -RDEPENDS_perl-module-digest-md5 += "perl-module-vars" -RDEPENDS_perl-module-digest-md5 += "perl-module-xsloader" -RDEPENDS_perl-module-digest += "perl-module-strict" -RDEPENDS_perl-module-digest += "perl-module-vars" -RDEPENDS_perl-module-digest-sha += "perl-module-digest-base" -RDEPENDS_perl-module-digest-sha += "perl-module-dynaloader" -RDEPENDS_perl-module-digest-sha += "perl-module-exporter" -RDEPENDS_perl-module-digest-sha += "perl-module-fcntl" -RDEPENDS_perl-module-digest-sha += "perl-module-integer" -RDEPENDS_perl-module-digest-sha += "perl-module-strict" -RDEPENDS_perl-module-digest-sha += "perl-module-vars" -RDEPENDS_perl-module-digest-sha += "perl-module-warnings" -RDEPENDS_perl-module-digest-sha += "perl-module-xsloader" -RDEPENDS_perl-module-dynaloader += "perl-module-config" -RDEPENDS_perl-module-encode-alias += "perl-module-constant" -RDEPENDS_perl-module-encode-alias += "perl-module-encode" -RDEPENDS_perl-module-encode-alias += "perl-module-exporter" -RDEPENDS_perl-module-encode-alias += "perl-module-strict" -RDEPENDS_perl-module-encode-alias += "perl-module-warnings" -RDEPENDS_perl-module-encode-byte += "perl-module-encode" -RDEPENDS_perl-module-encode-byte += "perl-module-strict" -RDEPENDS_perl-module-encode-byte += "perl-module-warnings" -RDEPENDS_perl-module-encode-byte += "perl-module-xsloader" -RDEPENDS_perl-module-encode-cjkconstants += "perl-module-exporter" -RDEPENDS_perl-module-encode-cjkconstants += "perl-module-strict" -RDEPENDS_perl-module-encode-cjkconstants += "perl-module-warnings" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-encode" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-parent" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-strict" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-utf8" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-vars" -RDEPENDS_perl-module-encode-cn-hz += "perl-module-warnings" -RDEPENDS_perl-module-encode-cn += "perl-module-encode" -RDEPENDS_perl-module-encode-cn += "perl-module-encode-cn-hz" -RDEPENDS_perl-module-encode-cn += "perl-module-strict" -RDEPENDS_perl-module-encode-cn += "perl-module-warnings" -RDEPENDS_perl-module-encode-cn += "perl-module-xsloader" -RDEPENDS_perl-module-encode-config += "perl-module-strict" -RDEPENDS_perl-module-encode-config += "perl-module-warnings" -RDEPENDS_perl-module-encode-ebcdic += "perl-module-encode" -RDEPENDS_perl-module-encode-ebcdic += "perl-module-strict" -RDEPENDS_perl-module-encode-ebcdic += "perl-module-warnings" -RDEPENDS_perl-module-encode-ebcdic += "perl-module-xsloader" -RDEPENDS_perl-module-encode-encoder += "perl-module-constant" -RDEPENDS_perl-module-encode-encoder += "perl-module-encode" -RDEPENDS_perl-module-encode-encoder += "perl-module-exporter" -RDEPENDS_perl-module-encode-encoder += "perl-module-overload" -RDEPENDS_perl-module-encode-encoder += 
"perl-module-strict" -RDEPENDS_perl-module-encode-encoder += "perl-module-warnings" -RDEPENDS_perl-module-encode-encoding += "perl-module-constant" -RDEPENDS_perl-module-encode-encoding += "perl-module-encode" -RDEPENDS_perl-module-encode-encoding += "perl-module-encode-mime-name" -RDEPENDS_perl-module-encode-encoding += "perl-module-strict" -RDEPENDS_perl-module-encode-encoding += "perl-module-warnings" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-encode" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-parent" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-strict" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-utf8" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-vars" -RDEPENDS_perl-module-encode-gsm0338 += "perl-module-warnings" -RDEPENDS_perl-module-encode-guess += "perl-module-bytes" -RDEPENDS_perl-module-encode-guess += "perl-module-constant" -RDEPENDS_perl-module-encode-guess += "perl-module-encode" -RDEPENDS_perl-module-encode-guess += "perl-module-encode-unicode" -RDEPENDS_perl-module-encode-guess += "perl-module-parent" -RDEPENDS_perl-module-encode-guess += "perl-module-strict" -RDEPENDS_perl-module-encode-guess += "perl-module-warnings" -RDEPENDS_perl-module-encode-jp-h2z += "perl-module-encode-cjkconstants" -RDEPENDS_perl-module-encode-jp-h2z += "perl-module-strict" -RDEPENDS_perl-module-encode-jp-h2z += "perl-module-vars" -RDEPENDS_perl-module-encode-jp-h2z += "perl-module-warnings" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-bytes" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-encode" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-encode-cjkconstants" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-encode-jp-h2z" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-parent" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-strict" -RDEPENDS_perl-module-encode-jp-jis7 += "perl-module-warnings" -RDEPENDS_perl-module-encode-jp += "perl-module-encode" -RDEPENDS_perl-module-encode-jp += "perl-module-encode-jp-jis7" -RDEPENDS_perl-module-encode-jp += "perl-module-strict" -RDEPENDS_perl-module-encode-jp += "perl-module-warnings" -RDEPENDS_perl-module-encode-jp += "perl-module-xsloader" -RDEPENDS_perl-module-encode-kr-2022-kr += "perl-module-encode" -RDEPENDS_perl-module-encode-kr-2022-kr += "perl-module-encode-cjkconstants" -RDEPENDS_perl-module-encode-kr-2022-kr += "perl-module-parent" -RDEPENDS_perl-module-encode-kr-2022-kr += "perl-module-strict" -RDEPENDS_perl-module-encode-kr-2022-kr += "perl-module-warnings" -RDEPENDS_perl-module-encode-kr += "perl-module-encode" -RDEPENDS_perl-module-encode-kr += "perl-module-encode-kr-2022-kr" -RDEPENDS_perl-module-encode-kr += "perl-module-strict" -RDEPENDS_perl-module-encode-kr += "perl-module-warnings" -RDEPENDS_perl-module-encode-kr += "perl-module-xsloader" -RDEPENDS_perl-module-encode-mime-header-iso-2022-jp += "perl-module-constant" -RDEPENDS_perl-module-encode-mime-header-iso-2022-jp += "perl-module-encode-cjkconstants" -RDEPENDS_perl-module-encode-mime-header-iso-2022-jp += "perl-module-parent" -RDEPENDS_perl-module-encode-mime-header-iso-2022-jp += "perl-module-strict" -RDEPENDS_perl-module-encode-mime-header-iso-2022-jp += "perl-module-warnings" -RDEPENDS_perl-module-encode-mime-header += "perl-module-encode" -RDEPENDS_perl-module-encode-mime-header += "perl-module-mime-base64" -RDEPENDS_perl-module-encode-mime-header += "perl-module-parent" -RDEPENDS_perl-module-encode-mime-header += "perl-module-strict" -RDEPENDS_perl-module-encode-mime-header += "perl-module-warnings" 
-RDEPENDS_perl-module-encode-mime-name += "perl-module-strict" -RDEPENDS_perl-module-encode-mime-name += "perl-module-warnings" -RDEPENDS_perl-module-encode += "perl-module-bytes" -RDEPENDS_perl-module-encode += "perl-module-constant" -RDEPENDS_perl-module-encode += "perl-module-encode-alias" -RDEPENDS_perl-module-encode += "perl-module-encode-config" -RDEPENDS_perl-module-encode += "perl-module-encode-configlocal-pm" -RDEPENDS_perl-module-encode += "perl-module-encode-mime-name" -RDEPENDS_perl-module-encode += "perl-module-exporter" -RDEPENDS_perl-module-encode += "perl-module-parent" -RDEPENDS_perl-module-encode += "perl-module-storable" -RDEPENDS_perl-module-encode += "perl-module-strict" -RDEPENDS_perl-module-encode += "perl-module-warnings" -RDEPENDS_perl-module-encode += "perl-module-xsloader" -RDEPENDS_perl-module-encode-symbol += "perl-module-encode" -RDEPENDS_perl-module-encode-symbol += "perl-module-strict" -RDEPENDS_perl-module-encode-symbol += "perl-module-warnings" -RDEPENDS_perl-module-encode-symbol += "perl-module-xsloader" -RDEPENDS_perl-module-encode-tw += "perl-module-encode" -RDEPENDS_perl-module-encode-tw += "perl-module-strict" -RDEPENDS_perl-module-encode-tw += "perl-module-warnings" -RDEPENDS_perl-module-encode-tw += "perl-module-xsloader" -RDEPENDS_perl-module-encode-unicode += "perl-module-encode" -RDEPENDS_perl-module-encode-unicode += "perl-module-parent" -RDEPENDS_perl-module-encode-unicode += "perl-module-strict" -RDEPENDS_perl-module-encode-unicode += "perl-module-warnings" -RDEPENDS_perl-module-encode-unicode += "perl-module-xsloader" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-encode" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-mime-base64" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-parent" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-re" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-strict" -RDEPENDS_perl-module-encode-unicode-utf7 += "perl-module-warnings" -RDEPENDS_perl-module-encoding += "perl-module-config" -RDEPENDS_perl-module-encoding += "perl-module-constant" -RDEPENDS_perl-module-encoding += "perl-module-encode" -RDEPENDS_perl-module-encoding += "perl-module-filter-util-call" -RDEPENDS_perl-module-encoding += "perl-module-i18n-langinfo" -RDEPENDS_perl-module-encoding += "perl-module-posix" -RDEPENDS_perl-module-encoding += "perl-module-strict" -RDEPENDS_perl-module-encoding += "perl-module-utf8" -RDEPENDS_perl-module-encoding += "perl-module-warnings" -RDEPENDS_perl-module-encoding-warnings += "perl-module-strict" -RDEPENDS_perl-module-encoding-warnings += "perl-module-warnings" -RDEPENDS_perl-module-english += "perl-module-exporter" -RDEPENDS_perl-module-env += "perl-module-config" -RDEPENDS_perl-module-env += "perl-module-tie-array" -RDEPENDS_perl-module-errno += "perl-module-exporter" -RDEPENDS_perl-module-errno += "perl-module-strict" -RDEPENDS_perl-module-experimental += "perl-module-strict" -RDEPENDS_perl-module-experimental += "perl-module-version" -RDEPENDS_perl-module-experimental += "perl-module-warnings" -RDEPENDS_perl-module-exporter-heavy += "perl-module-exporter" -RDEPENDS_perl-module-exporter-heavy += "perl-module-strict" -RDEPENDS_perl-module-exporter += "perl-module-exporter-heavy" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-config" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-cwd" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-dynaloader" -RDEPENDS_perl-module-extutils-cbuilder-base += 
"perl-module-extutils-mksymlists" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-file-temp" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-ipc-cmd" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-text-parsewords" -RDEPENDS_perl-module-extutils-cbuilder-base += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder += "perl-module-extutils-cbuilder-base" -RDEPENDS_perl-module-extutils-cbuilder += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-cbuilder += "perl-module-file-path" -RDEPENDS_perl-module-extutils-cbuilder += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-aix += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-aix += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-aix += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-android += "perl-module-config" -RDEPENDS_perl-module-extutils-cbuilder-platform-android += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-android += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-android += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-cygwin += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-cygwin += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-cygwin += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-darwin += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-darwin += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-darwin += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-dec-osf += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-dec-osf += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-dec-osf += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-os2 += "perl-module-extutils-cbuilder-platform-unix" -RDEPENDS_perl-module-extutils-cbuilder-platform-os2 += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-os2 += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-unix += "perl-module-extutils-cbuilder-base" -RDEPENDS_perl-module-extutils-cbuilder-platform-unix += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-unix += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-vms += "perl-module-config" -RDEPENDS_perl-module-extutils-cbuilder-platform-vms += "perl-module-extutils-cbuilder-base" -RDEPENDS_perl-module-extutils-cbuilder-platform-vms += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-vms += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows-bcc += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows-bcc += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows-gcc += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows-gcc += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows-msvc += "perl-module-strict" 
-RDEPENDS_perl-module-extutils-cbuilder-platform-windows-msvc += "perl-module-warnings" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows += "perl-module-extutils-cbuilder-base" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows += "perl-module-io-file" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows += "perl-module-strict" -RDEPENDS_perl-module-extutils-cbuilder-platform-windows += "perl-module-warnings" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-exporter" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-extutils-command" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-extutils-install" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-getopt-long" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-strict" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-test-harness" -RDEPENDS_perl-module-extutils-command-mm += "perl-module-warnings" -RDEPENDS_perl-module-extutils-command += "perl-module-exporter" -RDEPENDS_perl-module-extutils-command += "perl-module-file-copy" -RDEPENDS_perl-module-extutils-command += "perl-module-file-find" -RDEPENDS_perl-module-extutils-command += "perl-module-file-path" -RDEPENDS_perl-module-extutils-command += "perl-module-strict" -RDEPENDS_perl-module-extutils-command += "perl-module-vars" -RDEPENDS_perl-module-extutils-constant-base += "perl-module-constant" -RDEPENDS_perl-module-extutils-constant-base += "perl-module-extutils-constant-utils" -RDEPENDS_perl-module-extutils-constant-base += "perl-module-strict" -RDEPENDS_perl-module-extutils-constant-base += "perl-module-text-wrap" -RDEPENDS_perl-module-extutils-constant-base += "perl-module-vars" -RDEPENDS_perl-module-extutils-constant += "perl-module-exporter" -RDEPENDS_perl-module-extutils-constant += "perl-module-extutils-constant-proxysubs" -RDEPENDS_perl-module-extutils-constant += "perl-module-extutils-constant-utils" -RDEPENDS_perl-module-extutils-constant += "perl-module-extutils-constant-xs" -RDEPENDS_perl-module-extutils-constant += "perl-module-filehandle" -RDEPENDS_perl-module-extutils-constant += "perl-module-strict" -RDEPENDS_perl-module-extutils-constant += "perl-module-vars" -RDEPENDS_perl-module-extutils-constant-proxysubs += "perl-module-extutils-constant-utils" -RDEPENDS_perl-module-extutils-constant-proxysubs += "perl-module-extutils-constant-xs" -RDEPENDS_perl-module-extutils-constant-proxysubs += "perl-module-strict" -RDEPENDS_perl-module-extutils-constant-proxysubs += "perl-module-vars" -RDEPENDS_perl-module-extutils-constant-utils += "perl-module-constant" -RDEPENDS_perl-module-extutils-constant-utils += "perl-module-posix" -RDEPENDS_perl-module-extutils-constant-utils += "perl-module-strict" -RDEPENDS_perl-module-extutils-constant-utils += "perl-module-vars" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-data-dumper" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-extutils-constant" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-extutils-constant-base" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-extutils-constant-utils" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-strict" -RDEPENDS_perl-module-extutils-constant-xs += "perl-module-vars" -RDEPENDS_perl-module-extutils-embed += "perl-module-config" -RDEPENDS_perl-module-extutils-embed += "perl-module-exporter" -RDEPENDS_perl-module-extutils-embed += "perl-module-extutils-liblist" 
-RDEPENDS_perl-module-extutils-embed += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-embed += "perl-module-getopt-std" -RDEPENDS_perl-module-extutils-embed += "perl-module-strict" -RDEPENDS_perl-module-extutils-installed += "perl-module-config" -RDEPENDS_perl-module-extutils-installed += "perl-module-data-dumper" -RDEPENDS_perl-module-extutils-installed += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-installed += "perl-module-extutils-packlist" -RDEPENDS_perl-module-extutils-installed += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-installed += "perl-module-file-find" -RDEPENDS_perl-module-extutils-installed += "perl-module-strict" -RDEPENDS_perl-module-extutils-installed += "perl-module-vars" -RDEPENDS_perl-module-extutils-install += "perl-module-autosplit" -RDEPENDS_perl-module-extutils-install += "perl-module-config" -RDEPENDS_perl-module-extutils-install += "perl-module-cwd" -RDEPENDS_perl-module-extutils-install += "perl-module-exporter" -RDEPENDS_perl-module-extutils-install += "perl-module-extutils-packlist" -RDEPENDS_perl-module-extutils-install += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-install += "perl-module-file-compare" -RDEPENDS_perl-module-extutils-install += "perl-module-file-copy" -RDEPENDS_perl-module-extutils-install += "perl-module-file-find" -RDEPENDS_perl-module-extutils-install += "perl-module-file-path" -RDEPENDS_perl-module-extutils-install += "perl-module-strict" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-cwd" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-strict" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-text-parsewords" -RDEPENDS_perl-module-extutils-liblist-kid += "perl-module-warnings" -RDEPENDS_perl-module-extutils-liblist += "perl-module-extutils-liblist-kid" -RDEPENDS_perl-module-extutils-liblist += "perl-module-strict" -RDEPENDS_perl-module-extutils-makemaker-config += "perl-module-config" -RDEPENDS_perl-module-extutils-makemaker-config += "perl-module-strict" -RDEPENDS_perl-module-extutils-makemaker-locale += "perl-module-base" -RDEPENDS_perl-module-extutils-makemaker-locale += "perl-module-encode" -RDEPENDS_perl-module-extutils-makemaker-locale += "perl-module-encode-alias" -RDEPENDS_perl-module-extutils-makemaker-locale += "perl-module-i18n-langinfo" -RDEPENDS_perl-module-extutils-makemaker-locale += "perl-module-strict" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-b" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-cpan" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-cwd" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-exporter" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-makemaker-version" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-manifest" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-mm" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-my" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-file-path" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-strict" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-version" -RDEPENDS_perl-module-extutils-makemaker-version += "perl-module-strict" -RDEPENDS_perl-module-extutils-makemaker-version += "perl-module-vars" 
-RDEPENDS_perl-module-extutils-manifest += "perl-module-config" -RDEPENDS_perl-module-extutils-manifest += "perl-module-exporter" -RDEPENDS_perl-module-extutils-manifest += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-manifest += "perl-module-file-copy" -RDEPENDS_perl-module-extutils-manifest += "perl-module-file-find" -RDEPENDS_perl-module-extutils-manifest += "perl-module-file-path" -RDEPENDS_perl-module-extutils-manifest += "perl-module-strict" -RDEPENDS_perl-module-extutils-manifest += "perl-module-warnings" -RDEPENDS_perl-module-extutils-miniperl += "perl-module-exporter" -RDEPENDS_perl-module-extutils-miniperl += "perl-module-extutils-embed" -RDEPENDS_perl-module-extutils-miniperl += "perl-module-strict" -RDEPENDS_perl-module-extutils-mkbootstrap += "perl-module-config" -RDEPENDS_perl-module-extutils-mkbootstrap += "perl-module-dynaloader" -RDEPENDS_perl-module-extutils-mkbootstrap += "perl-module-exporter" -RDEPENDS_perl-module-extutils-mkbootstrap += "perl-module-strict" -RDEPENDS_perl-module-extutils-mksymlists += "perl-module-config" -RDEPENDS_perl-module-extutils-mksymlists += "perl-module-exporter" -RDEPENDS_perl-module-extutils-mksymlists += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-aix += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-aix += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-aix += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-autosplit" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-cpan" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-data-dumper" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-file-find" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-any += "perl-module-version" -RDEPENDS_perl-module-extutils-mm-beos += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-beos += "perl-module-extutils-mm-any" -RDEPENDS_perl-module-extutils-mm-beos += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-beos += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-cygwin += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-cygwin += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-cygwin += "perl-module-extutils-mm-win32" -RDEPENDS_perl-module-extutils-mm-cygwin += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-darwin += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-darwin += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-dos += "perl-module-extutils-mm-any" -RDEPENDS_perl-module-extutils-mm-dos += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-dos += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-macos += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-nw5 += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-nw5 += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-nw5 += "perl-module-extutils-mm-win32" -RDEPENDS_perl-module-extutils-mm-nw5 += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-mm-nw5 += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-os2 += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-os2 += "perl-module-extutils-mm-any" 
-RDEPENDS_perl-module-extutils-mm-os2 += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-os2 += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm += "perl-module-extutils-liblist" -RDEPENDS_perl-module-extutils-mm += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-qnx += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-qnx += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-cwd" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-encode" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-extutils-liblist" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-extutils-mm-any" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-file-find" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-vars" -RDEPENDS_perl-module-extutils-mm-unix += "perl-module-version" -RDEPENDS_perl-module-extutils-mm-uwin += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-uwin += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-exporter" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-extutils-liblist-kid" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-extutils-mm-any" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-file-find" -RDEPENDS_perl-module-extutils-mm-vms += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-vos += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-vos += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-extutils-mm-any" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-extutils-mm-unix" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-mm-win32 += "perl-module-strict" -RDEPENDS_perl-module-extutils-mm-win95 += "perl-module-extutils-makemaker-config" -RDEPENDS_perl-module-extutils-mm-win95 += "perl-module-extutils-mm-win32" -RDEPENDS_perl-module-extutils-mm-win95 += "perl-module-strict" -RDEPENDS_perl-module-extutils-my += "perl-module-extutils-mm" -RDEPENDS_perl-module-extutils-my += "perl-module-strict" -RDEPENDS_perl-module-extutils-packlist += "perl-module-config" -RDEPENDS_perl-module-extutils-packlist += "perl-module-cwd" -RDEPENDS_perl-module-extutils-packlist += "perl-module-strict" -RDEPENDS_perl-module-extutils-packlist += "perl-module-vars" -RDEPENDS_perl-module-extutils-parsexs-constants += "perl-module-strict" -RDEPENDS_perl-module-extutils-parsexs-constants += "perl-module-warnings" -RDEPENDS_perl-module-extutils-parsexs-countlines += "perl-module-strict" -RDEPENDS_perl-module-extutils-parsexs-eval += "perl-module-strict" 
-RDEPENDS_perl-module-extutils-parsexs-eval += "perl-module-warnings" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-config" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-cwd" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-exporter" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-extutils-parsexs-constants" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-extutils-parsexs-countlines" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-extutils-parsexs-eval" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-extutils-parsexs-utilities" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-file-basename" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-re" -RDEPENDS_perl-module-extutils-parsexs += "perl-module-strict" -RDEPENDS_perl-module-extutils-parsexs-utilities += "perl-module-exporter" -RDEPENDS_perl-module-extutils-parsexs-utilities += "perl-module-extutils-parsexs-constants" -RDEPENDS_perl-module-extutils-parsexs-utilities += "perl-module-extutils-typemaps" -RDEPENDS_perl-module-extutils-parsexs-utilities += "perl-module-strict" -RDEPENDS_perl-module-extutils-parsexs-utilities += "perl-module-warnings" -RDEPENDS_perl-module-extutils-testlib += "perl-module-cwd" -RDEPENDS_perl-module-extutils-testlib += "perl-module-lib" -RDEPENDS_perl-module-extutils-testlib += "perl-module-strict" -RDEPENDS_perl-module-extutils-testlib += "perl-module-warnings" -RDEPENDS_perl-module-extutils-typemaps-cmd += "perl-module-exporter" -RDEPENDS_perl-module-extutils-typemaps-cmd += "perl-module-extutils-typemaps" -RDEPENDS_perl-module-extutils-typemaps-cmd += "perl-module-strict" -RDEPENDS_perl-module-extutils-typemaps-cmd += "perl-module-warnings" -RDEPENDS_perl-module-extutils-typemaps-inputmap += "perl-module-strict" -RDEPENDS_perl-module-extutils-typemaps-inputmap += "perl-module-warnings" -RDEPENDS_perl-module-extutils-typemaps-outputmap += "perl-module-re" -RDEPENDS_perl-module-extutils-typemaps-outputmap += "perl-module-strict" -RDEPENDS_perl-module-extutils-typemaps-outputmap += "perl-module-warnings" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-extutils-parsexs" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-extutils-parsexs-constants" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-extutils-typemaps-inputmap" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-extutils-typemaps-outputmap" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-extutils-typemaps-type" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-strict" -RDEPENDS_perl-module-extutils-typemaps += "perl-module-warnings" -RDEPENDS_perl-module-extutils-typemaps-type += "perl-module-extutils-typemaps" -RDEPENDS_perl-module-extutils-typemaps-type += "perl-module-strict" -RDEPENDS_perl-module-extutils-typemaps-type += "perl-module-warnings" -RDEPENDS_perl-module-fatal += "perl-module-autodie" -RDEPENDS_perl-module-fatal += "perl-module-autodie-exception-system" -RDEPENDS_perl-module-fatal += "perl-module-autodie-hints" -RDEPENDS_perl-module-fatal += "perl-module-autodie-util" -RDEPENDS_perl-module-fatal += "perl-module-config" -RDEPENDS_perl-module-fatal += "perl-module-constant" -RDEPENDS_perl-module-fatal += "perl-module-fcntl" -RDEPENDS_perl-module-fatal += "perl-module-posix" -RDEPENDS_perl-module-fatal += "perl-module-strict" -RDEPENDS_perl-module-fatal += "perl-module-tie-refhash" -RDEPENDS_perl-module-fatal += "perl-module-warnings" -RDEPENDS_perl-module-fcntl += "perl-module-exporter" -RDEPENDS_perl-module-fcntl += "perl-module-strict" 
-RDEPENDS_perl-module-fcntl += "perl-module-xsloader" -RDEPENDS_perl-module-fields += "perl-module-base" -RDEPENDS_perl-module-fields += "perl-module-hash-util" -RDEPENDS_perl-module-fields += "perl-module-strict" -RDEPENDS_perl-module-file-basename += "perl-module-exporter" -RDEPENDS_perl-module-file-basename += "perl-module-re" -RDEPENDS_perl-module-file-basename += "perl-module-strict" -RDEPENDS_perl-module-file-basename += "perl-module-warnings" -RDEPENDS_perl-module-filecache += "perl-module-parent" -RDEPENDS_perl-module-filecache += "perl-module-strict" -RDEPENDS_perl-module-file-compare += "perl-module-exporter" -RDEPENDS_perl-module-file-compare += "perl-module-strict" -RDEPENDS_perl-module-file-compare += "perl-module-warnings" -RDEPENDS_perl-module-file-copy += "perl-module-config" -RDEPENDS_perl-module-file-copy += "perl-module-exporter" -RDEPENDS_perl-module-file-copy += "perl-module-file-basename" -RDEPENDS_perl-module-file-copy += "perl-module-strict" -RDEPENDS_perl-module-file-copy += "perl-module-warnings" -RDEPENDS_perl-module-file-dosglob += "perl-module-strict" -RDEPENDS_perl-module-file-dosglob += "perl-module-text-parsewords" -RDEPENDS_perl-module-file-dosglob += "perl-module-warnings" -RDEPENDS_perl-module-file-dosglob += "perl-module-xsloader" -RDEPENDS_perl-module-file-fetch += "perl-module-constant" -RDEPENDS_perl-module-file-fetch += "perl-module-cwd" -RDEPENDS_perl-module-file-fetch += "perl-module-file-basename" -RDEPENDS_perl-module-file-fetch += "perl-module-file-copy" -RDEPENDS_perl-module-file-fetch += "perl-module-filehandle" -RDEPENDS_perl-module-file-fetch += "perl-module-file-path" -RDEPENDS_perl-module-file-fetch += "perl-module-file-temp" -RDEPENDS_perl-module-file-fetch += "perl-module-ipc-cmd" -RDEPENDS_perl-module-file-fetch += "perl-module-locale-maketext-simple" -RDEPENDS_perl-module-file-fetch += "perl-module-load" -RDEPENDS_perl-module-file-fetch += "perl-module-params-check" -RDEPENDS_perl-module-file-fetch += "perl-module-strict" -RDEPENDS_perl-module-file-fetch += "perl-module-vars" -RDEPENDS_perl-module-file-find += "perl-module-config" -RDEPENDS_perl-module-file-find += "perl-module-cwd" -RDEPENDS_perl-module-file-find += "perl-module-exporter" -RDEPENDS_perl-module-file-find += "perl-module-file-basename" -RDEPENDS_perl-module-file-find += "perl-module-strict" -RDEPENDS_perl-module-file-find += "perl-module-warnings" -RDEPENDS_perl-module-file-find += "perl-module-warnings-register" -RDEPENDS_perl-module-file-globmapper += "perl-module-file-glob" -RDEPENDS_perl-module-file-globmapper += "perl-module-strict" -RDEPENDS_perl-module-file-globmapper += "perl-module-warnings" -RDEPENDS_perl-module-file-glob += "perl-module-exporter" -RDEPENDS_perl-module-file-glob += "perl-module-strict" -RDEPENDS_perl-module-file-glob += "perl-module-warnings" -RDEPENDS_perl-module-file-glob += "perl-module-xsloader" -RDEPENDS_perl-module-filehandle += "perl-module-exporter" -RDEPENDS_perl-module-filehandle += "perl-module-fcntl" -RDEPENDS_perl-module-filehandle += "perl-module-io-file" -RDEPENDS_perl-module-filehandle += "perl-module-strict" -RDEPENDS_perl-module-file-path += "perl-module-cwd" -RDEPENDS_perl-module-file-path += "perl-module-exporter" -RDEPENDS_perl-module-file-path += "perl-module-file-basename" -RDEPENDS_perl-module-file-path += "perl-module-strict" -RDEPENDS_perl-module-file-path += "perl-module-vars" -RDEPENDS_perl-module-file-spec += "perl-module-constant" -RDEPENDS_perl-module-file-spec += "perl-module-cwd" -RDEPENDS_perl-module-file-spec 
+= "perl-module-strict" -RDEPENDS_perl-module-file-spec += "perl-module-file-spec-unix" -RDEPENDS_perl-module-file-stat += "perl-module-class-struct" -RDEPENDS_perl-module-file-stat += "perl-module-constant" -RDEPENDS_perl-module-file-stat += "perl-module-exporter" -RDEPENDS_perl-module-file-stat += "perl-module-fcntl" -RDEPENDS_perl-module-file-stat += "perl-module-overload " -RDEPENDS_perl-module-file-stat += "perl-module-strict" -RDEPENDS_perl-module-file-stat += "perl-module-warnings" -RDEPENDS_perl-module-file-stat += "perl-module-warnings-register" -RDEPENDS_perl-module-file-temp += "perl-module-carp" -RDEPENDS_perl-module-file-temp += "perl-module-constant" -RDEPENDS_perl-module-file-temp += "perl-module-cwd" -RDEPENDS_perl-module-file-temp += "perl-module-errno" -RDEPENDS_perl-module-file-temp += "perl-module-exporter" -RDEPENDS_perl-module-file-temp += "perl-module-fcntl" -RDEPENDS_perl-module-file-temp += "perl-module-file-path" -RDEPENDS_perl-module-file-temp += "perl-module-file-spec" -RDEPENDS_perl-module-file-temp += "perl-module-io-seekable" -RDEPENDS_perl-module-file-temp += "perl-module-overload" -RDEPENDS_perl-module-file-temp += "perl-module-parent" -RDEPENDS_perl-module-file-temp += "perl-module-posix" -RDEPENDS_perl-module-file-temp += "perl-module-scalar-util" -RDEPENDS_perl-module-file-temp += "perl-module-strict" -RDEPENDS_perl-module-file-temp += "perl-module-vars" -RDEPENDS_perl-module-filter-simple += "perl-module-filter-util-call" -RDEPENDS_perl-module-filter-simple += "perl-module-text-balanced" -RDEPENDS_perl-module-filter-util-call += "perl-module-exporter" -RDEPENDS_perl-module-filter-util-call += "perl-module-strict" -RDEPENDS_perl-module-filter-util-call += "perl-module-warnings" -RDEPENDS_perl-module-filter-util-call += "perl-module-xsloader" -RDEPENDS_perl-module-findbin += "perl-module-cwd" -RDEPENDS_perl-module-findbin += "perl-module-exporter" -RDEPENDS_perl-module-findbin += "perl-module-file-basename" -RDEPENDS_perl-module-getopt-long += "perl-module-constant" -RDEPENDS_perl-module-getopt-long += "perl-module-exporter" -RDEPENDS_perl-module-getopt-long += "perl-module-overload" -RDEPENDS_perl-module-getopt-long += "perl-module-pod-usage" -RDEPENDS_perl-module-getopt-long += "perl-module-strict" -RDEPENDS_perl-module-getopt-long += "perl-module-text-parsewords" -RDEPENDS_perl-module-getopt-long += "perl-module-vars" -RDEPENDS_perl-module-getopt-long += "perl-module-warnings" -RDEPENDS_perl-module-getopt-std += "perl-module-exporter" -RDEPENDS_perl-module-hash-util-fieldhash += "perl-module-exporter" -RDEPENDS_perl-module-hash-util-fieldhash += "perl-module-strict" -RDEPENDS_perl-module-hash-util-fieldhash += "perl-module-warnings" -RDEPENDS_perl-module-hash-util-fieldhash += "perl-module-xsloader" -RDEPENDS_perl-module-hash-util += "perl-module-exporter" -RDEPENDS_perl-module-hash-util += "perl-module-hash-util-fieldhash" -RDEPENDS_perl-module-hash-util += "perl-module-strict" -RDEPENDS_perl-module-hash-util += "perl-module-warnings" -RDEPENDS_perl-module-hash-util += "perl-module-warnings-register" -RDEPENDS_perl-module-hash-util += "perl-module-xsloader" -RDEPENDS_perl-module-i18n-collate += "perl-module-exporter" -RDEPENDS_perl-module-i18n-collate += "perl-module-overload" -RDEPENDS_perl-module-i18n-collate += "perl-module-posix" -RDEPENDS_perl-module-i18n-collate += "perl-module-strict" -RDEPENDS_perl-module-i18n-collate += "perl-module-warnings-register" -RDEPENDS_perl-module-i18n-langinfo += "perl-module-exporter" 
-RDEPENDS_perl-module-i18n-langinfo += "perl-module-strict" -RDEPENDS_perl-module-i18n-langinfo += "perl-module-warnings" -RDEPENDS_perl-module-i18n-langinfo += "perl-module-xsloader" -RDEPENDS_perl-module-i18n-langtags-detect += "perl-module-i18n-langtags" -RDEPENDS_perl-module-i18n-langtags-detect += "perl-module-strict" -RDEPENDS_perl-module-i18n-langtags-list += "perl-module-strict" -RDEPENDS_perl-module-i18n-langtags += "perl-module-exporter" -RDEPENDS_perl-module-i18n-langtags += "perl-module-strict" -RDEPENDS_perl-module-io-compress-adapter-bzip2 += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-adapter-bzip2 += "perl-module-compress-raw-bzip2" -RDEPENDS_perl-module-io-compress-adapter-bzip2 += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-adapter-bzip2 += "perl-module-strict" -RDEPENDS_perl-module-io-compress-adapter-bzip2 += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-exporter" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-strict" -RDEPENDS_perl-module-io-compress-adapter-deflate += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-adapter-identity += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-adapter-identity += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-adapter-identity += "perl-module-strict" -RDEPENDS_perl-module-io-compress-adapter-identity += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-constant" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-encode" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-exporter" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-file-globmapper" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-strict " -RDEPENDS_perl-module-io-compress-base-common += "perl-module-strict" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-utf8" -RDEPENDS_perl-module-io-compress-base-common += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-base += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-base += "perl-module-io-file" -RDEPENDS_perl-module-io-compress-base += "perl-module-io-handle " -RDEPENDS_perl-module-io-compress-base += "perl-module-strict " -RDEPENDS_perl-module-io-compress-base += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-exporter " -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-io-compress-adapter-bzip2" -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-io-compress-base" -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-strict " -RDEPENDS_perl-module-io-compress-bzip2 += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-deflate += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-deflate += "perl-module-exporter " -RDEPENDS_perl-module-io-compress-deflate += "perl-module-io-compress-adapter-deflate" -RDEPENDS_perl-module-io-compress-deflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-deflate += 
"perl-module-io-compress-rawdeflate" -RDEPENDS_perl-module-io-compress-deflate += "perl-module-io-compress-zlib-constants" -RDEPENDS_perl-module-io-compress-deflate += "perl-module-strict " -RDEPENDS_perl-module-io-compress-deflate += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-gzip-constants += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-gzip-constants += "perl-module-constant" -RDEPENDS_perl-module-io-compress-gzip-constants += "perl-module-exporter" -RDEPENDS_perl-module-io-compress-gzip-constants += "perl-module-strict " -RDEPENDS_perl-module-io-compress-gzip-constants += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-exporter " -RDEPENDS_perl-module-io-compress-gzip += "perl-module-io-compress-adapter-deflate" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-io-compress-gzip-constants" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-io-compress-rawdeflate" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-io-compress-zlib-extra" -RDEPENDS_perl-module-io-compress-gzip += "perl-module-strict " -RDEPENDS_perl-module-io-compress-gzip += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-exporter " -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-fcntl" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-io-compress-adapter-deflate" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-io-compress-base" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-strict " -RDEPENDS_perl-module-io-compress-rawdeflate += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-zip-constants += "perl-module-constant" -RDEPENDS_perl-module-io-compress-zip-constants += "perl-module-exporter" -RDEPENDS_perl-module-io-compress-zip-constants += "perl-module-strict " -RDEPENDS_perl-module-io-compress-zip-constants += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-zip += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-zip += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-compress-zip += "perl-module-config" -RDEPENDS_perl-module-io-compress-zip += "perl-module-exporter " -RDEPENDS_perl-module-io-compress-zip += "perl-module-fcntl" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-adapter-deflate" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-adapter-identity" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-bzip2 " -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-rawdeflate" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-zip-constants" -RDEPENDS_perl-module-io-compress-zip += "perl-module-io-compress-zlib-extra" -RDEPENDS_perl-module-io-compress-zip += "perl-module-strict " -RDEPENDS_perl-module-io-compress-zip += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-zlib-constants += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-zlib-constants += "perl-module-constant" -RDEPENDS_perl-module-io-compress-zlib-constants += "perl-module-exporter" -RDEPENDS_perl-module-io-compress-zlib-constants += 
"perl-module-strict " -RDEPENDS_perl-module-io-compress-zlib-constants += "perl-module-warnings" -RDEPENDS_perl-module-io-compress-zlib-extra += "perl-module-bytes" -RDEPENDS_perl-module-io-compress-zlib-extra += "perl-module-io-compress-gzip-constants" -RDEPENDS_perl-module-io-compress-zlib-extra += "perl-module-strict " -RDEPENDS_perl-module-io-compress-zlib-extra += "perl-module-warnings" -RDEPENDS_perl-module-io-dir += "perl-module-exporter" -RDEPENDS_perl-module-io-dir += "perl-module-file-stat" -RDEPENDS_perl-module-io-dir += "perl-module-io-file" -RDEPENDS_perl-module-io-dir += "perl-module-strict" -RDEPENDS_perl-module-io-dir += "perl-module-tie-hash" -RDEPENDS_perl-module-io-file += "perl-module-carp" -RDEPENDS_perl-module-io-file += "perl-module-exporter" -RDEPENDS_perl-module-io-file += "perl-module-fcntl" -RDEPENDS_perl-module-io-file += "perl-module-io-seekable" -RDEPENDS_perl-module-io-file += "perl-module-selectsaver" -RDEPENDS_perl-module-io-file += "perl-module-strict" -RDEPENDS_perl-module-io-file += "perl-module-symbol" -RDEPENDS_perl-module-io-handle += "perl-module-exporter" -RDEPENDS_perl-module-io-handle += "perl-module-io" -RDEPENDS_perl-module-io-handle += "perl-module-io-file" -RDEPENDS_perl-module-io-handle += "perl-module-selectsaver" -RDEPENDS_perl-module-io-handle += "perl-module-strict" -RDEPENDS_perl-module-io += "perl-module-strict" -RDEPENDS_perl-module-io += "perl-module-warnings" -RDEPENDS_perl-module-io += "perl-module-xsloader" -RDEPENDS_perl-module-io-pipe += "perl-module-fcntl" -RDEPENDS_perl-module-io-pipe += "perl-module-io-handle" -RDEPENDS_perl-module-io-pipe += "perl-module-strict" -RDEPENDS_perl-module-io-poll += "perl-module-exporter" -RDEPENDS_perl-module-io-poll += "perl-module-io-handle" -RDEPENDS_perl-module-io-poll += "perl-module-strict" -RDEPENDS_perl-module-io-seekable += "perl-module-exporter" -RDEPENDS_perl-module-io-seekable += "perl-module-fcntl" -RDEPENDS_perl-module-io-seekable += "perl-module-io-handle" -RDEPENDS_perl-module-io-seekable += "perl-module-strict" -RDEPENDS_perl-module-io-select += "perl-module-exporter" -RDEPENDS_perl-module-io-select += "perl-module-strict" -RDEPENDS_perl-module-io-select += "perl-module-warnings-register" -RDEPENDS_perl-module-io-socket-inet += "perl-module-errno" -RDEPENDS_perl-module-io-socket-inet += "perl-module-exporter" -RDEPENDS_perl-module-io-socket-inet += "perl-module-io-socket" -RDEPENDS_perl-module-io-socket-inet += "perl-module-socket" -RDEPENDS_perl-module-io-socket-inet += "perl-module-strict" -RDEPENDS_perl-module-io-socket-ip += "perl-module-base" -RDEPENDS_perl-module-io-socket-ip += "perl-module-constant" -RDEPENDS_perl-module-io-socket-ip += "perl-module-errno" -RDEPENDS_perl-module-io-socket-ip += "perl-module-posix" -RDEPENDS_perl-module-io-socket-ip += "perl-module-socket" -RDEPENDS_perl-module-io-socket-ip += "perl-module-strict" -RDEPENDS_perl-module-io-socket-ip += "perl-module-warnings" -RDEPENDS_perl-module-io-socket += "perl-module-errno" -RDEPENDS_perl-module-io-socket += "perl-module-exporter" -RDEPENDS_perl-module-io-socket += "perl-module-io-handle" -RDEPENDS_perl-module-io-socket += "perl-module-io-select" -RDEPENDS_perl-module-io-socket += "perl-module-io-socket-inet" -RDEPENDS_perl-module-io-socket += "perl-module-io-socket-unix" -RDEPENDS_perl-module-io-socket += "perl-module-socket" -RDEPENDS_perl-module-io-socket += "perl-module-strict" -RDEPENDS_perl-module-io-socket-unix += "perl-module-io-socket" -RDEPENDS_perl-module-io-socket-unix += 
"perl-module-strict" -RDEPENDS_perl-module-io-uncompress-adapter-bunzip2 += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-adapter-bunzip2 += "perl-module-compress-raw-bzip2" -RDEPENDS_perl-module-io-uncompress-adapter-bunzip2 += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-adapter-bunzip2 += "perl-module-strict" -RDEPENDS_perl-module-io-uncompress-adapter-bunzip2 += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-io-compress-zip-constants " -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-strict" -RDEPENDS_perl-module-io-uncompress-adapter-identity += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-adapter-inflate += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-adapter-inflate += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-uncompress-adapter-inflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-adapter-inflate += "perl-module-strict" -RDEPENDS_perl-module-io-uncompress-adapter-inflate += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-adapter-inflate" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-base" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-gunzip" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-inflate" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-rawinflate" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-io-uncompress-unzip" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-strict" -RDEPENDS_perl-module-io-uncompress-anyinflate += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-io-uncompress-base" -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-strict" -RDEPENDS_perl-module-io-uncompress-anyuncompress += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-base += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-base += "perl-module-constant" -RDEPENDS_perl-module-io-uncompress-base += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-base += "perl-module-io-file " -RDEPENDS_perl-module-io-uncompress-base += "perl-module-list-util" -RDEPENDS_perl-module-io-uncompress-base += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-base += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-io-uncompress-adapter-bunzip2" 
-RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-io-uncompress-base" -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-bunzip2 += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-io-compress-gzip-constants" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-io-compress-zlib-extra" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-io-uncompress-rawinflate" -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-gunzip += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-io-compress-zlib-constants" -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-io-uncompress-rawinflate" -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-inflate += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-io-uncompress-adapter-inflate" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-io-uncompress-base" -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-warnings" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-bytes" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-constant" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-exporter " -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-fcntl" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-compress-zip-constants" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-compress-zlib-extra" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-file" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-uncompress-adapter-identity" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-uncompress-adapter-inflate" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-io-uncompress-rawinflate" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-posix" -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-strict " -RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-warnings" -RDEPENDS_perl-module-io-zlib += "perl-module-fcntl" -RDEPENDS_perl-module-io-zlib += "perl-module-io-handle" -RDEPENDS_perl-module-io-zlib += "perl-module-strict" -RDEPENDS_perl-module-io-zlib += "perl-module-tie-handle" -RDEPENDS_perl-module-io-zlib += "perl-module-vars" -RDEPENDS_perl-module-ipc-cmd += "perl-module-constant" 
-RDEPENDS_perl-module-ipc-cmd += "perl-module-exporter" -RDEPENDS_perl-module-ipc-cmd += "perl-module-extutils-makemaker" -RDEPENDS_perl-module-ipc-cmd += "perl-module-filehandle" -RDEPENDS_perl-module-ipc-cmd += "perl-module-io-handle" -RDEPENDS_perl-module-ipc-cmd += "perl-module-io-select" -RDEPENDS_perl-module-ipc-cmd += "perl-module-ipc-open3" -RDEPENDS_perl-module-ipc-cmd += "perl-module-locale-maketext-simple" -RDEPENDS_perl-module-ipc-cmd += "perl-module-load" -RDEPENDS_perl-module-ipc-cmd += "perl-module-params-check" -RDEPENDS_perl-module-ipc-cmd += "perl-module-posix" -RDEPENDS_perl-module-ipc-cmd += "perl-module-socket" -RDEPENDS_perl-module-ipc-cmd += "perl-module-strict" -RDEPENDS_perl-module-ipc-cmd += "perl-module-text-parsewords" -RDEPENDS_perl-module-ipc-cmd += "perl-module-time-hires" -RDEPENDS_perl-module-ipc-cmd += "perl-module-vars" -RDEPENDS_perl-module-ipc-msg += "perl-module-class-struct" -RDEPENDS_perl-module-ipc-msg += "perl-module-ipc-sysv" -RDEPENDS_perl-module-ipc-msg += "perl-module-strict" -RDEPENDS_perl-module-ipc-msg += "perl-module-vars" -RDEPENDS_perl-module-ipc-open2 += "perl-module-exporter" -RDEPENDS_perl-module-ipc-open2 += "perl-module-ipc-open3" -RDEPENDS_perl-module-ipc-open2 += "perl-module-strict" -RDEPENDS_perl-module-ipc-open3 += "perl-module-constant" -RDEPENDS_perl-module-ipc-open3 += "perl-module-exporter" -RDEPENDS_perl-module-ipc-open3 += "perl-module-fcntl" -RDEPENDS_perl-module-ipc-open3 += "perl-module-io-pipe" -RDEPENDS_perl-module-ipc-open3 += "perl-module-posix" -RDEPENDS_perl-module-ipc-open3 += "perl-module-strict" -RDEPENDS_perl-module-ipc-semaphore += "perl-module-class-struct" -RDEPENDS_perl-module-ipc-semaphore += "perl-module-ipc-sysv" -RDEPENDS_perl-module-ipc-semaphore += "perl-module-strict" -RDEPENDS_perl-module-ipc-semaphore += "perl-module-vars" -RDEPENDS_perl-module-ipc-sharedmem += "perl-module-class-struct" -RDEPENDS_perl-module-ipc-sharedmem += "perl-module-ipc-sysv" -RDEPENDS_perl-module-ipc-sharedmem += "perl-module-strict" -RDEPENDS_perl-module-ipc-sharedmem += "perl-module-vars" -RDEPENDS_perl-module-ipc-sysv += "perl-module-config" -RDEPENDS_perl-module-ipc-sysv += "perl-module-dynaloader" -RDEPENDS_perl-module-ipc-sysv += "perl-module-exporter" -RDEPENDS_perl-module-ipc-sysv += "perl-module-strict" -RDEPENDS_perl-module-ipc-sysv += "perl-module-vars" -RDEPENDS_perl-module-json-pp-boolean += "perl-module-overload" -RDEPENDS_perl-module-json-pp-boolean += "perl-module-strict" -RDEPENDS_perl-module-json-pp += "perl-module-b" -RDEPENDS_perl-module-json-pp += "perl-module-bytes" -RDEPENDS_perl-module-json-pp += "perl-module-constant" -RDEPENDS_perl-module-json-pp += "perl-module-encode" -RDEPENDS_perl-module-json-pp += "perl-module-exporter" -RDEPENDS_perl-module-json-pp += "perl-module-json-pp-boolean" -RDEPENDS_perl-module-json-pp += "perl-module-math-bigfloat" -RDEPENDS_perl-module-json-pp += "perl-module-math-bigint" -RDEPENDS_perl-module-json-pp += "perl-module-overload" -RDEPENDS_perl-module-json-pp += "perl-module-strict" -RDEPENDS_perl-module-json-pp += "perl-module-subs" -RDEPENDS_perl-module-less += "perl-module-strict" -RDEPENDS_perl-module-less += "perl-module-warnings" -RDEPENDS_perl-module-lib += "perl-module-config" -RDEPENDS_perl-module-lib += "perl-module-strict" -RDEPENDS_perl-module-list-util += "perl-module-exporter" -RDEPENDS_perl-module-list-util += "perl-module-strict" -RDEPENDS_perl-module-list-util += "perl-module-warnings" -RDEPENDS_perl-module-list-util += "perl-module-xsloader" 
-RDEPENDS_perl-module-list-util-xs += "perl-module-list-util" -RDEPENDS_perl-module-list-util-xs += "perl-module-strict" -RDEPENDS_perl-module-list-util-xs += "perl-module-warnings" -RDEPENDS_perl-module-loaded += "perl-module-strict" -RDEPENDS_perl-module-loaded += "perl-module-vars" -RDEPENDS_perl-module-load += "perl-module-config" -RDEPENDS_perl-module-load += "perl-module-constant" -RDEPENDS_perl-module-load += "perl-module-exporter" -RDEPENDS_perl-module-load += "perl-module-filehandle" -RDEPENDS_perl-module-load += "perl-module-locale-maketext-simple" -RDEPENDS_perl-module-load += "perl-module-corelist" -RDEPENDS_perl-module-load += "perl-module-load" -RDEPENDS_perl-module-load += "perl-module-params-check" -RDEPENDS_perl-module-load += "perl-module-strict" -RDEPENDS_perl-module-load += "perl-module-vars" -RDEPENDS_perl-module-load += "perl-module-version" -RDEPENDS_perl-module-load += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-constant" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-country += "perl-module-if" -RDEPENDS_perl-module-locale-codes-country += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-country += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-country += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-if" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-strict" 
-RDEPENDS_perl-module-locale-codes-langext += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-language += "perl-module-if" -RDEPENDS_perl-module-locale-codes-language += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-language += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-language += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes += "perl-module-if" -RDEPENDS_perl-module-locale-codes += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-script += "perl-module-if" -RDEPENDS_perl-module-locale-codes-script += 
"perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-script += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-script += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-country += "perl-module-exporter" -RDEPENDS_perl-module-locale-country += "perl-module-if" -RDEPENDS_perl-module-locale-country += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-country += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-country += "perl-module-strict" -RDEPENDS_perl-module-locale-country += "perl-module-warnings" -RDEPENDS_perl-module-locale-currency += "perl-module-exporter" -RDEPENDS_perl-module-locale-currency += "perl-module-if" -RDEPENDS_perl-module-locale-currency += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-currency += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-currency += "perl-module-strict" -RDEPENDS_perl-module-locale-currency += "perl-module-warnings" -RDEPENDS_perl-module-locale-language += "perl-module-exporter" -RDEPENDS_perl-module-locale-language += "perl-module-if" -RDEPENDS_perl-module-locale-language += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-language += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-language += "perl-module-strict" -RDEPENDS_perl-module-locale-language += "perl-module-warnings" -RDEPENDS_perl-module-locale-maketext-gutsloader += "perl-module-locale-maketext" -RDEPENDS_perl-module-locale-maketext-guts += "perl-module-locale-maketext" -RDEPENDS_perl-module-locale-maketext += "perl-module-i18n-langtags" -RDEPENDS_perl-module-locale-maketext += "perl-module-i18n-langtags-detect" -RDEPENDS_perl-module-locale-maketext += "perl-module-integer" -RDEPENDS_perl-module-locale-maketext += "perl-module-strict" -RDEPENDS_perl-module-locale-maketext-simple += "perl-module-base" -RDEPENDS_perl-module-locale-maketext-simple += "perl-module-strict" -RDEPENDS_perl-module-locale += "perl-module-config" -RDEPENDS_perl-module-locale-script += "perl-module-exporter" -RDEPENDS_perl-module-locale-script += "perl-module-if" -RDEPENDS_perl-module-locale-script += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-script += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-script += "perl-module-strict" -RDEPENDS_perl-module-locale-script += "perl-module-warnings" -RDEPENDS_perl-module-math-bigfloat += "perl-module-exporter" -RDEPENDS_perl-module-math-bigfloat += "perl-module-math-bigint" -RDEPENDS_perl-module-math-bigfloat += "perl-module-math-complex" -RDEPENDS_perl-module-math-bigfloat += "perl-module-overload" -RDEPENDS_perl-module-math-bigfloat += "perl-module-strict" -RDEPENDS_perl-module-math-bigfloat += "perl-module-warnings" -RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-exporter" -RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-math-bigfloat" -RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-overload" -RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-strict" -RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-calcemu += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-calcemu += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-calc += 
"perl-module-constant" -RDEPENDS_perl-module-math-bigint-calc += "perl-module-integer" -RDEPENDS_perl-module-math-bigint-calc += "perl-module-math-bigint-lib" -RDEPENDS_perl-module-math-bigint-calc += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-calc += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-fastcalc += "perl-module-math-bigint-calc" -RDEPENDS_perl-module-math-bigint-fastcalc += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-fastcalc += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-fastcalc += "perl-module-xsloader" -RDEPENDS_perl-module-math-bigint-lib += "perl-module-overload" -RDEPENDS_perl-module-math-bigint-lib += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-lib += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint += "perl-module-exporter" -RDEPENDS_perl-module-math-bigint += "perl-module-math-bigfloat" -RDEPENDS_perl-module-math-bigint += "perl-module-math-complex" -RDEPENDS_perl-module-math-bigint += "perl-module-overload" -RDEPENDS_perl-module-math-bigint += "perl-module-strict" -RDEPENDS_perl-module-math-bigint += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-trace += "perl-module-exporter" -RDEPENDS_perl-module-math-bigint-trace += "perl-module-math-bigint" -RDEPENDS_perl-module-math-bigint-trace += "perl-module-overload" -RDEPENDS_perl-module-math-bigint-trace += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-trace += "perl-module-warnings" -RDEPENDS_perl-module-math-bigrat += "perl-module-math-bigfloat" -RDEPENDS_perl-module-math-bigrat += "perl-module-math-bigint" -RDEPENDS_perl-module-math-bigrat += "perl-module-overload" -RDEPENDS_perl-module-math-bigrat += "perl-module-strict" -RDEPENDS_perl-module-math-bigrat += "perl-module-warnings" -RDEPENDS_perl-module-math-complex += "perl-module-config" -RDEPENDS_perl-module-math-complex += "perl-module-exporter" -RDEPENDS_perl-module-math-complex += "perl-module-overload" -RDEPENDS_perl-module-math-complex += "perl-module-strict" -RDEPENDS_perl-module-math-complex += "perl-module-warnings" -RDEPENDS_perl-module-math-trig += "perl-module-exporter" -RDEPENDS_perl-module-math-trig += "perl-module-math-complex" -RDEPENDS_perl-module-math-trig += "perl-module-strict" -RDEPENDS_perl-module-metadata += "perl-module-version" -RDEPENDS_perl-module-memoize-anydbm-file += "perl-module-vars" -RDEPENDS_perl-module-memoize += "perl-module-config" -RDEPENDS_perl-module-memoize += "perl-module-exporter" -RDEPENDS_perl-module-memoize += "perl-module-strict" -RDEPENDS_perl-module-memoize += "perl-module-vars" -RDEPENDS_perl-module-memoize-sdbm-file += "perl-module-sdbm-file" -RDEPENDS_perl-module-memoize-storable += "perl-module-storable" -RDEPENDS_perl-module-mime-base64 += "perl-module-exporter" -RDEPENDS_perl-module-mime-base64 += "perl-module-strict" -RDEPENDS_perl-module-mime-base64 += "perl-module-vars" -RDEPENDS_perl-module-mime-base64 += "perl-module-xsloader" -RDEPENDS_perl-module-mime-quotedprint += "perl-module-exporter" -RDEPENDS_perl-module-mime-quotedprint += "perl-module-mime-base64" -RDEPENDS_perl-module-mime-quotedprint += "perl-module-strict" -RDEPENDS_perl-module-mime-quotedprint += "perl-module-vars" -RDEPENDS_perl-module-mro += "perl-module-strict" -RDEPENDS_perl-module-mro += "perl-module-warnings" -RDEPENDS_perl-module-mro += "perl-module-xsloader" -RDEPENDS_perl-module-net-cmd += "perl-module-constant" -RDEPENDS_perl-module-net-cmd += "perl-module-errno" -RDEPENDS_perl-module-net-cmd += "perl-module-exporter" -RDEPENDS_perl-module-net-cmd += 
"perl-module-strict" -RDEPENDS_perl-module-net-cmd += "perl-module-warnings" -RDEPENDS_perl-module-net-config += "perl-module-exporter" -RDEPENDS_perl-module-net-config += "perl-module-socket" -RDEPENDS_perl-module-net-config += "perl-module-strict" -RDEPENDS_perl-module-net-config += "perl-module-warnings" -RDEPENDS_perl-module-net-domain += "perl-module-exporter" -RDEPENDS_perl-module-net-domain += "perl-module-net-config" -RDEPENDS_perl-module-net-domain += "perl-module-posix" -RDEPENDS_perl-module-net-domain += "perl-module-socket" -RDEPENDS_perl-module-net-domain += "perl-module-strict" -RDEPENDS_perl-module-net-domain += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp-a += "perl-module-net-ftp-dataconn" -RDEPENDS_perl-module-net-ftp-a += "perl-module-strict" -RDEPENDS_perl-module-net-ftp-a += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp-dataconn += "perl-module-errno" -RDEPENDS_perl-module-net-ftp-dataconn += "perl-module-net-cmd" -RDEPENDS_perl-module-net-ftp-dataconn += "perl-module-strict" -RDEPENDS_perl-module-net-ftp-dataconn += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp-e += "perl-module-net-ftp-i" -RDEPENDS_perl-module-net-ftp-e += "perl-module-strict" -RDEPENDS_perl-module-net-ftp-e += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp-i += "perl-module-net-ftp-dataconn" -RDEPENDS_perl-module-net-ftp-i += "perl-module-strict" -RDEPENDS_perl-module-net-ftp-i += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp-l += "perl-module-net-ftp-i" -RDEPENDS_perl-module-net-ftp-l += "perl-module-strict" -RDEPENDS_perl-module-net-ftp-l += "perl-module-warnings" -RDEPENDS_perl-module-net-ftp += "perl-module-constant" -RDEPENDS_perl-module-net-ftp += "perl-module-fcntl" -RDEPENDS_perl-module-net-ftp += "perl-module-file-basename" -RDEPENDS_perl-module-net-ftp += "perl-module-io-socket" -RDEPENDS_perl-module-net-ftp += "perl-module-io-socket-ip" -RDEPENDS_perl-module-net-ftp += "perl-module-net-cmd" -RDEPENDS_perl-module-net-ftp += "perl-module-net-config" -RDEPENDS_perl-module-net-ftp += "perl-module-net-ftp-a" -RDEPENDS_perl-module-net-ftp += "perl-module-net-netrc" -RDEPENDS_perl-module-net-ftp += "perl-module-socket" -RDEPENDS_perl-module-net-ftp += "perl-module-strict" -RDEPENDS_perl-module-net-ftp += "perl-module-time-local" -RDEPENDS_perl-module-net-ftp += "perl-module-warnings" -RDEPENDS_perl-module-net-hostent += "perl-module-class-struct" -RDEPENDS_perl-module-net-hostent += "perl-module-exporter" -RDEPENDS_perl-module-net-hostent += "perl-module-socket" -RDEPENDS_perl-module-net-hostent += "perl-module-strict" -RDEPENDS_perl-module-net-netent += "perl-module-class-struct" -RDEPENDS_perl-module-net-netent += "perl-module-exporter" -RDEPENDS_perl-module-net-netent += "perl-module-socket" -RDEPENDS_perl-module-net-netent += "perl-module-strict" -RDEPENDS_perl-module-net-netrc += "perl-module-filehandle" -RDEPENDS_perl-module-net-netrc += "perl-module-strict" -RDEPENDS_perl-module-net-netrc += "perl-module-warnings" -RDEPENDS_perl-module-net-nntp += "perl-module-io-socket" -RDEPENDS_perl-module-net-nntp += "perl-module-io-socket-ip" -RDEPENDS_perl-module-net-nntp += "perl-module-net-cmd" -RDEPENDS_perl-module-net-nntp += "perl-module-net-config" -RDEPENDS_perl-module-net-nntp += "perl-module-strict" -RDEPENDS_perl-module-net-nntp += "perl-module-time-local" -RDEPENDS_perl-module-net-nntp += "perl-module-warnings" -RDEPENDS_perl-module-net-ping += "perl-module-constant" -RDEPENDS_perl-module-net-ping += "perl-module-exporter" -RDEPENDS_perl-module-net-ping 
+= "perl-module-fcntl" -RDEPENDS_perl-module-net-ping += "perl-module-filehandle" -RDEPENDS_perl-module-net-ping += "perl-module-io-socket-inet" -RDEPENDS_perl-module-net-ping += "perl-module-posix" -RDEPENDS_perl-module-net-ping += "perl-module-socket" -RDEPENDS_perl-module-net-ping += "perl-module-strict" -RDEPENDS_perl-module-net-ping += "perl-module-time-hires" -RDEPENDS_perl-module-net-pop3 += "perl-module-io-socket" -RDEPENDS_perl-module-net-pop3 += "perl-module-io-socket-ip" -RDEPENDS_perl-module-net-pop3 += "perl-module-mime-base64" -RDEPENDS_perl-module-net-pop3 += "perl-module-net-cmd" -RDEPENDS_perl-module-net-pop3 += "perl-module-net-config" -RDEPENDS_perl-module-net-pop3 += "perl-module-net-netrc" -RDEPENDS_perl-module-net-pop3 += "perl-module-strict" -RDEPENDS_perl-module-net-pop3 += "perl-module-warnings" -RDEPENDS_perl-module-net-protoent += "perl-module-class-struct" -RDEPENDS_perl-module-net-protoent += "perl-module-exporter" -RDEPENDS_perl-module-net-protoent += "perl-module-strict" -RDEPENDS_perl-module-net-servent += "perl-module-class-struct" -RDEPENDS_perl-module-net-servent += "perl-module-exporter" -RDEPENDS_perl-module-net-servent += "perl-module-strict" -RDEPENDS_perl-module-net-smtp += "perl-module-io-socket" -RDEPENDS_perl-module-net-smtp += "perl-module-io-socket-ip" -RDEPENDS_perl-module-net-smtp += "perl-module-mime-base64" -RDEPENDS_perl-module-net-smtp += "perl-module-net-cmd" -RDEPENDS_perl-module-net-smtp += "perl-module-net-config" -RDEPENDS_perl-module-net-smtp += "perl-module-socket" -RDEPENDS_perl-module-net-smtp += "perl-module-strict" -RDEPENDS_perl-module-net-smtp += "perl-module-warnings" -RDEPENDS_perl-module-net-time += "perl-module-exporter" -RDEPENDS_perl-module-net-time += "perl-module-io-select" -RDEPENDS_perl-module-net-time += "perl-module-io-socket" -RDEPENDS_perl-module-net-time += "perl-module-net-config" -RDEPENDS_perl-module-net-time += "perl-module-strict" -RDEPENDS_perl-module-net-time += "perl-module-warnings" -RDEPENDS_perl-module-next += "perl-module-overload" -RDEPENDS_perl-module-next += "perl-module-strict" -RDEPENDS_perl-module-next += "perl-module-warnings" -RDEPENDS_perl-module-ok += "perl-module-strict" -RDEPENDS_perl-module-ok += "perl-module-test-more" -RDEPENDS_perl-module-opcode += "perl-module-exporter" -RDEPENDS_perl-module-opcode += "perl-module-strict" -RDEPENDS_perl-module-opcode += "perl-module-subs" -RDEPENDS_perl-module-opcode += "perl-module-xsloader" -RDEPENDS_perl-module-open += "perl-module-encode" -RDEPENDS_perl-module-open += "perl-module-encoding" -RDEPENDS_perl-module-open += "perl-module-warnings" -RDEPENDS_perl-module-o += "perl-module-b" -RDEPENDS_perl-module-ops += "perl-module-opcode" -RDEPENDS_perl-module-overloading += "perl-module-overload-numbers" -RDEPENDS_perl-module-overloading += "perl-module-warnings" -RDEPENDS_perl-module-overload += "perl-module-mro" -RDEPENDS_perl-module-overload += "perl-module-warnings-register" -RDEPENDS_perl-module-overload += "perl-module-overloading" -RDEPENDS_perl-module-params-check += "perl-module-exporter" -RDEPENDS_perl-module-params-check += "perl-module-locale-maketext-simple" -RDEPENDS_perl-module-params-check += "perl-module-strict" -RDEPENDS_perl-module-params-check += "perl-module-vars" -RDEPENDS_perl-module-parent += "perl-module-strict" -RDEPENDS_perl-module-parent += "perl-module-vars" -RDEPENDS_perl-module-perlfaq += "perl-module-strict" -RDEPENDS_perl-module-perlfaq += "perl-module-warnings" -RDEPENDS_perl-module-perlio-encoding += 
"perl-module-strict" -RDEPENDS_perl-module-perlio-encoding += "perl-module-xsloader" -RDEPENDS_perl-module-perlio-mmap += "perl-module-strict" -RDEPENDS_perl-module-perlio-mmap += "perl-module-warnings" -RDEPENDS_perl-module-perlio-mmap += "perl-module-xsloader" -RDEPENDS_perl-module-perlio-scalar += "perl-module-xsloader" -RDEPENDS_perl-module-perlio-via += "perl-module-xsloader" -RDEPENDS_perl-module-perlio-via-quotedprint += "perl-module-mime-quotedprint" -RDEPENDS_perl-module-perlio-via-quotedprint += "perl-module-strict" -RDEPENDS_perl-module-pod-checker += "perl-module-base" -RDEPENDS_perl-module-pod-checker += "perl-module-exporter" -RDEPENDS_perl-module-pod-checker += "perl-module-strict" -RDEPENDS_perl-module-pod-checker += "perl-module-warnings" -RDEPENDS_perl-module-pod-escapes += "perl-module-exporter" -RDEPENDS_perl-module-pod-escapes += "perl-module-strict" -RDEPENDS_perl-module-pod-escapes += "perl-module-vars" -RDEPENDS_perl-module-pod-escapes += "perl-module-warnings" -RDEPENDS_perl-module-pod-find += "perl-module-config" -RDEPENDS_perl-module-pod-find += "perl-module-cwd" -RDEPENDS_perl-module-pod-find += "perl-module-exporter" -RDEPENDS_perl-module-pod-find += "perl-module-file-find" -RDEPENDS_perl-module-pod-find += "perl-module-strict" -RDEPENDS_perl-module-pod-find += "perl-module-vars" -RDEPENDS_perl-module-pod-functions += "perl-module-exporter" -RDEPENDS_perl-module-pod-functions += "perl-module-strict" -RDEPENDS_perl-module-pod-inputobjects += "perl-module-strict" -RDEPENDS_perl-module-pod-inputobjects += "perl-module-vars" -RDEPENDS_perl-module-pod-man += "perl-module-file-basename" -RDEPENDS_perl-module-pod-man += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-man += "perl-module-strict" -RDEPENDS_perl-module-pod-man += "perl-module-subs" -RDEPENDS_perl-module-pod-man += "perl-module-vars" -RDEPENDS_perl-module-pod-man += "perl-module-warnings" -RDEPENDS_perl-module-pod-parselink += "perl-module-exporter" -RDEPENDS_perl-module-pod-parselink += "perl-module-strict" -RDEPENDS_perl-module-pod-parselink += "perl-module-vars" -RDEPENDS_perl-module-pod-parselink += "perl-module-warnings" -RDEPENDS_perl-module-pod-parser += "perl-module-exporter" -RDEPENDS_perl-module-pod-parser += "perl-module-pod-inputobjects" -RDEPENDS_perl-module-pod-parser += "perl-module-strict" -RDEPENDS_perl-module-pod-parser += "perl-module-vars" -RDEPENDS_perl-module-pod-parseutils += "perl-module-strict" -RDEPENDS_perl-module-pod-parseutils += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-config" -RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-getoptsoo += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-getoptsoo += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc += "perl-module-config" -RDEPENDS_perl-module-pod-perldoc += "perl-module-encode" -RDEPENDS_perl-module-pod-perldoc += "perl-module-fcntl" -RDEPENDS_perl-module-pod-perldoc += "perl-module-file-basename" -RDEPENDS_perl-module-pod-perldoc += "perl-module-file-temp" -RDEPENDS_perl-module-pod-perldoc += "perl-module-pod-perldoc-getoptsoo" -RDEPENDS_perl-module-pod-perldoc += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc += "perl-module-text-parsewords" -RDEPENDS_perl-module-pod-perldoc += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc += "perl-module-warnings" 
-RDEPENDS_perl-module-pod-perldoc-toansi += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-toansi += "perl-module-pod-text-color" -RDEPENDS_perl-module-pod-perldoc-toansi += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-toansi += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-toansi += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-tochecker += "perl-module-pod-checker" -RDEPENDS_perl-module-pod-perldoc-tochecker += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-tochecker += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-tochecker += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-encode" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-io-handle" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-io-select" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-ipc-open3" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-pod-man" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-pod-perldoc-topod" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-toman += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-tonroff += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-tonroff += "perl-module-pod-man" -RDEPENDS_perl-module-pod-perldoc-tonroff += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-tonroff += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-tonroff += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-topod += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-topod += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-topod += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-topod += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-tortf += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-tortf += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-tortf += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-tortf += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-toterm += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-toterm += "perl-module-pod-text-termcap" -RDEPENDS_perl-module-pod-perldoc-toterm += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-toterm += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-toterm += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-totext += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-totext += "perl-module-pod-text" -RDEPENDS_perl-module-pod-perldoc-totext += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-totext += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-totext += "perl-module-warnings" -RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-parent" -RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-strict" -RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-vars" -RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-warnings" -RDEPENDS_perl-module-pod-plaintext += "perl-module-pod-select" -RDEPENDS_perl-module-pod-plaintext += "perl-module-strict" -RDEPENDS_perl-module-pod-plaintext += "perl-module-vars" -RDEPENDS_perl-module-pod-select += "perl-module-pod-parser" -RDEPENDS_perl-module-pod-select += "perl-module-strict" -RDEPENDS_perl-module-pod-select += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-integer" -RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-pod-simple" 
-RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-pod-simple-transcode" -RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-checker += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-checker += "perl-module-pod-simple-methody" -RDEPENDS_perl-module-pod-simple-checker += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-checker += "perl-module-text-wrap" -RDEPENDS_perl-module-pod-simple-checker += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-debug += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-debug += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-debug += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-dumpastext += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-dumpastext += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-text-wrap" -RDEPENDS_perl-module-pod-simple-linksection += "perl-module-overload" -RDEPENDS_perl-module-pod-simple-linksection += "perl-module-pod-simple-blackbox" -RDEPENDS_perl-module-pod-simple-linksection += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-linksection += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-methody += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-methody += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-methody += "perl-module-vars" -RDEPENDS_perl-module-pod-simple += "perl-module-integer" -RDEPENDS_perl-module-pod-simple += "perl-module-pod-escapes" -RDEPENDS_perl-module-pod-simple += "perl-module-pod-simple-blackbox" -RDEPENDS_perl-module-pod-simple += "perl-module-pod-simple-linksection" -RDEPENDS_perl-module-pod-simple += "perl-module-pod-simple-tiedoutfh" -RDEPENDS_perl-module-pod-simple += "perl-module-strict" -RDEPENDS_perl-module-pod-simple += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-progress += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-pullparserendtoken += "perl-module-pod-simple-pullparsertoken" -RDEPENDS_perl-module-pod-simple-pullparserendtoken += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-pullparserendtoken += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-pullparser += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-pullparser += "perl-module-pod-simple-pullparserendtoken" -RDEPENDS_perl-module-pod-simple-pullparser += "perl-module-pod-simple-pullparserstarttoken" -RDEPENDS_perl-module-pod-simple-pullparser += "perl-module-pod-simple-pullparsertexttoken" -RDEPENDS_perl-module-pod-simple-pullparser += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-pullparserstarttoken += "perl-module-pod-simple-pullparsertoken" -RDEPENDS_perl-module-pod-simple-pullparserstarttoken += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-pullparserstarttoken += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-pod-simple-pullparsertoken" -RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-pullparsertoken += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-rtf += "perl-module-integer" -RDEPENDS_perl-module-pod-simple-rtf += "perl-module-pod-simple-pullparser" -RDEPENDS_perl-module-pod-simple-rtf += "perl-module-strict" 
-RDEPENDS_perl-module-pod-simple-rtf += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-search += "perl-module-config" -RDEPENDS_perl-module-pod-simple-search += "perl-module-cwd" -RDEPENDS_perl-module-pod-simple-search += "perl-module-file-basename" -RDEPENDS_perl-module-pod-simple-search += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-search += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-simpletree += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-simpletree += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-simpletree += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-textcontent += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-textcontent += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-textcontent += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-text += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-text += "perl-module-pod-simple-methody" -RDEPENDS_perl-module-pod-simple-text += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-text += "perl-module-text-wrap" -RDEPENDS_perl-module-pod-simple-text += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-tiedoutfh += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-tiedoutfh += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-transcodedumb += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-transcodedumb += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-transcode += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-transcode += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-encode" -RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-vars" -RDEPENDS_perl-module-pod-simple-xmloutstream += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-simple-xmloutstream += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-xmloutstream += "perl-module-vars" -RDEPENDS_perl-module-pod-text-color += "perl-module-pod-text" -RDEPENDS_perl-module-pod-text-color += "perl-module-strict" -RDEPENDS_perl-module-pod-text-color += "perl-module-term-ansicolor" -RDEPENDS_perl-module-pod-text-color += "perl-module-vars" -RDEPENDS_perl-module-pod-text-color += "perl-module-warnings" -RDEPENDS_perl-module-pod-text-overstrike += "perl-module-pod-text" -RDEPENDS_perl-module-pod-text-overstrike += "perl-module-strict" -RDEPENDS_perl-module-pod-text-overstrike += "perl-module-vars" -RDEPENDS_perl-module-pod-text-overstrike += "perl-module-warnings" -RDEPENDS_perl-module-pod-text += "perl-module-encode" -RDEPENDS_perl-module-pod-text += "perl-module-exporter" -RDEPENDS_perl-module-pod-text += "perl-module-pod-simple" -RDEPENDS_perl-module-pod-text += "perl-module-strict" -RDEPENDS_perl-module-pod-text += "perl-module-vars" -RDEPENDS_perl-module-pod-text += "perl-module-warnings" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-pod-text" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-posix" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-strict" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-term-cap" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-vars" -RDEPENDS_perl-module-pod-text-termcap += "perl-module-warnings" -RDEPENDS_perl-module-pod-usage += "perl-module-config" -RDEPENDS_perl-module-pod-usage += "perl-module-exporter" -RDEPENDS_perl-module-pod-usage += "perl-module-strict" -RDEPENDS_perl-module-pod-usage += "perl-module-vars" 
-RDEPENDS_perl-module-posix += "perl-module-exporter" -RDEPENDS_perl-module-posix += "perl-module-fcntl" -RDEPENDS_perl-module-posix += "perl-module-strict" -RDEPENDS_perl-module-posix += "perl-module-tie-hash" -RDEPENDS_perl-module-posix += "perl-module-warnings" -RDEPENDS_perl-module-posix += "perl-module-xsloader" -RDEPENDS_perl-module-re += "perl-module-exporter" -RDEPENDS_perl-module-re += "perl-module-strict" -RDEPENDS_perl-module-re += "perl-module-term-cap" -RDEPENDS_perl-module-re += "perl-module-warnings" -RDEPENDS_perl-module-re += "perl-module-xsloader" -RDEPENDS_perl-module-safe += "perl-module-b" -RDEPENDS_perl-module-safe += "perl-module-opcode" -RDEPENDS_perl-module-safe += "perl-module-strict" -RDEPENDS_perl-module-safe += "perl-module-utf8" -RDEPENDS_perl-module-scalar-util += "perl-module-carp" -RDEPENDS_perl-module-scalar-util += "perl-module-exporter" -RDEPENDS_perl-module-scalar-util += "perl-module-list-util" -RDEPENDS_perl-module-scalar-util += "perl-module-strict" -RDEPENDS_perl-module-scalar-util += "perl-module-warnings" -RDEPENDS_perl-module-sdbm-file += "perl-module-exporter" -RDEPENDS_perl-module-sdbm-file += "perl-module-strict" -RDEPENDS_perl-module-sdbm-file += "perl-module-tie-hash" -RDEPENDS_perl-module-sdbm-file += "perl-module-warnings" -RDEPENDS_perl-module-sdbm-file += "perl-module-xsloader" -RDEPENDS_perl-module-search-dict += "perl-module-exporter" -RDEPENDS_perl-module-search-dict += "perl-module-feature" -RDEPENDS_perl-module-search-dict += "perl-module-strict" -RDEPENDS_perl-module-selfloader += "perl-module-exporter" -RDEPENDS_perl-module-selfloader += "perl-module-io-handle" -RDEPENDS_perl-module-selfloader += "perl-module-strict" -RDEPENDS_perl-module-socket += "perl-module-exporter" -RDEPENDS_perl-module-socket += "perl-module-strict" -RDEPENDS_perl-module-socket += "perl-module-warnings-register" -RDEPENDS_perl-module-socket += "perl-module-xsloader" -RDEPENDS_perl-module-sort += "perl-module-strict" -RDEPENDS_perl-module-storable += "perl-module-exporter" -RDEPENDS_perl-module-storable += "perl-module-io-file" -RDEPENDS_perl-module-storable += "perl-module-xsloader" -RDEPENDS_perl-module-sub-util += "perl-module-exporter" -RDEPENDS_perl-module-sub-util += "perl-module-list-util" -RDEPENDS_perl-module-sub-util += "perl-module-strict" -RDEPENDS_perl-module-sub-util += "perl-module-warnings" -RDEPENDS_perl-module-sys-hostname += "perl-module-exporter" -RDEPENDS_perl-module-sys-hostname += "perl-module-posix" -RDEPENDS_perl-module-sys-hostname += "perl-module-strict" -RDEPENDS_perl-module-sys-hostname += "perl-module-warnings" -RDEPENDS_perl-module-sys-hostname += "perl-module-xsloader" -RDEPENDS_perl-module-sys-syslog += "perl-module-config" -RDEPENDS_perl-module-sys-syslog += "perl-module-constant" -RDEPENDS_perl-module-sys-syslog += "perl-module-dynaloader" -RDEPENDS_perl-module-sys-syslog += "perl-module-exporter" -RDEPENDS_perl-module-sys-syslog += "perl-module-fcntl" -RDEPENDS_perl-module-sys-syslog += "perl-module-file-basename" -RDEPENDS_perl-module-sys-syslog += "perl-module-posix" -RDEPENDS_perl-module-sys-syslog += "perl-module-socket" -RDEPENDS_perl-module-sys-syslog += "perl-module-strict" -RDEPENDS_perl-module-sys-syslog += "perl-module-sys-hostname" -RDEPENDS_perl-module-sys-syslog += "perl-module-vars" -RDEPENDS_perl-module-sys-syslog += "perl-module-warnings" -RDEPENDS_perl-module-sys-syslog += "perl-module-warnings-register" -RDEPENDS_perl-module-sys-syslog += "perl-module-xsloader" -RDEPENDS_perl-module-tap-base += 
"perl-module-base" -RDEPENDS_perl-module-tap-base += "perl-module-constant" -RDEPENDS_perl-module-tap-base += "perl-module-strict" -RDEPENDS_perl-module-tap-base += "perl-module-tap-object" -RDEPENDS_perl-module-tap-base += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-base += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-base += "perl-module-posix" -RDEPENDS_perl-module-tap-formatter-base += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-base += "perl-module-tap-formatter-color" -RDEPENDS_perl-module-tap-formatter-base += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-color += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-color += "perl-module-constant" -RDEPENDS_perl-module-tap-formatter-color += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-color += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-console-parallelsession += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-console-parallelsession += "perl-module-constant" -RDEPENDS_perl-module-tap-formatter-console-parallelsession += "perl-module-file-path" -RDEPENDS_perl-module-tap-formatter-console-parallelsession += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-console-parallelsession += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-console += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-console += "perl-module-posix" -RDEPENDS_perl-module-tap-formatter-console += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-console += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-console-session += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-console-session += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-console-session += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-posix" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-tap-formatter-file-session" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-file-session += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-file-session += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-file-session += "perl-module-warnings" -RDEPENDS_perl-module-tap-formatter-session += "perl-module-base" -RDEPENDS_perl-module-tap-formatter-session += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-session += "perl-module-warnings" -RDEPENDS_perl-module-tap-harness-env += "perl-module-constant" -RDEPENDS_perl-module-tap-harness-env += "perl-module-strict" -RDEPENDS_perl-module-tap-harness-env += "perl-module-tap-object" -RDEPENDS_perl-module-tap-harness-env += "perl-module-text-parsewords" -RDEPENDS_perl-module-tap-harness-env += "perl-module-warnings" -RDEPENDS_perl-module-tap-harness += "perl-module-base" -RDEPENDS_perl-module-tap-harness += "perl-module-carp" -RDEPENDS_perl-module-tap-harness += "perl-module-file-path" -RDEPENDS_perl-module-tap-harness += "perl-module-file-spec" -RDEPENDS_perl-module-tap-harness += "perl-module-io-handle" -RDEPENDS_perl-module-tap-harness += "perl-module-strict" -RDEPENDS_perl-module-tap-harness += "perl-module-tap-base" -RDEPENDS_perl-module-tap-harness += "perl-module-warnings" -RDEPENDS_perl-module-tap-object += "perl-module-strict" -RDEPENDS_perl-module-tap-object += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-aggregator += "perl-module-base" 
-RDEPENDS_perl-module-tap-parser-aggregator += "perl-module-benchmark" -RDEPENDS_perl-module-tap-parser-aggregator += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-aggregator += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-aggregator += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-base" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-tap-parser-resultfactory" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-tap-parser-yamlish-reader" -RDEPENDS_perl-module-tap-parser-grammar += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-iterator-array += "perl-module-base" -RDEPENDS_perl-module-tap-parser-iterator-array += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-iterator-array += "perl-module-tap-parser-iterator" -RDEPENDS_perl-module-tap-parser-iterator-array += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-base" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-carp" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-file-basename" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-iteratorfactory += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-iterator += "perl-module-base" -RDEPENDS_perl-module-tap-parser-iterator += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-iterator += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-iterator += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-base" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-config" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-io-handle" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-tap-parser-iterator" -RDEPENDS_perl-module-tap-parser-iterator-process += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-iterator-stream += "perl-module-base" -RDEPENDS_perl-module-tap-parser-iterator-stream += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-iterator-stream += "perl-module-tap-parser-iterator" -RDEPENDS_perl-module-tap-parser-iterator-stream += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-multiplexer += "perl-module-base" -RDEPENDS_perl-module-tap-parser-multiplexer += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-multiplexer += "perl-module-io-select" -RDEPENDS_perl-module-tap-parser-multiplexer += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-multiplexer += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser += "perl-module-base" -RDEPENDS_perl-module-tap-parser += "perl-module-carp" -RDEPENDS_perl-module-tap-parser += "perl-module-strict" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-grammar" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-iterator" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-resultfactory" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-source" -RDEPENDS_perl-module-tap-parser += 
"perl-module-tap-parser-sourcehandler-executable" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-sourcehandler-file" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-sourcehandler-handle" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-sourcehandler-perl" -RDEPENDS_perl-module-tap-parser += "perl-module-tap-parser-sourcehandler-rawtap" -RDEPENDS_perl-module-tap-parser += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-bailout += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-bailout += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-bailout += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-bailout += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-comment += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-comment += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-comment += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-comment += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-base" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-bailout" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-comment" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-plan" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-pragma" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-test" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-unknown" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-version" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-tap-parser-result-yaml" -RDEPENDS_perl-module-tap-parser-resultfactory += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-result += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-plan += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-plan += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-plan += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-plan += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-pragma += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-pragma += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-pragma += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-pragma += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-test += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-test += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-test += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-test += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-unknown += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-unknown += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-unknown += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-unknown += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-version += "perl-module-base" 
-RDEPENDS_perl-module-tap-parser-result-version += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-version += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-version += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-result-yaml += "perl-module-base" -RDEPENDS_perl-module-tap-parser-result-yaml += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-result-yaml += "perl-module-tap-parser-result" -RDEPENDS_perl-module-tap-parser-result-yaml += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-scheduler-job += "perl-module-carp" -RDEPENDS_perl-module-tap-parser-scheduler-job += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-scheduler-job += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-scheduler += "perl-module-carp" -RDEPENDS_perl-module-tap-parser-scheduler += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-scheduler += "perl-module-tap-parser-scheduler-job" -RDEPENDS_perl-module-tap-parser-scheduler += "perl-module-tap-parser-scheduler-spinner" -RDEPENDS_perl-module-tap-parser-scheduler += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-scheduler-spinner += "perl-module-carp" -RDEPENDS_perl-module-tap-parser-scheduler-spinner += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-scheduler-spinner += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-base" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-tap-parser-iterator-process" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-tap-parser-sourcehandler" -RDEPENDS_perl-module-tap-parser-sourcehandler-executable += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-base" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-tap-parser-iterator-stream" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-tap-parser-sourcehandler" -RDEPENDS_perl-module-tap-parser-sourcehandler-file += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-base" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-tap-parser-iterator-stream" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-tap-parser-sourcehandler" -RDEPENDS_perl-module-tap-parser-sourcehandler-handle += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler += "perl-module-base" -RDEPENDS_perl-module-tap-parser-sourcehandler += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler += "perl-module-tap-parser-iterator" -RDEPENDS_perl-module-tap-parser-sourcehandler += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-base" 
-RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-config" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-iterator-process" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-sourcehandler-executable" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-text-parsewords" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-base" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-tap-parser-iterator-array" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-tap-parser-iteratorfactory" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-tap-parser-sourcehandler" -RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-source += "perl-module-base" -RDEPENDS_perl-module-tap-parser-source += "perl-module-constant" -RDEPENDS_perl-module-tap-parser-source += "perl-module-file-basename" -RDEPENDS_perl-module-tap-parser-source += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-source += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-source += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-yamlish-reader += "perl-module-base" -RDEPENDS_perl-module-tap-parser-yamlish-reader += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-yamlish-reader += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-yamlish-reader += "perl-module-warnings" -RDEPENDS_perl-module-tap-parser-yamlish-writer += "perl-module-base" -RDEPENDS_perl-module-tap-parser-yamlish-writer += "perl-module-strict" -RDEPENDS_perl-module-tap-parser-yamlish-writer += "perl-module-tap-object" -RDEPENDS_perl-module-tap-parser-yamlish-writer += "perl-module-warnings" -RDEPENDS_perl-module-term-ansicolor += "perl-module-exporter" -RDEPENDS_perl-module-term-ansicolor += "perl-module-strict" -RDEPENDS_perl-module-term-ansicolor += "perl-module-warnings" -RDEPENDS_perl-module-term-cap += "perl-module-strict" -RDEPENDS_perl-module-term-cap += "perl-module-vars" -RDEPENDS_perl-module-term-complete += "perl-module-exporter" -RDEPENDS_perl-module-term-complete += "perl-module-strict" -RDEPENDS_perl-module-term-readline += "perl-module-strict" -RDEPENDS_perl-module-term-readline += "perl-module-term-cap" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-strict" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-warnings" -RDEPENDS_perl-module-test-builder-module += "perl-module-exporter" -RDEPENDS_perl-module-test-builder-module += "perl-module-strict" -RDEPENDS_perl-module-test-builder-module += "perl-module-test-builder" -RDEPENDS_perl-module-test-builder += "perl-module-data-dumper" -RDEPENDS_perl-module-test-builder += "perl-module-list-util" -RDEPENDS_perl-module-test-builder += "perl-module-overload" -RDEPENDS_perl-module-test-builder += "perl-module-strict" -RDEPENDS_perl-module-test-builder += "perl-module-scalar-util" -RDEPENDS_perl-module-test-builder += "perl-module-test-builder-formatter" -RDEPENDS_perl-module-test-builder += "perl-module-test-builder-tododiag" 
-RDEPENDS_perl-module-test-builder += "perl-module-test2-api" -RDEPENDS_perl-module-test-builder += "perl-module-test2-util" -RDEPENDS_perl-module-test-builder += "perl-module-warnings" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-strict" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-test2-formatter" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-test2-formatter-tap" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test-builder-formatter += "perl-module-warnings" -RDEPENDS_perl-module-test-builder-tester-color += "perl-module-strict" -RDEPENDS_perl-module-test-builder-tester-color += "perl-module-test-builder-tester" -RDEPENDS_perl-module-test-builder-tester += "perl-module-exporter" -RDEPENDS_perl-module-test-builder-tester += "perl-module-strict" -RDEPENDS_perl-module-test-builder-tester += "perl-module-test-builder" -RDEPENDS_perl-module-test-builder-tododiag += "perl-module-strict" -RDEPENDS_perl-module-test-builder-tododiag += "perl-module-warnings" -RDEPENDS_perl-module-test-harness += "perl-module-base" -RDEPENDS_perl-module-test-harness += "perl-module-config" -RDEPENDS_perl-module-test-harness += "perl-module-constant" -RDEPENDS_perl-module-test-harness += "perl-module-strict" -RDEPENDS_perl-module-test-harness += "perl-module-tap-harness" -RDEPENDS_perl-module-test-harness += "perl-module-tap-parser-aggregator" -RDEPENDS_perl-module-test-harness += "perl-module-tap-parser-source" -RDEPENDS_perl-module-test-harness += "perl-module-tap-parser-sourcehandler-perl" -RDEPENDS_perl-module-test-harness += "perl-module-text-parsewords" -RDEPENDS_perl-module-test-harness += "perl-module-warnings" -RDEPENDS_perl-module-test-more += "perl-module-file-temp" -RDEPENDS_perl-module-test-more += "perl-module-strict" -RDEPENDS_perl-module-test-more += "perl-module-test-builder-module" -RDEPENDS_perl-module-test-more += "perl-module-warnings" -RDEPENDS_perl-module-test += "perl-module-exporter" -RDEPENDS_perl-module-test += "perl-module-file-temp" -RDEPENDS_perl-module-test += "perl-module-strict" -RDEPENDS_perl-module-test-simple += "perl-module-strict" -RDEPENDS_perl-module-test-simple += "perl-module-test-builder-module" -RDEPENDS_perl-module-test-tester-capture += "perl-module-config" -RDEPENDS_perl-module-test-tester-capture += "perl-module-strict" -RDEPENDS_perl-module-test-tester-capture += "perl-module-test-builder" -RDEPENDS_perl-module-test-tester-capture += "perl-module-threads-shared" -RDEPENDS_perl-module-test-tester-capture += "perl-module-vars" -RDEPENDS_perl-module-test-tester-capturerunner += "perl-module-exporter" -RDEPENDS_perl-module-test-tester-capturerunner += "perl-module-strict" -RDEPENDS_perl-module-test-tester-capturerunner += "perl-module-test-tester-capture" -RDEPENDS_perl-module-test-tester-delegate += "perl-module-strict" -RDEPENDS_perl-module-test-tester-delegate += "perl-module-vars" -RDEPENDS_perl-module-test-tester-delegate += "perl-module-warnings" -RDEPENDS_perl-module-test-tester += "perl-module-exporter" -RDEPENDS_perl-module-test-tester += "perl-module-strict" -RDEPENDS_perl-module-test-tester += "perl-module-test-builder" -RDEPENDS_perl-module-test-tester += "perl-module-test-tester-capturerunner" -RDEPENDS_perl-module-test-tester += "perl-module-test-tester-delegate" -RDEPENDS_perl-module-test-tester += "perl-module-vars" -RDEPENDS_perl-module-test2-api += "perl-module-carp" -RDEPENDS_perl-module-test2-api += "perl-module-exporter" 
-RDEPENDS_perl-module-test2-api += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-api += "perl-module-strict" -RDEPENDS_perl-module-test2-api += "perl-module-test2-api-context" -RDEPENDS_perl-module-test2-api += "perl-module-test2-api-instance" -RDEPENDS_perl-module-test2-api += "perl-module-test2-api-stack" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-bail" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-diag" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-exception" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-fail" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-note" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-ok" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-plan" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-skip" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-subtest" -RDEPENDS_perl-module-test2-api += "perl-module-test2-event-waiting" -RDEPENDS_perl-module-test2-api += "perl-module-test2-eventfacet-trace" -RDEPENDS_perl-module-test2-api += "perl-module-test2-hub" -RDEPENDS_perl-module-test2-api += "perl-module-test2-hub-interceptor" -RDEPENDS_perl-module-test2-api += "perl-module-test2-hub-interceptor-terminator" -RDEPENDS_perl-module-test2-api += "perl-module-test2-hub-subtest" -RDEPENDS_perl-module-test2-api += "perl-module-test2-util" -RDEPENDS_perl-module-test2-api += "perl-module-test2-util-trace" -RDEPENDS_perl-module-test2-api += "perl-module-warnings" -RDEPENDS_perl-module-test2-api-context += "perl-module-carp" -RDEPENDS_perl-module-test2-api-context += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-api-context += "perl-module-strict" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-api" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-event-v2" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-eventfacet" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-eventfacet-trace" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-util" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-util-externalmeta" -RDEPENDS_perl-module-test2-api-context += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test2-api-context += "perl-module-warnings" -RDEPENDS_perl-module-test2-event += "perl-module-carp" -RDEPENDS_perl-module-test2-event += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-event += "perl-module-strict" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-about" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-amnesty" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-assert" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-control" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-error" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-hub" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-info" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-meta" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-parent" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-plan" -RDEPENDS_perl-module-test2-event += "perl-module-test2-eventfacet-trace" -RDEPENDS_perl-module-test2-event += "perl-module-test2-util" -RDEPENDS_perl-module-test2-event += 
"perl-module-test2-util-externalmeta" -RDEPENDS_perl-module-test2-event += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test2-event += "perl-module-test2-util-trace" -RDEPENDS_perl-module-test2-event += "perl-module-warnings" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-carp" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-test2-util-facets2legacy" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-strict" -RDEPENDS_perl-module-test2-event-v2 += "perl-module-warnings" -RDEPENDS_perl-module-test2-eventfacet += "perl-module-carp" -RDEPENDS_perl-module-test2-eventfacet += "perl-module-test2-util" -RDEPENDS_perl-module-test2-eventfacet += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test2-eventfacet += "perl-module-strict" -RDEPENDS_perl-module-test2-eventfacet += "perl-module-warnings" -RDEPENDS_perl-module-test2-hub += "perl-module-carp" -RDEPENDS_perl-module-test2-hub += "perl-module-list-util" -RDEPENDS_perl-module-test2-hub += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-hub += "perl-module-strict" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-event-ok" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-event-pass" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-event-plan" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-util" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-util-externalmeta" -RDEPENDS_perl-module-test2-hub += "perl-module-test2-util-hashbase" -RDEPENDS_perl-module-test2-hub += "perl-module-warnings" -RDEPENDS_perl-module-test2-util += "perl-module-carp" -RDEPENDS_perl-module-test2-util += "perl-module-config" -RDEPENDS_perl-module-test2-util += "perl-module-posix" -RDEPENDS_perl-module-test2-util += "perl-module-strict" -RDEPENDS_perl-module-test2-util += "perl-module-warnings" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-base" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-carp" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-exporter" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-scalar-util" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-strict" -RDEPENDS_perl-module-test2-util-facets2legacy += "perl-module-warnings" -RDEPENDS_perl-module-test2-util-hashbase += "perl-module-carp" -RDEPENDS_perl-module-test2-util-hashbase += "perl-module-mro" -RDEPENDS_perl-module-test2-util-hashbase += "perl-module-strict" -RDEPENDS_perl-module-test2-util-hashbase += "perl-module-warnings" -RDEPENDS_perl-module-text-abbrev += "perl-module-exporter" -RDEPENDS_perl-module-text-balanced += "perl-module-exporter" -RDEPENDS_perl-module-text-balanced += "perl-module-overload" -RDEPENDS_perl-module-text-balanced += "perl-module-selfloader" -RDEPENDS_perl-module-text-balanced += "perl-module-strict" -RDEPENDS_perl-module-text-balanced += "perl-module-vars" -RDEPENDS_perl-module-text-parsewords += "perl-module-exporter" -RDEPENDS_perl-module-text-parsewords += "perl-module-strict" -RDEPENDS_perl-module-text-tabs += "perl-module-exporter" -RDEPENDS_perl-module-text-tabs += "perl-module-strict" -RDEPENDS_perl-module-text-tabs += "perl-module-vars" -RDEPENDS_perl-module-text-wrap += "perl-module-exporter" -RDEPENDS_perl-module-text-wrap += "perl-module-re" -RDEPENDS_perl-module-text-wrap += "perl-module-strict" -RDEPENDS_perl-module-text-wrap += "perl-module-text-tabs" -RDEPENDS_perl-module-text-wrap += 
"perl-module-vars" -RDEPENDS_perl-module-text-wrap += "perl-module-warnings-register" -RDEPENDS_perl-module-thread += "perl-module-config" -RDEPENDS_perl-module-thread += "perl-module-exporter" -RDEPENDS_perl-module-thread += "perl-module-strict" -RDEPENDS_perl-module-thread += "perl-module-threads" -RDEPENDS_perl-module-thread += "perl-module-threads-shared" -RDEPENDS_perl-module-thread += "perl-module-warnings" -RDEPENDS_perl-module-thread-queue += "perl-module-strict" -RDEPENDS_perl-module-thread-queue += "perl-module-threads-shared" -RDEPENDS_perl-module-thread-queue += "perl-module-warnings" -RDEPENDS_perl-module-thread-semaphore += "perl-module-strict" -RDEPENDS_perl-module-thread-semaphore += "perl-module-threads-shared" -RDEPENDS_perl-module-thread-semaphore += "perl-module-warnings" -RDEPENDS_perl-module-threads += "perl-module-config" -RDEPENDS_perl-module-threads += "perl-module-overload" -RDEPENDS_perl-module-threads += "perl-module-strict" -RDEPENDS_perl-module-threads += "perl-module-warnings" -RDEPENDS_perl-module-threads += "perl-module-xsloader" -RDEPENDS_perl-module-threads-shared += "perl-module-strict" -RDEPENDS_perl-module-threads-shared += "perl-module-warnings" -RDEPENDS_perl-module-threads-shared += "perl-module-xsloader" -RDEPENDS_perl-module-tie-array += "perl-module-strict" -RDEPENDS_perl-module-tie-file += "perl-module-fcntl" -RDEPENDS_perl-module-tie-file += "perl-module-posix" -RDEPENDS_perl-module-tie-file += "perl-module-strict" -RDEPENDS_perl-module-tie-handle += "perl-module-tie-stdhandle" -RDEPENDS_perl-module-tie-handle += "perl-module-warnings-register" -RDEPENDS_perl-module-tie-hash-namedcapture += "perl-module-strict" -RDEPENDS_perl-module-tie-hash-namedcapture += "perl-module-xsloader" -RDEPENDS_perl-module-tie-hash += "perl-module-warnings-register" -RDEPENDS_perl-module-tie-hash += "perl-module-carp" -RDEPENDS_perl-module-tie-memoize += "perl-module-strict" -RDEPENDS_perl-module-tie-memoize += "perl-module-tie-hash" -RDEPENDS_perl-module-tie-refhash += "perl-module-config" -RDEPENDS_perl-module-tie-refhash += "perl-module-overload" -RDEPENDS_perl-module-tie-refhash += "perl-module-strict" -RDEPENDS_perl-module-tie-refhash += "perl-module-tie-hash" -RDEPENDS_perl-module-tie-refhash += "perl-module-vars" -RDEPENDS_perl-module-tie-scalar += "perl-module-warnings-register" -RDEPENDS_perl-module-tie-stdhandle += "perl-module-strict" -RDEPENDS_perl-module-tie-stdhandle += "perl-module-tie-handle" -RDEPENDS_perl-module-tie-substrhash += "perl-module-integer" -RDEPENDS_perl-module-time-gmtime += "perl-module-exporter" -RDEPENDS_perl-module-time-gmtime += "perl-module-strict" -RDEPENDS_perl-module-time-gmtime += "perl-module-time-tm" -RDEPENDS_perl-module-time-hires += "perl-module-exporter" -RDEPENDS_perl-module-time-hires += "perl-module-strict" -RDEPENDS_perl-module-time-hires += "perl-module-xsloader" -RDEPENDS_perl-module-time-local += "perl-module-config" -RDEPENDS_perl-module-time-local += "perl-module-constant" -RDEPENDS_perl-module-time-local += "perl-module-exporter" -RDEPENDS_perl-module-time-local += "perl-module-parent" -RDEPENDS_perl-module-time-local += "perl-module-strict" -RDEPENDS_perl-module-time-localtime += "perl-module-exporter" -RDEPENDS_perl-module-time-localtime += "perl-module-strict" -RDEPENDS_perl-module-time-localtime += "perl-module-time-tm" -RDEPENDS_perl-module-time-piece += "perl-module-constant" -RDEPENDS_perl-module-time-piece += "perl-module-exporter" -RDEPENDS_perl-module-time-piece += "perl-module-integer" 
-RDEPENDS_perl-module-time-piece += "perl-module-overload" -RDEPENDS_perl-module-time-piece += "perl-module-strict" -RDEPENDS_perl-module-time-piece += "perl-module-time-local" -RDEPENDS_perl-module-time-piece += "perl-module-time-seconds" -RDEPENDS_perl-module-time-piece += "perl-module-xsloader" -RDEPENDS_perl-module-time-seconds += "perl-module-constant" -RDEPENDS_perl-module-time-seconds += "perl-module-exporter" -RDEPENDS_perl-module-time-seconds += "perl-module-overload" -RDEPENDS_perl-module-time-seconds += "perl-module-strict" -RDEPENDS_perl-module-time-tm += "perl-module-class-struct" -RDEPENDS_perl-module-time-tm += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-big5 += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-big5 += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-gb2312 += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-gb2312 += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-jisx0208 += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-jisx0208 += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-korean += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-korean += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-pinyin += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-pinyin += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-stroke += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-stroke += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-cjk-zhuyin += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-cjk-zhuyin += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate-locale += "perl-module-base" -RDEPENDS_perl-module-unicode-collate-locale += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate-locale += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate += "perl-module-constant" -RDEPENDS_perl-module-unicode-collate += "perl-module-strict" -RDEPENDS_perl-module-unicode-collate += "perl-module-warnings" -RDEPENDS_perl-module-unicode-collate += "perl-module-xsloader" -RDEPENDS_perl-module-unicode-normalize += "perl-module-exporter" -RDEPENDS_perl-module-unicode-normalize += "perl-module-strict" -RDEPENDS_perl-module-unicode-normalize += "perl-module-warnings" -RDEPENDS_perl-module-unicode-normalize += "perl-module-xsloader" -RDEPENDS_perl-module-unicode-ucd += "perl-module-charnames" -RDEPENDS_perl-module-unicode-ucd += "perl-module-exporter" -RDEPENDS_perl-module-unicode-ucd += "perl-module-feature" -RDEPENDS_perl-module-unicode-ucd += "perl-module-if" -RDEPENDS_perl-module-unicode-ucd += "perl-module-integer" -RDEPENDS_perl-module-unicode-ucd += "perl-module-strict" -RDEPENDS_perl-module-unicode-ucd += "perl-module-unicode-normalize" -RDEPENDS_perl-module-unicode-ucd += "perl-module-utf8-heavy" -RDEPENDS_perl-module-unicode-ucd += "perl-module-warnings" -RDEPENDS_perl-module-user-grent += "perl-module-class-struct" -RDEPENDS_perl-module-user-grent += "perl-module-exporter" -RDEPENDS_perl-module-user-grent += "perl-module-strict" -RDEPENDS_perl-module-user-pwent += "perl-module-class-struct" -RDEPENDS_perl-module-user-pwent += "perl-module-config" -RDEPENDS_perl-module-user-pwent += "perl-module-exporter" -RDEPENDS_perl-module-user-pwent += "perl-module-strict" -RDEPENDS_perl-module-user-pwent += "perl-module-warnings" -RDEPENDS_perl-module-utf8 += "perl-module-utf8-heavy" -RDEPENDS_perl-module-version += "perl-module-strict" 
-RDEPENDS_perl-module-version += "perl-module-version-regex" -RDEPENDS_perl-module-version += "perl-module-warnings-register" -RDEPENDS_perl-module-version-regex += "perl-module-strict" -RDEPENDS_perl-module-xsloader += "perl-module-dynaloader" -RDEPENDS_perl-module-anydbm-file += "perl-module-strict" -RDEPENDS_perl-module-anydbm-file += "perl-module-warnings" -RDEPENDS_perl-module-app-cpan += "perl-module-config" -RDEPENDS_perl-module-app-cpan += "perl-module-constant" -RDEPENDS_perl-module-app-cpan += "perl-module-cpan" -RDEPENDS_perl-module-app-cpan += "perl-module-cwd" -RDEPENDS_perl-module-app-cpan += "perl-module-data-dumper" -RDEPENDS_perl-module-app-cpan += "perl-module-file-basename" -RDEPENDS_perl-module-app-cpan += "perl-module-file-find" -RDEPENDS_perl-module-app-cpan += "perl-module-getopt-std" -RDEPENDS_perl-module-app-cpan += "perl-module-if" -RDEPENDS_perl-module-app-cpan += "perl-module-net-ping" -RDEPENDS_perl-module-app-cpan += "perl-module-strict" -RDEPENDS_perl-module-app-cpan += "perl-module-user-pwent" -RDEPENDS_perl-module-app-cpan += "perl-module-vars" -RDEPENDS_perl-module-app-cpan += "perl-module-warnings" -RDEPENDS_perl-module-app-prove += "perl-module-app-prove-state" -RDEPENDS_perl-module-app-prove += "perl-module-base" -RDEPENDS_perl-module-app-prove += "perl-module-constant" -RDEPENDS_perl-module-app-prove += "perl-module-getopt-long" -RDEPENDS_perl-module-app-prove += "perl-module-strict" -RDEPENDS_perl-module-app-prove += "perl-module-tap-harness" -RDEPENDS_perl-module-app-prove += "perl-module-tap-harness-env" -RDEPENDS_perl-module-app-prove += "perl-module-text-parsewords" -RDEPENDS_perl-module-app-prove += "perl-module-warnings" -RDEPENDS_perl-module-app-prove-state += "perl-module-app-prove-state-result" -RDEPENDS_perl-module-app-prove-state += "perl-module-base" -RDEPENDS_perl-module-app-prove-state += "perl-module-constant" -RDEPENDS_perl-module-app-prove-state += "perl-module-file-find" -RDEPENDS_perl-module-app-prove-state += "perl-module-strict" -RDEPENDS_perl-module-app-prove-state += "perl-module-tap-parser-yamlish-reader" -RDEPENDS_perl-module-app-prove-state += "perl-module-tap-parser-yamlish-writer" -RDEPENDS_perl-module-app-prove-state += "perl-module-warnings" -RDEPENDS_perl-module-app-prove-state-result += "perl-module-app-prove-state-result-test" -RDEPENDS_perl-module-app-prove-state-result += "perl-module-constant" -RDEPENDS_perl-module-app-prove-state-result += "perl-module-strict" -RDEPENDS_perl-module-app-prove-state-result += "perl-module-warnings" -RDEPENDS_perl-module-app-prove-state-result-test += "perl-module-strict" -RDEPENDS_perl-module-app-prove-state-result-test += "perl-module-warnings" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-constant" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-exporter" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-io-compress-bzip2" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-strict" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-time-local" -RDEPENDS_perl-module-archive-tar-constant += "perl-module-warnings" -RDEPENDS_perl-module-archive-tar-file += "perl-module-archive-tar" -RDEPENDS_perl-module-archive-tar-file += "perl-module-archive-tar-constant" -RDEPENDS_perl-module-archive-tar-file += "perl-module-file-basename" -RDEPENDS_perl-module-archive-tar-file += "perl-module-io-file" -RDEPENDS_perl-module-archive-tar-file += "perl-module-strict" -RDEPENDS_perl-module-archive-tar-file += "perl-module-vars" 
-RDEPENDS_perl-module-archive-tar += "perl-module-archive-tar-constant" -RDEPENDS_perl-module-archive-tar += "perl-module-archive-tar-file" -RDEPENDS_perl-module-archive-tar += "perl-module-config" -RDEPENDS_perl-module-archive-tar += "perl-module-cwd" -RDEPENDS_perl-module-archive-tar += "perl-module-exporter" -RDEPENDS_perl-module-archive-tar += "perl-module-file-path" -RDEPENDS_perl-module-archive-tar += "perl-module-io-file" -RDEPENDS_perl-module-archive-tar += "perl-module-io-zlib" -RDEPENDS_perl-module-archive-tar += "perl-module-strict" -RDEPENDS_perl-module-archive-tar += "perl-module-vars" -RDEPENDS_perl-module-arybase += "perl-module-xsloader" -RDEPENDS_perl-module-attribute-handlers += "perl-module-strict" -RDEPENDS_perl-module-attribute-handlers += "perl-module-warnings" -RDEPENDS_perl-module-attributes += "perl-module-exporter" -RDEPENDS_perl-module-attributes += "perl-module-strict" -RDEPENDS_perl-module-attributes += "perl-module-warnings" -RDEPENDS_perl-module-attributes += "perl-module-xsloader" -RDEPENDS_perl-module-autodie-exception += "perl-module-constant" -RDEPENDS_perl-module-autodie-exception += "perl-module-fatal" -RDEPENDS_perl-module-autodie-exception += "perl-module-fcntl" -RDEPENDS_perl-module-autodie-exception += "perl-module-overload" -RDEPENDS_perl-module-autodie-exception += "perl-module-strict" -RDEPENDS_perl-module-autodie-exception += "perl-module-warnings" -RDEPENDS_perl-module-autodie-exception-system += "perl-module-parent" -RDEPENDS_perl-module-autodie-exception-system += "perl-module-strict" -RDEPENDS_perl-module-autodie-exception-system += "perl-module-warnings" -RDEPENDS_perl-module-autodie-hints += "perl-module-b" -RDEPENDS_perl-module-autodie-hints += "perl-module-constant" -RDEPENDS_perl-module-autodie-hints += "perl-module-strict" -RDEPENDS_perl-module-autodie-hints += "perl-module-warnings" -RDEPENDS_perl-module-autodie += "perl-module-constant" -RDEPENDS_perl-module-autodie += "perl-module-lib" -RDEPENDS_perl-module-autodie += "perl-module-parent" -RDEPENDS_perl-module-autodie += "perl-module-strict" -RDEPENDS_perl-module-autodie += "perl-module-warnings" -RDEPENDS_perl-module-autodie-scope-guard += "perl-module-strict" -RDEPENDS_perl-module-autodie-scope-guard += "perl-module-warnings" -RDEPENDS_perl-module-autodie-scope-guardstack += "perl-module-autodie-scope-guard" -RDEPENDS_perl-module-autodie-scope-guardstack += "perl-module-strict" -RDEPENDS_perl-module-autodie-scope-guardstack += "perl-module-warnings" -RDEPENDS_perl-module-autodie-skip += "perl-module-strict" -RDEPENDS_perl-module-autodie-skip += "perl-module-warnings" -RDEPENDS_perl-module-autodie-util += "perl-module-autodie-scope-guardstack" -RDEPENDS_perl-module-autodie-util += "perl-module-exporter" -RDEPENDS_perl-module-autodie-util += "perl-module-strict" -RDEPENDS_perl-module-autodie-util += "perl-module-warnings" -RDEPENDS_perl-module-autoloader += "perl-module-strict" -RDEPENDS_perl-module-autosplit += "perl-module-config" -RDEPENDS_perl-module-autosplit += "perl-module-exporter" -RDEPENDS_perl-module-autosplit += "perl-module-file-basename" -RDEPENDS_perl-module-autosplit += "perl-module-file-path" -RDEPENDS_perl-module-autosplit += "perl-module-strict" -RDEPENDS_perl-module-base += "perl-module-strict" -RDEPENDS_perl-module-b-concise += "perl-module-b" -RDEPENDS_perl-module-b-concise += "perl-module-b-op-private" -RDEPENDS_perl-module-b-concise += "perl-module-config" -RDEPENDS_perl-module-b-concise += "perl-module-exporter" -RDEPENDS_perl-module-b-concise += 
"perl-module-feature" -RDEPENDS_perl-module-b-concise += "perl-module-strict" -RDEPENDS_perl-module-b-concise += "perl-module-warnings" -RDEPENDS_perl-module-b-debug += "perl-module-b" -RDEPENDS_perl-module-b-debug += "perl-module-config" -RDEPENDS_perl-module-b-debug += "perl-module-strict" -RDEPENDS_perl-module-benchmark += "perl-module-exporter" -RDEPENDS_perl-module-benchmark += "perl-module-strict" -RDEPENDS_perl-module-bigint += "perl-module-constant" -RDEPENDS_perl-module-bigint += "perl-module-exporter" -RDEPENDS_perl-module-bigint += "perl-module-math-bigint" -RDEPENDS_perl-module-bigint += "perl-module-math-bigint-trace" -RDEPENDS_perl-module-bigint += "perl-module-overload" -RDEPENDS_perl-module-bigint += "perl-module-strict" -RDEPENDS_perl-module-bigint += "perl-module-warnings" -RDEPENDS_perl-module-bignum += "perl-module-bigint" -RDEPENDS_perl-module-bignum += "perl-module-exporter" -RDEPENDS_perl-module-bignum += "perl-module-math-bigfloat" -RDEPENDS_perl-module-bignum += "perl-module-math-bigfloat-trace" -RDEPENDS_perl-module-bignum += "perl-module-math-bigint" -RDEPENDS_perl-module-bignum += "perl-module-math-bigint-trace" -RDEPENDS_perl-module-bignum += "perl-module-overload" -RDEPENDS_perl-module-bignum += "perl-module-strict" -RDEPENDS_perl-module-bignum += "perl-module-warnings" -RDEPENDS_perl-module-bigrat += "perl-module-bigint" -RDEPENDS_perl-module-bigrat += "perl-module-exporter" -RDEPENDS_perl-module-bigrat += "perl-module-math-bigfloat" -RDEPENDS_perl-module-bigrat += "perl-module-math-bigint" -RDEPENDS_perl-module-bigrat += "perl-module-math-bigint-trace" -RDEPENDS_perl-module-bigrat += "perl-module-math-bigrat" -RDEPENDS_perl-module-bigrat += "perl-module-overload" -RDEPENDS_perl-module-bigrat += "perl-module-strict" -RDEPENDS_perl-module-bigrat += "perl-module-warnings" -RDEPENDS_perl-module-blib += "perl-module-cwd" -RDEPENDS_perl-module-b += "perl-module-exporter" -RDEPENDS_perl-module-b += "perl-module-xsloader" -RDEPENDS_perl-module-b-showlex += "perl-module-b" -RDEPENDS_perl-module-b-showlex += "perl-module-b-concise" -RDEPENDS_perl-module-b-showlex += "perl-module-b-terse" -RDEPENDS_perl-module-b-showlex += "perl-module-strict" -RDEPENDS_perl-module-b-terse += "perl-module-b" -RDEPENDS_perl-module-b-terse += "perl-module-b-concise" -RDEPENDS_perl-module-b-terse += "perl-module-strict" -RDEPENDS_perl-module-b-xref += "perl-module-b" -RDEPENDS_perl-module-b-xref += "perl-module-config" -RDEPENDS_perl-module-b-xref += "perl-module-strict" -RDEPENDS_perl-module-bytes += "perl-module-bytes-heavy" -RDEPENDS_perl-module--charnames += "perl-module-bytes" -RDEPENDS_perl-module-charnames += "perl-module-bytes" -RDEPENDS_perl-module-charnames += "perl-module--charnames" -RDEPENDS_perl-module--charnames += "perl-module-re" -RDEPENDS_perl-module-charnames += "perl-module-re" -RDEPENDS_perl-module--charnames += "perl-module-strict" -RDEPENDS_perl-module-charnames += "perl-module-strict" -RDEPENDS_perl-module--charnames += "perl-module-warnings" -RDEPENDS_perl-module-charnames += "perl-module-warnings" -RDEPENDS_perl-module-class-struct += "perl-module-exporter" -RDEPENDS_perl-module-class-struct += "perl-module-strict" -RDEPENDS_perl-module-class-struct += "perl-module-warnings-register" -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-bytes " -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-constant" -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-dynaloader" -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-exporter" 
-RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-strict " -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-warnings " -RDEPENDS_perl-module-compress-raw-bzip2 += "perl-module-xsloader" -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-bytes " -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-constant" -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-dynaloader" -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-exporter" -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-strict " -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-warnings " -RDEPENDS_perl-module-compress-raw-zlib += "perl-module-xsloader" -RDEPENDS_perl-module-compress-zlib += "perl-module-bytes " -RDEPENDS_perl-module-compress-zlib += "perl-module-compress-raw-zlib" -RDEPENDS_perl-module-compress-zlib += "perl-module-constant" -RDEPENDS_perl-module-compress-zlib += "perl-module-exporter" -RDEPENDS_perl-module-compress-zlib += "perl-module-io-compress-base-common" -RDEPENDS_perl-module-compress-zlib += "perl-module-io-compress-gzip" -RDEPENDS_perl-module-compress-zlib += "perl-module-io-compress-gzip-constants" -RDEPENDS_perl-module-compress-zlib += "perl-module-io-handle " -RDEPENDS_perl-module-compress-zlib += "perl-module-io-uncompress-gunzip" -RDEPENDS_perl-module-compress-zlib += "perl-module-strict " -RDEPENDS_perl-module-compress-zlib += "perl-module-warnings " -RDEPENDS_perl-module-config-extensions += "perl-module-config" -RDEPENDS_perl-module-config-extensions += "perl-module-exporter" -RDEPENDS_perl-module-config-extensions += "perl-module-strict" -RDEPENDS_perl-module-config += "perl-module-strict" -RDEPENDS_perl-module-config += "perl-module-warnings" RDEPENDS_perl-module-config-perl-v += "perl-module-config" RDEPENDS_perl-module-config-perl-v += "perl-module-exporter" RDEPENDS_perl-module-config-perl-v += "perl-module-strict" @@ -2534,7 +220,6 @@ RDEPENDS_perl-module-cpan += "perl-module-json-pp" RDEPENDS_perl-module-cpan += "perl-module-lib" RDEPENDS_perl-module-cpan += "perl-module-net-ping" RDEPENDS_perl-module-cpan += "perl-module-overload" -RDEPENDS_perl-module-cpan += "perl-module-parse-cpan-meta" RDEPENDS_perl-module-cpan += "perl-module-posix" RDEPENDS_perl-module-cpan += "perl-module-safe" RDEPENDS_perl-module-cpan += "perl-module-strict" @@ -2553,6 +238,13 @@ RDEPENDS_perl-module-data-dumper += "perl-module-config" RDEPENDS_perl-module-data-dumper += "perl-module-constant" RDEPENDS_perl-module-data-dumper += "perl-module-exporter" RDEPENDS_perl-module-data-dumper += "perl-module-xsloader" +RDEPENDS_perl-module-db-file += "perl-module-dynaloader" +RDEPENDS_perl-module-db-file += "perl-module-exporter" +RDEPENDS_perl-module-db-file += "perl-module-fcntl" +RDEPENDS_perl-module-db-file += "perl-module-strict " +RDEPENDS_perl-module-db-file += "perl-module-strict" +RDEPENDS_perl-module-db-file += "perl-module-tie-hash" +RDEPENDS_perl-module-db-file += "perl-module-warnings" RDEPENDS_perl-module-dbm-filter-compress += "perl-module-strict" RDEPENDS_perl-module-dbm-filter-compress += "perl-module-warnings" RDEPENDS_perl-module-dbm-filter-encode += "perl-module-strict" @@ -2699,7 +391,6 @@ RDEPENDS_perl-module-encode += "perl-module-constant" RDEPENDS_perl-module-encode += "perl-module-encode-alias" RDEPENDS_perl-module-encode += "perl-module-encode-config" RDEPENDS_perl-module-encode += "perl-module-encode-configlocal-pm" -RDEPENDS_perl-module-encode += "perl-module-encode-encoding" RDEPENDS_perl-module-encode += "perl-module-encode-mime-name" 
RDEPENDS_perl-module-encode += "perl-module-exporter" RDEPENDS_perl-module-encode += "perl-module-parent" @@ -2886,7 +577,6 @@ RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-makemaker-confi RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-makemaker-version" RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-manifest" RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-mm" -RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-mm-unix" RDEPENDS_perl-module-extutils-makemaker += "perl-module-extutils-my" RDEPENDS_perl-module-extutils-makemaker += "perl-module-file-path" RDEPENDS_perl-module-extutils-makemaker += "perl-module-strict" @@ -3082,7 +772,6 @@ RDEPENDS_perl-module-file-fetch += "perl-module-load" RDEPENDS_perl-module-file-fetch += "perl-module-params-check" RDEPENDS_perl-module-file-fetch += "perl-module-strict" RDEPENDS_perl-module-file-fetch += "perl-module-vars" -RDEPENDS_perl-module-file-find += "perl-module-config" RDEPENDS_perl-module-file-find += "perl-module-cwd" RDEPENDS_perl-module-file-find += "perl-module-exporter" RDEPENDS_perl-module-file-find += "perl-module-file-basename" @@ -3094,7 +783,6 @@ RDEPENDS_perl-module-file-globmapper += "perl-module-strict" RDEPENDS_perl-module-file-globmapper += "perl-module-warnings" RDEPENDS_perl-module-file-glob += "perl-module-exporter" RDEPENDS_perl-module-file-glob += "perl-module-strict" -RDEPENDS_perl-module-file-glob += "perl-module-warnings" RDEPENDS_perl-module-file-glob += "perl-module-xsloader" RDEPENDS_perl-module-filehandle += "perl-module-exporter" RDEPENDS_perl-module-filehandle += "perl-module-fcntl" @@ -3123,9 +811,7 @@ RDEPENDS_perl-module-file-temp += "perl-module-io-seekable" RDEPENDS_perl-module-file-temp += "perl-module-overload" RDEPENDS_perl-module-file-temp += "perl-module-parent" RDEPENDS_perl-module-file-temp += "perl-module-posix" -RDEPENDS_perl-module-file-temp += "perl-module-file-spec" RDEPENDS_perl-module-file-temp += "perl-module-strict" -RDEPENDS_perl-module-file-temp += "perl-module-vars" RDEPENDS_perl-module-filter-simple += "perl-module-filter-util-call" RDEPENDS_perl-module-filter-simple += "perl-module-text-balanced" RDEPENDS_perl-module-filter-util-call += "perl-module-exporter" @@ -3135,6 +821,11 @@ RDEPENDS_perl-module-filter-util-call += "perl-module-xsloader" RDEPENDS_perl-module-findbin += "perl-module-cwd" RDEPENDS_perl-module-findbin += "perl-module-exporter" RDEPENDS_perl-module-findbin += "perl-module-file-basename" +RDEPENDS_perl-module-gdbm-file += "perl-module-exporter" +RDEPENDS_perl-module-gdbm-file += "perl-module-strict" +RDEPENDS_perl-module-gdbm-file += "perl-module-tie-hash" +RDEPENDS_perl-module-gdbm-file += "perl-module-warnings" +RDEPENDS_perl-module-gdbm-file += "perl-module-xsloader" RDEPENDS_perl-module-getopt-long += "perl-module-constant" RDEPENDS_perl-module-getopt-long += "perl-module-exporter" RDEPENDS_perl-module-getopt-long += "perl-module-overload" @@ -3389,6 +1080,7 @@ RDEPENDS_perl-module-io-uncompress-rawinflate += "perl-module-warnings" RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-bytes" RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-compress-raw-zlib" RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-constant" +RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-encode" RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-exporter " RDEPENDS_perl-module-io-uncompress-unzip += "perl-module-fcntl" RDEPENDS_perl-module-io-uncompress-unzip += 
"perl-module-io-compress-base-common" @@ -3486,116 +1178,6 @@ RDEPENDS_perl-module-load += "perl-module-strict" RDEPENDS_perl-module-load += "perl-module-vars" RDEPENDS_perl-module-load += "perl-module-version" RDEPENDS_perl-module-load += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-constant" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-constants += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-country-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-country += "perl-module-if" -RDEPENDS_perl-module-locale-codes-country += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-country += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-country += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-country-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-currency-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-if" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-currency-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langext-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langext += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langext-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langfam-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langfam 
+= "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langfam-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langfam-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-language-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-language += "perl-module-if" -RDEPENDS_perl-module-locale-codes-language += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-language += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-language += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-language-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langvar-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-if" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-langvar-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes += "perl-module-if" -RDEPENDS_perl-module-locale-codes += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-script-codes += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script += "perl-module-exporter" -RDEPENDS_perl-module-locale-codes-script += "perl-module-if" -RDEPENDS_perl-module-locale-codes-script += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-codes-script += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-codes-script += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script += "perl-module-warnings" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-strict" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-utf8" -RDEPENDS_perl-module-locale-codes-script-retired += "perl-module-warnings" -RDEPENDS_perl-module-locale-country += "perl-module-exporter" -RDEPENDS_perl-module-locale-country += "perl-module-if" -RDEPENDS_perl-module-locale-country += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-country += 
"perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-country += "perl-module-strict" -RDEPENDS_perl-module-locale-country += "perl-module-warnings" -RDEPENDS_perl-module-locale-currency += "perl-module-exporter" -RDEPENDS_perl-module-locale-currency += "perl-module-if" -RDEPENDS_perl-module-locale-currency += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-currency += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-currency += "perl-module-strict" -RDEPENDS_perl-module-locale-currency += "perl-module-warnings" -RDEPENDS_perl-module-locale-language += "perl-module-exporter" -RDEPENDS_perl-module-locale-language += "perl-module-if" -RDEPENDS_perl-module-locale-language += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-language += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-language += "perl-module-strict" -RDEPENDS_perl-module-locale-language += "perl-module-warnings" RDEPENDS_perl-module-locale-maketext-gutsloader += "perl-module-locale-maketext" RDEPENDS_perl-module-locale-maketext-guts += "perl-module-locale-maketext" RDEPENDS_perl-module-locale-maketext += "perl-module-i18n-langtags" @@ -3605,12 +1187,6 @@ RDEPENDS_perl-module-locale-maketext += "perl-module-strict" RDEPENDS_perl-module-locale-maketext-simple += "perl-module-base" RDEPENDS_perl-module-locale-maketext-simple += "perl-module-strict" RDEPENDS_perl-module-locale += "perl-module-config" -RDEPENDS_perl-module-locale-script += "perl-module-exporter" -RDEPENDS_perl-module-locale-script += "perl-module-if" -RDEPENDS_perl-module-locale-script += "perl-module-locale-codes" -RDEPENDS_perl-module-locale-script += "perl-module-locale-codes-constants" -RDEPENDS_perl-module-locale-script += "perl-module-strict" -RDEPENDS_perl-module-locale-script += "perl-module-warnings" RDEPENDS_perl-module-math-bigfloat += "perl-module-exporter" RDEPENDS_perl-module-math-bigfloat += "perl-module-math-bigint" RDEPENDS_perl-module-math-bigfloat += "perl-module-math-complex" @@ -3622,8 +1198,6 @@ RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-math-bigfloat" RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-overload" RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-strict" RDEPENDS_perl-module-math-bigfloat-trace += "perl-module-warnings" -RDEPENDS_perl-module-math-bigint-calcemu += "perl-module-strict" -RDEPENDS_perl-module-math-bigint-calcemu += "perl-module-warnings" RDEPENDS_perl-module-math-bigint-calc += "perl-module-constant" RDEPENDS_perl-module-math-bigint-calc += "perl-module-integer" RDEPENDS_perl-module-math-bigint-calc += "perl-module-math-bigint-lib" @@ -3800,7 +1374,6 @@ RDEPENDS_perl-module-params-check += "perl-module-locale-maketext-simple" RDEPENDS_perl-module-params-check += "perl-module-strict" RDEPENDS_perl-module-params-check += "perl-module-vars" RDEPENDS_perl-module-parent += "perl-module-strict" -RDEPENDS_perl-module-parent += "perl-module-vars" RDEPENDS_perl-module-perlfaq += "perl-module-strict" RDEPENDS_perl-module-perlfaq += "perl-module-warnings" RDEPENDS_perl-module-perlio-encoding += "perl-module-strict" @@ -3820,16 +1393,8 @@ RDEPENDS_perl-module-pod-escapes += "perl-module-exporter" RDEPENDS_perl-module-pod-escapes += "perl-module-strict" RDEPENDS_perl-module-pod-escapes += "perl-module-vars" RDEPENDS_perl-module-pod-escapes += "perl-module-warnings" -RDEPENDS_perl-module-pod-find += "perl-module-config" -RDEPENDS_perl-module-pod-find += "perl-module-cwd" -RDEPENDS_perl-module-pod-find += "perl-module-exporter" 
-RDEPENDS_perl-module-pod-find += "perl-module-file-find" -RDEPENDS_perl-module-pod-find += "perl-module-strict" -RDEPENDS_perl-module-pod-find += "perl-module-vars" RDEPENDS_perl-module-pod-functions += "perl-module-exporter" RDEPENDS_perl-module-pod-functions += "perl-module-strict" -RDEPENDS_perl-module-pod-inputobjects += "perl-module-strict" -RDEPENDS_perl-module-pod-inputobjects += "perl-module-vars" RDEPENDS_perl-module-pod-man += "perl-module-file-basename" RDEPENDS_perl-module-pod-man += "perl-module-pod-simple" RDEPENDS_perl-module-pod-man += "perl-module-strict" @@ -3840,12 +1405,6 @@ RDEPENDS_perl-module-pod-parselink += "perl-module-exporter" RDEPENDS_perl-module-pod-parselink += "perl-module-strict" RDEPENDS_perl-module-pod-parselink += "perl-module-vars" RDEPENDS_perl-module-pod-parselink += "perl-module-warnings" -RDEPENDS_perl-module-pod-parser += "perl-module-exporter" -RDEPENDS_perl-module-pod-parser += "perl-module-pod-inputobjects" -RDEPENDS_perl-module-pod-parser += "perl-module-strict" -RDEPENDS_perl-module-pod-parser += "perl-module-vars" -RDEPENDS_perl-module-pod-parseutils += "perl-module-strict" -RDEPENDS_perl-module-pod-parseutils += "perl-module-vars" RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-config" RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-strict" RDEPENDS_perl-module-pod-perldoc-baseto += "perl-module-vars" @@ -3908,12 +1467,7 @@ RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-parent" RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-strict" RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-vars" RDEPENDS_perl-module-pod-perldoc-toxml += "perl-module-warnings" -RDEPENDS_perl-module-pod-plaintext += "perl-module-pod-select" -RDEPENDS_perl-module-pod-plaintext += "perl-module-strict" -RDEPENDS_perl-module-pod-plaintext += "perl-module-vars" -RDEPENDS_perl-module-pod-select += "perl-module-pod-parser" -RDEPENDS_perl-module-pod-select += "perl-module-strict" -RDEPENDS_perl-module-pod-select += "perl-module-vars" +RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-if" RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-integer" RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-pod-simple" RDEPENDS_perl-module-pod-simple-blackbox += "perl-module-pod-simple-transcode" @@ -3932,6 +1486,9 @@ RDEPENDS_perl-module-pod-simple-dumpastext += "perl-module-strict" RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-pod-simple" RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-strict" RDEPENDS_perl-module-pod-simple-dumpasxml += "perl-module-text-wrap" +RDEPENDS_perl-module-pod-simple-justpod += "perl-module-pod-simple-methody" +RDEPENDS_perl-module-pod-simple-justpod += "perl-module-strict" +RDEPENDS_perl-module-pod-simple-justpod += "perl-module-warnings" RDEPENDS_perl-module-pod-simple-linksection += "perl-module-overload" RDEPENDS_perl-module-pod-simple-linksection += "perl-module-pod-simple-blackbox" RDEPENDS_perl-module-pod-simple-linksection += "perl-module-strict" @@ -3962,6 +1519,7 @@ RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-pod-simple-p RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-strict" RDEPENDS_perl-module-pod-simple-pullparsertexttoken += "perl-module-vars" RDEPENDS_perl-module-pod-simple-pullparsertoken += "perl-module-strict" +RDEPENDS_perl-module-pod-simple-rtf += "perl-module-if" RDEPENDS_perl-module-pod-simple-rtf += "perl-module-integer" RDEPENDS_perl-module-pod-simple-rtf += "perl-module-pod-simple-pullparser" 
RDEPENDS_perl-module-pod-simple-rtf += "perl-module-strict" @@ -3987,8 +1545,6 @@ RDEPENDS_perl-module-pod-simple-tiedoutfh += "perl-module-vars" RDEPENDS_perl-module-pod-simple-transcodedumb += "perl-module-strict" RDEPENDS_perl-module-pod-simple-transcodedumb += "perl-module-vars" RDEPENDS_perl-module-pod-simple-transcode += "perl-module-strict" -RDEPENDS_perl-module-pod-simple-transcode += "perl-module-pod-simple-transcodedumb" -RDEPENDS_perl-module-pod-simple-transcode += "perl-module-pod-simple-transcodesmart" RDEPENDS_perl-module-pod-simple-transcode += "perl-module-vars" RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-encode" RDEPENDS_perl-module-pod-simple-transcodesmart += "perl-module-pod-simple" @@ -4055,7 +1611,6 @@ RDEPENDS_perl-module-socket += "perl-module-xsloader" RDEPENDS_perl-module-sort += "perl-module-strict" RDEPENDS_perl-module-storable += "perl-module-exporter" RDEPENDS_perl-module-storable += "perl-module-io-file" -RDEPENDS_perl-module-storable += "perl-module-xsloader" RDEPENDS_perl-module-sub-util += "perl-module-exporter" RDEPENDS_perl-module-sub-util += "perl-module-list-util" RDEPENDS_perl-module-sub-util += "perl-module-strict" @@ -4082,7 +1637,6 @@ RDEPENDS_perl-module-sys-syslog += "perl-module-xsloader" RDEPENDS_perl-module-tap-base += "perl-module-base" RDEPENDS_perl-module-tap-base += "perl-module-constant" RDEPENDS_perl-module-tap-base += "perl-module-strict" -RDEPENDS_perl-module-tap-base += "perl-module-tap-object" RDEPENDS_perl-module-tap-base += "perl-module-warnings" RDEPENDS_perl-module-tap-formatter-base += "perl-module-base" RDEPENDS_perl-module-tap-formatter-base += "perl-module-posix" @@ -4108,9 +1662,7 @@ RDEPENDS_perl-module-tap-formatter-console-session += "perl-module-warnings" RDEPENDS_perl-module-tap-formatter-file += "perl-module-base" RDEPENDS_perl-module-tap-formatter-file += "perl-module-posix" RDEPENDS_perl-module-tap-formatter-file += "perl-module-strict" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-tap-formatter-base" RDEPENDS_perl-module-tap-formatter-file += "perl-module-tap-formatter-file-session" -RDEPENDS_perl-module-tap-formatter-file += "perl-module-tap-formatter-session" RDEPENDS_perl-module-tap-formatter-file += "perl-module-warnings" RDEPENDS_perl-module-tap-formatter-file-session += "perl-module-base" RDEPENDS_perl-module-tap-formatter-file-session += "perl-module-strict" @@ -4127,7 +1679,6 @@ RDEPENDS_perl-module-tap-harness += "perl-module-base" RDEPENDS_perl-module-tap-harness += "perl-module-file-path" RDEPENDS_perl-module-tap-harness += "perl-module-io-handle" RDEPENDS_perl-module-tap-harness += "perl-module-strict" -RDEPENDS_perl-module-tap-harness += "perl-module-tap-formatter-file" RDEPENDS_perl-module-tap-harness += "perl-module-warnings" RDEPENDS_perl-module-tap-object += "perl-module-strict" RDEPENDS_perl-module-tap-object += "perl-module-warnings" @@ -4252,7 +1803,6 @@ RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-constant" RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-strict" RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-iteratorfactory" RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-iterator-process" -RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-tap-parser-sourcehandler-executable" RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-text-parsewords" RDEPENDS_perl-module-tap-parser-sourcehandler-perl += "perl-module-warnings" 
RDEPENDS_perl-module-tap-parser-sourcehandler-rawtap += "perl-module-base" @@ -4319,7 +1869,6 @@ RDEPENDS_perl-module-test-simple += "perl-module-test-builder-module" RDEPENDS_perl-module-test-tester-capture += "perl-module-config" RDEPENDS_perl-module-test-tester-capture += "perl-module-strict" RDEPENDS_perl-module-test-tester-capture += "perl-module-test-builder" -RDEPENDS_perl-module-test-tester-capture += "perl-module-threads-shared" RDEPENDS_perl-module-test-tester-capture += "perl-module-vars" RDEPENDS_perl-module-test-tester-capturerunner += "perl-module-exporter" RDEPENDS_perl-module-test-tester-capturerunner += "perl-module-strict" @@ -4367,6 +1916,7 @@ RDEPENDS_perl-module-threads += "perl-module-overload" RDEPENDS_perl-module-threads += "perl-module-strict" RDEPENDS_perl-module-threads += "perl-module-warnings" RDEPENDS_perl-module-threads += "perl-module-xsloader" +RDEPENDS_perl-module-threads-shared += "perl-module-config" RDEPENDS_perl-module-threads-shared += "perl-module-strict" RDEPENDS_perl-module-threads-shared += "perl-module-warnings" RDEPENDS_perl-module-threads-shared += "perl-module-xsloader" @@ -4374,10 +1924,10 @@ RDEPENDS_perl-module-tie-array += "perl-module-strict" RDEPENDS_perl-module-tie-file += "perl-module-fcntl" RDEPENDS_perl-module-tie-file += "perl-module-posix" RDEPENDS_perl-module-tie-file += "perl-module-strict" +RDEPENDS_perl-module-tie-file += "perl-module-warnings" RDEPENDS_perl-module-tie-handle += "perl-module-tie-stdhandle" RDEPENDS_perl-module-tie-handle += "perl-module-warnings-register" RDEPENDS_perl-module-tie-hash-namedcapture += "perl-module-strict" -RDEPENDS_perl-module-tie-hash-namedcapture += "perl-module-xsloader" RDEPENDS_perl-module-tie-hash += "perl-module-warnings-register" RDEPENDS_perl-module-tie-memoize += "perl-module-strict" RDEPENDS_perl-module-tie-memoize += "perl-module-tie-hash" @@ -4448,9 +1998,9 @@ RDEPENDS_perl-module-unicode-ucd += "perl-module-exporter" RDEPENDS_perl-module-unicode-ucd += "perl-module-feature" RDEPENDS_perl-module-unicode-ucd += "perl-module-if" RDEPENDS_perl-module-unicode-ucd += "perl-module-integer" +RDEPENDS_perl-module-unicode-ucd += "perl-module-re" RDEPENDS_perl-module-unicode-ucd += "perl-module-strict" RDEPENDS_perl-module-unicode-ucd += "perl-module-unicode-normalize" -RDEPENDS_perl-module-unicode-ucd += "perl-module-utf8-heavy" RDEPENDS_perl-module-unicode-ucd += "perl-module-warnings" RDEPENDS_perl-module-user-grent += "perl-module-class-struct" RDEPENDS_perl-module-user-grent += "perl-module-exporter" @@ -4460,7 +2010,6 @@ RDEPENDS_perl-module-user-pwent += "perl-module-config" RDEPENDS_perl-module-user-pwent += "perl-module-exporter" RDEPENDS_perl-module-user-pwent += "perl-module-strict" RDEPENDS_perl-module-user-pwent += "perl-module-warnings" -RDEPENDS_perl-module-utf8 += "perl-module-utf8-heavy" RDEPENDS_perl-module-version += "perl-module-strict" RDEPENDS_perl-module-version += "perl-module-version-regex" RDEPENDS_perl-module-version += "perl-module-warnings-register" diff --git a/poky/meta/recipes-devtools/perl/perl_5.30.2.bb b/poky/meta/recipes-devtools/perl/perl_5.30.2.bb deleted file mode 100644 index 88a1c9665..000000000 --- a/poky/meta/recipes-devtools/perl/perl_5.30.2.bb +++ /dev/null @@ -1,387 +0,0 @@ -SUMMARY = "Perl scripting language" -HOMEPAGE = "http://www.perl.org/" -SECTION = "devel" -LICENSE = "Artistic-1.0 | GPL-1.0+" -LIC_FILES_CHKSUM = "file://Copying;md5=5b122a36d0f6dc55279a0ebc69f3c60b \ - file://Artistic;md5=71a4d5d9acc18c0952a6df2218bb68da \ - " - - -SRC_URI = 
"https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \ - https://github.com/arsv/perl-cross/releases/download/1.3.2/perl-cross-1.3.2.tar.gz;name=perl-cross \ - file://perl-rdepends.txt \ - file://0001-configure_tool.sh-do-not-quote-the-argument-to-comma.patch \ - file://0001-ExtUtils-MakeMaker-add-LDFLAGS-when-linking-binary-m.patch \ - file://0001-Somehow-this-module-breaks-through-the-perl-wrapper-.patch \ - file://errno_ver.diff \ - file://native-perlinc.patch \ - file://0001-perl-cross-add-LDFLAGS-when-linking-libperl.patch \ - file://perl-dynloader.patch \ - file://0001-configure_path.sh-do-not-hardcode-prefix-lib-as-libr.patch \ - file://0001-enc2xs-Add-environment-variable-to-suppress-comments.patch \ - file://0002-Constant-Fix-up-shebang.patch \ - file://0001-PATCH-perl-134117-Close-DATA-in-loc_tools.pl.patch \ - file://determinism.patch \ - " -SRC_URI_append_class-native = " \ - file://perl-configpm-switch.patch \ -" -SRC_URI_append_class-target = " \ - file://encodefix.patch \ -" - -SRC_URI[perl.sha256sum] = "66db7df8a91979eb576fac91743644da878244cf8ee152f02cd6f5cd7a731689" -SRC_URI[perl-cross.sha256sum] = "defa12f0ad7be0b6c48b4f76e2fb5b37c1b37fbeb6e9ebe938279cd539a0c20c" - -S = "${WORKDIR}/perl-${PV}" - -inherit upstream-version-is-even update-alternatives - -DEPENDS += "zlib virtual/crypt" - -PERL_LIB_VER = "${@'.'.join(d.getVar('PV').split('.')[0:2])}.0" - -PACKAGECONFIG ??= "bdb gdbm" -PACKAGECONFIG[bdb] = ",-Ui_db,db" -PACKAGECONFIG[gdbm] = ",-Ui_gdbm,gdbm" - -# Don't generate comments in enc2xs output files. They are not reproducible -export ENC2XS_NO_COMMENTS = "1" - -do_unpack_append() { - bb.build.exec_func('do_copy_perlcross', d) -} - -do_copy_perlcross() { - cp -rfp ${WORKDIR}/perl-cross*/* ${S} -} - -do_configure_class-target() { - ./configure --prefix=${prefix} --libdir=${libdir} \ - --target=${TARGET_SYS} \ - -Duseshrplib \ - -Dsoname=libperl.so.5 \ - -Dvendorprefix=${prefix} \ - -Darchlibexp=${STAGING_LIBDIR}/perl5/${PV}/${TARGET_ARCH}-linux \ - ${PACKAGECONFIG_CONFARGS} - - #perl.c uses an ARCHLIB_EXP define to generate compile-time code that - #adds the archlibexp path to @INC during run-time initialization of a - #new perl interpreter. - - #Because we've changed this value in a temporary way to make it - #possible to use ExtUtils::Embed in the target build (the temporary - #value in config.sh gets re-stripped out during packaging), the - #ARCHLIB_EXP value that gets generated still uses the temporary version - #instead of the original expected version (i.e. becauses it's in the - #generated config.h, it doesn't get stripped out during packaging like - #the others in config.sh). 
- - sed -i -e "s,${STAGING_LIBDIR},${libdir},g" config.h -} - -do_configure_class-nativesdk() { - ./configure --prefix=${prefix} \ - --target=${TARGET_SYS} \ - -Duseshrplib \ - -Dsoname=libperl.so.5 \ - -Dvendorprefix=${prefix} \ - -Darchlibexp=${STAGING_LIBDIR}/perl5/${PV}/${TARGET_ARCH}-linux \ - ${PACKAGECONFIG_CONFARGS} - - # See the comment above - sed -i -e "s,${STAGING_LIBDIR},${libdir},g" config.h -} - -do_configure_class-native() { - ./configure --prefix=${prefix} \ - -Dbin=${bindir}/perl-native \ - -Duseshrplib \ - -Dsoname=libperl.so.5 \ - -Dvendorprefix=${prefix} \ - -Ui_xlocale \ - ${PACKAGECONFIG_CONFARGS} -} - -do_configure_append() { - if [ -n "$SOURCE_DATE_EPOCH" ]; then - PERL_BUILD_DATE="$(${PYTHON} -c "\ -from datetime import datetime, timezone; \ -print(datetime.fromtimestamp($SOURCE_DATE_EPOCH, timezone.utc).strftime('%a %b %d %H:%M:%S %Y')) \ - ")" - echo "#define PERL_BUILD_DATE \"$PERL_BUILD_DATE\"" >> config.h - fi -} - -do_compile() { - oe_runmake - # This isn't generated reliably so delete and re-generate. - # https://github.com/arsv/perl-cross/issues/86 - - if [ -e pod/perltoc.pod ]; then - bbnote Rebuilding perltoc.pod - rm -f pod/perltoc.pod - oe_runmake pod/perltoc.pod - fi -} - -do_install() { - oe_runmake 'DESTDIR=${D}' install - - install -d ${D}${libdir}/perl5 - install -d ${D}${libdir}/perl5/${PV}/ - install -d ${D}${libdir}/perl5/${PV}/ExtUtils/ - - # Save native config - install config.sh ${D}${libdir}/perl5 - install lib/Config.pm ${D}${libdir}/perl5/${PV}/ - install lib/ExtUtils/typemap ${D}${libdir}/perl5/${PV}/ExtUtils/ - - # Fix up shared library - rm ${D}/${libdir}/perl5/${PV}/*/CORE/libperl.so - ln -sf ../../../../libperl.so.${PERL_LIB_VER} $(echo ${D}/${libdir}/perl5/${PV}/*/CORE)/libperl.so -} - -do_install_append_class-target() { - # This is used to substitute target configuration when running native perl via perl-configpm-switch.patch - ln -s Config_heavy.pl ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy-target.pl - - # This contains host-specific information used for building miniperl (a helper executable built with host compiler) - # and therefore isn't reproducible. I believe the file isn't actually needed on target. - rm ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/xconfig.h -} - -do_install_append_class-nativesdk() { - # This is used to substitute target configuration when running native perl via perl-configpm-switch.patch - ln -s Config_heavy.pl ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy-target.pl - - create_wrapper ${D}${bindir}/perl \ - PERL5LIB='$PERL5LIB:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/site_perl/${PV}:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/vendor_perl/${PV}:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/${PV}' -} - -do_install_append_class-native () { - # Those wrappers mean that perl installed from sstate (which may change - # path location) works and that in the nativesdk case, the SDK can be - # installed to a different location from the one it was built for. - create_wrapper ${D}${bindir}/perl-native/perl PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl5/site_perl/${PV}:${STAGING_LIBDIR}/perl5/vendor_perl/${PV}:${STAGING_LIBDIR}/perl5/${PV}' - - # Use /usr/bin/env nativeperl for the perl script. - for f in `grep -Il '#! 
*${bindir}/perl' ${D}/${bindir}/*`; do - sed -i -e 's|${bindir}/perl|/usr/bin/env nativeperl|' $f - done -} - -PACKAGE_PREPROCESS_FUNCS += "perl_package_preprocess" - -perl_package_preprocess () { - # Fix up installed configuration - sed -i -e "s,${D},,g" \ - -e "s,${DEBUG_PREFIX_MAP},,g" \ - -e "s,--sysroot=${STAGING_DIR_HOST},,g" \ - -e "s,-isystem${STAGING_INCDIR} ,,g" \ - -e "s,${STAGING_LIBDIR},${libdir},g" \ - -e "s,${STAGING_BINDIR},${bindir},g" \ - -e "s,${STAGING_INCDIR},${includedir},g" \ - -e "s,${STAGING_BINDIR_NATIVE}/perl-native/,${bindir}/,g" \ - -e "s,${STAGING_BINDIR_NATIVE}/,,g" \ - -e "s,${STAGING_BINDIR_TOOLCHAIN}/${TARGET_PREFIX},${bindir},g" \ - -e 's:${RECIPE_SYSROOT}::g' \ - ${PKGD}${bindir}/h2xs.perl \ - ${PKGD}${bindir}/h2ph.perl \ - ${PKGD}${bindir}/pod2man.perl \ - ${PKGD}${bindir}/pod2text.perl \ - ${PKGD}${bindir}/pod2usage.perl \ - ${PKGD}${bindir}/podchecker.perl \ - ${PKGD}${bindir}/podselect.perl \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/config.h \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/perl.h \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/pp.h \ - ${PKGD}${libdir}/perl5/${PV}/Config.pm \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pm \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pod \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_git.pl \ - ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy.pl \ - ${PKGD}${libdir}/perl5/${PV}/ExtUtils/Liblist/Kid.pm \ - ${PKGD}${libdir}/perl5/${PV}/FileCache.pm \ - ${PKGD}${libdir}/perl5/${PV}/pod/*.pod \ - ${PKGD}${libdir}/perl5/config.sh -} - -inherit update-alternatives - -ALTERNATIVE_PRIORITY = "100" - -ALTERNATIVE_${PN}-misc = "corelist cpan enc2xs encguess h2ph h2xs instmodsh json_pp libnetcfg \ - piconv pl2pm pod2html pod2man pod2text pod2usage podchecker podselect \ - prove ptar ptardiff ptargrep shasum splain xsubpp zipdetails" -ALTERNATIVE_LINK_NAME[corelist] = "${bindir}/corelist" -ALTERNATIVE_LINK_NAME[cpan] = "${bindir}/cpan" -ALTERNATIVE_LINK_NAME[enc2xs] = "${bindir}/enc2xs" -ALTERNATIVE_LINK_NAME[encguess] = "${bindir}/encguess" -ALTERNATIVE_LINK_NAME[h2ph] = "${bindir}/h2ph" -ALTERNATIVE_LINK_NAME[h2xs] = "${bindir}/h2xs" -ALTERNATIVE_LINK_NAME[instmodsh] = "${bindir}/instmodsh" -ALTERNATIVE_LINK_NAME[json_pp] = "${bindir}/json_pp" -ALTERNATIVE_LINK_NAME[libnetcfg] = "${bindir}/libnetcfg" -ALTERNATIVE_LINK_NAME[piconv] = "${bindir}/piconv" -ALTERNATIVE_LINK_NAME[pl2pm] = "${bindir}/pl2pm" -ALTERNATIVE_LINK_NAME[pod2html] = "${bindir}/pod2html" -ALTERNATIVE_LINK_NAME[pod2man] = "${bindir}/pod2man" -ALTERNATIVE_LINK_NAME[pod2text] = "${bindir}/pod2text" -ALTERNATIVE_LINK_NAME[pod2usage] = "${bindir}/pod2usage" -ALTERNATIVE_LINK_NAME[podchecker] = "${bindir}/podchecker" -ALTERNATIVE_LINK_NAME[podselect] = "${bindir}/podselect" -ALTERNATIVE_LINK_NAME[prove] = "${bindir}/prove" -ALTERNATIVE_LINK_NAME[ptar] = "${bindir}/ptar" -ALTERNATIVE_LINK_NAME[ptardiff] = "${bindir}/ptardiff" -ALTERNATIVE_LINK_NAME[ptargrep] = "${bindir}/ptargrep" -ALTERNATIVE_LINK_NAME[shasum] = "${bindir}/shasum" -ALTERNATIVE_LINK_NAME[splain] = "${bindir}/splain" -ALTERNATIVE_LINK_NAME[xsubpp] = "${bindir}/xsubpp" -ALTERNATIVE_LINK_NAME[zipdetails] = "${bindir}/zipdetails" - -require perl-ptest.inc - -FILES_${PN} = "${bindir}/perl ${bindir}/perl.real ${bindir}/perl${PV} ${libdir}/libperl.so* \ - ${libdir}/perl5/site_perl \ - ${libdir}/perl5/${PV}/Config.pm \ - ${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pm \ - 
${libdir}/perl5/${PV}/*/Config_git.pl \ - ${libdir}/perl5/${PV}/*/Config_heavy-target.pl \ - ${libdir}/perl5/config.sh \ - ${libdir}/perl5/${PV}/strict.pm \ - ${libdir}/perl5/${PV}/warnings.pm \ - ${libdir}/perl5/${PV}/warnings \ - ${libdir}/perl5/${PV}/vars.pm \ - ${libdir}/perl5/site_perl \ - ${libdir}/perl5/${PV}/ExtUtils/MANIFEST.SKIP \ - ${libdir}/perl5/${PV}/ExtUtils/xsubpp \ - ${libdir}/perl5/${PV}/ExtUtils/typemap \ - " -RPROVIDES_${PN} += "perl-module-strict perl-module-vars perl-module-config perl-module-warnings \ - perl-module-warnings-register" - -FILES_${PN}-staticdev_append = " ${libdir}/perl5/${PV}/*/CORE/libperl.a" - -FILES_${PN}-dev_append = " ${libdir}/perl5/${PV}/*/CORE" - -FILES_${PN}-doc_append = " ${libdir}/perl5/${PV}/Unicode/Collate/*.txt \ - ${libdir}/perl5/${PV}/*/.packlist \ - ${libdir}/perl5/${PV}/Encode/encode.h \ - " -PACKAGES += "${PN}-misc" - -FILES_${PN}-misc = "${bindir}/*" - -PACKAGES += "${PN}-pod" - -FILES_${PN}-pod = "${libdir}/perl5/${PV}/pod \ - ${libdir}/perl5/${PV}/*.pod \ - ${libdir}/perl5/${PV}/*/*.pod \ - ${libdir}/perl5/${PV}/*/*/*.pod \ - ${libdir}/perl5/${PV}/*/*/*/*.pod \ - " - -PACKAGES += "${PN}-module-cpan ${PN}-module-unicore" - -FILES_${PN}-module-cpan += "${libdir}/perl5/${PV}/CPAN \ - " -FILES_${PN}-module-unicore += "${libdir}/perl5/${PV}/unicore" - -ALTERNATIVE_PRIORITY = "40" -ALTERNATIVE_${PN}-doc = "Thread.3" -ALTERNATIVE_LINK_NAME[Thread.3] = "${mandir}/man3/Thread.3" - -# Create a perl-modules package recommending all the other perl -# packages (actually the non modules packages and not created too) -ALLOW_EMPTY_${PN}-modules = "1" -PACKAGES += "${PN}-modules " - -PACKAGESPLITFUNCS_prepend = "split_perl_packages " - -python split_perl_packages () { - libdir = d.expand('${libdir}/perl5/${PV}') - do_split_packages(d, libdir, r'.*/auto/([^.]*)/[^/]*\.(so|ld|ix|al)', '${PN}-module-%s', 'perl module %s', recursive=True, match_path=True, prepend=False) - do_split_packages(d, libdir, r'.*linux/([^\/]*)\.pm', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) - do_split_packages(d, libdir, r'Module/([^\/]*)\.pm', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) - do_split_packages(d, libdir, r'Module/([^\/]*)/.*', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) - do_split_packages(d, libdir, r'.*linux/([^\/].*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) - do_split_packages(d, libdir, r'(^(?!(CPAN\/|CPANPLUS\/|Module\/|unicore\/|.*linux\/)[^\/]).*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) - - # perl-modules should recommend every perl module, and only the - # modules. Don't attempt to use the result of do_split_packages() as some - # modules are manually split (eg. perl-module-unicore). 
- packages = filter(lambda p: 'perl-module-' in p, d.getVar('PACKAGES').split()) - d.setVar(d.expand("RRECOMMENDS_${PN}-modules"), ' '.join(packages)) - - # Read the pre-generated dependency file, and use it to set module dependecies - for line in open(d.expand("${WORKDIR}") + '/perl-rdepends.txt').readlines(): - splitline = line.split() - if bb.data.inherits_class('native', d): - module = splitline[0] + '-native' - depends = "perl-native" - else: - module = splitline[0].replace("RDEPENDS_perl", "RDEPENDS_${PN}") - depends = splitline[2].strip('"').replace("perl-module", "${PN}-module") - d.appendVar(d.expand(module), " " + depends) -} - -python() { - if d.getVar('CLASSOVERRIDE') == "class-target": - d.setVar("PACKAGES_DYNAMIC", "^${MLPREFIX}perl-module-.*(? ${WORKDIR}/perl-rdepends.inc - -# Some additional dependencies that the above doesn't manage to figure out -RDEPENDS_${PN}-module-file-spec += "${PN}-module-file-spec-unix" -RDEPENDS_${PN}-module-math-bigint += "${PN}-module-math-bigint-calc" -RDEPENDS_${PN}-module-thread-queue += "${PN}-module-attributes" -RDEPENDS_${PN}-module-overload += "${PN}-module-overloading" - -# Generated depends list beyond this line -EOPREAMBLE - test -e packages-split.new && rm -rf packages-split.new - cp -r packages-split packages-split.new && cd packages-split.new - find . -name \*.pm | xargs sed -i '/^=head/,/^=cut/d' - egrep -r "^\s*(\ */+= \"perl-module-/g;s/CPANPLUS::.*/cpanplus/g;s/CPAN::.*/cpan/g;s/::/-/g;s/ [^+\"].*//g;s/_/-/g;s/\.pl\"$/\"/;s/\"\?\$/\"/;s/(//;" | tr [:upper:] [:lower:] | \ - awk '{if ($3 != "\x22"$1"\x22"){ print $0}}'| \ - grep -v -e "\-vms\-" -e module-5 -e "^$" -e "\\$" -e your -e tk -e autoperl -e html -e http -e parse-cpan -e perl-ostype -e ndbm-file -e module-mac -e fcgi -e lwp -e dbd -e dbix | \ - sort -u | \ - sed 's/^/RDEPENDS_/;s/perl-module-/${PN}-module-/g;s/module-\(module-\)/\1/g;s/\(module-load\)-conditional/\1/g;s/encode-configlocal/&-pm/;' | \ - egrep -wv '=>|module-a|module-apache.?|module-apr|module-authen-sasl|module-b-asmdata|module-convert-ebcdic|module-devel-size|module-digest-perl-md5|module-dumpvalue|module-extutils-constant-aaargh56hash|module-extutils-xssymset|module-file-bsdglob|module-for|module-it|module-io-socket-inet6|module-io-socket-ssl|module-io-string|module-ipc-system-simple|module-lexical|module-local-lib|metadata|module-modperl-util|module-pluggable-object|module-test-builder-io-scalar|module-test2|module-text-unidecode|module-unicore|module-win32|objects\sload|syscall.ph|systeminfo.ph|%s' | \ - egrep -wv '=>|module-algorithm-diff|module-carp|module-c|module-encode-hanextra|module-extutils-makemaker-version-regex|module-file-spec|module-io-compress-lzma|module-locale-maketext-lexicon|module-log-agent|module-meta-notation|module-net-localcfg|module-net-ping-external|module-b-deparse|module-scalar-util|module-some-module|module-symbol|module-uri|module-win32api-file' >> ${WORKDIR}/perl-rdepends.generated - cp ${WORKDIR}/perl-rdepends.generated ${THISDIR}/files/perl-rdepends.txt -} - -# bitbake perl -c create_rdepends_inc -addtask do_create_rdepends_inc - -SYSROOT_PREPROCESS_FUNCS += "perl_sysroot_create_wrapper" - -perl_sysroot_create_wrapper () { - mkdir -p ${SYSROOT_DESTDIR}${bindir} - # Create a wrapper that /usr/bin/env perl will use to get perl-native. - # This MUST live in the normal bindir. 
- cat > ${SYSROOT_DESTDIR}${bindir}/nativeperl << EOF -#!/bin/sh -realpath=\`readlink -fn \$0\` -exec \`dirname \$realpath\`/perl-native/perl "\$@" -EOF - chmod 0755 ${SYSROOT_DESTDIR}${bindir}/nativeperl - cat ${SYSROOT_DESTDIR}${bindir}/nativeperl -} diff --git a/poky/meta/recipes-devtools/perl/perl_5.32.0.bb b/poky/meta/recipes-devtools/perl/perl_5.32.0.bb new file mode 100644 index 000000000..1adfd90e7 --- /dev/null +++ b/poky/meta/recipes-devtools/perl/perl_5.32.0.bb @@ -0,0 +1,383 @@ +SUMMARY = "Perl scripting language" +HOMEPAGE = "http://www.perl.org/" +SECTION = "devel" +LICENSE = "Artistic-1.0 | GPL-1.0+" +LIC_FILES_CHKSUM = "file://Copying;md5=5b122a36d0f6dc55279a0ebc69f3c60b \ + file://Artistic;md5=71a4d5d9acc18c0952a6df2218bb68da \ + " + + +SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \ + https://github.com/arsv/perl-cross/releases/download/1.3.4/perl-cross-1.3.4.tar.gz;name=perl-cross \ + file://perl-rdepends.txt \ + file://0001-configure_tool.sh-do-not-quote-the-argument-to-comma.patch \ + file://0001-ExtUtils-MakeMaker-add-LDFLAGS-when-linking-binary-m.patch \ + file://0001-Somehow-this-module-breaks-through-the-perl-wrapper-.patch \ + file://errno_ver.diff \ + file://native-perlinc.patch \ + file://0001-perl-cross-add-LDFLAGS-when-linking-libperl.patch \ + file://perl-dynloader.patch \ + file://0001-configure_path.sh-do-not-hardcode-prefix-lib-as-libr.patch \ + file://0002-Constant-Fix-up-shebang.patch \ + file://determinism.patch \ + " +SRC_URI_append_class-native = " \ + file://perl-configpm-switch.patch \ +" +SRC_URI_append_class-target = " \ + file://encodefix.patch \ +" + +SRC_URI[perl.sha256sum] = "efeb1ce1f10824190ad1cadbcccf6fdb8a5d37007d0100d2d9ae5f2b5900c0b4" +SRC_URI[perl-cross.sha256sum] = "755aa0ca8141a942188a269564f86c3c82349f82c346ed5c992495d7f35138ba" + +S = "${WORKDIR}/perl-${PV}" + +inherit upstream-version-is-even update-alternatives + +DEPENDS += "zlib virtual/crypt" + +PERL_LIB_VER = "${@'.'.join(d.getVar('PV').split('.')[0:2])}.0" + +PACKAGECONFIG ??= "bdb gdbm" +PACKAGECONFIG[bdb] = ",-Ui_db,db" +PACKAGECONFIG[gdbm] = ",-Ui_gdbm,gdbm" + +# Don't generate comments in enc2xs output files. They are not reproducible +export ENC2XS_NO_COMMENTS = "1" + +do_unpack_append() { + bb.build.exec_func('do_copy_perlcross', d) +} + +do_copy_perlcross() { + cp -rfp ${WORKDIR}/perl-cross*/* ${S} +} + +do_configure_class-target() { + ./configure --prefix=${prefix} --libdir=${libdir} \ + --target=${TARGET_SYS} \ + -Duseshrplib \ + -Dsoname=libperl.so.5 \ + -Dvendorprefix=${prefix} \ + -Darchlibexp=${STAGING_LIBDIR}/perl5/${PV}/${TARGET_ARCH}-linux \ + ${PACKAGECONFIG_CONFARGS} + + #perl.c uses an ARCHLIB_EXP define to generate compile-time code that + #adds the archlibexp path to @INC during run-time initialization of a + #new perl interpreter. + + #Because we've changed this value in a temporary way to make it + #possible to use ExtUtils::Embed in the target build (the temporary + #value in config.sh gets re-stripped out during packaging), the + #ARCHLIB_EXP value that gets generated still uses the temporary version + #instead of the original expected version (i.e. becauses it's in the + #generated config.h, it doesn't get stripped out during packaging like + #the others in config.sh). 
+ + sed -i -e "s,${STAGING_LIBDIR},${libdir},g" config.h +} + +do_configure_class-nativesdk() { + ./configure --prefix=${prefix} \ + --target=${TARGET_SYS} \ + -Duseshrplib \ + -Dsoname=libperl.so.5 \ + -Dvendorprefix=${prefix} \ + -Darchlibexp=${STAGING_LIBDIR}/perl5/${PV}/${TARGET_ARCH}-linux \ + ${PACKAGECONFIG_CONFARGS} + + # See the comment above + sed -i -e "s,${STAGING_LIBDIR},${libdir},g" config.h +} + +do_configure_class-native() { + ./configure --prefix=${prefix} \ + -Dbin=${bindir}/perl-native \ + -Duseshrplib \ + -Dsoname=libperl.so.5 \ + -Dvendorprefix=${prefix} \ + -Ui_xlocale \ + ${PACKAGECONFIG_CONFARGS} +} + +do_configure_append() { + if [ -n "$SOURCE_DATE_EPOCH" ]; then + PERL_BUILD_DATE="$(${PYTHON} -c "\ +from datetime import datetime, timezone; \ +print(datetime.fromtimestamp($SOURCE_DATE_EPOCH, timezone.utc).strftime('%a %b %d %H:%M:%S %Y')) \ + ")" + echo "#define PERL_BUILD_DATE \"$PERL_BUILD_DATE\"" >> config.h + fi +} + +do_compile() { + oe_runmake + # This isn't generated reliably so delete and re-generate. + # https://github.com/arsv/perl-cross/issues/86 + + if [ -e pod/perltoc.pod ]; then + bbnote Rebuilding perltoc.pod + rm -f pod/perltoc.pod + oe_runmake pod/perltoc.pod + fi +} + +do_install() { + oe_runmake 'DESTDIR=${D}' install + + install -d ${D}${libdir}/perl5 + install -d ${D}${libdir}/perl5/${PV}/ + install -d ${D}${libdir}/perl5/${PV}/ExtUtils/ + + # Save native config + install config.sh ${D}${libdir}/perl5 + install lib/Config.pm ${D}${libdir}/perl5/${PV}/ + install lib/ExtUtils/typemap ${D}${libdir}/perl5/${PV}/ExtUtils/ + + # Fix up shared library + rm ${D}/${libdir}/perl5/${PV}/*/CORE/libperl.so + ln -sf ../../../../libperl.so.${PERL_LIB_VER} $(echo ${D}/${libdir}/perl5/${PV}/*/CORE)/libperl.so +} + +do_install_append_class-target() { + # This is used to substitute target configuration when running native perl via perl-configpm-switch.patch + ln -s Config_heavy.pl ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy-target.pl + + # This contains host-specific information used for building miniperl (a helper executable built with host compiler) + # and therefore isn't reproducible. I believe the file isn't actually needed on target. + rm ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/xconfig.h +} + +do_install_append_class-nativesdk() { + # This is used to substitute target configuration when running native perl via perl-configpm-switch.patch + ln -s Config_heavy.pl ${D}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy-target.pl + + create_wrapper ${D}${bindir}/perl \ + PERL5LIB='$PERL5LIB:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/site_perl/${PV}:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/vendor_perl/${PV}:${SDKPATHNATIVE}/${libdir_nativesdk}/perl5/${PV}' +} + +do_install_append_class-native () { + # Those wrappers mean that perl installed from sstate (which may change + # path location) works and that in the nativesdk case, the SDK can be + # installed to a different location from the one it was built for. + create_wrapper ${D}${bindir}/perl-native/perl PERL5LIB='$PERL5LIB:${STAGING_LIBDIR}/perl5/site_perl/${PV}:${STAGING_LIBDIR}/perl5/vendor_perl/${PV}:${STAGING_LIBDIR}/perl5/${PV}' + + # Use /usr/bin/env nativeperl for the perl script. + for f in `grep -Il '#! 
*${bindir}/perl' ${D}/${bindir}/*`; do + sed -i -e 's|${bindir}/perl|/usr/bin/env nativeperl|' $f + done +} + +PACKAGE_PREPROCESS_FUNCS += "perl_package_preprocess" + +perl_package_preprocess () { + # Fix up installed configuration + sed -i -e "s,${D},,g" \ + -e "s,${DEBUG_PREFIX_MAP},,g" \ + -e "s,--sysroot=${STAGING_DIR_HOST},,g" \ + -e "s,-isystem${STAGING_INCDIR} ,,g" \ + -e "s,${STAGING_LIBDIR},${libdir},g" \ + -e "s,${STAGING_BINDIR},${bindir},g" \ + -e "s,${STAGING_INCDIR},${includedir},g" \ + -e "s,${STAGING_BINDIR_NATIVE}/perl-native/,${bindir}/,g" \ + -e "s,${STAGING_BINDIR_NATIVE}/,,g" \ + -e "s,${STAGING_BINDIR_TOOLCHAIN}/${TARGET_PREFIX},${bindir},g" \ + -e 's:${RECIPE_SYSROOT}::g' \ + ${PKGD}${bindir}/h2xs.perl \ + ${PKGD}${bindir}/h2ph.perl \ + ${PKGD}${bindir}/pod2man.perl \ + ${PKGD}${bindir}/pod2text.perl \ + ${PKGD}${bindir}/pod2usage.perl \ + ${PKGD}${bindir}/podchecker.perl \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/config.h \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/perl.h \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/CORE/pp.h \ + ${PKGD}${libdir}/perl5/${PV}/Config.pm \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pm \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pod \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_git.pl \ + ${PKGD}${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config_heavy.pl \ + ${PKGD}${libdir}/perl5/${PV}/ExtUtils/Liblist/Kid.pm \ + ${PKGD}${libdir}/perl5/${PV}/FileCache.pm \ + ${PKGD}${libdir}/perl5/${PV}/pod/*.pod \ + ${PKGD}${libdir}/perl5/config.sh +} + +inherit update-alternatives + +ALTERNATIVE_PRIORITY = "100" + +ALTERNATIVE_${PN}-misc = "corelist cpan enc2xs encguess h2ph h2xs instmodsh json_pp libnetcfg \ + piconv pl2pm pod2html pod2man pod2text pod2usage podchecker \ + prove ptar ptardiff ptargrep shasum splain xsubpp zipdetails" +ALTERNATIVE_LINK_NAME[corelist] = "${bindir}/corelist" +ALTERNATIVE_LINK_NAME[cpan] = "${bindir}/cpan" +ALTERNATIVE_LINK_NAME[enc2xs] = "${bindir}/enc2xs" +ALTERNATIVE_LINK_NAME[encguess] = "${bindir}/encguess" +ALTERNATIVE_LINK_NAME[h2ph] = "${bindir}/h2ph" +ALTERNATIVE_LINK_NAME[h2xs] = "${bindir}/h2xs" +ALTERNATIVE_LINK_NAME[instmodsh] = "${bindir}/instmodsh" +ALTERNATIVE_LINK_NAME[json_pp] = "${bindir}/json_pp" +ALTERNATIVE_LINK_NAME[libnetcfg] = "${bindir}/libnetcfg" +ALTERNATIVE_LINK_NAME[piconv] = "${bindir}/piconv" +ALTERNATIVE_LINK_NAME[pl2pm] = "${bindir}/pl2pm" +ALTERNATIVE_LINK_NAME[pod2html] = "${bindir}/pod2html" +ALTERNATIVE_LINK_NAME[pod2man] = "${bindir}/pod2man" +ALTERNATIVE_LINK_NAME[pod2text] = "${bindir}/pod2text" +ALTERNATIVE_LINK_NAME[pod2usage] = "${bindir}/pod2usage" +ALTERNATIVE_LINK_NAME[podchecker] = "${bindir}/podchecker" +ALTERNATIVE_LINK_NAME[prove] = "${bindir}/prove" +ALTERNATIVE_LINK_NAME[ptar] = "${bindir}/ptar" +ALTERNATIVE_LINK_NAME[ptardiff] = "${bindir}/ptardiff" +ALTERNATIVE_LINK_NAME[ptargrep] = "${bindir}/ptargrep" +ALTERNATIVE_LINK_NAME[shasum] = "${bindir}/shasum" +ALTERNATIVE_LINK_NAME[splain] = "${bindir}/splain" +ALTERNATIVE_LINK_NAME[xsubpp] = "${bindir}/xsubpp" +ALTERNATIVE_LINK_NAME[zipdetails] = "${bindir}/zipdetails" + +require perl-ptest.inc + +FILES_${PN} = "${bindir}/perl ${bindir}/perl.real ${bindir}/perl${PV} ${libdir}/libperl.so* \ + ${libdir}/perl5/site_perl \ + ${libdir}/perl5/${PV}/Config.pm \ + ${libdir}/perl5/${PV}/${TARGET_ARCH}-linux/Config.pm \ + ${libdir}/perl5/${PV}/*/Config_git.pl \ + ${libdir}/perl5/${PV}/*/Config_heavy-target.pl \ + ${libdir}/perl5/config.sh \ + 
${libdir}/perl5/${PV}/strict.pm \ + ${libdir}/perl5/${PV}/warnings.pm \ + ${libdir}/perl5/${PV}/warnings \ + ${libdir}/perl5/${PV}/vars.pm \ + ${libdir}/perl5/site_perl \ + ${libdir}/perl5/${PV}/ExtUtils/MANIFEST.SKIP \ + ${libdir}/perl5/${PV}/ExtUtils/xsubpp \ + ${libdir}/perl5/${PV}/ExtUtils/typemap \ + " +RPROVIDES_${PN} += "perl-module-strict perl-module-vars perl-module-config perl-module-warnings \ + perl-module-warnings-register" + +FILES_${PN}-staticdev_append = " ${libdir}/perl5/${PV}/*/CORE/libperl.a" + +FILES_${PN}-dev_append = " ${libdir}/perl5/${PV}/*/CORE" + +FILES_${PN}-doc_append = " ${libdir}/perl5/${PV}/Unicode/Collate/*.txt \ + ${libdir}/perl5/${PV}/*/.packlist \ + ${libdir}/perl5/${PV}/Encode/encode.h \ + " +PACKAGES += "${PN}-misc" + +FILES_${PN}-misc = "${bindir}/*" + +PACKAGES += "${PN}-pod" + +FILES_${PN}-pod = "${libdir}/perl5/${PV}/pod \ + ${libdir}/perl5/${PV}/*.pod \ + ${libdir}/perl5/${PV}/*/*.pod \ + ${libdir}/perl5/${PV}/*/*/*.pod \ + ${libdir}/perl5/${PV}/*/*/*/*.pod \ + " + +PACKAGES += "${PN}-module-cpan ${PN}-module-unicore" + +FILES_${PN}-module-cpan += "${libdir}/perl5/${PV}/CPAN \ + " +FILES_${PN}-module-unicore += "${libdir}/perl5/${PV}/unicore" + +ALTERNATIVE_PRIORITY = "40" +ALTERNATIVE_${PN}-doc = "Thread.3" +ALTERNATIVE_LINK_NAME[Thread.3] = "${mandir}/man3/Thread.3" + +# Create a perl-modules package recommending all the other perl +# packages (actually the non modules packages and not created too) +ALLOW_EMPTY_${PN}-modules = "1" +PACKAGES += "${PN}-modules " + +PACKAGESPLITFUNCS_prepend = "split_perl_packages " + +python split_perl_packages () { + libdir = d.expand('${libdir}/perl5/${PV}') + do_split_packages(d, libdir, r'.*/auto/([^.]*)/[^/]*\.(so|ld|ix|al)', '${PN}-module-%s', 'perl module %s', recursive=True, match_path=True, prepend=False) + do_split_packages(d, libdir, r'.*linux/([^\/]*)\.pm', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) + do_split_packages(d, libdir, r'Module/([^\/]*)\.pm', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) + do_split_packages(d, libdir, r'Module/([^\/]*)/.*', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) + do_split_packages(d, libdir, r'.*linux/([^\/].*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) + do_split_packages(d, libdir, r'(^(?!(CPAN\/|CPANPLUS\/|Module\/|unicore\/|.*linux\/)[^\/]).*)\.(pm|pl|e2x)', '${PN}-module-%s', 'perl module %s', recursive=True, allow_dirs=False, match_path=True, prepend=False) + + # perl-modules should recommend every perl module, and only the + # modules. Don't attempt to use the result of do_split_packages() as some + # modules are manually split (eg. perl-module-unicore). 
+ packages = filter(lambda p: 'perl-module-' in p, d.getVar('PACKAGES').split()) + d.setVar(d.expand("RRECOMMENDS_${PN}-modules"), ' '.join(packages)) + + # Read the pre-generated dependency file, and use it to set module dependecies + for line in open(d.expand("${WORKDIR}") + '/perl-rdepends.txt').readlines(): + splitline = line.split() + if bb.data.inherits_class('native', d): + module = splitline[0] + '-native' + depends = "perl-native" + else: + module = splitline[0].replace("RDEPENDS_perl", "RDEPENDS_${PN}") + depends = splitline[2].strip('"').replace("perl-module", "${PN}-module") + d.appendVar(d.expand(module), " " + depends) +} + +python() { + if d.getVar('CLASSOVERRIDE') == "class-target": + d.setVar("PACKAGES_DYNAMIC", "^${MLPREFIX}perl-module-.*(? ${WORKDIR}/perl-rdepends.inc + +# Some additional dependencies that the above doesn't manage to figure out +RDEPENDS_${PN}-module-file-spec += "${PN}-module-file-spec-unix" +RDEPENDS_${PN}-module-math-bigint += "${PN}-module-math-bigint-calc" +RDEPENDS_${PN}-module-thread-queue += "${PN}-module-attributes" +RDEPENDS_${PN}-module-overload += "${PN}-module-overloading" + +# Generated depends list beyond this line +EOPREAMBLE + test -e packages-split.new && rm -rf packages-split.new + cp -r packages-split packages-split.new && cd packages-split.new + find . -name \*.pm | xargs sed -i '/^=head/,/^=cut/d' + egrep -r "^\s*(\ */+= \"perl-module-/g;s/CPANPLUS::.*/cpanplus/g;s/CPAN::.*/cpan/g;s/::/-/g;s/ [^+\"].*//g;s/_/-/g;s/\.pl\"$/\"/;s/\"\?\$/\"/;s/(//;" | tr [:upper:] [:lower:] | \ + awk '{if ($3 != "\x22"$1"\x22"){ print $0}}'| \ + grep -v -e "\-vms\-" -e module-5 -e "^$" -e "\\$" -e your -e tk -e autoperl -e html -e http -e parse-cpan -e perl-ostype -e ndbm-file -e module-mac -e fcgi -e lwp -e dbd -e dbix | \ + sort -u | \ + sed 's/^/RDEPENDS_/;s/perl-module-/${PN}-module-/g;s/module-\(module-\)/\1/g;s/\(module-load\)-conditional/\1/g;s/encode-configlocal/&-pm/;' | \ + egrep -wv '=>|module-a|module-apache.?|module-apr|module-authen-sasl|module-b-asmdata|module-convert-ebcdic|module-devel-size|module-digest-perl-md5|module-dumpvalue|module-extutils-constant-aaargh56hash|module-extutils-xssymset|module-file-bsdglob|module-for|module-it|module-io-socket-inet6|module-io-socket-ssl|module-io-string|module-ipc-system-simple|module-lexical|module-local-lib|metadata|module-modperl-util|module-pluggable-object|module-test-builder-io-scalar|module-test2|module-text-unidecode|module-unicore|module-win32|objects\sload|syscall.ph|systeminfo.ph|%s' | \ + egrep -wv '=>|module-algorithm-diff|module-carp|module-c|module-encode-hanextra|module-extutils-makemaker-version-regex|module-file-spec|module-io-compress-lzma|module-locale-maketext-lexicon|module-log-agent|module-meta-notation|module-net-localcfg|module-net-ping-external|module-b-deparse|module-scalar-util|module-some-module|module-symbol|module-uri|module-win32api-file' >> ${WORKDIR}/perl-rdepends.generated + cp ${WORKDIR}/perl-rdepends.generated ${THISDIR}/files/perl-rdepends.txt +} + +# bitbake perl -c create_rdepends_inc +addtask do_create_rdepends_inc + +SYSROOT_PREPROCESS_FUNCS += "perl_sysroot_create_wrapper" + +perl_sysroot_create_wrapper () { + mkdir -p ${SYSROOT_DESTDIR}${bindir} + # Create a wrapper that /usr/bin/env perl will use to get perl-native. + # This MUST live in the normal bindir. 
+ cat > ${SYSROOT_DESTDIR}${bindir}/nativeperl << EOF +#!/bin/sh +realpath=\`readlink -fn \$0\` +exec \`dirname \$realpath\`/perl-native/perl "\$@" +EOF + chmod 0755 ${SYSROOT_DESTDIR}${bindir}/nativeperl + cat ${SYSROOT_DESTDIR}${bindir}/nativeperl +} diff --git a/poky/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch b/poky/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch deleted file mode 100644 index f01e699de..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0001-Add-statx.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 4e41a05de1f34ba00a68ca4f20fb49c4d1cbd2d0 Mon Sep 17 00:00:00 2001 -From: Richard Purdie -Date: Wed, 6 Nov 2019 12:17:46 +0000 -Subject: [PATCH] Add statx glibc/syscall support - -Modern distros (e.g. fedora30) are starting to use the new statx() syscall through -the newly exposed glibc wrapper function in software like coreutils (e.g. the ls -command). Add support to intercept this to pseudo. - -Signed-off-by: Richard Purdie -Upstream-Status: Submitted [Emailed to seebs] ---- - ports/linux/guts/statx.c | 48 ++++++++++++++++++++++++++++++++++++++++ - ports/linux/portdefs.h | 1 + - ports/linux/wrapfuncs.in | 1 + - 3 files changed, 50 insertions(+) - create mode 100644 ports/linux/guts/statx.c - -diff --git a/ports/linux/statx/guts/statx.c b/ports/linux/statx/guts/statx.c -new file mode 100644 -index 0000000..a3259c4 ---- /dev/null -+++ b/ports/linux/statx/guts/statx.c -@@ -0,0 +1,42 @@ -+/* -+ * Copyright (c) 2019 Linux Foundation -+ * Author: Richard Purdie -+ * -+ * SPDX-License-Identifier: LGPL-2.1-only -+ * -+ * int -+ * statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf) { -+ * int rc = -1; -+ */ -+ pseudo_msg_t *msg; -+ PSEUDO_STATBUF buf; -+ int save_errno; -+ -+ rc = real_statx(dirfd, pathname, flags, mask, statxbuf); -+ save_errno = errno; -+ if (rc == -1) { -+ return rc; -+ } -+ -+ buf.st_uid = statxbuf->stx_uid; -+ buf.st_gid = statxbuf->stx_gid; -+ buf.st_dev = makedev(statxbuf->stx_dev_major, statxbuf->stx_dev_minor); -+ buf.st_ino = statxbuf->stx_ino; -+ buf.st_mode = statxbuf->stx_mode; -+ buf.st_rdev = makedev(statxbuf->stx_rdev_major, statxbuf->stx_rdev_minor); -+ buf.st_nlink = statxbuf->stx_nlink; -+ msg = pseudo_client_op(OP_STAT, 0, -1, dirfd, pathname, &buf); -+ if (msg && msg->result == RESULT_SUCCEED) { -+ pseudo_debug(PDBGF_FILE, "statx(path %s), flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid); -+ statxbuf->stx_uid = msg->uid; -+ statxbuf->stx_gid = msg->gid; -+ statxbuf->stx_mode = msg->mode; -+ statxbuf->stx_rdev_major = major(msg->rdev); -+ statxbuf->stx_rdev_minor = minor(msg->rdev); -+ } else { -+ pseudo_debug(PDBGF_FILE, "statx(path %s) failed, flags %o, stat rc %d, stat uid %o\n", pathname, flags, rc, statxbuf->stx_uid); -+ } -+ errno = save_errno; -+/* return rc; -+ * } -+ */ -diff --git a/ports/linux/statx/portdefs.h b/ports/linux/statx/portdefs.h -new file mode 100644 -index 0000000..bf934dc ---- /dev/null -+++ b/ports/linux/statx/portdefs.h -@@ -0,0 +1,6 @@ -+/* -+ * SPDX-License-Identifier: LGPL-2.1-only -+ * -+ */ -+#include -+#include -diff --git a/ports/linux/statx/wrapfuncs.in b/ports/linux/statx/wrapfuncs.in -new file mode 100644 -index 0000000..c9cd4c3 ---- /dev/null -+++ b/ports/linux/statx/wrapfuncs.in -@@ -0,0 +1 @@ -+int statx(int dirfd, const char *pathname, int flags, unsigned int mask, struct statx *statxbuf); -diff --git a/ports/linux/subports b/ports/linux/subports -index a29044a..49081bf 100755 ---- a/ports/linux/subports -+++ 
b/ports/linux/subports -@@ -54,3 +54,13 @@ else - fi - rm -f dummy.c dummy.o - -+cat > dummy.c < -+struct statx x; -+EOF -+if ${CC} -c -o dummy.o dummy.c >/dev/null 2>&1; then -+ echo "linux/statx" -+fi -+rm -f dummy.c dummy.o -+ --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch b/poky/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch deleted file mode 100644 index b2dbdad27..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0001-maketables-wrappers-use-Python-3.patch +++ /dev/null @@ -1,34 +0,0 @@ -From dbd34b1b2af8fbf44a0d5c37abe3448405819823 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Wed, 28 Aug 2019 19:20:29 +0200 -Subject: [PATCH] maketables/wrappers: use Python 3 - -Changelog indicates they should be compatible. - -Upstream-Status: Pending -Signed-off-by: Alexander Kanavin ---- - maketables | 2 +- - makewrappers | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/maketables b/maketables -index a211772..52285e2 100755 ---- a/maketables -+++ b/maketables -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - # - # Copyright (c) 2008-2010, 2013 Wind River Systems, Inc. - # -diff --git a/makewrappers b/makewrappers -index e84607d..b34f7eb 100755 ---- a/makewrappers -+++ b/makewrappers -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - # - # Copyright (c) 2008-2011,2013 Wind River Systems, Inc. - # diff --git a/poky/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch b/poky/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch deleted file mode 100644 index 9c49e33b0..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0001-pseudo-On-a-DB-fixup-remove-files-that-do-not-exist-.patch +++ /dev/null @@ -1,49 +0,0 @@ -From b0902e36108b49e6bc88d6b251cc2f8cffcd5a13 Mon Sep 17 00:00:00 2001 -From: Ricardo Ribalda -Date: Sun, 5 Apr 2020 11:40:30 +0000 -Subject: [PATCH] pseudo: On a DB fixup remove files that do not exist anymore - -If the user decides to fix a database, remove the files that do not -exist anymore. -If only DB test is selected do not change the behaviour (return error). - -Signed-off-by: Ricardo Ribalda -Upstream-Status: Submitted [https://lists.openembedded.org/g/openembedded-core/message/137045] ---- - pseudo.c | 13 ++++++++++--- - 1 file changed, 10 insertions(+), 3 deletions(-) - -diff --git a/pseudo.c b/pseudo.c -index 0f5850e..98e5b0c 100644 ---- a/pseudo.c -+++ b/pseudo.c -@@ -1087,9 +1087,15 @@ pseudo_db_check(int fix) { - int fixup_needed = 0; - pseudo_debug(PDBGF_DB, "Checking <%s>\n", m->path); - if (lstat(m->path, &buf)) { -- errors = EXIT_FAILURE; -- pseudo_diag("can't stat <%s>\n", m->path); -- continue; -+ if (!fix) { -+ pseudo_diag("can't stat <%s>\n", m->path); -+ errors = EXIT_FAILURE; -+ continue; -+ } else { -+ pseudo_debug(PDBGF_DB, "can't stat <%s>\n", m->path); -+ fixup_needed = 2; -+ goto do_fixup; -+ } - } - /* can't check for device type mismatches, uid/gid, or - * permissions, because those are the very things we -@@ -1125,6 +1131,7 @@ pseudo_db_check(int fix) { - S_ISDIR(m->mode)); - fixup_needed = 2; - } -+ do_fixup: - if (fixup_needed) { - /* in fixup mode, either delete (mismatches) or - * correct (dev/ino). 
--- -2.21.1 - diff --git a/poky/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch b/poky/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch deleted file mode 100644 index 33d4ef3b2..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0001-pseudo_ipc.h-Fix-enum-typedef.patch +++ /dev/null @@ -1,31 +0,0 @@ -From a491aececfedf7313d29b80d626e0964fb533548 Mon Sep 17 00:00:00 2001 -From: Jacob Kroon -Date: Sun, 3 May 2020 06:24:03 +0200 -Subject: [PATCH] pseudo_ipc.h: Fix enum typedef - -'pseudo_access_t' is a type, so use typedef. - -Fixes building pseudo with gcc 10 where -fno-common is the default. - -Signed-off-by: Jacob Kroon -Upstream-Status: Submitted [https://lists.openembedded.org/g/openembedded-core/message/137758] ---- - pseudo_ipc.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pseudo_ipc.h b/pseudo_ipc.h -index caeae5c..d945257 100644 ---- a/pseudo_ipc.h -+++ b/pseudo_ipc.h -@@ -29,7 +29,7 @@ typedef struct { - char path[]; - } pseudo_msg_t; - --enum { -+typedef enum { - PSA_EXEC = 1, - PSA_WRITE = (PSA_EXEC << 1), - PSA_READ = (PSA_WRITE << 1), --- -2.26.2 - diff --git a/poky/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch b/poky/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch deleted file mode 100644 index 17829ef3a..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0001-realpath.c-Remove-trailing-slashes.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 86c9a5610e3333ad6aaadb1ac1e8b5a2c948d119 Mon Sep 17 00:00:00 2001 -From: Robert Yang -Date: Mon, 25 Nov 2019 18:46:45 +0800 -Subject: [PATCH] realpath.c: Remove trailing slashes - -Linux system's realpath() remove trailing slashes, but pseudo's doesn't, need -make them identical. - -E.g., the following code (rel.c) prints '/tmp' with system's realpath, but -pseudo's realpath prints '/tmp/': - - #include - #include - #include - - int main() { - char out[PATH_MAX]; - printf("%s\n", realpath("/tmp/", out)); - return 0; - } - -$ bitbake base-passwd -cdevshell # For pseudo env -$ gcc rel.c -$ ./a.out -/tmp/ (but should be /tmp) - -This patch fixes the problem. 
- -Upstream-Status: Submitted [https://lists.yoctoproject.org/g/poky/message/11879] - -Signed-off-by: Robert Yang ---- - ports/unix/guts/realpath.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/ports/unix/guts/realpath.c b/ports/unix/guts/realpath.c ---- a/ports/unix/guts/realpath.c -+++ b/ports/unix/guts/realpath.c -@@ -14,7 +14,14 @@ - errno = ENAMETOOLONG; - return NULL; - } -- if ((len = strlen(rname)) >= pseudo_sys_path_max()) { -+ len = strlen(rname); -+ char *ep = rname + len - 1; -+ while (ep > rname && *ep == '/') { -+ --len; -+ *(ep--) = '\0'; -+ } -+ -+ if (len >= pseudo_sys_path_max()) { - errno = ENAMETOOLONG; - return NULL; - } --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch b/poky/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch deleted file mode 100644 index 161357d55..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/0006-xattr-adjust-for-attr-2.4.48-release.patch +++ /dev/null @@ -1,48 +0,0 @@ -From 93d95ed2eaedcca110c214e1fe3f8896b1f6f853 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Tue, 17 Dec 2019 20:24:27 +0100 -Subject: [PATCH] xattr: adjust for attr 2.4.48 release - -Latest versions of attr have removed the xattr.h header, -with the rationale that libc is providing the same wrappers. - -attr/attributes.h is providing the ENOATTR definition. - -Upstream-Status: Pending -Signed-off-by: Alexander Kanavin ---- - ports/linux/subports | 5 +++-- - ports/linux/xattr/portdefs.h | 3 ++- - 2 files changed, 5 insertions(+), 3 deletions(-) - -diff --git a/ports/linux/subports b/ports/linux/subports -index 2c43ac9..740ec83 100755 ---- a/ports/linux/subports -+++ b/ports/linux/subports -@@ -29,11 +29,12 @@ fi - if $port_xattr; then - cat > dummy.c < --#include -+#include -+#include - int i; - EOF - if ! ${CC} -c -o dummy.o dummy.c >/dev/null 2>&1; then -- echo >&2 "Warning: Can't compile trivial program using ". -+ echo >&2 "Warning: Can't compile trivial program using ". - echo >&2 " xattr support will require that header." - fi - echo "linux/xattr" -diff --git a/ports/linux/xattr/portdefs.h b/ports/linux/xattr/portdefs.h -index 56cd3ca..068d39a 100644 ---- a/ports/linux/xattr/portdefs.h -+++ b/ports/linux/xattr/portdefs.h -@@ -2,5 +2,6 @@ - * SPDX-License-Identifier: LGPL-2.1-only - * - */ --#include -+#include -+#include - #include diff --git a/poky/meta/recipes-devtools/pseudo/files/moreretries.patch b/poky/meta/recipes-devtools/pseudo/files/moreretries.patch deleted file mode 100644 index adea2665b..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/moreretries.patch +++ /dev/null @@ -1,19 +0,0 @@ -Increase the number of retries in pseudo due to occasional slow -server shutdowns. 
- -Upstream-Status: Pending -RP 2016/2/28 - -Index: git/pseudo_client.c -=================================================================== ---- git.orig/pseudo_client.c -+++ git/pseudo_client.c -@@ -1282,7 +1282,7 @@ pseudo_client_setup(void) { - } - } - --#define PSEUDO_RETRIES 20 -+#define PSEUDO_RETRIES 250 - static pseudo_msg_t * - pseudo_client_request(pseudo_msg_t *msg, size_t len, const char *path) { - pseudo_msg_t *response = 0; diff --git a/poky/meta/recipes-devtools/pseudo/files/seccomp.patch b/poky/meta/recipes-devtools/pseudo/files/seccomp.patch deleted file mode 100644 index 283f99794..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/seccomp.patch +++ /dev/null @@ -1,137 +0,0 @@ -Pseudo changes the syscall access patterns which makes it incompatible with -seccomp. Therefore intercept the seccomp syscall and alter it, pretending that -seccomp was setup when in fact we do nothing. If we error as unsupported, -utilities like file will exit with errors so we can't just disable it. - -Upstream-Status: Pending -RP 2020/4/3 -Signed-off-by: Richard Purdie - -It fails to compile pseudo-native on centos 7: - -| ports/linux/pseudo_wrappers.c: In function ‘prctl’: -| ports/linux/pseudo_wrappers.c:129:14: error: ‘SECCOMP_SET_MODE_FILTER’ undeclared (first use in this function) -| if (cmd == SECCOMP_SET_MODE_FILTER) { -| ^ - -Add macro guard for seccomp to avoid the failure. - -Signed-off-by: Kai Kang - -Index: git/ports/linux/pseudo_wrappers.c -=================================================================== ---- git.orig/ports/linux/pseudo_wrappers.c -+++ git/ports/linux/pseudo_wrappers.c -@@ -57,6 +57,7 @@ int pseudo_capset(cap_user_header_t hdrp - long - syscall(long number, ...) { - long rc = -1; -+ va_list ap; - - if (!pseudo_check_wrappers() || !real_syscall) { - /* rc was initialized to the "failure" value */ -@@ -77,6 +78,20 @@ syscall(long number, ...) { - (void) number; - #endif - -+#ifdef SYS_seccomp -+ /* pseudo and seccomp are incompatible as pseudo uses different syscalls -+ * so pretend to enable seccomp but really do nothing */ -+ if (number == SYS_seccomp) { -+ unsigned long cmd; -+ va_start(ap, number); -+ cmd = va_arg(ap, unsigned long); -+ va_end(ap); -+ if (cmd == SECCOMP_SET_MODE_FILTER) { -+ return 0; -+ } -+ } -+#endif -+ - /* gcc magic to attempt to just pass these args to syscall. we have to - * guess about the number of args; the docs discuss calling conventions - * up to 7, so let's try that? -@@ -92,3 +108,44 @@ static long wrap_syscall(long nr, va_lis - (void) ap; - return -1; - } -+ -+int -+prctl(int option, ...) { -+ int rc = -1; -+ va_list ap; -+ -+ if (!pseudo_check_wrappers() || !real_prctl) { -+ /* rc was initialized to the "failure" value */ -+ pseudo_enosys("prctl"); -+ return rc; -+ } -+ -+#ifdef SECCOMP_SET_MODE_FILTER -+ /* pseudo and seccomp are incompatible as pseudo uses different syscalls -+ * so pretend to enable seccomp but really do nothing */ -+ if (option == PR_SET_SECCOMP) { -+ unsigned long cmd; -+ va_start(ap, option); -+ cmd = va_arg(ap, unsigned long); -+ va_end(ap); -+ if (cmd == SECCOMP_SET_MODE_FILTER) { -+ return 0; -+ } -+ } -+#endif -+ -+ /* gcc magic to attempt to just pass these args to prctl. we have to -+ * guess about the number of args; the docs discuss calling conventions -+ * up to 5, so let's try that? -+ */ -+ void *res = __builtin_apply((void (*)()) real_prctl, __builtin_apply_args(), sizeof(long) * 5); -+ __builtin_return(res); -+} -+ -+/* unused. 
-+ */ -+static int wrap_prctl(int option, va_list ap) { -+ (void) option; -+ (void) ap; -+ return -1; -+} -Index: git/ports/linux/guts/prctl.c -=================================================================== ---- /dev/null -+++ git/ports/linux/guts/prctl.c -@@ -0,0 +1,15 @@ -+/* -+ * Copyright (c) 2020 Richard Purdie -+ * -+ * SPDX-License-Identifier: LGPL-2.1-only -+ * -+ * int prctl(int option, ...) -+ * int rc = -1; -+ */ -+ -+ /* we should never get here, prctl is hand-wrapped */ -+ rc = -1; -+ -+/* return rc; -+ * } -+ */ -Index: git/ports/linux/portdefs.h -=================================================================== ---- git.orig/ports/linux/portdefs.h -+++ git/ports/linux/portdefs.h -@@ -32,3 +32,5 @@ GLIBC_COMPAT_SYMBOL(memcpy,2.0); - - #include - #include -+#include -+#include -Index: git/ports/linux/wrapfuncs.in -=================================================================== ---- git.orig/ports/linux/wrapfuncs.in -+++ git/ports/linux/wrapfuncs.in -@@ -56,3 +56,4 @@ int getgrent_r(struct group *gbuf, char - int capset(cap_user_header_t hdrp, const cap_user_data_t datap); /* real_func=pseudo_capset */ - long syscall(long nr, ...); /* hand_wrapped=1 */ - int renameat2(int olddirfd, const char *oldpath, int newdirfd, const char *newpath, unsigned int flags); /* flags=AT_SYMLINK_NOFOLLOW */ -+int prctl(int option, ...); /* hand_wrapped=1 */ diff --git a/poky/meta/recipes-devtools/pseudo/files/toomanyfiles.patch b/poky/meta/recipes-devtools/pseudo/files/toomanyfiles.patch deleted file mode 100644 index bda7e4b20..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/toomanyfiles.patch +++ /dev/null @@ -1,71 +0,0 @@ -From b0b25fbc041a148d1de09f5a6503cd95973ec77c Mon Sep 17 00:00:00 2001 -From: Richard Purdie -Date: Tue, 25 Apr 2017 15:25:54 +0100 -Subject: [PATCH 3/3] pseudo: Handle too many files deadlock - -Currently if we max out the maximum number of files, pseudo can deadlock, unable to -accept new connections yet unable to move forward and unblock the other processes -waiting either. - -Rather than hang, when this happens, close out inactive connections, allowing us -to accept the new ones. The disconnected clients will simply reconnect. There is -a small risk of data loss here sadly but its better than hanging. 
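As a generic illustration of the recovery strategy described above (this is not pseudo's code; pseudo's actual change to pseudo_server.c follows in the hunk below), an accept() loop can treat EMFILE as a cue to retire one idle connection and retry. close_one_idle_client() is a hypothetical stand-in for pseudo's close_client():

    #include <errno.h>
    #include <sys/socket.h>

    /* Hypothetical helper: frees one descriptor held by an idle client so
     * that the next accept() attempt can succeed. */
    extern void close_one_idle_client(void);

    static int accept_with_emfile_fallback(int listen_fd)
    {
            for (;;) {
                    int fd = accept(listen_fd, NULL, NULL);
                    if (fd >= 0)
                            return fd;
                    if (errno == EMFILE) {
                            /* Out of file descriptors: drop one idle client
                             * (it will simply reconnect) and try again. */
                            close_one_idle_client();
                            continue;
                    }
                    return -1; /* any other error is left to the caller */
            }
    }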
- -RP -2017/4/25 - -Upstream-Status: Submitted [Peter is aware of the issue] - ---- - pseudo_server.c | 10 ++++++++++ - 1 file changed, 10 insertions(+) - -diff --git a/pseudo_server.c b/pseudo_server.c -index dac3258..15a3e8f 100644 ---- a/pseudo_server.c -+++ b/pseudo_server.c -@@ -802,6 +802,7 @@ pseudo_server_loop(void) { - struct sigaction eat_usr2 = { - .sa_handler = set_do_list_clients - }; -+ int hitmaxfiles; - - clients = malloc(16 * sizeof(*clients)); - -@@ -820,6 +821,7 @@ pseudo_server_loop(void) { - active_clients = 1; - max_clients = 16; - highest_client = 0; -+ hitmaxfiles = 0; - - pseudo_debug(PDBGF_SERVER, "server loop started.\n"); - if (listen_fd < 0) { -@@ -878,10 +880,15 @@ pseudo_server_loop(void) { - } else { - serve_client(i); - } -+ } else if (hitmaxfiles) { -+ /* Only close one per loop iteration in the interests of caution */ -+ close_client(i); -+ hitmaxfiles = 0; - } - if (die_forcefully) - break; - } -+ hitmaxfiles = 0; - if (!die_forcefully && - (FD_ISSET(clients[0].fd, &events) || - FD_ISSET(clients[0].fd, &reads))) { -@@ -903,6 +910,9 @@ pseudo_server_loop(void) { - */ - pseudo_server_timeout = DEFAULT_PSEUDO_SERVER_TIMEOUT; - die_peacefully = 0; -+ } else if (errno == EMFILE) { -+ hitmaxfiles = 1; -+ pseudo_debug(PDBGF_SERVER, "Hit max open files, dropping a client.\n"); - } - } - pseudo_debug(PDBGF_SERVER, "server loop complete [%d clients left]\n", active_clients); --- -2.15.1 - diff --git a/poky/meta/recipes-devtools/pseudo/files/xattr_version.patch b/poky/meta/recipes-devtools/pseudo/files/xattr_version.patch deleted file mode 100644 index a8b14bdd6..000000000 --- a/poky/meta/recipes-devtools/pseudo/files/xattr_version.patch +++ /dev/null @@ -1,54 +0,0 @@ -On a tumbleweed system, "install X Y" was showing the error: - -pseudo: ENOSYS for 'fsetxattr'. - -which was being caused by dlsym() for that function returning NULL. This -appears to be due to it finding an unresolved symbol in libacl for this -symbol in libattr. It hasn't been resolved so its NULL. dlerror() returns -nothing since this is a valid symbol entry, its just not the one we want. - -We can add the glibc version string for the symbol we actually want so we get -that version rather than the libattr/libacl one. - -To quote libattr: -""" - These dumb wrappers are for backwards compatibility only. - Actual syscall wrappers are long gone to libc. -""" -and they are simply wrappers around the libc version so our attaching -to the libc versions should intercept any accesses via these too. 
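The versioned lookup this description refers to is what glibc's dlvsym() provides. A minimal sketch of the idea (illustrative only; the "GLIBC_2.3" version string and the helper name are assumptions for the example, not values taken from the deleted patch):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stddef.h>

    typedef int (*fsetxattr_fn)(int, const char *, const void *, size_t, int);
    static fsetxattr_fn real_fsetxattr;

    static void resolve_fsetxattr(void)
    {
            /* Ask for the glibc-versioned symbol explicitly so the lookup
             * cannot bind to libattr's unresolved compatibility stub. */
            real_fsetxattr = (fsetxattr_fn) dlvsym(RTLD_NEXT, "fsetxattr",
                                                   "GLIBC_2.3");
            if (!real_fsetxattr) {
                    /* Fall back to the plain lookup if the versioned one
                     * is not available on this libc. */
                    real_fsetxattr = (fsetxattr_fn) dlsym(RTLD_NEXT,
                                                          "fsetxattr");
            }
    }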
- -RP 2020/06/22 -Signed-off-by: Richard Purdie Date: Thu, 10 Dec 2015 13:20:30 +0200 Subject: [PATCH] Don't search /usr and so on for libraries by default to @@ -14,11 +14,11 @@ Signed-off-by: Alexander Kanavin 1 file changed, 5 insertions(+), 37 deletions(-) diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py -index ba2b1f4..f94dce1 100644 +index 3a6a7b2..6c2c3da 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py -@@ -278,45 +278,13 @@ if sys.platform == 'win32': - add_system_root(os.path.join(conda_dir, 'Library')) +@@ -309,45 +309,13 @@ if sys.platform == 'win32': + add_system_root(os.path.join(conda_dir, 'Library')) else: - default_lib_dirs = libpaths(['/usr/local/lib', '/opt/lib', '/usr/lib', @@ -68,3 +68,6 @@ index ba2b1f4..f94dce1 100644 if os.path.join(sys.prefix, 'lib') not in default_lib_dirs: default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib')) +-- +2.17.1 + diff --git a/poky/meta/recipes-devtools/python-numpy/files/0001-convert-shebang-from-python-to-python3.patch b/poky/meta/recipes-devtools/python-numpy/files/0001-convert-shebang-from-python-to-python3.patch deleted file mode 100644 index a41624442..000000000 --- a/poky/meta/recipes-devtools/python-numpy/files/0001-convert-shebang-from-python-to-python3.patch +++ /dev/null @@ -1,555 +0,0 @@ -From 00848d760fa3999e2bed759b585452b35d65d6ec Mon Sep 17 00:00:00 2001 -From: Changqing Li -Date: Thu, 5 Mar 2020 12:02:35 +0800 -Subject: [PATCH] convert shebang from python to python3 - -Upstream-Status: Backport -[https://github.com/numpy/numpy/commit/583901a074dc65145d3d6136ba7dcd02634d680b] - -Signed-off-by: Changqing Li - ---- - doc/DISTUTILS.rst.txt | 2 +- - doc/cdoc/numpyfilter.py | 2 +- - doc/postprocess.py | 2 +- - doc/summarize.py | 2 +- - numpy/distutils/conv_template.py | 2 +- - numpy/distutils/cpuinfo.py | 2 +- - numpy/distutils/from_template.py | 2 +- - numpy/distutils/setup.py | 2 +- - numpy/distutils/system_info.py | 2 +- - numpy/f2py/__init__.py | 2 +- - numpy/f2py/auxfuncs.py | 2 +- - numpy/f2py/capi_maps.py | 2 +- - numpy/f2py/cb_rules.py | 2 +- - numpy/f2py/cfuncs.py | 2 +- - numpy/f2py/common_rules.py | 2 +- - numpy/f2py/crackfortran.py | 2 +- - numpy/f2py/diagnose.py | 2 +- - numpy/f2py/f2py2e.py | 2 +- - numpy/f2py/f90mod_rules.py | 2 +- - numpy/f2py/func2subr.py | 2 +- - numpy/f2py/rules.py | 2 +- - numpy/f2py/setup.py | 2 +- - numpy/f2py/use_rules.py | 2 +- - numpy/linalg/lapack_lite/clapack_scrub.py | 2 +- - numpy/linalg/lapack_lite/make_lite.py | 2 +- - numpy/ma/bench.py | 2 +- - numpy/ma/setup.py | 2 +- - numpy/matrixlib/setup.py | 2 +- - numpy/random/_examples/cython/extending.pyx | 2 +- - numpy/random/_examples/cython/extending_distributions.pyx | 2 +- - numpy/setup.py | 2 +- - numpy/testing/print_coercion_tables.py | 2 +- - numpy/testing/setup.py | 2 +- - runtests.py | 2 +- - setup.py | 2 +- - tools/c_coverage/c_coverage_report.py | 2 +- - tools/changelog.py | 2 +- - tools/ci/push_docs_to_repo.py | 2 +- - tools/find_deprecated_escaped_characters.py | 2 +- - tools/refguide_check.py | 2 +- - tools/swig/test/setup.py | 2 +- - tools/swig/test/testArray.py | 2 +- - tools/swig/test/testFarray.py | 2 +- - tools/swig/test/testFlat.py | 2 +- - tools/swig/test/testFortran.py | 2 +- - tools/swig/test/testMatrix.py | 2 +- - tools/swig/test/testSuperTensor.py | 2 +- - tools/swig/test/testTensor.py | 2 +- - tools/swig/test/testVector.py | 2 +- - 49 files changed, 49 insertions(+), 49 deletions(-) - -diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt 
-index bcef825..bc1700f 100644 ---- a/doc/DISTUTILS.rst.txt -+++ b/doc/DISTUTILS.rst.txt -@@ -59,7 +59,7 @@ SciPy pure Python package example - - Below is an example of a minimal ``setup.py`` file for a pure SciPy package:: - -- #!/usr/bin/env python -+ #!/usr/bin/env python3 - def configuration(parent_package='',top_path=None): - from numpy.distutils.misc_util import Configuration - config = Configuration('mypackage',parent_package,top_path) -diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py -index 0ec5069..067bd36 100755 ---- a/doc/cdoc/numpyfilter.py -+++ b/doc/cdoc/numpyfilter.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - numpyfilter.py INPUTFILE - -diff --git a/doc/postprocess.py b/doc/postprocess.py -index 2e50c11..1be6f39 100755 ---- a/doc/postprocess.py -+++ b/doc/postprocess.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - %prog MODE FILES... - -diff --git a/doc/summarize.py b/doc/summarize.py -index cfce271..563af02 100755 ---- a/doc/summarize.py -+++ b/doc/summarize.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - summarize.py - -diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py -index 3bcb7b8..88432c8 100644 ---- a/numpy/distutils/conv_template.py -+++ b/numpy/distutils/conv_template.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - takes templated file .xxx.src and produces .xxx file where .xxx is - .i or .c or .h, using the following template rules -diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py -index bc97283..87502a9 100644 ---- a/numpy/distutils/cpuinfo.py -+++ b/numpy/distutils/cpuinfo.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - cpuinfo - -diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py -index c5c1163..af75971 100644 ---- a/numpy/distutils/from_template.py -+++ b/numpy/distutils/from_template.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - process_file(filename) -diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py -index 82a53bd..646921b 100644 ---- a/numpy/distutils/setup.py -+++ b/numpy/distutils/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - def configuration(parent_package='',top_path=None): -diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py -index 4f340b6..189081d 100644 ---- a/numpy/distutils/system_info.py -+++ b/numpy/distutils/system_info.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - This file defines a set of system_info classes for getting - information about various resources (libraries, library directories, -diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py -index 42e3632..09a3657 100644 ---- a/numpy/f2py/__init__.py -+++ b/numpy/f2py/__init__.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """Fortran to Python Interface Generator. - - """ -diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py -index 404bdbd..d23d959 100644 ---- a/numpy/f2py/auxfuncs.py -+++ b/numpy/f2py/auxfuncs.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Auxiliary functions for f2py2e. 
-diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py -index ce79f68..e5d3fd2 100644 ---- a/numpy/f2py/capi_maps.py -+++ b/numpy/f2py/capi_maps.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Copyright 1999,2000 Pearu Peterson all rights reserved, -diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py -index 183d7c2..93e93fe 100644 ---- a/numpy/f2py/cb_rules.py -+++ b/numpy/f2py/cb_rules.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Build call-back mechanism for f2py2e. -diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py -index ccb7b3a..cdb783d 100644 ---- a/numpy/f2py/cfuncs.py -+++ b/numpy/f2py/cfuncs.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - C declarations, CPP macros, and C functions for f2py2e. -diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py -index f61d881..fe510bf 100644 ---- a/numpy/f2py/common_rules.py -+++ b/numpy/f2py/common_rules.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Build common block mechanism for f2py2e. -diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py -index 2aaf5d7..fb5ef2f 100755 ---- a/numpy/f2py/crackfortran.py -+++ b/numpy/f2py/crackfortran.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - crackfortran --- read fortran (77,90) code and extract declaration information. - -diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py -index 0241fed..6c0304c 100644 ---- a/numpy/f2py/diagnose.py -+++ b/numpy/f2py/diagnose.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - import os -diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py -index d03eff9..c17bfd9 100755 ---- a/numpy/f2py/f2py2e.py -+++ b/numpy/f2py/f2py2e.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - f2py2e - Fortran to Python C/API generator. 2nd Edition. -diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py -index 85eae80..70be128 100644 ---- a/numpy/f2py/f90mod_rules.py -+++ b/numpy/f2py/f90mod_rules.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Build F90 module support for f2py2e. -diff --git a/numpy/f2py/func2subr.py b/numpy/f2py/func2subr.py -index 6010d5a..fdea0c2 100644 ---- a/numpy/f2py/func2subr.py -+++ b/numpy/f2py/func2subr.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Rules for building C/API module with f2py2e. -diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py -index f2f713b..f87b03c 100755 ---- a/numpy/f2py/rules.py -+++ b/numpy/f2py/rules.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Rules for building C/API module with f2py2e. -diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py -index a8c1401..2e7a517 100644 ---- a/numpy/f2py/setup.py -+++ b/numpy/f2py/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - setup.py for installing F2PY - -diff --git a/numpy/f2py/use_rules.py b/numpy/f2py/use_rules.py -index 6f44f16..8214f42 100644 ---- a/numpy/f2py/use_rules.py -+++ b/numpy/f2py/use_rules.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - - Build 'use others module data' mechanism for f2py2e. 
-diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py -index 4345861..91e66e7 100644 ---- a/numpy/linalg/lapack_lite/clapack_scrub.py -+++ b/numpy/linalg/lapack_lite/clapack_scrub.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - import sys, os -diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py -index 61102d6..0211f4e 100755 ---- a/numpy/linalg/lapack_lite/make_lite.py -+++ b/numpy/linalg/lapack_lite/make_lite.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - Usage: make_lite.py - -diff --git a/numpy/ma/bench.py b/numpy/ma/bench.py -index a9ba42d..a377436 100644 ---- a/numpy/ma/bench.py -+++ b/numpy/ma/bench.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - # -*- coding: utf-8 -*- - - from __future__ import division, print_function -diff --git a/numpy/ma/setup.py b/numpy/ma/setup.py -index d1d6c89..a04b79b 100644 ---- a/numpy/ma/setup.py -+++ b/numpy/ma/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - def configuration(parent_package='',top_path=None): -diff --git a/numpy/matrixlib/setup.py b/numpy/matrixlib/setup.py -index d0981d6..57534d1 100644 ---- a/numpy/matrixlib/setup.py -+++ b/numpy/matrixlib/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - def configuration(parent_package='', top_path=None): -diff --git a/numpy/random/_examples/cython/extending.pyx b/numpy/random/_examples/cython/extending.pyx -index 7a0dfe0..3a7f81a 100644 ---- a/numpy/random/_examples/cython/extending.pyx -+++ b/numpy/random/_examples/cython/extending.pyx -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - #cython: language_level=3 - - from libc.stdint cimport uint32_t -diff --git a/numpy/random/_examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx -index 1bef506..4da6a4b 100644 ---- a/numpy/random/_examples/cython/extending_distributions.pyx -+++ b/numpy/random/_examples/cython/extending_distributions.pyx -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - #cython: language_level=3 - """ - This file shows how the to use a BitGenerator to create a distribution. 
-diff --git a/numpy/setup.py b/numpy/setup.py -index 4ccdaee..db06c82 100644 ---- a/numpy/setup.py -+++ b/numpy/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - -diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py -index 72b22ce..1e9a301 100755 ---- a/numpy/testing/print_coercion_tables.py -+++ b/numpy/testing/print_coercion_tables.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """Prints type-coercion tables for the built-in NumPy types - - """ -diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py -index 7c3f2fb..bd315ee 100755 ---- a/numpy/testing/setup.py -+++ b/numpy/testing/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - -diff --git a/runtests.py b/runtests.py -index a38054f..383ddaa 100755 ---- a/runtests.py -+++ b/runtests.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - runtests.py [OPTIONS] [-- ARGS] - -diff --git a/setup.py b/setup.py -index d7f807b..705ea5d 100755 ---- a/setup.py -+++ b/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ NumPy is the fundamental package for array computing with Python. - - It provides: -diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py -index 327f6dc..8837684 100755 ---- a/tools/c_coverage/c_coverage_report.py -+++ b/tools/c_coverage/c_coverage_report.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - A script to create C code-coverage reports based on the output of - valgrind's callgrind tool. -diff --git a/tools/changelog.py b/tools/changelog.py -index b135b14..5d8b33c 100755 ---- a/tools/changelog.py -+++ b/tools/changelog.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - # -*- encoding:utf-8 -*- - """ - Script to generate contributor and pull request lists -diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py -index a989668..ae53054 100755 ---- a/tools/ci/push_docs_to_repo.py -+++ b/tools/ci/push_docs_to_repo.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - - import argparse - import subprocess -diff --git a/tools/find_deprecated_escaped_characters.py b/tools/find_deprecated_escaped_characters.py -index 6f90001..10e0378 100644 ---- a/tools/find_deprecated_escaped_characters.py -+++ b/tools/find_deprecated_escaped_characters.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - r""" - Look for escape sequences deprecated in Python 3.6. - -diff --git a/tools/refguide_check.py b/tools/refguide_check.py -index 2c62809..a19a29e 100644 ---- a/tools/refguide_check.py -+++ b/tools/refguide_check.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - """ - refguide_check.py [OPTIONS] [-- ARGS] - -diff --git a/tools/swig/test/setup.py b/tools/swig/test/setup.py -index 4ff870e..f8f05e6 100755 ---- a/tools/swig/test/setup.py -+++ b/tools/swig/test/setup.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - # System imports -diff --git a/tools/swig/test/testArray.py b/tools/swig/test/testArray.py -index 8d9c797..54ffe71 100755 ---- a/tools/swig/test/testArray.py -+++ b/tools/swig/test/testArray.py -@@ -1,4 +1,4 @@ --#! 
/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py -index e8bf711..b377f7c 100755 ---- a/tools/swig/test/testFarray.py -+++ b/tools/swig/test/testFarray.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py -index 71be277..55034bf 100755 ---- a/tools/swig/test/testFlat.py -+++ b/tools/swig/test/testFlat.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py -index 426e894..0f7d0e6 100644 ---- a/tools/swig/test/testFortran.py -+++ b/tools/swig/test/testFortran.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py -index 065be0d..854a23c 100755 ---- a/tools/swig/test/testMatrix.py -+++ b/tools/swig/test/testMatrix.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py -index 97fe80c..31b63d0 100644 ---- a/tools/swig/test/testSuperTensor.py -+++ b/tools/swig/test/testSuperTensor.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, print_function - - # System imports -diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py -index ac1b749..f47d9e8 100755 ---- a/tools/swig/test/testTensor.py -+++ b/tools/swig/test/testTensor.py -@@ -1,4 +1,4 @@ --#! /usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports -diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py -index 45e763b..067b922 100755 ---- a/tools/swig/test/testVector.py -+++ b/tools/swig/test/testVector.py -@@ -1,4 +1,4 @@ --#! 
/usr/bin/env python -+#!/usr/bin/env python3 - from __future__ import division, absolute_import, print_function - - # System imports diff --git a/poky/meta/recipes-devtools/python-numpy/python-numpy.inc b/poky/meta/recipes-devtools/python-numpy/python-numpy.inc index 75309e308..e37ab399e 100644 --- a/poky/meta/recipes-devtools/python-numpy/python-numpy.inc +++ b/poky/meta/recipes-devtools/python-numpy/python-numpy.inc @@ -1,15 +1,14 @@ SUMMARY = "A sophisticated Numeric Processing Package for Python" SECTION = "devel/python" LICENSE = "BSD-3-Clause & BSD-2-Clause & PSF & Apache-2.0 & BSD & MIT" -LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=1a32aba007a415aa8a1c708a0e2b86a1" +LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=643d4e90100b7abe468c4db88127b895" SRCNAME = "numpy" SRC_URI = "https://github.com/${SRCNAME}/${SRCNAME}/releases/download/v${PV}/${SRCNAME}-${PV}.tar.gz \ file://0001-Don-t-search-usr-and-so-on-for-libraries-by-default-.patch \ - file://0001-convert-shebang-from-python-to-python3.patch \ " -SRC_URI[sha256sum] = "2c095bd1c5290966cceee8b6ef5cd66f13cd0e9d6d0e8d6fc8961abd64a8e51f" +SRC_URI[sha256sum] = "153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90" UPSTREAM_CHECK_URI = "https://github.com/numpy/numpy/releases" UPSTREAM_CHECK_REGEX = "(?P\d+(\.\d+)+)\.tar" @@ -20,7 +19,7 @@ S = "${WORKDIR}/numpy-${PV}" CLEANBROKEN = "1" -FILES_${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/numpy/core/lib/*.a" +FILES_${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/numpy/core/lib/*.a ${PYTHON_SITEPACKAGES_DIR}/numpy/random/lib/*.a" # install what is needed for numpy.test() RDEPENDS_${PN} = "${PYTHON_PN}-unittest \ diff --git a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.18.5.bb b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.18.5.bb deleted file mode 100644 index d388e88d2..000000000 --- a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.18.5.bb +++ /dev/null @@ -1,3 +0,0 @@ -inherit setuptools3 -require python-numpy.inc - diff --git a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb new file mode 100644 index 000000000..d388e88d2 --- /dev/null +++ b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb @@ -0,0 +1,3 @@ +inherit setuptools3 +require python-numpy.inc + diff --git a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch index 5f99cd251..201e3570d 100644 --- a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch +++ b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch @@ -1,4 +1,4 @@ -From a8f871c9ebc4fcb99d4163b226aabeef26567099 Mon Sep 17 00:00:00 2001 +From 234c3da52da09b28db5b2c4d33ebe9c800c461ac Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Tue, 17 Jul 2018 10:13:38 +0800 Subject: [PATCH] conditionally do not fetch code by easy_install @@ -15,10 +15,10 @@ Signed-off-by: Hongxu Jia 1 file changed, 5 insertions(+) diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py -index 426301d..55c8062 100644 +index 5a9576f..f5961cb 100644 --- a/setuptools/command/easy_install.py +++ b/setuptools/command/easy_install.py -@@ -649,6 +649,11 @@ class easy_install(Command): +@@ -656,6 +656,11 @@ class easy_install(Command): os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir)) def easy_install(self, 
spec, deps=False): diff --git a/poky/meta/recipes-devtools/python/python-cython.inc b/poky/meta/recipes-devtools/python/python-cython.inc index eee567ed2..a0cd2a5fb 100644 --- a/poky/meta/recipes-devtools/python/python-cython.inc +++ b/poky/meta/recipes-devtools/python/python-cython.inc @@ -7,7 +7,7 @@ LIC_FILES_CHKSUM = "file://LICENSE.txt;md5=e23fadd6ceef8c618fc1c65191d846fa" PYPI_PACKAGE = "Cython" BBCLASSEXTEND = "native nativesdk" -SRC_URI[sha256sum] = "97f98a7dc0d58ea833dc1f8f8b3ce07adf4c0f030d1886c5399a2135ed415258" +SRC_URI[sha256sum] = "22d91af5fc2253f717a1b80b8bb45acb655f643611983fd6f782b9423f8171c7" UPSTREAM_CHECK_REGEX = "Cython-(?P.*)\.tar" inherit pypi diff --git a/poky/meta/recipes-devtools/python/python-setuptools.inc b/poky/meta/recipes-devtools/python/python-setuptools.inc index 3222de707..ecf120575 100644 --- a/poky/meta/recipes-devtools/python/python-setuptools.inc +++ b/poky/meta/recipes-devtools/python/python-setuptools.inc @@ -10,12 +10,9 @@ inherit pypi SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch" -SRC_URI += "file://0001-change-shebang-to-python3.patch \ - file://0001-ScriptWriter-create-more-efficient-usr-bin-wrappers-signoff-included.patch \ - " +SRC_URI += "file://0001-change-shebang-to-python3.patch" -SRC_URI[md5sum] = "6e9de90b242fdd60ef59f497424ce13a" -SRC_URI[sha256sum] = "145fa62b9d7bb544fce16e9b5a9bf4ab2032d2f758b7cd674af09a92736aff74" +SRC_URI[sha256sum] = "843037738d1e34e8b326b5e061f474aca6ef9d7ece41329afbc8aac6195a3920" DEPENDS += "${PYTHON_PN}" diff --git a/poky/meta/recipes-devtools/python/python3-cython_0.29.19.bb b/poky/meta/recipes-devtools/python/python3-cython_0.29.19.bb deleted file mode 100644 index 2ce6bdbd6..000000000 --- a/poky/meta/recipes-devtools/python/python3-cython_0.29.19.bb +++ /dev/null @@ -1,18 +0,0 @@ -inherit setuptools3 -require python-cython.inc - -RDEPENDS_${PN} += "\ - python3-setuptools \ -" - -# running build_ext a second time during install fails, because Python -# would then attempt to import cythonized modules built for the target -# architecture. -DISTUTILS_INSTALL_ARGS += "--skip-build" - -do_install_append() { - # rename scripts that would conflict with the Python 2 build of Cython - mv ${D}${bindir}/cython ${D}${bindir}/cython3 - mv ${D}${bindir}/cythonize ${D}${bindir}/cythonize3 - mv ${D}${bindir}/cygdb ${D}${bindir}/cygdb3 -} diff --git a/poky/meta/recipes-devtools/python/python3-cython_0.29.20.bb b/poky/meta/recipes-devtools/python/python3-cython_0.29.20.bb new file mode 100644 index 000000000..2ce6bdbd6 --- /dev/null +++ b/poky/meta/recipes-devtools/python/python3-cython_0.29.20.bb @@ -0,0 +1,18 @@ +inherit setuptools3 +require python-cython.inc + +RDEPENDS_${PN} += "\ + python3-setuptools \ +" + +# running build_ext a second time during install fails, because Python +# would then attempt to import cythonized modules built for the target +# architecture. 
+DISTUTILS_INSTALL_ARGS += "--skip-build" + +do_install_append() { + # rename scripts that would conflict with the Python 2 build of Cython + mv ${D}${bindir}/cython ${D}${bindir}/cython3 + mv ${D}${bindir}/cythonize ${D}${bindir}/cythonize3 + mv ${D}${bindir}/cygdb ${D}${bindir}/cygdb3 +} diff --git a/poky/meta/recipes-devtools/python/python3-libarchive-c_2.9.bb b/poky/meta/recipes-devtools/python/python3-libarchive-c_2.9.bb index 4983ae527..3a2d8733e 100644 --- a/poky/meta/recipes-devtools/python/python3-libarchive-c_2.9.bb +++ b/poky/meta/recipes-devtools/python/python3-libarchive-c_2.9.bb @@ -12,6 +12,10 @@ inherit pypi setuptools3 SRC_URI[md5sum] = "083bd2cb0043c1e22a52cb9a05e31532" SRC_URI[sha256sum] = "9919344cec203f5db6596a29b5bc26b07ba9662925a05e24980b84709232ef60" -RDEPENDS_${PN} += "libarchive" +RDEPENDS_${PN} += "\ + libarchive \ + ${PYTHON_PN}-ctypes \ + ${PYTHON_PN}-mmap \ +" BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/python/python3-setuptools/0001-ScriptWriter-create-more-efficient-usr-bin-wrappers-signoff-included.patch b/poky/meta/recipes-devtools/python/python3-setuptools/0001-ScriptWriter-create-more-efficient-usr-bin-wrappers-signoff-included.patch deleted file mode 100644 index 7e931c845..000000000 --- a/poky/meta/recipes-devtools/python/python3-setuptools/0001-ScriptWriter-create-more-efficient-usr-bin-wrappers-signoff-included.patch +++ /dev/null @@ -1,62 +0,0 @@ -From aae8cd3de3f289cea3db01212579913c925191e8 Mon Sep 17 00:00:00 2001 -From: Lauri Tirkkonen -Date: Thu, 26 Mar 2020 14:24:25 +0000 -Subject: [PATCH] ScriptWriter: create more efficient /usr/bin wrappers - -Upstream setuptools writes scripts to /usr/bin that do insanely much -stuff at runtime. https://github.com/pypa/setuptools/issues/510 - -Since the script entry points are already known at build time, we can -just write those directly into the /usr/bin wrapper, avoiding the -expensive 'pkg_resources' import at runtime. The idea is from -https://github.com/ninjaaron/fast-entry_points but patched directly into -the native build of setuptools here, so that all Python modules under -bitbake automatically use it without needing additional build time -dependencies. 
- -Upstream-Status: Pending - -Signed-off-by: Lauri Tirkkonen -Signed-off-by: Trevor Gamblin ---- - setuptools/command/easy_install.py | 14 ++++++-------- - 1 file changed, 6 insertions(+), 8 deletions(-) - -diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py -index 8fba7b41..03a72714 100755 ---- a/setuptools/command/easy_install.py -+++ b/setuptools/command/easy_install.py -@@ -2023,17 +2023,12 @@ class ScriptWriter(object): - """ - - template = textwrap.dedent(r""" -- # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r -- __requires__ = %(spec)r -- import re - import sys -- from pkg_resources import load_entry_point -+ -+ from %(module)s import %(ep0)s - - if __name__ == '__main__': -- sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) -- sys.exit( -- load_entry_point(%(spec)r, %(group)r, %(name)r)() -- ) -+ sys.exit(%(entrypoint)s()) - """).lstrip() - - command_spec_class = CommandSpec -@@ -2068,6 +2063,9 @@ class ScriptWriter(object): - for type_ in 'console', 'gui': - group = type_ + '_scripts' - for name, ep in dist.get_entry_map(group).items(): -+ module = ep.module_name -+ ep0 = ep.attrs[0] -+ entrypoint = '.'.join(ep.attrs) - cls._ensure_safe_name(name) - script_text = cls.template % locals() - args = cls._get_script_args(type_, name, header, script_text) --- -2.24.1 - diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_47.1.1.bb b/poky/meta/recipes-devtools/python/python3-setuptools_47.1.1.bb deleted file mode 100644 index 0dc1ed862..000000000 --- a/poky/meta/recipes-devtools/python/python3-setuptools_47.1.1.bb +++ /dev/null @@ -1,6 +0,0 @@ -require python-setuptools.inc -inherit setuptools3 - -do_install_append() { - mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install -} diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb b/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb new file mode 100644 index 000000000..0dc1ed862 --- /dev/null +++ b/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb @@ -0,0 +1,6 @@ +require python-setuptools.inc +inherit setuptools3 + +do_install_append() { + mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install +} diff --git a/poky/meta/recipes-devtools/python/python3_3.8.3.bb b/poky/meta/recipes-devtools/python/python3_3.8.3.bb index 2eb2a422e..a7cfbad5c 100644 --- a/poky/meta/recipes-devtools/python/python3_3.8.3.bb +++ b/poky/meta/recipes-devtools/python/python3_3.8.3.bb @@ -350,6 +350,7 @@ FILES_${PN}-man = "${datadir}/man" # See https://bugs.python.org/issue18748 and https://bugs.python.org/issue37395 RDEPENDS_libpython3_append_libc-glibc = " libgcc" +RDEPENDS_${PN}-ctypes_append_libc-glibc = " ${MLPREFIX}ldconfig" RDEPENDS_${PN}-ptest = "${PN}-modules ${PN}-tests unzip bzip2 libgcc tzdata-europe coreutils sed" RDEPENDS_${PN}-ptest_append_libc-glibc = " locale-base-tr-tr.iso-8859-9" RDEPENDS_${PN}-tkinter += "${@bb.utils.contains('PACKAGECONFIG', 'tk', 'tk tk-lib', '', d)}" diff --git a/poky/meta/recipes-devtools/qemu/qemu.inc b/poky/meta/recipes-devtools/qemu/qemu.inc index 8b6157e69..d41cc8f20 100644 --- a/poky/meta/recipes-devtools/qemu/qemu.inc +++ b/poky/meta/recipes-devtools/qemu/qemu.inc @@ -31,6 +31,7 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \ file://0001-qemu-Do-not-include-file-if-not-exists.patch \ file://CVE-2020-13361.patch \ file://find_datadir.patch \ + file://CVE-2020-10761.patch \ " UPSTREAM_CHECK_REGEX = "qemu-(?P\d+(\.\d+)+)\.tar" @@ -139,7 +140,7 @@ PACKAGECONFIG_remove_darwin = "kvm 
virglrenderer glx gtk+" PACKAGECONFIG_remove_mingw32 = "kvm virglrenderer glx gtk+" PACKAGECONFIG[sdl] = "--enable-sdl,--disable-sdl,libsdl2" -PACKAGECONFIG[virtfs] = "--enable-virtfs --enable-attr,--disable-virtfs,libcap attr," +PACKAGECONFIG[virtfs] = "--enable-virtfs --enable-attr,--disable-virtfs,libcap-ng attr," PACKAGECONFIG[aio] = "--enable-linux-aio,--disable-linux-aio,libaio," PACKAGECONFIG[xfs] = "--enable-xfsctl,--disable-xfsctl,xfsprogs," PACKAGECONFIG[xen] = "--enable-xen,--disable-xen,xen-tools,xen-tools-libxenstore xen-tools-libxenctrl xen-tools-libxenguest" diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch new file mode 100644 index 000000000..19f26ae5b --- /dev/null +++ b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch @@ -0,0 +1,151 @@ +From 5c4fe018c025740fef4a0a4421e8162db0c3eefd Mon Sep 17 00:00:00 2001 +From: Eric Blake +Date: Mon, 8 Jun 2020 13:26:37 -0500 +Subject: [PATCH] nbd/server: Avoid long error message assertions + CVE-2020-10761 + +Ever since commit 36683283 (v2.8), the server code asserts that error +strings sent to the client are well-formed per the protocol by not +exceeding the maximum string length of 4096. At the time the server +first started sending error messages, the assertion could not be +triggered, because messages were completely under our control. +However, over the years, we have added latent scenarios where a client +could trigger the server to attempt an error message that would +include the client's information if it passed other checks first: + +- requesting NBD_OPT_INFO/GO on an export name that is not present + (commit 0cfae925 in v2.12 echoes the name) + +- requesting NBD_OPT_LIST/SET_META_CONTEXT on an export name that is + not present (commit e7b1948d in v2.12 echoes the name) + +At the time, those were still safe because we flagged names larger +than 256 bytes with a different message; but that changed in commit +93676c88 (v4.2) when we raised the name limit to 4096 to match the NBD +string limit. (That commit also failed to change the magic number +4096 in nbd_negotiate_send_rep_err to the just-introduced named +constant.) So with that commit, long client names appended to server +text can now trigger the assertion, and thus be used as a denial of +service attack against a server. As a mitigating factor, if the +server requires TLS, the client cannot trigger the problematic paths +unless it first supplies TLS credentials, and such trusted clients are +less likely to try to intentionally crash the server. + +We may later want to further sanitize the user-supplied strings we +place into our error messages, such as scrubbing out control +characters, but that is less important to the CVE fix, so it can be a +later patch to the new nbd_sanitize_name. + +Consideration was given to changing the assertion in +nbd_negotiate_send_rep_verr to instead merely log a server error and +truncate the message, to avoid leaving a latent path that could +trigger a future CVE DoS on any new error message. However, this +merely complicates the code for something that is already (correctly) +flagging coding errors, and now that we are aware of the long message +pitfall, we are less likely to introduce such errors in the future, +which would make such error handling dead code. 
+ +Reported-by: Xueqiang Wei +CC: qemu-stable@nongnu.org +Fixes: https://bugzilla.redhat.com/1843684 CVE-2020-10761 +Fixes: 93676c88d7 +Signed-off-by: Eric Blake +Message-Id: <20200610163741.3745251-2-eblake@redhat.com> +Reviewed-by: Vladimir Sementsov-Ogievskiy + +Upstream-Status: Backport [https://github.com/qemu/qemu/commit/5c4fe018c025740fef4a0a4421e8162db0c3eefd] +CVE: CVE-2020-10761 +Signed-off-by: Chee Yang Lee + +--- + nbd/server.c | 23 ++++++++++++++++++++--- + tests/qemu-iotests/143 | 4 ++++ + tests/qemu-iotests/143.out | 2 ++ + 3 files changed, 26 insertions(+), 3 deletions(-) + +diff --git a/nbd/server.c b/nbd/server.c +index 02b1ed08014..20754e9ebc3 100644 +--- a/nbd/server.c ++++ b/nbd/server.c +@@ -217,7 +217,7 @@ nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, + + msg = g_strdup_vprintf(fmt, va); + len = strlen(msg); +- assert(len < 4096); ++ assert(len < NBD_MAX_STRING_SIZE); + trace_nbd_negotiate_send_rep_err(msg); + ret = nbd_negotiate_send_rep_len(client, type, len, errp); + if (ret < 0) { +@@ -231,6 +231,19 @@ nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, + return 0; + } + ++/* ++ * Return a malloc'd copy of @name suitable for use in an error reply. ++ */ ++static char * ++nbd_sanitize_name(const char *name) ++{ ++ if (strnlen(name, 80) < 80) { ++ return g_strdup(name); ++ } ++ /* XXX Should we also try to sanitize any control characters? */ ++ return g_strdup_printf("%.80s...", name); ++} ++ + /* Send an error reply. + * Return -errno on error, 0 on success. */ + static int GCC_FMT_ATTR(4, 5) +@@ -595,9 +608,11 @@ static int nbd_negotiate_handle_info(NBDClient *client, Error **errp) + + exp = nbd_export_find(name); + if (!exp) { ++ g_autofree char *sane_name = nbd_sanitize_name(name); ++ + return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN, + errp, "export '%s' not present", +- name); ++ sane_name); + } + + /* Don't bother sending NBD_INFO_NAME unless client requested it */ +@@ -995,8 +1010,10 @@ static int nbd_negotiate_meta_queries(NBDClient *client, + + meta->exp = nbd_export_find(export_name); + if (meta->exp == NULL) { ++ g_autofree char *sane_name = nbd_sanitize_name(export_name); ++ + return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp, +- "export '%s' not present", export_name); ++ "export '%s' not present", sane_name); + } + + ret = nbd_opt_read(client, &nb_queries, sizeof(nb_queries), errp); +diff --git a/tests/qemu-iotests/143 b/tests/qemu-iotests/143 +index f649b361950..d2349903b1b 100755 +--- a/tests/qemu-iotests/143 ++++ b/tests/qemu-iotests/143 +@@ -58,6 +58,10 @@ _send_qemu_cmd $QEMU_HANDLE \ + $QEMU_IO_PROG -f raw -c quit \ + "nbd+unix:///no_such_export?socket=$SOCK_DIR/nbd" 2>&1 \ + | _filter_qemu_io | _filter_nbd ++# Likewise, with longest possible name permitted in NBD protocol ++$QEMU_IO_PROG -f raw -c quit \ ++ "nbd+unix:///$(printf %4096d 1 | tr ' ' a)?socket=$SOCK_DIR/nbd" 2>&1 \ ++ | _filter_qemu_io | _filter_nbd | sed 's/aaaa*aa/aa--aa/' + + _send_qemu_cmd $QEMU_HANDLE \ + "{ 'execute': 'quit' }" \ +diff --git a/tests/qemu-iotests/143.out b/tests/qemu-iotests/143.out +index 1f4001c6013..fc9c0a761fa 100644 +--- a/tests/qemu-iotests/143.out ++++ b/tests/qemu-iotests/143.out +@@ -5,6 +5,8 @@ QA output created by 143 + {"return": {}} + qemu-io: can't open device nbd+unix:///no_such_export?socket=SOCK_DIR/nbd: Requested export not available + server reported: export 'no_such_export' not present ++qemu-io: can't open device nbd+unix:///aa--aa1?socket=SOCK_DIR/nbd: Requested export not available ++server 
reported: export 'aa--aa...' not present + { 'execute': 'quit' } + {"return": {}} + {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} diff --git a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9840.patch b/poky/meta/recipes-devtools/rsync/files/CVE-2016-9840.patch deleted file mode 100644 index 758188779..000000000 --- a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9840.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 6a043145ca6e9c55184013841a67b2fef87e44c0 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Wed, 21 Sep 2016 23:35:50 -0700 -Subject: [PATCH] Remove offset pointer optimization in inftrees.c. - -inftrees.c was subtracting an offset from a pointer to an array, -in order to provide a pointer that allowed indexing starting at -the offset. This is not compliant with the C standard, for which -the behavior of a pointer decremented before its allocated memory -is undefined. Per the recommendation of a security audit of the -zlib code by Trail of Bits and TrustInSoft, in support of the -Mozilla Foundation, this tiny optimization was removed, in order -to avoid the possibility of undefined behavior. - -CVE: CVE-2016-9840 -Upstream-Status: Backport -Signed-off-by: Anuj Mittal ---- - inftrees.c | 18 ++++++++---------- - 1 file changed, 8 insertions(+), 10 deletions(-) - -diff --git a/zlib/inftrees.c b/zlib/inftrees.c -index 22fcd666..0d2670d5 100644 ---- a/zlib/inftrees.c -+++ b/zlib/inftrees.c -@@ -54,7 +54,7 @@ unsigned short FAR *work; - code FAR *next; /* next available space in table */ - const unsigned short FAR *base; /* base value table to use */ - const unsigned short FAR *extra; /* extra bits table to use */ -- int end; /* use base and extra for symbol > end */ -+ unsigned match; /* use base and extra for symbol >= match */ - unsigned short count[MAXBITS+1]; /* number of codes of each length */ - unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ - static const unsigned short lbase[31] = { /* Length codes 257..285 base */ -@@ -181,19 +181,17 @@ unsigned short FAR *work; - switch (type) { - case CODES: - base = extra = work; /* dummy value--not used */ -- end = 19; -+ match = 20; - break; - case LENS: - base = lbase; -- base -= 257; - extra = lext; -- extra -= 257; -- end = 256; -+ match = 257; - break; - default: /* DISTS */ - base = dbase; - extra = dext; -- end = -1; -+ match = 0; - } - - /* initialize state for loop */ -@@ -216,13 +214,13 @@ unsigned short FAR *work; - for (;;) { - /* create table entry */ - here.bits = (unsigned char)(len - drop); -- if ((int)(work[sym]) < end) { -+ if (work[sym] + 1 < match) { - here.op = (unsigned char)0; - here.val = work[sym]; - } -- else if ((int)(work[sym]) > end) { -- here.op = (unsigned char)(extra[work[sym]]); -- here.val = base[work[sym]]; -+ else if (work[sym] >= match) { -+ here.op = (unsigned char)(extra[work[sym] - match]); -+ here.val = base[work[sym] - match]; - } - else { - here.op = (unsigned char)(32 + 64); /* end of block */ diff --git a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9841.patch b/poky/meta/recipes-devtools/rsync/files/CVE-2016-9841.patch deleted file mode 100644 index 3942176de..000000000 --- a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9841.patch +++ /dev/null @@ -1,228 +0,0 @@ -From 9aaec95e82117c1cb0f9624264c3618fc380cecb Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Wed, 21 Sep 2016 22:25:21 -0700 -Subject: [PATCH] Use post-increment only in inffast.c. 
- -An old inffast.c optimization turns out to not be optimal anymore -with modern compilers, and furthermore was not compliant with the -C standard, for which decrementing a pointer before its allocated -memory is undefined. Per the recommendation of a security audit of -the zlib code by Trail of Bits and TrustInSoft, in support of the -Mozilla Foundation, this "optimization" was removed, in order to -avoid the possibility of undefined behavior. - -CVE: CVE-2016-9841 -Upstream-Status: Backport -Signed-off-by: Anuj Mittal ---- - zlib/inffast.c | 81 +++++++++++++++++++++---------------------------------- - 1 file changed, 31 insertions(+), 50 deletions(-) - -diff --git a/zlib/inffast.c b/zlib/inffast.c -index bda59ceb..f0d163db 100644 ---- a/zlib/inffast.c -+++ b/zlib/inffast.c -@@ -10,25 +10,6 @@ - - #ifndef ASMINF - --/* Allow machine dependent optimization for post-increment or pre-increment. -- Based on testing to date, -- Pre-increment preferred for: -- - PowerPC G3 (Adler) -- - MIPS R5000 (Randers-Pehrson) -- Post-increment preferred for: -- - none -- No measurable difference: -- - Pentium III (Anderson) -- - M68060 (Nikl) -- */ --#ifdef POSTINC --# define OFF 0 --# define PUP(a) *(a)++ --#else --# define OFF 1 --# define PUP(a) *++(a) --#endif -- - /* - Decode literal, length, and distance codes and write out the resulting - literal and match bytes until either not enough input or output is -@@ -96,9 +77,9 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - - /* copy state to local variables */ - state = (struct inflate_state FAR *)strm->state; -- in = strm->next_in - OFF; -+ in = strm->next_in; - last = in + (strm->avail_in - 5); -- out = strm->next_out - OFF; -+ out = strm->next_out; - beg = out - (start - strm->avail_out); - end = out + (strm->avail_out - 257); - #ifdef INFLATE_STRICT -@@ -119,9 +100,9 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - input data or output space */ - do { - if (bits < 15) { -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; - } - here = lcode[hold & lmask]; -@@ -134,14 +115,14 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
- "inflate: literal '%c'\n" : - "inflate: literal 0x%02x\n", here.val)); -- PUP(out) = (unsigned char)(here.val); -+ *out++ = (unsigned char)(here.val); - } - else if (op & 16) { /* length base */ - len = (unsigned)(here.val); - op &= 15; /* number of extra bits */ - if (op) { - if (bits < op) { -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; - } - len += (unsigned)hold & ((1U << op) - 1); -@@ -150,9 +131,9 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - } - Tracevv((stderr, "inflate: length %u\n", len)); - if (bits < 15) { -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; - } - here = dcode[hold & dmask]; -@@ -165,10 +146,10 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - dist = (unsigned)(here.val); - op &= 15; /* number of extra bits */ - if (bits < op) { -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; - if (bits < op) { -- hold += (unsigned long)(PUP(in)) << bits; -+ hold += (unsigned long)(*in++) << bits; - bits += 8; - } - } -@@ -196,30 +177,30 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR - if (len <= op - whave) { - do { -- PUP(out) = 0; -+ *out++ = 0; - } while (--len); - continue; - } - len -= op - whave; - do { -- PUP(out) = 0; -+ *out++ = 0; - } while (--op > whave); - if (op == 0) { - from = out - dist; - do { -- PUP(out) = PUP(from); -+ *out++ = *from++; - } while (--len); - continue; - } - #endif - } -- from = window - OFF; -+ from = window; - if (wnext == 0) { /* very common case */ - from += wsize - op; - if (op < len) { /* some from window */ - len -= op; - do { -- PUP(out) = PUP(from); -+ *out++ = *from++; - } while (--op); - from = out - dist; /* rest from output */ - } -@@ -230,14 +211,14 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - if (op < len) { /* some from end of window */ - len -= op; - do { -- PUP(out) = PUP(from); -+ *out++ = *from++; - } while (--op); -- from = window - OFF; -+ from = window; - if (wnext < len) { /* some from start of window */ - op = wnext; - len -= op; - do { -- PUP(out) = PUP(from); -+ *out++ = *from++; - } while (--op); - from = out - dist; /* rest from output */ - } -@@ -248,35 +229,35 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - if (op < len) { /* some from window */ - len -= op; - do { -- PUP(out) = PUP(from); -+ *out++ = *from++; - } while (--op); - from = out - dist; /* rest from output */ - } - } - while (len > 2) { -- PUP(out) = PUP(from); -- PUP(out) = PUP(from); -- PUP(out) = PUP(from); -+ *out++ = *from++; -+ *out++ = *from++; -+ *out++ = *from++; - len -= 3; - } - if (len) { -- PUP(out) = PUP(from); -+ *out++ = *from++; - if (len > 1) -- PUP(out) = PUP(from); -+ *out++ = *from++; - } - } - else { - from = out - dist; /* copy direct from output */ - do { /* minimum length is three */ -- PUP(out) = PUP(from); -- PUP(out) = PUP(from); -- PUP(out) = PUP(from); -+ *out++ = *from++; -+ *out++ = *from++; -+ *out++ = *from++; - len -= 3; - } while (len > 2); - if (len) { -- PUP(out) = PUP(from); -+ *out++ = *from++; - if (len > 1) -- PUP(out) = PUP(from); -+ *out++ = *from++; - } - } - } -@@ -313,8 +294,8 @@ unsigned start; /* inflate()'s starting value for strm->avail_out */ - hold &= (1U << bits) 
- 1; - - /* update state and return */ -- strm->next_in = in + OFF; -- strm->next_out = out + OFF; -+ strm->next_in = in; -+ strm->next_out = out; - strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); - strm->avail_out = (unsigned)(out < end ? - 257 + (end - out) : 257 - (out - end)); diff --git a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9842.patch b/poky/meta/recipes-devtools/rsync/files/CVE-2016-9842.patch deleted file mode 100644 index 810d8a3fd..000000000 --- a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9842.patch +++ /dev/null @@ -1,33 +0,0 @@ -From e54e1299404101a5a9d0cf5e45512b543967f958 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Sat, 5 Sep 2015 17:45:55 -0700 -Subject: [PATCH] Avoid shifts of negative values inflateMark(). - -The C standard says that bit shifts of negative integers is -undefined. This casts to unsigned values to assure a known -result. - -CVE: CVE-2016-9842 -Upstream-Status: Backport -Signed-off-by: Anuj Mittal ---- - inflate.c | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/zlib/inflate.c b/zlib/inflate.c -index 2889e3a0..a7184167 100644 ---- a/zlib/inflate.c -+++ b/zlib/inflate.c -@@ -1506,9 +1506,10 @@ z_streamp strm; - { - struct inflate_state FAR *state; - -- if (strm == Z_NULL || strm->state == Z_NULL) return -1L << 16; -+ if (strm == Z_NULL || strm->state == Z_NULL) -+ return (long)(((unsigned long)0 - 1) << 16); - state = (struct inflate_state FAR *)strm->state; -- return ((long)(state->back) << 16) + -+ return (long)(((unsigned long)((long)state->back)) << 16) + - (state->mode == COPY ? state->length : - (state->mode == MATCH ? state->was - state->length : 0)); - } diff --git a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9843.patch b/poky/meta/recipes-devtools/rsync/files/CVE-2016-9843.patch deleted file mode 100644 index ea2e42fe7..000000000 --- a/poky/meta/recipes-devtools/rsync/files/CVE-2016-9843.patch +++ /dev/null @@ -1,53 +0,0 @@ -From d1d577490c15a0c6862473d7576352a9f18ef811 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Wed, 28 Sep 2016 20:20:25 -0700 -Subject: [PATCH] Avoid pre-decrement of pointer in big-endian CRC calculation. - -There was a small optimization for PowerPCs to pre-increment a -pointer when accessing a word, instead of post-incrementing. This -required prefacing the loop with a decrement of the pointer, -possibly pointing before the object passed. This is not compliant -with the C standard, for which decrementing a pointer before its -allocated memory is undefined. When tested on a modern PowerPC -with a modern compiler, the optimization no longer has any effect. -Due to all that, and per the recommendation of a security audit of -the zlib code by Trail of Bits and TrustInSoft, in support of the -Mozilla Foundation, this "optimization" was removed, in order to -avoid the possibility of undefined behavior. 
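For reference, the undefined behaviour mentioned above is triggered as soon as the out-of-bounds pointer is formed, not only when it is dereferenced; a small illustration unrelated to the zlib code itself:

    #include <stddef.h>
    #include <stdint.h>

    uint32_t sum_words_ub(const uint32_t *buf, size_t nwords)
    {
            uint32_t sum = 0;
            /* Undefined: forming a pointer one element before the start of
             * the array is not allowed (C11 6.5.6p8), even though it is only
             * read back after the pre-increment below. */
            const uint32_t *p = buf - 1;
            while (nwords--)
                    sum += *++p;            /* old "pre-increment" style */
            return sum;
    }

    uint32_t sum_words_ok(const uint32_t *buf, size_t nwords)
    {
            uint32_t sum = 0;
            const uint32_t *p = buf;        /* stays within the object */
            while (nwords--)
                    sum += *p++;            /* plain post-increment */
            return sum;
    }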
- -CVE: CVE-2016-9843 -Upstream-Status: Backport -Signed-off-by: Anuj Mittal ---- - crc32.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - -diff --git a/zlib/crc32.c b/zlib/crc32.c -index 979a7190..05733f4e 100644 ---- a/zlib/crc32.c -+++ b/zlib/crc32.c -@@ -278,7 +278,7 @@ local unsigned long crc32_little(crc, buf, len) - } - - /* ========================================================================= */ --#define DOBIG4 c ^= *++buf4; \ -+#define DOBIG4 c ^= *buf4++; \ - c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ - crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] - #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 -@@ -300,7 +300,6 @@ local unsigned long crc32_big(crc, buf, len) - } - - buf4 = (const z_crc_t FAR *)(const void FAR *)buf; -- buf4--; - while (len >= 32) { - DOBIG32; - len -= 32; -@@ -309,7 +308,6 @@ local unsigned long crc32_big(crc, buf, len) - DOBIG4; - len -= 4; - } -- buf4++; - buf = (const unsigned char FAR *)buf4; - - if (len) do { diff --git a/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch b/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch index 3d27fe72c..038a67209 100644 --- a/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch +++ b/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch @@ -1,20 +1,26 @@ +From 5ae38baadd40a996da3d19a147f37e7f1f3355bf Mon Sep 17 00:00:00 2001 +From: Ross Burton +Date: Tue, 12 Apr 2016 15:51:54 +0100 +Subject: [PATCH] rsync: remove upstream's rebuild logic + Remove the Makefile rules to reinvoke autoconf, they're not out-of-tree safe and generally overcomplicated, and we ensure that autoreconf is invoked if required. Upstream-Status: Inappropriate Signed-off-by: Ross Burton +--- + Makefile.in | 50 -------------------------------------------------- + 1 file changed, 50 deletions(-) + diff --git a/Makefile.in b/Makefile.in -index 151247d..8f3fdb6 100644 +index 31ddc43..41c9a93 100644 --- a/Makefile.in +++ b/Makefile.in -@@ -141,58 +141,6 @@ gen: conf proto.h man +@@ -167,56 +167,6 @@ gen: conf proto.h man gensend: gen - rsync -aivzc $(GENFILES) $${SAMBA_HOST-samba.org}:/home/ftp/pub/rsync/generated-files/ + rsync -aic $(GENFILES) $${SAMBA_HOST-samba.org}:/home/ftp/pub/rsync/generated-files/ --conf: -- cd $(srcdir) && $(MAKE) -f prepare-source.mak conf -- -aclocal.m4: $(srcdir)/m4/*.m4 - aclocal -I $(srcdir)/m4 - @@ -45,6 +51,7 @@ index 151247d..8f3fdb6 100644 - fi \ - fi - +-.PHONY: reconfigure -reconfigure: configure.sh - ./config.status --recheck - ./config.status @@ -64,6 +71,6 @@ index 151247d..8f3fdb6 100644 - fi \ - fi - - rsync-ssl: $(srcdir)/rsync-ssl.in Makefile - sed 's;\@bindir\@;$(bindir);g' <$(srcdir)/rsync-ssl.in >rsync-ssl - @chmod +x rsync-ssl + stunnel-rsyncd.conf: $(srcdir)/stunnel-rsyncd.conf.in Makefile + sed 's;\@bindir\@;$(bindir);g' <$(srcdir)/stunnel-rsyncd.conf.in >stunnel-rsyncd.conf + diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.1.3.bb b/poky/meta/recipes-devtools/rsync/rsync_3.1.3.bb deleted file mode 100644 index 152ff02a2..000000000 --- a/poky/meta/recipes-devtools/rsync/rsync_3.1.3.bb +++ /dev/null @@ -1,57 +0,0 @@ -SUMMARY = "File synchronization tool" -HOMEPAGE = "http://rsync.samba.org/" -BUGTRACKER = "http://rsync.samba.org/bugzilla.html" -SECTION = "console/network" -# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -DEPENDS = "popt" - -SRC_URI = 
"https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ - file://rsyncd.conf \ - file://makefile-no-rebuild.patch \ - file://CVE-2016-9840.patch \ - file://CVE-2016-9841.patch \ - file://CVE-2016-9842.patch \ - file://CVE-2016-9843.patch \ -" - -SRC_URI[md5sum] = "1581a588fde9d89f6bc6201e8129afaf" -SRC_URI[sha256sum] = "55cc554efec5fdaad70de921cd5a5eeb6c29a95524c715f3bbf849235b0800c0" - -# -16548 required for v3.1.3pre1. Already in v3.1.3. -CVE_CHECK_WHITELIST += " CVE-2017-16548 " - -inherit autotools - -PACKAGECONFIG ??= "acl attr \ - ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ -" - -PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," -PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," - -# By default, if crosscompiling, rsync disables a number of -# capabilities, hardlinking symlinks and special files (i.e. devices) -CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" - -EXTRA_OEMAKE = 'STRIP=""' - -# rsync 3.0 uses configure.sh instead of configure, and -# makefile checks the existence of configure.sh -do_configure_prepend () { - rm -f ${S}/configure ${S}/configure.sh -} - -do_configure_append () { - cp -f ${S}/configure ${S}/configure.sh -} - -do_install_append() { - install -d ${D}${sysconfdir} - install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} -} - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb b/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb new file mode 100644 index 000000000..ea6b1ce38 --- /dev/null +++ b/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb @@ -0,0 +1,58 @@ +SUMMARY = "File synchronization tool" +HOMEPAGE = "http://rsync.samba.org/" +BUGTRACKER = "http://rsync.samba.org/bugzilla.html" +SECTION = "console/network" +# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) +# Includes opennsh and xxhash dynamic link exception +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26" + +DEPENDS = "popt" + +SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ + file://rsyncd.conf \ + file://makefile-no-rebuild.patch \ + " + +SRC_URI[sha256sum] = "95f2dd62979b500a99b34c1a6453a0787ada0330e4bec7fcffad37b9062d58d3" + +# -16548 required for v3.1.3pre1. Already in v3.1.3. +CVE_CHECK_WHITELIST += " CVE-2017-16548 " + +inherit autotools-brokensep + +PACKAGECONFIG ??= "acl attr \ + ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ +" + +PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," +PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +PACKAGECONFIG[lz4] = "--enable-lz4,--disable-lz4,lz4" +PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" +PACKAGECONFIG[xxhash] = "--enable-xxhash,--disable-xxhash,xxhash" +PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd" + +# By default, if crosscompiling, rsync disables a number of +# capabilities, hardlinking symlinks and special files (i.e. 
devices) +CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" + +EXTRA_OEMAKE = 'STRIP=""' +EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm" + +# rsync 3.0 uses configure.sh instead of configure, and +# makefile checks the existence of configure.sh +do_configure_prepend () { + rm -f ${S}/configure ${S}/configure.sh +} + +do_configure_append () { + cp -f ${S}/configure ${S}/configure.sh +} + +do_install_append() { + install -d ${D}${sysconfdir} + install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} +} + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-extended/bzip2/bzip2/Makefile.am b/poky/meta/recipes-extended/bzip2/bzip2/Makefile.am index dcf64584d..d4498947e 100644 --- a/poky/meta/recipes-extended/bzip2/bzip2/Makefile.am +++ b/poky/meta/recipes-extended/bzip2/bzip2/Makefile.am @@ -46,6 +46,7 @@ runtest: else echo "FAIL: sample2 decompress"; fi @if cmp sample3.tst sample3.ref; then echo "PASS: sample3 decompress";\ else echo "FAIL: sample3 decompress"; fi + ./bzip2-tests/run-tests.sh --tests-dir="$(PWD)/bzip2-tests" install-ptest: sed -n '/^runtest:/,/^install-ptest:/{/^install-ptest:/!p}' \ diff --git a/poky/meta/recipes-extended/bzip2/bzip2_1.0.8.bb b/poky/meta/recipes-extended/bzip2/bzip2_1.0.8.bb index d58f553a4..70eb67f1f 100644 --- a/poky/meta/recipes-extended/bzip2/bzip2_1.0.8.bb +++ b/poky/meta/recipes-extended/bzip2/bzip2_1.0.8.bb @@ -4,10 +4,25 @@ Huffman coding. Compression is generally considerably better than that achieved LZ77/LZ78-based compressors, and approaches the performance of the PPM family of statistical compressors." HOMEPAGE = "https://sourceware.org/bzip2/" SECTION = "console/utils" -LICENSE = "bzip2-1.0.6" -LIC_FILES_CHKSUM = "file://LICENSE;beginline=4;endline=37;md5=600af43c50f1fcb82e32f19b32df4664" +LICENSE = "bzip2-1.0.6 & GPLv3+ & Apache-2.0 & MS-PL & BSD-3-Clause & Zlib" +LICENSE_${PN} = "bzip2-1.0.6" +LICENSE_${PN}-dev = "bzip2-1.0.6" +LICENSE_${PN}-dbg = "bzip2-1.0.6" +LICENSE_${PN}-doc = "bzip2-1.0.6" +LICENSE_${PN}-src = "bzip2-1.0.6" +LICENSE_libbz2 = "bzip2-1.0.6" +LICENSE_${PN}-ptest = "bzip2-1.0.6 & GPLv3+ & Apache-2.0 & MS-PL & BSD-3-Clause & Zlib" + +LIC_FILES_CHKSUM = "file://LICENSE;beginline=4;endline=37;md5=600af43c50f1fcb82e32f19b32df4664 \ + file://${WORKDIR}/git/commons-compress/LICENSE.txt;md5=86d3f3a95c324c9479bd8986968f4327 \ + file://${WORKDIR}/git/dotnetzip/License.txt;md5=9cb56871eed4e748c3bc7e8ff352a54f \ + file://${WORKDIR}/git/dotnetzip/License.zlib.txt;md5=cc421ccd22eeb2e5db6b79e6de0a029f \ + file://${WORKDIR}/git/go/LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707 \ + file://${WORKDIR}/git/lbzip2/COPYING;md5=d32239bcb673463ab874e80d47fae504 \ +" SRC_URI = "https://sourceware.org/pub/${BPN}/${BPN}-${PV}.tar.gz \ + git://sourceware.org/git/bzip2-tests.git;name=bzip2-tests \ file://configure.ac;subdir=${BP} \ file://Makefile.am;subdir=${BP} \ file://run-ptest \ @@ -15,6 +30,8 @@ SRC_URI = "https://sourceware.org/pub/${BPN}/${BPN}-${PV}.tar.gz \ SRC_URI[md5sum] = "67e051268d0c475ea773822f7500d0e5" SRC_URI[sha256sum] = "ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269" +SRCREV_bzip2-tests = "f9061c030a25de5b6829e1abf373057309c734c0" + UPSTREAM_CHECK_URI = "https://www.sourceware.org/pub/bzip2/" PACKAGES =+ "libbz2" @@ -34,12 +51,20 @@ do_configure_prepend () { } do_install_ptest () { + install -d ${D}${PTEST_PATH}/bzip2-tests + cp -r ${WORKDIR}/git/commons-compress ${D}${PTEST_PATH}/bzip2-tests/commons-compress + cp -r ${WORKDIR}/git/dotnetzip 
${D}${PTEST_PATH}/bzip2-tests/dotnetzip + cp -r ${WORKDIR}/git/go ${D}${PTEST_PATH}/bzip2-tests/go + cp -r ${WORKDIR}/git/lbzip2 ${D}${PTEST_PATH}/bzip2-tests/lbzip2 + cp -r ${WORKDIR}/git/pyflate ${D}${PTEST_PATH}/bzip2-tests/pyflate + cp ${WORKDIR}/git/README ${D}${PTEST_PATH}/bzip2-tests/ + cp ${WORKDIR}/git/run-tests.sh ${D}${PTEST_PATH}/bzip2-tests/ sed -i -e "s|^Makefile:|_Makefile:|" ${D}${PTEST_PATH}/Makefile } FILES_libbz2 = "${libdir}/lib*${SOLIBS}" -RDEPENDS_${PN}-ptest += "make" +RDEPENDS_${PN}-ptest += "make bash" PROVIDES_append_class-native = " bzip2-replacement-native" BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/cpio/cpio_2.13.bb b/poky/meta/recipes-extended/cpio/cpio_2.13.bb index 9e35a80f8..94d86100c 100644 --- a/poky/meta/recipes-extended/cpio/cpio_2.13.bb +++ b/poky/meta/recipes-extended/cpio/cpio_2.13.bb @@ -23,7 +23,9 @@ do_install () { if [ "${base_bindir}" != "${bindir}" ]; then install -d ${D}${base_bindir}/ mv "${D}${bindir}/cpio" "${D}${base_bindir}/cpio" - rmdir ${D}${bindir}/ + if [ "${sbindir}" != "${bindir}" ]; then + rmdir ${D}${bindir}/ + fi fi # Avoid conflicts with the version from tar @@ -46,4 +48,4 @@ ALTERNATIVE_LINK_NAME[cpio] = "${base_bindir}/cpio" ALTERNATIVE_PRIORITY[rmt] = "50" ALTERNATIVE_LINK_NAME[rmt] = "${sbindir}/rmt" -BBCLASSEXTEND = "native" +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng/no_daddr_t.patch b/poky/meta/recipes-extended/stress-ng/stress-ng/no_daddr_t.patch index 877f4f062..dba4494b9 100644 --- a/poky/meta/recipes-extended/stress-ng/stress-ng/no_daddr_t.patch +++ b/poky/meta/recipes-extended/stress-ng/stress-ng/no_daddr_t.patch @@ -1,4 +1,7 @@ -Define daddr_t if __DADDR_T_TYPE is not defined +From 55e11765af2bdc8adfac87dab1fb2682f7e6c236 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 9 Jun 2020 22:10:28 -0700 +Subject: [PATCH] Define daddr_t if __DADDR_T_TYPE is not defined glibc defined daddr_t but musl does not, ideally it should not be used and simple int type is enough. However, its better to leave glibc behavior @@ -7,9 +10,16 @@ as it is and only define it to int if daddr_t is not provided by libc Upstream-Status: Pending Signed-off-by: Khem Raj + +--- + stress-ng.h | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/stress-ng.h b/stress-ng.h +index 1a66293..802dc25 100644 --- a/stress-ng.h +++ b/stress-ng.h -@@ -3750,6 +3750,10 @@ struct shim_statx { +@@ -3763,6 +3763,10 @@ struct shim_statx { uint64_t __spare2[14]; }; @@ -19,4 +29,4 @@ Signed-off-by: Khem Raj + /* old ustat struct */ struct shim_ustat { - daddr_t f_tfree; + #if defined(HAVE_DADDR_T) diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.12.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.12.bb deleted file mode 100644 index c00086c09..000000000 --- a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.12.bb +++ /dev/null @@ -1,26 +0,0 @@ -SUMMARY = "System load testing utility" -DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ -imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." 
-HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" - -SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ - file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ - file://no_daddr_t.patch \ - " -SRC_URI[sha256sum] = "0ccf437ca1876a3e8a55986c6481697045203a17f5994cb2f5096cd461d18031" - -DEPENDS = "coreutils-native" - -PROVIDES = "stress" -RPROVIDES_${PN} = "stress" -RREPLACES_${PN} = "stress" -RCONFLICTS_${PN} = "stress" - -inherit bash-completion - -do_install() { - oe_runmake DESTDIR=${D} install -} - diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.14.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.14.bb new file mode 100644 index 000000000..f1d91da2b --- /dev/null +++ b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.14.bb @@ -0,0 +1,26 @@ +SUMMARY = "System load testing utility" +DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ +imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." +HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" + +SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ + file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ + file://no_daddr_t.patch \ + " +SRC_URI[sha256sum] = "b21436fdbd9dc482a3fd95ae27cccf0097d0f226361ea3785215f7a4ad50136b" + +DEPENDS = "coreutils-native" + +PROVIDES = "stress" +RPROVIDES_${PN} = "stress" +RREPLACES_${PN} = "stress" +RCONFLICTS_${PN} = "stress" + +inherit bash-completion + +do_install() { + oe_runmake DESTDIR=${D} install +} + diff --git a/poky/meta/recipes-extended/sudo/sudo/0001-Include-sys-types.h-for-id_t-definition.patch b/poky/meta/recipes-extended/sudo/sudo/0001-Include-sys-types.h-for-id_t-definition.patch deleted file mode 100644 index eb36cd49b..000000000 --- a/poky/meta/recipes-extended/sudo/sudo/0001-Include-sys-types.h-for-id_t-definition.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 386e2c2fa2ab2e02ef71c268a57205139be329ab Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 31 Aug 2015 07:07:49 +0000 -Subject: [PATCH] Include sys/types.h for id_t definition - -/sudo_util.h:219:14: error: unknown type name 'id_t' - __dso_public id_t sudo_strtoid_v1(const char *str, const char *sep, - char **endp, const char **errstr); - ^ - make[1]: *** [preserve_fds.o] Error 1 - -Signed-off-by: Khem Raj ---- -Upstream-Status: Pending - - include/sudo_util.h | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/include/sudo_util.h b/include/sudo_util.h -index 89c9f89..ac0855a 100644 ---- a/include/sudo_util.h -+++ b/include/sudo_util.h -@@ -17,6 +17,8 @@ - #ifndef SUDO_UTIL_H - #define SUDO_UTIL_H - -+#include -+ - #ifdef HAVE_STDBOOL_H - # include - #else --- -2.5.1 - diff --git a/poky/meta/recipes-extended/sudo/sudo_1.9.0.bb b/poky/meta/recipes-extended/sudo/sudo_1.9.0.bb deleted file mode 100644 index 76266537b..000000000 --- a/poky/meta/recipes-extended/sudo/sudo_1.9.0.bb +++ /dev/null @@ -1,48 +0,0 @@ -require sudo.inc - -SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \ - ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \ - file://0001-Include-sys-types.h-for-id_t-definition.patch \ - " - -PAM_SRC_URI = "file://sudo.pam" - -SRC_URI[md5sum] = "060b91a6b171cb7ce587222664549b2c" -SRC_URI[sha256sum] = 
"ab231439c5dfdf4ecbef74f10d5f7e9686c2255c2f3887085b5c5e13281bf95c" - -DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}" -RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}" - -CACHED_CONFIGUREVARS = " \ - ac_cv_type_rsize_t=no \ - ac_cv_path_MVPROG=${base_bindir}/mv \ - ac_cv_path_BSHELLPROG=${base_bindir}/sh \ - ac_cv_path_SENDMAILPROG=${sbindir}/sendmail \ - ac_cv_path_VIPROG=${base_bindir}/vi \ - " - -EXTRA_OECONF += " \ - ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '--with-pam', '--without-pam', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '--enable-tmpfiles.d=${nonarch_libdir}/tmpfiles.d', '--disable-tmpfiles.d', d)} \ - --with-vardir=/var/lib/sudo \ - " - -do_install_append () { - if [ "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" ]; then - install -D -m 644 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo - if ${@bb.utils.contains('PACKAGECONFIG', 'pam-wheel', 'true', 'false', d)} ; then - echo 'auth required pam_wheel.so use_uid' >>${D}${sysconfdir}/pam.d/sudo - sed -i 's/# \(%wheel ALL=(ALL) ALL\)/\1/' ${D}${sysconfdir}/sudoers - fi - fi - - chmod 4111 ${D}${bindir}/sudo - chmod 0440 ${D}${sysconfdir}/sudoers - - # Explicitly remove the /run directory to avoid QA error - rmdir -p --ignore-fail-on-non-empty ${D}/run/sudo -} - -FILES_${PN} += "${nonarch_libdir}/tmpfiles.d" -FILES_${PN}-dev += "${libexecdir}/${BPN}/lib*${SOLIBSDEV} ${libexecdir}/${BPN}/*.la \ - ${libexecdir}/lib*${SOLIBSDEV} ${libexecdir}/*.la" diff --git a/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb b/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb new file mode 100644 index 000000000..aac505af6 --- /dev/null +++ b/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb @@ -0,0 +1,46 @@ +require sudo.inc + +SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \ + ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \ + " + +PAM_SRC_URI = "file://sudo.pam" + +SRC_URI[sha256sum] = "294116cefe10a02773917fc7440d8384b925955bc96a6e0eaa1977c83b34adff" + +DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}" +RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}" + +CACHED_CONFIGUREVARS = " \ + ac_cv_type_rsize_t=no \ + ac_cv_path_MVPROG=${base_bindir}/mv \ + ac_cv_path_BSHELLPROG=${base_bindir}/sh \ + ac_cv_path_SENDMAILPROG=${sbindir}/sendmail \ + ac_cv_path_VIPROG=${base_bindir}/vi \ + " + +EXTRA_OECONF += " \ + ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '--with-pam', '--without-pam', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '--enable-tmpfiles.d=${nonarch_libdir}/tmpfiles.d', '--disable-tmpfiles.d', d)} \ + --with-vardir=/var/lib/sudo \ + " + +do_install_append () { + if [ "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" ]; then + install -D -m 644 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo + if ${@bb.utils.contains('PACKAGECONFIG', 'pam-wheel', 'true', 'false', d)} ; then + echo 'auth required pam_wheel.so use_uid' >>${D}${sysconfdir}/pam.d/sudo + sed -i 's/# \(%wheel ALL=(ALL) ALL\)/\1/' ${D}${sysconfdir}/sudoers + fi + fi + + chmod 4111 ${D}${bindir}/sudo + chmod 0440 ${D}${sysconfdir}/sudoers + + # Explicitly remove the /sudo directory to avoid QA error + rmdir -p --ignore-fail-on-non-empty ${D}/sudo +} + +FILES_${PN} += "${nonarch_libdir}/tmpfiles.d" +FILES_${PN}-dev += "${libexecdir}/${BPN}/lib*${SOLIBSDEV} ${libexecdir}/${BPN}/*.la \ + 
${libexecdir}/lib*${SOLIBSDEV} ${libexecdir}/*.la" diff --git a/poky/meta/recipes-gnome/gnome/adwaita-icon-theme_3.36.1.bb b/poky/meta/recipes-gnome/gnome/adwaita-icon-theme_3.36.1.bb index 473e1fc0b..2d78bea2c 100644 --- a/poky/meta/recipes-gnome/gnome/adwaita-icon-theme_3.36.1.bb +++ b/poky/meta/recipes-gnome/gnome/adwaita-icon-theme_3.36.1.bb @@ -4,7 +4,9 @@ BUGTRACKER = "https://gitlab.gnome.org/GNOME/adwaita-icon-theme/issues" SECTION = "x11/gnome" LICENSE = "LGPL-3.0 | CC-BY-SA-3.0" -LIC_FILES_CHKSUM = "file://COPYING;md5=c84cac88e46fc07647ea07e6c24eeb7c" +LIC_FILES_CHKSUM = "file://COPYING;md5=c84cac88e46fc07647ea07e6c24eeb7c \ + file://COPYING_CCBYSA3;md5=96143d33de3a79321b1006c4e8ed07e7 \ + file://COPYING_LGPL;md5=e6a600fd5e1d9cbde2d983680233ad02" inherit allarch autotools pkgconfig gettext gtk-icon-cache upstream-version-is-even diff --git a/poky/meta/recipes-gnome/gtk+/gtk+3.inc b/poky/meta/recipes-gnome/gtk+/gtk+3.inc index e1ecf504a..8d5edb7ee 100644 --- a/poky/meta/recipes-gnome/gtk+/gtk+3.inc +++ b/poky/meta/recipes-gnome/gtk+/gtk+3.inc @@ -1,3 +1,4 @@ + SUMMARY = "Multi-platform toolkit for creating GUIs" DESCRIPTION = "GTK+ is a multi-platform toolkit for creating graphical user interfaces. Offering a complete \ set of widgets, GTK+ is suitable for projects ranging from small one-off projects to complete application suites." @@ -31,6 +32,13 @@ do_configure_prepend() { ln -s ${TARGET_PREFIX}libtool libtool #delete a file that will get confused with generated one in ${B} rm -f ${S}/gtk/gtktypefuncs.c + + # These files are generated by wayland-scanner but will race over modification + # time between the copies in the sysroot from wayland-protocols and the copy + # in the source tree. Solve the race by deleting so they need to be regenerated. 
+ # 3.24.22 will not be shipping these files so this can be deleted then: + # https://gitlab.gnome.org/GNOME/gtk/-/merge_requests/2183 + rm -f ${S}/modules/input/text-input-unstable-v3*.[ch] } EXTRA_OECONF += " \ diff --git a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.20.bb b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.20.bb deleted file mode 100644 index e5744a4b0..000000000 --- a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.20.bb +++ /dev/null @@ -1,19 +0,0 @@ -require gtk+3.inc - -MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" - -SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar.xz \ - file://0001-Hardcoded-libtool.patch \ - file://0002-Do-not-try-to-initialize-GL-without-libGL.patch \ - file://0003-Add-disable-opengl-configure-option.patch \ - file://link_fribidi.patch \ - " -SRC_URI[md5sum] = "b302acc0a4b42e2980ef18628f9ce951" -SRC_URI[sha256sum] = "2dac69f716e8d04ba7a95091589e2baaec95dcace932cb15839163db479b1df3" - -S = "${WORKDIR}/gtk+-${PV}" - -LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ - file://gtk/gtk.h;endline=25;md5=1d8dc0fccdbfa26287a271dce88af737 \ - file://gdk/gdk.h;endline=25;md5=c920ce39dc88c6f06d3e7c50e08086f2 \ - file://tests/testgtk.c;endline=25;md5=cb732daee1d82af7a2bf953cf3cf26f1" diff --git a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb new file mode 100644 index 000000000..70c0e66db --- /dev/null +++ b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb @@ -0,0 +1,19 @@ +require gtk+3.inc + +MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" + +SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar.xz \ + file://0001-Hardcoded-libtool.patch \ + file://0002-Do-not-try-to-initialize-GL-without-libGL.patch \ + file://0003-Add-disable-opengl-configure-option.patch \ + file://link_fribidi.patch \ + " +SRC_URI[md5sum] = "95afed6c860d27de827db66434d681da" +SRC_URI[sha256sum] = "aeea6ae7cd35e83dfc7699be716519faefca346c62e784dd1a37d9df94c08f52" + +S = "${WORKDIR}/gtk+-${PV}" + +LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ + file://gtk/gtk.h;endline=25;md5=1d8dc0fccdbfa26287a271dce88af737 \ + file://gdk/gdk.h;endline=25;md5=c920ce39dc88c6f06d3e7c50e08086f2 \ + file://tests/testgtk.c;endline=25;md5=cb732daee1d82af7a2bf953cf3cf26f1" diff --git a/poky/meta/recipes-graphics/cogl/cogl-1.0/0001-configure.ac-don-t-require-eglmesaext.h.patch b/poky/meta/recipes-graphics/cogl/cogl-1.0/0001-configure.ac-don-t-require-eglmesaext.h.patch new file mode 100644 index 000000000..328984a63 --- /dev/null +++ b/poky/meta/recipes-graphics/cogl/cogl-1.0/0001-configure.ac-don-t-require-eglmesaext.h.patch @@ -0,0 +1,92 @@ +From bb9765a926588ebfe1eb324fbbe8fc22d419eebe Mon Sep 17 00:00:00 2001 +From: Max Krummenacher +Date: Thu, 25 Jun 2020 11:27:40 +0000 +Subject: [PATCH] configure.ac: don't require eglmesaext.h + +E.g. the Vivante EGL implementation does not provide eglmesaext.h. + +The commit moves the check for header file existence outside of the +check for existence of a egl packageconfig and makes the existence +of eglmesaext.h optional. 
+ +fixes commit fb1acfec ("Fix building against libglvnd-provided EGL headers") +Signed-off-by: Max Krummenacher + +Upstream-Status: Submitted [https://gitlab.gnome.org/GNOME/cogl/-/merge_requests/28] +Signed-off-by: Max Krummenacher +--- + configure.ac | 51 ++++++++++++++++++++++++++++++++------------------- + 1 file changed, 32 insertions(+), 19 deletions(-) + +diff --git a/configure.ac b/configure.ac +index b7ba95d..0d1d8de 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1212,22 +1212,6 @@ AS_IF([test "x$NEED_EGL" = "xyes" && test "x$EGL_CHECKED" != "xyes"], + PKG_CHECK_EXISTS([egl], + [COGL_PKG_REQUIRES="$COGL_PKG_REQUIRES egl"], + [ +- AC_CHECK_HEADERS( +- [EGL/egl.h], +- [], +- [AC_MSG_ERROR([Unable to locate required EGL headers])]) +- AC_CHECK_HEADERS( +- [EGL/eglext.h], +- [], +- [AC_MSG_ERROR([Unable to locate required EGL headers])], +- [#include ]) +- AC_CHECK_HEADERS( +- [EGL/eglmesaext.h], +- [], +- [AC_MSG_ERROR([Unable to locate required EGL headers])], +- [#include +-#include ]) +- + AC_CHECK_LIB(EGL, [eglInitialize], + [COGL_EXTRA_LDFLAGS="$COGL_EXTRA_LDFLAGS -lEGL"], + [AC_MSG_ERROR([Unable to locate required EGL library])]) +@@ -1236,9 +1220,38 @@ AS_IF([test "x$NEED_EGL" = "xyes" && test "x$EGL_CHECKED" != "xyes"], + ] + ) + +- COGL_EGL_INCLUDES="#include +-#include +-#include " ++dnl Test for the existence of egl headers. ++dnl egl*.h includes eglplatform.h which on __unix__ defaults to a X11 platform. ++dnl In that case AC_CHECK_HEADERS failes if X11 headers are not available. ++dnl Set the usual include guard define and, if the EGL implementation doesn't ++dnl use that guard fall back to USE_OZONE and EGL_NO_X11 platforms which don't ++dnl require additional headers. ++ AC_CHECK_HEADERS( ++ [EGL/egl.h], ++ [COGL_EGL_INCLUDES="#include "], ++ [AC_MSG_ERROR([Unable to locate required EGL headers])], ++ [#define __egl_h_ ++ #define USE_OZONE ++ #define EGL_NO_X11]) ++ AC_CHECK_HEADERS( ++ [EGL/eglext.h], ++ [COGL_EGL_INCLUDES="$COGL_EGL_INCLUDES ++#include "], ++ [AC_MSG_ERROR([Unable to locate required EGL headers])], ++ [#define __eglext_h_ ++ #define USE_OZONE ++ #define EGL_NO_X11 ++$COGL_EGL_INCLUDES]) ++ AC_CHECK_HEADERS( ++ [EGL/eglmesaext.h], ++ [COGL_EGL_INCLUDES="$COGL_EGL_INCLUDES ++#include "], ++ [], ++ [#define __eglmesaext_h_ ++#define USE_OZONE ++#define EGL_NO_X11 ++$COGL_EGL_INCLUDES]) ++ + AC_SUBST([COGL_EGL_INCLUDES]) + ]) + +-- +2.20.1 + diff --git a/poky/meta/recipes-graphics/cogl/cogl-1.0_1.22.8.bb b/poky/meta/recipes-graphics/cogl/cogl-1.0_1.22.8.bb index 78f6b0e7a..b9446fab3 100644 --- a/poky/meta/recipes-graphics/cogl/cogl-1.0_1.22.8.bb +++ b/poky/meta/recipes-graphics/cogl/cogl-1.0_1.22.8.bb @@ -1,5 +1,6 @@ require cogl-1.0.inc +SRC_URI += "file://0001-configure.ac-don-t-require-eglmesaext.h.patch" SRC_URI[archive.sha256sum] = "a805b2b019184710ff53d0496f9f0ce6dcca420c141a0f4f6fcc02131581d759" LIC_FILES_CHKSUM = "file://COPYING;md5=1b1a508d91d25ca607c83f92f3e31c84" diff --git a/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb b/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb deleted file mode 100644 index 62ad50427..000000000 --- a/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.4.bb +++ /dev/null @@ -1,59 +0,0 @@ -SUMMARY = "Hardware accelerated JPEG compression/decompression library" -DESCRIPTION = "libjpeg-turbo is a derivative of libjpeg that uses SIMD instructions (MMX, SSE2, NEON) to accelerate baseline JPEG compression and decompression" -HOMEPAGE = "http://libjpeg-turbo.org/" - -LICENSE = "BSD-3-Clause" 
-LIC_FILES_CHKSUM = "file://cdjpeg.h;endline=13;md5=8184bcc7c4ac7b9edc6a7bc00f231d0b \ - file://jpeglib.h;endline=16;md5=7ea97dc83b0f59052ee837e61ef0e08f \ - file://djpeg.c;endline=11;md5=c59e19811c006cb38f82d6477134d314 \ -" -DEPENDS_append_x86-64_class-target = " nasm-native" -DEPENDS_append_x86_class-target = " nasm-native" - -SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \ - file://0001-libjpeg-turbo-fix-package_qa-error.patch \ - " - -SRC_URI[md5sum] = "d01d9e0c28c27bc0de9f4e2e8ff49855" -SRC_URI[sha256sum] = "33dd8547efd5543639e890efbf2ef52d5a21df81faf41bb940657af916a23406" -UPSTREAM_CHECK_URI = "http://sourceforge.net/projects/libjpeg-turbo/files/" -UPSTREAM_CHECK_REGEX = "/libjpeg-turbo/files/(?P(\d+[\.\-_]*)+)/" - -PE = "1" - -# Drop-in replacement for jpeg -PROVIDES = "jpeg" -RPROVIDES_${PN} += "jpeg" -RREPLACES_${PN} += "jpeg" -RCONFLICTS_${PN} += "jpeg" - -inherit cmake pkgconfig - -export NASMENV = "--debug-prefix-map=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}" - -# Add nasm-native dependency consistently for all build arches is hard -EXTRA_OECMAKE_append_class-native = " -DWITH_SIMD=False" -EXTRA_OECMAKE_append_class-nativesdk = " -DWITH_SIMD=False" - -# Work around missing x32 ABI support -EXTRA_OECMAKE_append_class-target = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", "-DWITH_SIMD=False", "", d)}" - -# Work around missing non-floating point ABI support in MIPS -EXTRA_OECMAKE_append_class-target = " ${@bb.utils.contains("MIPSPKGSFX_FPU", "-nf", "-DWITH_SIMD=False", "", d)}" - -# Provide a workaround if Altivec unit is not present in PPC -EXTRA_OECMAKE_append_class-target_powerpc = " ${@bb.utils.contains("TUNE_FEATURES", "altivec", "", "-DWITH_SIMD=False", d)}" -EXTRA_OECMAKE_append_class-target_powerpc64 = " ${@bb.utils.contains("TUNE_FEATURES", "altivec", "", "-DWITH_SIMD=False", d)}" - -DEBUG_OPTIMIZATION_append_armv4 = " ${@bb.utils.contains('TUNE_CCARGS', '-mthumb', '-fomit-frame-pointer', '', d)}" -DEBUG_OPTIMIZATION_append_armv5 = " ${@bb.utils.contains('TUNE_CCARGS', '-mthumb', '-fomit-frame-pointer', '', d)}" - -PACKAGES =+ "jpeg-tools libturbojpeg" - -DESCRIPTION_jpeg-tools = "The jpeg-tools package includes client programs to access libjpeg functionality. These tools allow for the compression, decompression, transformation and display of JPEG files and benchmarking of the libjpeg library." 
-FILES_jpeg-tools = "${bindir}/*" - -DESCRIPTION_libturbojpeg = "A SIMD-accelerated JPEG codec which provides only TurboJPEG APIs" -FILES_libturbojpeg = "${libdir}/libturbojpeg.so.*" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.5.bb b/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.5.bb new file mode 100644 index 000000000..9729fb5dd --- /dev/null +++ b/poky/meta/recipes-graphics/jpeg/libjpeg-turbo_2.0.5.bb @@ -0,0 +1,59 @@ +SUMMARY = "Hardware accelerated JPEG compression/decompression library" +DESCRIPTION = "libjpeg-turbo is a derivative of libjpeg that uses SIMD instructions (MMX, SSE2, NEON) to accelerate baseline JPEG compression and decompression" +HOMEPAGE = "http://libjpeg-turbo.org/" + +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://cdjpeg.h;endline=13;md5=8184bcc7c4ac7b9edc6a7bc00f231d0b \ + file://jpeglib.h;endline=16;md5=7ea97dc83b0f59052ee837e61ef0e08f \ + file://djpeg.c;endline=11;md5=c59e19811c006cb38f82d6477134d314 \ +" +DEPENDS_append_x86-64_class-target = " nasm-native" +DEPENDS_append_x86_class-target = " nasm-native" + +SRC_URI = "${SOURCEFORGE_MIRROR}/${BPN}/${BPN}-${PV}.tar.gz \ + file://0001-libjpeg-turbo-fix-package_qa-error.patch \ + " + +SRC_URI[md5sum] = "3a7dc293918775fc933f81e2bce36464" +SRC_URI[sha256sum] = "16f8f6f2715b3a38ab562a84357c793dd56ae9899ce130563c72cd93d8357b5d" +UPSTREAM_CHECK_URI = "http://sourceforge.net/projects/libjpeg-turbo/files/" +UPSTREAM_CHECK_REGEX = "/libjpeg-turbo/files/(?P(\d+[\.\-_]*)+)/" + +PE = "1" + +# Drop-in replacement for jpeg +PROVIDES = "jpeg" +RPROVIDES_${PN} += "jpeg" +RREPLACES_${PN} += "jpeg" +RCONFLICTS_${PN} += "jpeg" + +inherit cmake pkgconfig + +export NASMENV = "--debug-prefix-map=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR}" + +# Add nasm-native dependency consistently for all build arches is hard +EXTRA_OECMAKE_append_class-native = " -DWITH_SIMD=False" +EXTRA_OECMAKE_append_class-nativesdk = " -DWITH_SIMD=False" + +# Work around missing x32 ABI support +EXTRA_OECMAKE_append_class-target = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", "-DWITH_SIMD=False", "", d)}" + +# Work around missing non-floating point ABI support in MIPS +EXTRA_OECMAKE_append_class-target = " ${@bb.utils.contains("MIPSPKGSFX_FPU", "-nf", "-DWITH_SIMD=False", "", d)}" + +# Provide a workaround if Altivec unit is not present in PPC +EXTRA_OECMAKE_append_class-target_powerpc = " ${@bb.utils.contains("TUNE_FEATURES", "altivec", "", "-DWITH_SIMD=False", d)}" +EXTRA_OECMAKE_append_class-target_powerpc64 = " ${@bb.utils.contains("TUNE_FEATURES", "altivec", "", "-DWITH_SIMD=False", d)}" + +DEBUG_OPTIMIZATION_append_armv4 = " ${@bb.utils.contains('TUNE_CCARGS', '-mthumb', '-fomit-frame-pointer', '', d)}" +DEBUG_OPTIMIZATION_append_armv5 = " ${@bb.utils.contains('TUNE_CCARGS', '-mthumb', '-fomit-frame-pointer', '', d)}" + +PACKAGES =+ "jpeg-tools libturbojpeg" + +DESCRIPTION_jpeg-tools = "The jpeg-tools package includes client programs to access libjpeg functionality. These tools allow for the compression, decompression, transformation and display of JPEG files and benchmarking of the libjpeg library." 
+FILES_jpeg-tools = "${bindir}/*" + +DESCRIPTION_libturbojpeg = "A SIMD-accelerated JPEG codec which provides only TurboJPEG APIs" +FILES_libturbojpeg = "${libdir}/libturbojpeg.so.*" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch b/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch index cd35a1f85..91e59d14e 100644 --- a/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch +++ b/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch @@ -1,4 +1,4 @@ -From df835389699b32bb6610b39972502e323f8e09e5 Mon Sep 17 00:00:00 2001 +From dd1d15c75f6ff8ee96cf1e7b74e582bff3183ef6 Mon Sep 17 00:00:00 2001 From: Alistair Francis Date: Thu, 14 Nov 2019 13:08:31 -0800 Subject: [PATCH] meson.build: make TLS ELF optional @@ -15,10 +15,10 @@ Signed-off-by: Alistair Francis 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/meson.build b/meson.build -index 09e3759..a954118 100644 +index 261b588..311436e 100644 --- a/meson.build +++ b/meson.build -@@ -387,7 +387,7 @@ if with_egl and not (with_platform_drm or with_platform_surfaceless or with_plat +@@ -392,7 +392,7 @@ if with_egl and not (with_platform_drm or with_platform_surfaceless or with_plat endif # Android uses emutls for versions <= P/28. For USE_ELF_TLS we need ELF TLS. @@ -28,10 +28,10 @@ index 09e3759..a954118 100644 endif diff --git a/meson_options.txt b/meson_options.txt -index 626baf3..637ff14 100644 +index ab43150..d7b1555 100644 --- a/meson_options.txt +++ b/meson_options.txt -@@ -341,6 +341,12 @@ option( +@@ -355,6 +355,12 @@ option( value : true, description : 'Enable direct rendering in GLX and EGL for DRI', ) @@ -42,5 +42,5 @@ index 626baf3..637ff14 100644 + description : 'Enable TLS support in ELF', +) option( - 'I-love-half-baked-turnips', + 'prefer-iris', type : 'boolean', diff --git a/poky/meta/recipes-graphics/mesa/files/0005-vc4-use-intmax_t-for-formatted-output-of-timespec-me.patch b/poky/meta/recipes-graphics/mesa/files/0005-vc4-use-intmax_t-for-formatted-output-of-timespec-me.patch index 901feca55..dacb1ea1c 100644 --- a/poky/meta/recipes-graphics/mesa/files/0005-vc4-use-intmax_t-for-formatted-output-of-timespec-me.patch +++ b/poky/meta/recipes-graphics/mesa/files/0005-vc4-use-intmax_t-for-formatted-output-of-timespec-me.patch @@ -1,4 +1,4 @@ -From 38a313474e127d61e749866423e708fc86ed9ec5 Mon Sep 17 00:00:00 2001 +From 281a636353666bfdd373c62591e744087e750e89 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Wed, 4 Dec 2019 14:15:28 -0800 Subject: [PATCH] vc4: use intmax_t for formatted output of timespec members @@ -13,13 +13,14 @@ error: format specifies type 'long' but the argument has type 'time_t' (aka 'lon Upstream-Status: Submitted [https://gitlab.freedesktop.org/mesa/mesa/merge_requests/2966] Signed-off-by: Khem Raj + --- src/gallium/drivers/v3d/v3d_bufmgr.c | 4 ++-- src/gallium/drivers/vc4/vc4_bufmgr.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/gallium/drivers/v3d/v3d_bufmgr.c b/src/gallium/drivers/v3d/v3d_bufmgr.c -index b3e4d053cc0..c514bf00bf0 100644 +index 31a0803..cc2e2af 100644 --- a/src/gallium/drivers/v3d/v3d_bufmgr.c +++ b/src/gallium/drivers/v3d/v3d_bufmgr.c @@ -80,8 +80,8 @@ v3d_bo_dump_stats(struct v3d_screen *screen) @@ -27,27 +28,24 @@ index b3e4d053cc0..c514bf00bf0 100644 struct timespec time; clock_gettime(CLOCK_MONOTONIC, &time); - fprintf(stderr, " now: %ld\n", -- time.tv_sec); +- 
(long)time.tv_sec); + fprintf(stderr, " now: %jd\n", + (intmax_t)time.tv_sec); } } diff --git a/src/gallium/drivers/vc4/vc4_bufmgr.c b/src/gallium/drivers/vc4/vc4_bufmgr.c -index 5ec360934c0..bf05f6cadd6 100644 +index a786e8e..975d49e 100644 --- a/src/gallium/drivers/vc4/vc4_bufmgr.c +++ b/src/gallium/drivers/vc4/vc4_bufmgr.c -@@ -107,8 +107,8 @@ vc4_bo_dump_stats(struct vc4_screen *screen) +@@ -99,8 +99,8 @@ vc4_bo_dump_stats(struct vc4_screen *screen) struct timespec time; clock_gettime(CLOCK_MONOTONIC, &time); - fprintf(stderr, " now: %ld\n", -- time.tv_sec); +- (long)time.tv_sec); + fprintf(stderr, " now: %jd\n", + (intmax_t)time.tv_sec); } } --- -2.24.0 - diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.0.7.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.0.7.bb deleted file mode 100644 index e50782be1..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa-gl_20.0.7.bb +++ /dev/null @@ -1,15 +0,0 @@ -require mesa.inc - -SUMMARY += " (OpenGL only, no EGL/GLES)" - -PROVIDES = "virtual/libgl virtual/mesa" - -S = "${WORKDIR}/mesa-${PV}" - -# At least one DRI rendering engine is required to build mesa. -# When no X11 is available, use osmesa for the rendering engine. -PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" -PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" - -# When NOT using X11, we need to make sure we have swrast available. -DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb new file mode 100644 index 000000000..e50782be1 --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb @@ -0,0 +1,15 @@ +require mesa.inc + +SUMMARY += " (OpenGL only, no EGL/GLES)" + +PROVIDES = "virtual/libgl virtual/mesa" + +S = "${WORKDIR}/mesa-${PV}" + +# At least one DRI rendering engine is required to build mesa. +# When no X11 is available, use osmesa for the rendering engine. +PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" +PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" + +# When NOT using X11, we need to make sure we have swrast available. 
+DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa.inc b/poky/meta/recipes-graphics/mesa/mesa.inc index 965e12d17..4fe5f025e 100644 --- a/poky/meta/recipes-graphics/mesa/mesa.inc +++ b/poky/meta/recipes-graphics/mesa/mesa.inc @@ -23,8 +23,7 @@ SRC_URI = "https://mesa.freedesktop.org/archive/mesa-${PV}.tar.xz \ file://0001-meson-misdetects-64bit-atomics-on-mips-clang.patch \ " -SRC_URI[md5sum] = "609cb7664204e031d4c65c602c652a28" -SRC_URI[sha256sum] = "fe6e258fe772c3cd2ac01741bf7408058c3ac02d66acff9a6e669bd72e3ea178" +SRC_URI[sha256sum] = "283dff72814c8a80ce1ff8271e3f055895d26f4da3f4362acc49193e635780cb" UPSTREAM_CHECK_GITTAGREGEX = "mesa-(?P\d+(\.\d+)+)" diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.0.7.bb b/poky/meta/recipes-graphics/mesa/mesa_20.0.7.bb deleted file mode 100644 index d17d9eb04..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa_20.0.7.bb +++ /dev/null @@ -1 +0,0 @@ -require ${BPN}.inc diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb b/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb new file mode 100644 index 000000000..96e8aa38d --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb @@ -0,0 +1,2 @@ +require ${BPN}.inc + diff --git a/poky/meta/recipes-graphics/piglit/piglit_git.bb b/poky/meta/recipes-graphics/piglit/piglit_git.bb index 47950c863..e531ffc40 100644 --- a/poky/meta/recipes-graphics/piglit/piglit_git.bb +++ b/poky/meta/recipes-graphics/piglit/piglit_git.bb @@ -10,7 +10,7 @@ SRC_URI = "git://gitlab.freedesktop.org/mesa/piglit.git;protocol=https \ " UPSTREAM_CHECK_COMMITS = "1" -SRCREV = "5cc4fd749375f37e8e8725a86072d8cff7173b98" +SRCREV = "7d76fca56e8ca72ab6809f6f0234b5971af5690a" # (when PV goes above 1.0 remove the trailing r) PV = "1.0+gitr${SRCPV}" diff --git a/poky/meta/recipes-graphics/xorg-app/xinit_1.4.1.bb b/poky/meta/recipes-graphics/xorg-app/xinit_1.4.1.bb index c9e28d9bb..301ea1c24 100644 --- a/poky/meta/recipes-graphics/xorg-app/xinit_1.4.1.bb +++ b/poky/meta/recipes-graphics/xorg-app/xinit_1.4.1.bb @@ -19,4 +19,7 @@ SRC_URI[sha256sum] = "de9b8f617b68a70f6caf87da01fcf0ebd2b75690cdcba9c921d0ef54fa EXTRA_OECONF = "ac_cv_path_MCOOKIE=${bindir}/mcookie" +PACKAGECONFIG ??= "rxvt" +PACKAGECONFIG[rxvt] = "--with-xterm=rxvt,,,rxvt-unicode" + RDEPENDS_${PN} += "util-linux-mcookie" diff --git a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200519.bb b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200519.bb deleted file mode 100644 index 18f44bcca..000000000 --- a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200519.bb +++ /dev/null @@ -1,946 +0,0 @@ -SUMMARY = "Firmware files for use with Linux kernel" -SECTION = "kernel" - -LICENSE = "\ - Firmware-Abilis \ - & Firmware-adsp_sst \ - & Firmware-agere \ - & Firmware-amdgpu \ - & Firmware-amd-ucode \ - & Firmware-amlogic_vdec \ - & Firmware-atheros_firmware \ - & Firmware-atmel \ - & Firmware-broadcom_bcm43xx \ - & Firmware-ca0132 \ - & Firmware-cavium \ - & Firmware-chelsio_firmware \ - & Firmware-cw1200 \ - & Firmware-cypress \ - & Firmware-dib0700 \ - & Firmware-e100 \ - & Firmware-ene_firmware \ - & Firmware-fw_sst_0f28 \ - & Firmware-go7007 \ - & Firmware-GPLv2 \ - & Firmware-hfi1_firmware \ - & Firmware-i2400m \ - & Firmware-i915 \ - & Firmware-ibt_firmware \ - & Firmware-ice \ - & Firmware-it913x \ - & Firmware-iwlwifi_firmware \ - & Firmware-IntcSST2 \ - & Firmware-kaweth \ - & Firmware-Marvell \ - & Firmware-moxa \ - & Firmware-myri10ge_firmware \ - & 
Firmware-netronome \ - & Firmware-nvidia \ - & Firmware-OLPC \ - & Firmware-ath9k-htc \ - & Firmware-phanfw \ - & Firmware-qat \ - & Firmware-qcom \ - & Firmware-qla1280 \ - & Firmware-qla2xxx \ - & Firmware-qualcommAthos_ar3k \ - & Firmware-qualcommAthos_ath10k \ - & Firmware-r8a779x_usb3 \ - & Firmware-radeon \ - & Firmware-ralink_a_mediatek_company_firmware \ - & Firmware-ralink-firmware \ - & Firmware-rtlwifi_firmware \ - & Firmware-imx-sdma_firmware \ - & Firmware-siano \ - & Firmware-tda7706-firmware \ - & Firmware-ti-connectivity \ - & Firmware-ti-keystone \ - & Firmware-ueagle-atm4-firmware \ - & Firmware-via_vt6656 \ - & Firmware-wl1251 \ - & Firmware-xc4000 \ - & Firmware-xc5000 \ - & Firmware-xc5000c \ - & WHENCE \ -" - -LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \ - file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \ - file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \ - file://LICENSE.amdgpu;md5=d357524f5099e2a3db3c1838921c593f \ - file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \ - file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \ - file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \ - file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \ - file://LICENCE.broadcom_bcm43xx;md5=3160c14df7228891b868060e1951dfbc \ - file://LICENCE.ca0132;md5=209b33e66ee5be0461f13d31da392198 \ - file://LICENCE.cadence;md5=009f46816f6956cfb75ede13d3e1cee0 \ - file://LICENCE.cavium;md5=c37aaffb1ebe5939b2580d073a95daea \ - file://LICENCE.chelsio_firmware;md5=819aa8c3fa453f1b258ed8d168a9d903 \ - file://LICENCE.cw1200;md5=f0f770864e7a8444a5c5aa9d12a3a7ed \ - file://LICENCE.cypress;md5=48cd9436c763bf873961f9ed7b5c147b \ - file://LICENSE.dib0700;md5=f7411825c8a555a1a3e5eab9ca773431 \ - file://LICENCE.e100;md5=ec0f84136766df159a3ae6d02acdf5a8 \ - file://LICENCE.ene_firmware;md5=ed67f0f62f8f798130c296720b7d3921 \ - file://LICENCE.fw_sst_0f28;md5=6353931c988ad52818ae733ac61cd293 \ - file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \ - file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \ - file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \ - file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \ - file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \ - file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \ - file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ - file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \ - file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \ - file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \ - file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \ - file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \ - file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \ - file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \ - file://LICENCE.Netronome;md5=4add08f2577086d44447996503cddf5f \ - file://LICENCE.nvidia;md5=4428a922ed3ba2ceec95f076a488ce07 \ - file://LICENCE.NXP;md5=58bb8ba632cd729b9ba6183bc6aed36f \ - file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \ - file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \ - file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \ - file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ - file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \ - 
file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \ - file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \ - file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \ - file://LICENSE.QualcommAtheros_ath10k;md5=cb42b686ee5f5cb890275e4321db60a8 \ - file://LICENCE.r8a779x_usb3;md5=4c1671656153025d7076105a5da7e498 \ - file://LICENSE.radeon;md5=68ec28bacb3613200bca44f404c69b16 \ - file://LICENCE.ralink_a_mediatek_company_firmware;md5=728f1a85fd53fd67fa8d7afb080bc435 \ - file://LICENCE.ralink-firmware.txt;md5=ab2c269277c45476fb449673911a2dfd \ - file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \ - file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \ - file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \ - file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \ - file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \ - file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \ - file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \ - file://LICENCE.via_vt6656;md5=e4159694cba42d4377a912e78a6e850f \ - file://LICENCE.wl1251;md5=ad3f81922bb9e197014bb187289d3b5b \ - file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \ - file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \ - file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \ - file://WHENCE;md5=c15cee50b1a59d27106a37c2929d5291 \ - " - -# These are not common licenses, set NO_GENERIC_LICENSE for them -# so that the license files will be copied from fetched source -NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENCE.Abilis" -NO_GENERIC_LICENSE[Firmware-adsp_sst] = "LICENCE.adsp_sst" -NO_GENERIC_LICENSE[Firmware-agere] = "LICENCE.agere" -NO_GENERIC_LICENSE[Firmware-amdgpu] = "LICENSE.amdgpu" -NO_GENERIC_LICENSE[Firmware-amd-ucode] = "LICENSE.amd-ucode" -NO_GENERIC_LICENSE[Firmware-amlogic_vdec] = "LICENSE.amlogic_vdec" -NO_GENERIC_LICENSE[Firmware-atheros_firmware] = "LICENCE.atheros_firmware" -NO_GENERIC_LICENSE[Firmware-atmel] = "LICENSE.atmel" -NO_GENERIC_LICENSE[Firmware-broadcom_bcm43xx] = "LICENCE.broadcom_bcm43xx" -NO_GENERIC_LICENSE[Firmware-ca0132] = "LICENCE.ca0132" -NO_GENERIC_LICENSE[Firmware-cadence] = "LICENCE.cadence" -NO_GENERIC_LICENSE[Firmware-cavium] = "LICENCE.cavium" -NO_GENERIC_LICENSE[Firmware-chelsio_firmware] = "LICENCE.chelsio_firmware" -NO_GENERIC_LICENSE[Firmware-cw1200] = "LICENCE.cw1200" -NO_GENERIC_LICENSE[Firmware-cypress] = "LICENCE.cypress" -NO_GENERIC_LICENSE[Firmware-dib0700] = "LICENSE.dib0700" -NO_GENERIC_LICENSE[Firmware-e100] = "LICENCE.e100" -NO_GENERIC_LICENSE[Firmware-ene_firmware] = "LICENCE.ene_firmware" -NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28" -NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007" -NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2" -NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware" -NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m" -NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915" -NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware" -NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice" -NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2" -NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x" -NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware" -NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth" -NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell" -NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek" 
-NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa" -NO_GENERIC_LICENSE[Firmware-myri10ge_firmware] = "LICENCE.myri10ge_firmware" -NO_GENERIC_LICENSE[Firmware-netronome] = "LICENCE.Netronome" -NO_GENERIC_LICENSE[Firmware-nvidia] = "LICENCE.nvidia" -NO_GENERIC_LICENSE[Firmware-OLPC] = "LICENCE.OLPC" -NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware" -NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw" -NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware" -NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom" -NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280" -NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx" -NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k" -NO_GENERIC_LICENSE[Firmware-qualcommAthos_ath10k] = "LICENSE.QualcommAtheros_ath10k" -NO_GENERIC_LICENSE[Firmware-r8a779x_usb3] = "LICENCE.r8a779x_usb3" -NO_GENERIC_LICENSE[Firmware-radeon] = "LICENSE.radeon" -NO_GENERIC_LICENSE[Firmware-ralink_a_mediatek_company_firmware] = "LICENCE.ralink_a_mediatek_company_firmware" -NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt" -NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt" -NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano" -NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware" -NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt" -NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity" -NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone" -NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware" -NO_GENERIC_LICENSE[Firmware-via_vt6656] = "LICENCE.via_vt6656" -NO_GENERIC_LICENSE[Firmware-wl1251] = "LICENCE.wl1251" -NO_GENERIC_LICENSE[Firmware-xc4000] = "LICENCE.xc4000" -NO_GENERIC_LICENSE[Firmware-xc5000] = "LICENCE.xc5000" -NO_GENERIC_LICENSE[Firmware-xc5000c] = "LICENCE.xc5000c" -NO_GENERIC_LICENSE[WHENCE] = "WHENCE" - -PE = "1" - -SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz" - -SRC_URI[sha256sum] = "584c49c58291136b966ceffb0a456a672c23d4d759bab8bf86cbbe28061e415e" - -inherit allarch - -CLEANBROKEN = "1" - -do_compile() { - : -} - -do_install() { - oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install - cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/ -} - - -PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \ - ${PN}-mt7601u-license ${PN}-mt7601u \ - ${PN}-radeon-license ${PN}-radeon \ - ${PN}-marvell-license ${PN}-pcie8897 ${PN}-pcie8997 \ - ${PN}-sd8686 ${PN}-sd8688 ${PN}-sd8787 ${PN}-sd8797 ${PN}-sd8801 ${PN}-sd8887 ${PN}-sd8897 \ - ${PN}-usb8997 \ - ${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \ - ${PN}-vt6656-license ${PN}-vt6656 \ - ${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \ - ${PN}-rtl8168 \ - ${PN}-cypress-license \ - ${PN}-broadcom-license \ - ${PN}-bcm-0bb4-0306 \ - ${PN}-bcm43143 \ - ${PN}-bcm43236b \ - ${PN}-bcm43241b0 \ - ${PN}-bcm43241b4 \ - ${PN}-bcm43241b5 \ - ${PN}-bcm43242a \ - ${PN}-bcm4329 \ - ${PN}-bcm4329-fullmac \ - ${PN}-bcm4330 \ - ${PN}-bcm4334 \ - ${PN}-bcm43340 \ - ${PN}-bcm4335 \ - ${PN}-bcm43362 \ - ${PN}-bcm4339 \ - ${PN}-bcm43430 \ - ${PN}-bcm43430a0 \ - ${PN}-bcm43455 \ - ${PN}-bcm4350 \ - ${PN}-bcm4350c2 \ - ${PN}-bcm4354 \ - ${PN}-bcm4356 \ - ${PN}-bcm4356-pcie \ - ${PN}-bcm43569 \ - ${PN}-bcm43570 \ - ${PN}-bcm4358 \ - ${PN}-bcm43602 \ - ${PN}-bcm4366b \ - 
${PN}-bcm4366c \ - ${PN}-bcm4371 \ - ${PN}-bcm4373 \ - ${PN}-bcm43xx \ - ${PN}-bcm43xx-hdr \ - ${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k \ - ${PN}-gplv2-license ${PN}-carl9170 \ - ${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-qca \ - \ - ${PN}-imx-sdma-license ${PN}-imx-sdma-imx6q ${PN}-imx-sdma-imx7d \ - \ - ${PN}-iwlwifi-license ${PN}-iwlwifi \ - ${PN}-iwlwifi-135-6 \ - ${PN}-iwlwifi-3160-7 ${PN}-iwlwifi-3160-8 ${PN}-iwlwifi-3160-9 \ - ${PN}-iwlwifi-3160-10 ${PN}-iwlwifi-3160-12 ${PN}-iwlwifi-3160-13 \ - ${PN}-iwlwifi-3160-16 ${PN}-iwlwifi-3160-17 \ - ${PN}-iwlwifi-6000-4 ${PN}-iwlwifi-6000g2a-5 ${PN}-iwlwifi-6000g2a-6 \ - ${PN}-iwlwifi-6000g2b-5 ${PN}-iwlwifi-6000g2b-6 \ - ${PN}-iwlwifi-6050-4 ${PN}-iwlwifi-6050-5 \ - ${PN}-iwlwifi-7260 \ - ${PN}-iwlwifi-7265 \ - ${PN}-iwlwifi-7265d ${PN}-iwlwifi-8000c ${PN}-iwlwifi-8265 \ - ${PN}-iwlwifi-9000 \ - ${PN}-iwlwifi-misc \ - ${PN}-ibt-license ${PN}-ibt ${PN}-ibt-misc \ - ${PN}-ibt-11-5 ${PN}-ibt-12-16 ${PN}-ibt-hw-37-7 ${PN}-ibt-hw-37-8 \ - ${PN}-ibt-17 \ - ${PN}-i915-license ${PN}-i915 \ - ${PN}-ice-license ${PN}-ice \ - ${PN}-adsp-sst-license ${PN}-adsp-sst \ - ${PN}-bnx2-mips \ - ${PN}-liquidio \ - ${PN}-netronome-license ${PN}-netronome \ - ${PN}-qat ${PN}-qat-license \ - ${PN}-qcom-license \ - ${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 \ - ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 ${PN}-qcom-adreno-a630 \ - ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \ - ${PN}-whence-license \ - ${PN}-license \ - " - -# For atheros -LICENSE_${PN}-ar9170 = "Firmware-atheros_firmware" -LICENSE_${PN}-ath6k = "Firmware-atheros_firmware" -LICENSE_${PN}-ath9k = "Firmware-atheros_firmware" -LICENSE_${PN}-atheros-license = "Firmware-atheros_firmware" - -FILES_${PN}-atheros-license = "${nonarch_base_libdir}/firmware/LICENCE.atheros_firmware" -FILES_${PN}-ar9170 = " \ - ${nonarch_base_libdir}/firmware/ar9170*.fw \ -" -FILES_${PN}-ath6k = " \ - ${nonarch_base_libdir}/firmware/ath6k \ -" -FILES_${PN}-ath9k = " \ - ${nonarch_base_libdir}/firmware/ar9271.fw \ - ${nonarch_base_libdir}/firmware/ar7010*.fw \ - ${nonarch_base_libdir}/firmware/htc_9271.fw \ - ${nonarch_base_libdir}/firmware/htc_7010.fw \ - ${nonarch_base_libdir}/firmware/ath9k_htc/htc_7010-1.4.0.fw \ - ${nonarch_base_libdir}/firmware/ath9k_htc/htc_9271-1.4.0.fw \ -" - -RDEPENDS_${PN}-ar9170 += "${PN}-atheros-license" -RDEPENDS_${PN}-ath6k += "${PN}-atheros-license" -RDEPENDS_${PN}-ath9k += "${PN}-atheros-license" - -# For carl9170 -LICENSE_${PN}-carl9170 = "Firmware-GPLv2" -LICENSE_${PN}-gplv2-license = "Firmware-GPLv2" - -FILES_${PN}-gplv2-license = "${nonarch_base_libdir}/firmware/GPL-2" -FILES_${PN}-carl9170 = " \ - ${nonarch_base_libdir}/firmware/carl9170*.fw \ -" - -RDEPENDS_${PN}-carl9170 += "${PN}-gplv2-license" - -# For QualCommAthos -LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k" -LICENSE_${PN}-ar3k-license = "Firmware-qualcommAthos_ar3k" -LICENSE_${PN}-ath10k = "Firmware-qualcommAthos_ath10k" -LICENSE_${PN}-ath10k-license = "Firmware-qualcommAthos_ath10k" -LICENSE_${PN}-qca = "Firmware-qualcommAthos_ath10k" - -FILES_${PN}-ar3k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ar3k" -FILES_${PN}-ar3k = " \ - ${nonarch_base_libdir}/firmware/ar3k \ -" - -FILES_${PN}-ath10k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ath10k" -FILES_${PN}-ath10k = " \ - ${nonarch_base_libdir}/firmware/ath10k \ -" - -FILES_${PN}-qca = " \ - ${nonarch_base_libdir}/firmware/qca \ -" - 
-RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license" -RDEPENDS_${PN}-ath10k += "${PN}-ath10k-license" -RDEPENDS_${PN}-qca += "${PN}-ath10k-license" - -# For ralink -LICENSE_${PN}-ralink = "Firmware-ralink-firmware" -LICENSE_${PN}-ralink-license = "Firmware-ralink-firmware" - -FILES_${PN}-ralink-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink-firmware.txt" -FILES_${PN}-ralink = " \ - ${nonarch_base_libdir}/firmware/rt*.bin \ -" - -RDEPENDS_${PN}-ralink += "${PN}-ralink-license" - -# For mediatek MT7601U -LICENSE_${PN}-mt7601u = "Firmware-ralink_a_mediatek_company_firmware" -LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware" - -FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware" -FILES_${PN}-mt7601u = " \ - ${nonarch_base_libdir}/firmware/mt7601u.bin \ -" - -RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license" - -# For radeon -LICENSE_${PN}-radeon = "Firmware-radeon" -LICENSE_${PN}-radeon-license = "Firmware-radeon" - -FILES_${PN}-radeon-license = "${nonarch_base_libdir}/firmware/LICENSE.radeon" -FILES_${PN}-radeon = " \ - ${nonarch_base_libdir}/firmware/radeon \ -" - -RDEPENDS_${PN}-radeon += "${PN}-radeon-license" - -# For marvell -LICENSE_${PN}-pcie8897 = "Firmware-Marvell" -LICENSE_${PN}-pcie8997 = "Firmware-Marvell" -LICENSE_${PN}-sd8686 = "Firmware-Marvell" -LICENSE_${PN}-sd8688 = "Firmware-Marvell" -LICENSE_${PN}-sd8787 = "Firmware-Marvell" -LICENSE_${PN}-sd8797 = "Firmware-Marvell" -LICENSE_${PN}-sd8801 = "Firmware-Marvell" -LICENSE_${PN}-sd8887 = "Firmware-Marvell" -LICENSE_${PN}-sd8897 = "Firmware-Marvell" -LICENSE_${PN}-usb8997 = "Firmware-Marvell" -LICENSE_${PN}-marvell-license = "Firmware-Marvell" - -FILES_${PN}-marvell-license = "${nonarch_base_libdir}/firmware/LICENCE.Marvell" -FILES_${PN}-pcie8897 = " \ - ${nonarch_base_libdir}/firmware/mrvl/pcie8897_uapsta.bin \ -" -FILES_${PN}-pcie8997 = " \ - ${nonarch_base_libdir}/firmware/mrvl/pcie8997_wlan_v4.bin \ - ${nonarch_base_libdir}/firmware/mrvl/pcieuart8997_combo_v4.bin \ - ${nonarch_base_libdir}/firmware/mrvl/pcieusb8997_combo_v4.bin \ -" -FILES_${PN}-sd8686 = " \ - ${nonarch_base_libdir}/firmware/libertas/sd8686_v9* \ - ${nonarch_base_libdir}/firmware/sd8686* \ -" -FILES_${PN}-sd8688 = " \ - ${nonarch_base_libdir}/firmware/libertas/sd8688* \ - ${nonarch_base_libdir}/firmware/mrvl/sd8688* \ -" -FILES_${PN}-sd8787 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8787_uapsta.bin \ -" -FILES_${PN}-sd8797 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8797_uapsta.bin \ -" -FILES_${PN}-sd8801 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8801_uapsta.bin \ -" -FILES_${PN}-sd8887 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8887_uapsta.bin \ -" -FILES_${PN}-sd8897 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8897_uapsta.bin \ -" -FILES_${PN}-usb8997 = " \ - ${nonarch_base_libdir}/firmware/mrvl/usbusb8997_combo_v4.bin \ -" - -RDEPENDS_${PN}-sd8686 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8688 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8787 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8797 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8801 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8887 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8897 += "${PN}-marvell-license" -RDEPENDS_${PN}-usb8997 += "${PN}-marvell-license" - -# For netronome -LICENSE_${PN}-netronome = "Firmware-netronome" - -FILES_${PN}-netronome-license = " \ - ${nonarch_base_libdir}/firmware/LICENCE.Netronome \ -" -FILES_${PN}-netronome = " \ - 
${nonarch_base_libdir}/firmware/netronome/nic_AMDA0081*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0096*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0097*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0099*.nffw \ -" - -RDEPENDS_${PN}-netronome += "${PN}-netronome-license" - -# For rtl -LICENSE_${PN}-rtl8188 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192cu = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8168 = "WHENCE" - -FILES_${PN}-rtl-license = " \ - ${nonarch_base_libdir}/firmware/LICENCE.rtlwifi_firmware.txt \ -" -FILES_${PN}-rtl8188 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8188*.bin \ -" -FILES_${PN}-rtl8192cu = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cufw*.bin \ -" -FILES_${PN}-rtl8192ce = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cfw*.bin \ -" -FILES_${PN}-rtl8192su = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8712u.bin \ -" -FILES_${PN}-rtl8723 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8723*.bin \ -" -FILES_${PN}-rtl8821 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8821*.bin \ -" -FILES_${PN}-rtl8168 = " \ - ${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \ -" - -RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8168 += "${PN}-whence-license" - -# For ti-connectivity -LICENSE_${PN}-wlcommon = "Firmware-ti-connectivity" -LICENSE_${PN}-wl12xx = "Firmware-ti-connectivity" -LICENSE_${PN}-wl18xx = "Firmware-ti-connectivity" -LICENSE_${PN}-ti-connectivity-license = "Firmware-ti-connectivity" - -FILES_${PN}-ti-connectivity-license = "${nonarch_base_libdir}/firmware/LICENCE.ti-connectivity" -# wl18xx optionally needs wl1271-nvs.bin (which itself is a symlink to -# wl127x-nvs.bin) - see linux/drivers/net/wireless/ti/wlcore/sdio.c -# and drivers/net/wireless/ti/wlcore/spi.c. -# While they're optional and actually only used to override the MAC -# address on wl18xx, driver loading will delay (by udev timout - 60s) -# if not there. So let's make it available always. Because it's a -# symlink, both need to go to wlcommon. 
-FILES_${PN}-wlcommon = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/TI* \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl127x-nvs.bin \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl1271-nvs.bin \ -" -FILES_${PN}-wl12xx = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl12* \ -" -FILES_${PN}-wl18xx = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl18* \ -" - -RDEPENDS_${PN}-wl12xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" -RDEPENDS_${PN}-wl18xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" - -# For vt6656 -LICENSE_${PN}-vt6656 = "Firmware-via_vt6656" -LICENSE_${PN}-vt6656-license = "Firmware-via_vt6656" - -FILES_${PN}-vt6656-license = "${nonarch_base_libdir}/firmware/LICENCE.via_vt6656" -FILES_${PN}-vt6656 = " \ - ${nonarch_base_libdir}/firmware/vntwusb.fw \ -" - -RDEPENDS_${PN}-vt6656 = "${PN}-vt6656-license" - -# For broadcom - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e " \${PN}-$pkg \\"; done | sort -u - -LICENSE_${PN}-broadcom-license = "Firmware-broadcom_bcm43xx" -FILES_${PN}-broadcom-license = "${nonarch_base_libdir}/firmware/LICENCE.broadcom_bcm43xx" - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo "$i - $pkg"; echo -e "FILES_\${PN}-$pkg = \"\${nonarch_base_libdir}/firmware/brcm/$i\""; done | grep ^FILES - -FILES_${PN}-bcm43xx = "${nonarch_base_libdir}/firmware/brcm/bcm43xx-0.fw" -FILES_${PN}-bcm43xx-hdr = "${nonarch_base_libdir}/firmware/brcm/bcm43xx_hdr-0.fw" -FILES_${PN}-bcm4329-fullmac = "${nonarch_base_libdir}/firmware/brcm/bcm4329-fullmac-4.bin" -FILES_${PN}-bcm43236b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43236b.bin" -FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bin" -FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*" -FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin" -FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin" -FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin" -FILES_${PN}-bcm43241b0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b0-sdio.bin" -FILES_${PN}-bcm43241b4 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b4-sdio.bin" -FILES_${PN}-bcm43241b5 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b5-sdio.bin" -FILES_${PN}-bcm43242a = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43242a.bin" -FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \ -" -FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*" -FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*" -FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin" -FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin" -FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin" -FILES_${PN}-bcm43569 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43569.bin" -FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin" -FILES_${PN}-bcm4358 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4358-pcie.bin" -FILES_${PN}-bcm43602 = 
"${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \ -" -FILES_${PN}-bcm4366b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366b-pcie.bin" -FILES_${PN}-bcm4366c = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366c-pcie.bin" -FILES_${PN}-bcm4371 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4371-pcie.bin" - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e "LICENSE_\${PN}-$pkg = \"Firmware-broadcom_bcm43xx\"\nRDEPENDS_\${PN}-$pkg += \"\${PN}-broadcom-license\""; done -# Currently 1st one and last 6 have cypress LICENSE - -LICENSE_${PN}-bcm43xx = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43xx += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43xx-hdr = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43xx-hdr += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4329-fullmac = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4329-fullmac += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43236b = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43236b += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4329 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4329 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4330 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4330 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4334 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4334 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4335 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4335 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4339 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4339 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b0 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b0 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b4 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b4 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b5 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b5 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43242a = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43242a += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43143 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43143 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43430a0 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43430a0 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43455 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43455 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4350c2 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4350c2 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4350 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4350 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4356 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4356 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43569 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43569 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43570 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43570 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4358 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4358 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43602 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43602 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4366b = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4366b += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4366c = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4366c += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4371 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4371 += "${PN}-broadcom-license" - -# 
For broadcom cypress - -LICENSE_${PN}-cypress-license = "Firmware-cypress" -FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress" - -FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd" -FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*" -FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*" -FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*" -FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin" -FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*" -FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \ -" - -LICENSE_${PN}-bcm-0bb4-0306 = "Firmware-cypress" -RDEPENDS_${PN}-bcm-0bb4-0306 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43340 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43340 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43362 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43362 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43430 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43430 += "${PN}-cypress-license" -LICENSE_${PN}-bcm4354 = "Firmware-cypress" -RDEPENDS_${PN}-bcm4354 += "${PN}-cypress-license" -LICENSE_${PN}-bcm4356-pcie = "Firmware-cypress" -RDEPENDS_${PN}-bcm4356-pcie += "${PN}-cypress-license" -LICENSE_${PN}-bcm4373 = "Firmware-cypress" -RDEPENDS_${PN}-bcm4373 += "${PN}-cypress-license" - -# For Broadcom bnx2-mips -# -# which is a separate case to the other Broadcom firmwares since its -# license is contained in the shared WHENCE file. - -LICENSE_${PN}-bnx2-mips = "WHENCE" -LICENSE_${PN}-whence-license = "WHENCE" - -FILES_${PN}-bnx2-mips = "${nonarch_base_libdir}/firmware/bnx2/bnx2-mips-09-6.2.1b.fw" -FILES_${PN}-whence-license = "${nonarch_base_libdir}/firmware/WHENCE" - -RDEPENDS_${PN}-bnx2-mips += "${PN}-whence-license" - -# For imx-sdma -LICENSE_${PN}-imx-sdma-imx6q = "Firmware-imx-sdma_firmware" -LICENSE_${PN}-imx-sdma-imx7d = "Firmware-imx-sdma_firmware" -LICENSE_${PN}-imx-sdma-license = "Firmware-imx-sdma_firmware" - -FILES_${PN}-imx-sdma-imx6q = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx6q.bin" - -RPROVIDES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" -RREPLACES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" -RCONFLICTS_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" - -FILES_${PN}-imx-sdma-imx7d = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx7d.bin" - -FILES_${PN}-imx-sdma-license = "${nonarch_base_libdir}/firmware/LICENSE.sdma_firmware" - -RDEPENDS_${PN}-imx-sdma-imx6q += "${PN}-imx-sdma-license" -RDEPENDS_${PN}-imx-sdma-imx7d += "${PN}-imx-sdma-license" - -# For iwlwifi -LICENSE_${PN}-iwlwifi = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-135-6 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-7 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-8 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-9 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-10 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-12 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-13 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-16 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-17 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000-4 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2a-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2a-6 = "Firmware-iwlwifi_firmware" 
-LICENSE_${PN}-iwlwifi-6000g2b-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2b-6 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6050-4 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6050-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7260 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7265 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7265d = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-8000c = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-8265 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-9000 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-misc = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-license = "Firmware-iwlwifi_firmware" - - -FILES_${PN}-iwlwifi-license = "${nonarch_base_libdir}/firmware/LICENCE.iwlwifi_firmware" -FILES_${PN}-iwlwifi-135-6 = "${nonarch_base_libdir}/firmware/iwlwifi-135-6.ucode" -FILES_${PN}-iwlwifi-3160-7 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-7.ucode" -FILES_${PN}-iwlwifi-3160-8 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-8.ucode" -FILES_${PN}-iwlwifi-3160-9 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-9.ucode" -FILES_${PN}-iwlwifi-3160-10 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-10.ucode" -FILES_${PN}-iwlwifi-3160-12 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-12.ucode" -FILES_${PN}-iwlwifi-3160-13 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-13.ucode" -FILES_${PN}-iwlwifi-3160-16 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-16.ucode" -FILES_${PN}-iwlwifi-3160-17 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-17.ucode" -FILES_${PN}-iwlwifi-6000-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6000-4.ucode" -FILES_${PN}-iwlwifi-6000g2a-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-5.ucode" -FILES_${PN}-iwlwifi-6000g2a-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-6.ucode" -FILES_${PN}-iwlwifi-6000g2b-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-5.ucode" -FILES_${PN}-iwlwifi-6000g2b-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-6.ucode" -FILES_${PN}-iwlwifi-6050-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-4.ucode" -FILES_${PN}-iwlwifi-6050-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-5.ucode" -FILES_${PN}-iwlwifi-7260 = "${nonarch_base_libdir}/firmware/iwlwifi-7260-*.ucode" -FILES_${PN}-iwlwifi-7265 = "${nonarch_base_libdir}/firmware/iwlwifi-7265-*.ucode" -FILES_${PN}-iwlwifi-7265d = "${nonarch_base_libdir}/firmware/iwlwifi-7265D-*.ucode" -FILES_${PN}-iwlwifi-8000c = "${nonarch_base_libdir}/firmware/iwlwifi-8000C-*.ucode" -FILES_${PN}-iwlwifi-8265 = "${nonarch_base_libdir}/firmware/iwlwifi-8265-*.ucode" -FILES_${PN}-iwlwifi-9000 = "${nonarch_base_libdir}/firmware/iwlwifi-9000-*.ucode" -FILES_${PN}-iwlwifi-misc = "${nonarch_base_libdir}/firmware/iwlwifi-*.ucode" - -RDEPENDS_${PN}-iwlwifi-135-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-7 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-8 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-9 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-10 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-12 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-13 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-16 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-17 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000-4 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2a-5 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2a-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2b-5 = "${PN}-iwlwifi-license" 
-RDEPENDS_${PN}-iwlwifi-6000g2b-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6050-4 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6050-5 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7265d = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-8000c = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-8265 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-9000 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-misc = "${PN}-iwlwifi-license" - -# -iwlwifi-misc is a "catch all" package that includes all the iwlwifi -# firmwares that are not already included in other -iwlwifi- packages. -# -iwlwifi is a virtual package that depends upon all iwlwifi packages. -# These are distinct in order to allow the -misc firmwares to be installed -# without pulling in every other iwlwifi package. -ALLOW_EMPTY_${PN}-iwlwifi = "1" -ALLOW_EMPTY_${PN}-iwlwifi-misc = "1" - -# Handle package updating for the newly merged iwlwifi groupings -RPROVIDES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" -RREPLACES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" -RCONFLICTS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" - -RPROVIDES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" -RREPLACES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" -RCONFLICTS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" - -# For ibt -LICENSE_${PN}-ibt-license = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-hw-37-7 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-hw-37-8 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-11-5 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-12-16 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-17 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-misc = "Firmware-ibt_firmware" - -FILES_${PN}-ibt-license = "${nonarch_base_libdir}/firmware/LICENCE.ibt_firmware" -FILES_${PN}-ibt-hw-37-7 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.7*.bseq" -FILES_${PN}-ibt-hw-37-8 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.8*.bseq" -FILES_${PN}-ibt-11-5 = "${nonarch_base_libdir}/firmware/intel/ibt-11-5.sfi ${nonarch_base_libdir}/firmware/intel/ibt-11-5.ddc" -FILES_${PN}-ibt-12-16 = "${nonarch_base_libdir}/firmware/intel/ibt-12-16.sfi ${nonarch_base_libdir}/firmware/intel/ibt-12-16.ddc" -FILES_${PN}-ibt-17 = "${nonarch_base_libdir}/firmware/intel/ibt-17-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-17-*.ddc" -FILES_${PN}-ibt-misc = "${nonarch_base_libdir}/firmware/ibt-*" - -RDEPENDS_${PN}-ibt-hw-37-7 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-hw-37.8 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-11-5 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-12-16 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-17 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-misc = "${PN}-ibt-license" - -ALLOW_EMPTY_${PN}-ibt= "1" -ALLOW_EMPTY_${PN}-ibt-misc = "1" - -LICENSE_${PN}-i915 = "Firmware-i915" -LICENSE_${PN}-i915-license = "Firmware-i915" -FILES_${PN}-i915-license = "${nonarch_base_libdir}/firmware/LICENSE.i915" -FILES_${PN}-i915 = "${nonarch_base_libdir}/firmware/i915" -RDEPENDS_${PN}-i915 = "${PN}-i915-license" - -LICENSE_${PN}-ice = "Firmware-ice" -LICENSE_${PN}-ice-license = "Firmware-ice" -FILES_${PN}-ice-license = "${nonarch_base_libdir}/firmware/LICENSE.ice" -FILES_${PN}-ice = "${nonarch_base_libdir}/firmware/intel/ice" -RDEPENDS_${PN}-ice = "${PN}-ice-license" - -FILES_${PN}-adsp-sst-license = 
"${nonarch_base_libdir}/firmware/LICENCE.adsp_sst" -LICENSE_${PN}-adsp-sst = "Firmware-adsp_sst" -LICENSE_${PN}-adsp-sst-license = "Firmware-adsp_sst" -FILES_${PN}-adsp-sst = "${nonarch_base_libdir}/firmware/intel/dsp_fw*" -RDEPENDS_${PN}-adsp-sst = "${PN}-adsp-sst-license" - -# For QAT -LICENSE_${PN}-qat = "Firmware-qat" -LICENSE_${PN}-qat-license = "Firmware-qat" -FILES_${PN}-qat-license = "${nonarch_base_libdir}/firmware/LICENCE.qat_firmware" -FILES_${PN}-qat = "${nonarch_base_libdir}/firmware/qat*.bin" -RDEPENDS_${PN}-qat = "${PN}-qat-license" - -# For QCOM VPU/GPU and SDM845 -LICENSE_${PN}-qcom-license = "Firmware-qcom" -FILES_${PN}-qcom-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt" -FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*" -FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*" -FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw" -FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*" -FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*" -FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*" -FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*" -FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn" -RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-venus-4.2 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license" - -FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio" - -# For other firmwares -# Maybe split out to separate packages when needed. 
-LICENSE_${PN} = "\ - Firmware-Abilis \ - & Firmware-agere \ - & Firmware-amdgpu \ - & Firmware-amd-ucode \ - & Firmware-atmel \ - & Firmware-ca0132 \ - & Firmware-cavium \ - & Firmware-chelsio_firmware \ - & Firmware-cw1200 \ - & Firmware-dib0700 \ - & Firmware-e100 \ - & Firmware-ene_firmware \ - & Firmware-fw_sst_0f28 \ - & Firmware-go7007 \ - & Firmware-hfi1_firmware \ - & Firmware-i2400m \ - & Firmware-ibt_firmware \ - & Firmware-it913x \ - & Firmware-IntcSST2 \ - & Firmware-kaweth \ - & Firmware-moxa \ - & Firmware-myri10ge_firmware \ - & Firmware-nvidia \ - & Firmware-OLPC \ - & Firmware-ath9k-htc \ - & Firmware-phanfw \ - & Firmware-qat \ - & Firmware-qcom \ - & Firmware-qla1280 \ - & Firmware-qla2xxx \ - & Firmware-r8a779x_usb3 \ - & Firmware-radeon \ - & Firmware-ralink_a_mediatek_company_firmware \ - & Firmware-ralink-firmware \ - & Firmware-imx-sdma_firmware \ - & Firmware-siano \ - & Firmware-tda7706-firmware \ - & Firmware-ti-connectivity \ - & Firmware-ti-keystone \ - & Firmware-ueagle-atm4-firmware \ - & Firmware-wl1251 \ - & Firmware-xc4000 \ - & Firmware-xc5000 \ - & Firmware-xc5000c \ - & WHENCE \ -" - -FILES_${PN}-license += "${nonarch_base_libdir}/firmware/LICEN*" -FILES_${PN} += "${nonarch_base_libdir}/firmware/*" -RDEPENDS_${PN} += "${PN}-license" -RDEPENDS_${PN} += "${PN}-whence-license" - -# Make linux-firmware depend on all of the split-out packages. -# Make linux-firmware-iwlwifi depend on all of the split-out iwlwifi packages. -# Make linux-firmware-ibt depend on all of the split-out ibt packages. -python populate_packages_prepend () { - firmware_pkgs = oe.utils.packages_filter_out_system(d) - d.appendVar('RRECOMMENDS_linux-firmware', ' ' + ' '.join(firmware_pkgs)) - - iwlwifi_pkgs = filter(lambda x: x.find('-iwlwifi-') != -1, firmware_pkgs) - d.appendVar('RRECOMMENDS_linux-firmware-iwlwifi', ' ' + ' '.join(iwlwifi_pkgs)) - - ibt_pkgs = filter(lambda x: x.find('-ibt-') != -1, firmware_pkgs) - d.appendVar('RRECOMMENDS_linux-firmware-ibt', ' ' + ' '.join(ibt_pkgs)) -} - -# Firmware files are generally not ran on the CPU, so they can be -# allarch despite being architecture specific -INSANE_SKIP = "arch" diff --git a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb new file mode 100644 index 000000000..392e03a10 --- /dev/null +++ b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb @@ -0,0 +1,946 @@ +SUMMARY = "Firmware files for use with Linux kernel" +SECTION = "kernel" + +LICENSE = "\ + Firmware-Abilis \ + & Firmware-adsp_sst \ + & Firmware-agere \ + & Firmware-amdgpu \ + & Firmware-amd-ucode \ + & Firmware-amlogic_vdec \ + & Firmware-atheros_firmware \ + & Firmware-atmel \ + & Firmware-broadcom_bcm43xx \ + & Firmware-ca0132 \ + & Firmware-cavium \ + & Firmware-chelsio_firmware \ + & Firmware-cw1200 \ + & Firmware-cypress \ + & Firmware-dib0700 \ + & Firmware-e100 \ + & Firmware-ene_firmware \ + & Firmware-fw_sst_0f28 \ + & Firmware-go7007 \ + & Firmware-GPLv2 \ + & Firmware-hfi1_firmware \ + & Firmware-i2400m \ + & Firmware-i915 \ + & Firmware-ibt_firmware \ + & Firmware-ice \ + & Firmware-it913x \ + & Firmware-iwlwifi_firmware \ + & Firmware-IntcSST2 \ + & Firmware-kaweth \ + & Firmware-Marvell \ + & Firmware-moxa \ + & Firmware-myri10ge_firmware \ + & Firmware-netronome \ + & Firmware-nvidia \ + & Firmware-OLPC \ + & Firmware-ath9k-htc \ + & Firmware-phanfw \ + & Firmware-qat \ + & Firmware-qcom \ + & Firmware-qla1280 \ + & Firmware-qla2xxx \ + & 
Firmware-qualcommAthos_ar3k \ + & Firmware-qualcommAthos_ath10k \ + & Firmware-r8a779x_usb3 \ + & Firmware-radeon \ + & Firmware-ralink_a_mediatek_company_firmware \ + & Firmware-ralink-firmware \ + & Firmware-rtlwifi_firmware \ + & Firmware-imx-sdma_firmware \ + & Firmware-siano \ + & Firmware-tda7706-firmware \ + & Firmware-ti-connectivity \ + & Firmware-ti-keystone \ + & Firmware-ueagle-atm4-firmware \ + & Firmware-via_vt6656 \ + & Firmware-wl1251 \ + & Firmware-xc4000 \ + & Firmware-xc5000 \ + & Firmware-xc5000c \ + & WHENCE \ +" + +LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \ + file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \ + file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \ + file://LICENSE.amdgpu;md5=d357524f5099e2a3db3c1838921c593f \ + file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \ + file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \ + file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \ + file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \ + file://LICENCE.broadcom_bcm43xx;md5=3160c14df7228891b868060e1951dfbc \ + file://LICENCE.ca0132;md5=209b33e66ee5be0461f13d31da392198 \ + file://LICENCE.cadence;md5=009f46816f6956cfb75ede13d3e1cee0 \ + file://LICENCE.cavium;md5=c37aaffb1ebe5939b2580d073a95daea \ + file://LICENCE.chelsio_firmware;md5=819aa8c3fa453f1b258ed8d168a9d903 \ + file://LICENCE.cw1200;md5=f0f770864e7a8444a5c5aa9d12a3a7ed \ + file://LICENCE.cypress;md5=48cd9436c763bf873961f9ed7b5c147b \ + file://LICENSE.dib0700;md5=f7411825c8a555a1a3e5eab9ca773431 \ + file://LICENCE.e100;md5=ec0f84136766df159a3ae6d02acdf5a8 \ + file://LICENCE.ene_firmware;md5=ed67f0f62f8f798130c296720b7d3921 \ + file://LICENCE.fw_sst_0f28;md5=6353931c988ad52818ae733ac61cd293 \ + file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \ + file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \ + file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \ + file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \ + file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \ + file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \ + file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ + file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \ + file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \ + file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \ + file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \ + file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \ + file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \ + file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \ + file://LICENCE.Netronome;md5=4add08f2577086d44447996503cddf5f \ + file://LICENCE.nvidia;md5=4428a922ed3ba2ceec95f076a488ce07 \ + file://LICENCE.NXP;md5=58bb8ba632cd729b9ba6183bc6aed36f \ + file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \ + file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \ + file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \ + file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ + file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \ + file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \ + file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \ + file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \ + 
file://LICENSE.QualcommAtheros_ath10k;md5=cb42b686ee5f5cb890275e4321db60a8 \ + file://LICENCE.r8a779x_usb3;md5=4c1671656153025d7076105a5da7e498 \ + file://LICENSE.radeon;md5=68ec28bacb3613200bca44f404c69b16 \ + file://LICENCE.ralink_a_mediatek_company_firmware;md5=728f1a85fd53fd67fa8d7afb080bc435 \ + file://LICENCE.ralink-firmware.txt;md5=ab2c269277c45476fb449673911a2dfd \ + file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \ + file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \ + file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \ + file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \ + file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \ + file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \ + file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \ + file://LICENCE.via_vt6656;md5=e4159694cba42d4377a912e78a6e850f \ + file://LICENCE.wl1251;md5=ad3f81922bb9e197014bb187289d3b5b \ + file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \ + file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \ + file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \ + file://WHENCE;md5=d373d30188c38dabffec0d3cc87abbfd \ + " + +# These are not common licenses, set NO_GENERIC_LICENSE for them +# so that the license files will be copied from fetched source +NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENCE.Abilis" +NO_GENERIC_LICENSE[Firmware-adsp_sst] = "LICENCE.adsp_sst" +NO_GENERIC_LICENSE[Firmware-agere] = "LICENCE.agere" +NO_GENERIC_LICENSE[Firmware-amdgpu] = "LICENSE.amdgpu" +NO_GENERIC_LICENSE[Firmware-amd-ucode] = "LICENSE.amd-ucode" +NO_GENERIC_LICENSE[Firmware-amlogic_vdec] = "LICENSE.amlogic_vdec" +NO_GENERIC_LICENSE[Firmware-atheros_firmware] = "LICENCE.atheros_firmware" +NO_GENERIC_LICENSE[Firmware-atmel] = "LICENSE.atmel" +NO_GENERIC_LICENSE[Firmware-broadcom_bcm43xx] = "LICENCE.broadcom_bcm43xx" +NO_GENERIC_LICENSE[Firmware-ca0132] = "LICENCE.ca0132" +NO_GENERIC_LICENSE[Firmware-cadence] = "LICENCE.cadence" +NO_GENERIC_LICENSE[Firmware-cavium] = "LICENCE.cavium" +NO_GENERIC_LICENSE[Firmware-chelsio_firmware] = "LICENCE.chelsio_firmware" +NO_GENERIC_LICENSE[Firmware-cw1200] = "LICENCE.cw1200" +NO_GENERIC_LICENSE[Firmware-cypress] = "LICENCE.cypress" +NO_GENERIC_LICENSE[Firmware-dib0700] = "LICENSE.dib0700" +NO_GENERIC_LICENSE[Firmware-e100] = "LICENCE.e100" +NO_GENERIC_LICENSE[Firmware-ene_firmware] = "LICENCE.ene_firmware" +NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28" +NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007" +NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2" +NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware" +NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m" +NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915" +NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware" +NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice" +NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2" +NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x" +NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware" +NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth" +NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell" +NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek" +NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa" +NO_GENERIC_LICENSE[Firmware-myri10ge_firmware] = "LICENCE.myri10ge_firmware" +NO_GENERIC_LICENSE[Firmware-netronome] = "LICENCE.Netronome" +NO_GENERIC_LICENSE[Firmware-nvidia] = 
"LICENCE.nvidia" +NO_GENERIC_LICENSE[Firmware-OLPC] = "LICENCE.OLPC" +NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware" +NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw" +NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware" +NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom" +NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280" +NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx" +NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k" +NO_GENERIC_LICENSE[Firmware-qualcommAthos_ath10k] = "LICENSE.QualcommAtheros_ath10k" +NO_GENERIC_LICENSE[Firmware-r8a779x_usb3] = "LICENCE.r8a779x_usb3" +NO_GENERIC_LICENSE[Firmware-radeon] = "LICENSE.radeon" +NO_GENERIC_LICENSE[Firmware-ralink_a_mediatek_company_firmware] = "LICENCE.ralink_a_mediatek_company_firmware" +NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt" +NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt" +NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano" +NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware" +NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt" +NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity" +NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone" +NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware" +NO_GENERIC_LICENSE[Firmware-via_vt6656] = "LICENCE.via_vt6656" +NO_GENERIC_LICENSE[Firmware-wl1251] = "LICENCE.wl1251" +NO_GENERIC_LICENSE[Firmware-xc4000] = "LICENCE.xc4000" +NO_GENERIC_LICENSE[Firmware-xc5000] = "LICENCE.xc5000" +NO_GENERIC_LICENSE[Firmware-xc5000c] = "LICENCE.xc5000c" +NO_GENERIC_LICENSE[WHENCE] = "WHENCE" + +PE = "1" + +SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz" + +SRC_URI[sha256sum] = "962d3ae197d226c8259f9cc7746f7ef12a9d23787cd56bd27302021ba6339722" + +inherit allarch + +CLEANBROKEN = "1" + +do_compile() { + : +} + +do_install() { + oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install + cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/ +} + + +PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \ + ${PN}-mt7601u-license ${PN}-mt7601u \ + ${PN}-radeon-license ${PN}-radeon \ + ${PN}-marvell-license ${PN}-pcie8897 ${PN}-pcie8997 \ + ${PN}-sd8686 ${PN}-sd8688 ${PN}-sd8787 ${PN}-sd8797 ${PN}-sd8801 ${PN}-sd8887 ${PN}-sd8897 \ + ${PN}-usb8997 \ + ${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \ + ${PN}-vt6656-license ${PN}-vt6656 \ + ${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \ + ${PN}-rtl8168 \ + ${PN}-cypress-license \ + ${PN}-broadcom-license \ + ${PN}-bcm-0bb4-0306 \ + ${PN}-bcm43143 \ + ${PN}-bcm43236b \ + ${PN}-bcm43241b0 \ + ${PN}-bcm43241b4 \ + ${PN}-bcm43241b5 \ + ${PN}-bcm43242a \ + ${PN}-bcm4329 \ + ${PN}-bcm4329-fullmac \ + ${PN}-bcm4330 \ + ${PN}-bcm4334 \ + ${PN}-bcm43340 \ + ${PN}-bcm4335 \ + ${PN}-bcm43362 \ + ${PN}-bcm4339 \ + ${PN}-bcm43430 \ + ${PN}-bcm43430a0 \ + ${PN}-bcm43455 \ + ${PN}-bcm4350 \ + ${PN}-bcm4350c2 \ + ${PN}-bcm4354 \ + ${PN}-bcm4356 \ + ${PN}-bcm4356-pcie \ + ${PN}-bcm43569 \ + ${PN}-bcm43570 \ + ${PN}-bcm4358 \ + ${PN}-bcm43602 \ + ${PN}-bcm4366b \ + ${PN}-bcm4366c \ + ${PN}-bcm4371 \ + ${PN}-bcm4373 \ + ${PN}-bcm43xx \ + ${PN}-bcm43xx-hdr \ + ${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k \ + ${PN}-gplv2-license ${PN}-carl9170 \ + ${PN}-ar3k-license ${PN}-ar3k 
${PN}-ath10k-license ${PN}-ath10k ${PN}-qca \ + \ + ${PN}-imx-sdma-license ${PN}-imx-sdma-imx6q ${PN}-imx-sdma-imx7d \ + \ + ${PN}-iwlwifi-license ${PN}-iwlwifi \ + ${PN}-iwlwifi-135-6 \ + ${PN}-iwlwifi-3160-7 ${PN}-iwlwifi-3160-8 ${PN}-iwlwifi-3160-9 \ + ${PN}-iwlwifi-3160-10 ${PN}-iwlwifi-3160-12 ${PN}-iwlwifi-3160-13 \ + ${PN}-iwlwifi-3160-16 ${PN}-iwlwifi-3160-17 \ + ${PN}-iwlwifi-6000-4 ${PN}-iwlwifi-6000g2a-5 ${PN}-iwlwifi-6000g2a-6 \ + ${PN}-iwlwifi-6000g2b-5 ${PN}-iwlwifi-6000g2b-6 \ + ${PN}-iwlwifi-6050-4 ${PN}-iwlwifi-6050-5 \ + ${PN}-iwlwifi-7260 \ + ${PN}-iwlwifi-7265 \ + ${PN}-iwlwifi-7265d ${PN}-iwlwifi-8000c ${PN}-iwlwifi-8265 \ + ${PN}-iwlwifi-9000 \ + ${PN}-iwlwifi-misc \ + ${PN}-ibt-license ${PN}-ibt ${PN}-ibt-misc \ + ${PN}-ibt-11-5 ${PN}-ibt-12-16 ${PN}-ibt-hw-37-7 ${PN}-ibt-hw-37-8 \ + ${PN}-ibt-17 \ + ${PN}-i915-license ${PN}-i915 \ + ${PN}-ice-license ${PN}-ice \ + ${PN}-adsp-sst-license ${PN}-adsp-sst \ + ${PN}-bnx2-mips \ + ${PN}-liquidio \ + ${PN}-netronome-license ${PN}-netronome \ + ${PN}-qat ${PN}-qat-license \ + ${PN}-qcom-license \ + ${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 \ + ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 ${PN}-qcom-adreno-a630 \ + ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \ + ${PN}-whence-license \ + ${PN}-license \ + " + +# For atheros +LICENSE_${PN}-ar9170 = "Firmware-atheros_firmware" +LICENSE_${PN}-ath6k = "Firmware-atheros_firmware" +LICENSE_${PN}-ath9k = "Firmware-atheros_firmware" +LICENSE_${PN}-atheros-license = "Firmware-atheros_firmware" + +FILES_${PN}-atheros-license = "${nonarch_base_libdir}/firmware/LICENCE.atheros_firmware" +FILES_${PN}-ar9170 = " \ + ${nonarch_base_libdir}/firmware/ar9170*.fw \ +" +FILES_${PN}-ath6k = " \ + ${nonarch_base_libdir}/firmware/ath6k \ +" +FILES_${PN}-ath9k = " \ + ${nonarch_base_libdir}/firmware/ar9271.fw \ + ${nonarch_base_libdir}/firmware/ar7010*.fw \ + ${nonarch_base_libdir}/firmware/htc_9271.fw \ + ${nonarch_base_libdir}/firmware/htc_7010.fw \ + ${nonarch_base_libdir}/firmware/ath9k_htc/htc_7010-1.4.0.fw \ + ${nonarch_base_libdir}/firmware/ath9k_htc/htc_9271-1.4.0.fw \ +" + +RDEPENDS_${PN}-ar9170 += "${PN}-atheros-license" +RDEPENDS_${PN}-ath6k += "${PN}-atheros-license" +RDEPENDS_${PN}-ath9k += "${PN}-atheros-license" + +# For carl9170 +LICENSE_${PN}-carl9170 = "Firmware-GPLv2" +LICENSE_${PN}-gplv2-license = "Firmware-GPLv2" + +FILES_${PN}-gplv2-license = "${nonarch_base_libdir}/firmware/GPL-2" +FILES_${PN}-carl9170 = " \ + ${nonarch_base_libdir}/firmware/carl9170*.fw \ +" + +RDEPENDS_${PN}-carl9170 += "${PN}-gplv2-license" + +# For QualCommAthos +LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k" +LICENSE_${PN}-ar3k-license = "Firmware-qualcommAthos_ar3k" +LICENSE_${PN}-ath10k = "Firmware-qualcommAthos_ath10k" +LICENSE_${PN}-ath10k-license = "Firmware-qualcommAthos_ath10k" +LICENSE_${PN}-qca = "Firmware-qualcommAthos_ath10k" + +FILES_${PN}-ar3k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ar3k" +FILES_${PN}-ar3k = " \ + ${nonarch_base_libdir}/firmware/ar3k \ +" + +FILES_${PN}-ath10k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ath10k" +FILES_${PN}-ath10k = " \ + ${nonarch_base_libdir}/firmware/ath10k \ +" + +FILES_${PN}-qca = " \ + ${nonarch_base_libdir}/firmware/qca \ +" + +RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license" +RDEPENDS_${PN}-ath10k += "${PN}-ath10k-license" +RDEPENDS_${PN}-qca += "${PN}-ath10k-license" + +# For ralink +LICENSE_${PN}-ralink = "Firmware-ralink-firmware" +LICENSE_${PN}-ralink-license = 
"Firmware-ralink-firmware" + +FILES_${PN}-ralink-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink-firmware.txt" +FILES_${PN}-ralink = " \ + ${nonarch_base_libdir}/firmware/rt*.bin \ +" + +RDEPENDS_${PN}-ralink += "${PN}-ralink-license" + +# For mediatek MT7601U +LICENSE_${PN}-mt7601u = "Firmware-ralink_a_mediatek_company_firmware" +LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware" + +FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware" +FILES_${PN}-mt7601u = " \ + ${nonarch_base_libdir}/firmware/mt7601u.bin \ +" + +RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license" + +# For radeon +LICENSE_${PN}-radeon = "Firmware-radeon" +LICENSE_${PN}-radeon-license = "Firmware-radeon" + +FILES_${PN}-radeon-license = "${nonarch_base_libdir}/firmware/LICENSE.radeon" +FILES_${PN}-radeon = " \ + ${nonarch_base_libdir}/firmware/radeon \ +" + +RDEPENDS_${PN}-radeon += "${PN}-radeon-license" + +# For marvell +LICENSE_${PN}-pcie8897 = "Firmware-Marvell" +LICENSE_${PN}-pcie8997 = "Firmware-Marvell" +LICENSE_${PN}-sd8686 = "Firmware-Marvell" +LICENSE_${PN}-sd8688 = "Firmware-Marvell" +LICENSE_${PN}-sd8787 = "Firmware-Marvell" +LICENSE_${PN}-sd8797 = "Firmware-Marvell" +LICENSE_${PN}-sd8801 = "Firmware-Marvell" +LICENSE_${PN}-sd8887 = "Firmware-Marvell" +LICENSE_${PN}-sd8897 = "Firmware-Marvell" +LICENSE_${PN}-usb8997 = "Firmware-Marvell" +LICENSE_${PN}-marvell-license = "Firmware-Marvell" + +FILES_${PN}-marvell-license = "${nonarch_base_libdir}/firmware/LICENCE.Marvell" +FILES_${PN}-pcie8897 = " \ + ${nonarch_base_libdir}/firmware/mrvl/pcie8897_uapsta.bin \ +" +FILES_${PN}-pcie8997 = " \ + ${nonarch_base_libdir}/firmware/mrvl/pcie8997_wlan_v4.bin \ + ${nonarch_base_libdir}/firmware/mrvl/pcieuart8997_combo_v4.bin \ + ${nonarch_base_libdir}/firmware/mrvl/pcieusb8997_combo_v4.bin \ +" +FILES_${PN}-sd8686 = " \ + ${nonarch_base_libdir}/firmware/libertas/sd8686_v9* \ + ${nonarch_base_libdir}/firmware/sd8686* \ +" +FILES_${PN}-sd8688 = " \ + ${nonarch_base_libdir}/firmware/libertas/sd8688* \ + ${nonarch_base_libdir}/firmware/mrvl/sd8688* \ +" +FILES_${PN}-sd8787 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8787_uapsta.bin \ +" +FILES_${PN}-sd8797 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8797_uapsta.bin \ +" +FILES_${PN}-sd8801 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8801_uapsta.bin \ +" +FILES_${PN}-sd8887 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8887_uapsta.bin \ +" +FILES_${PN}-sd8897 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8897_uapsta.bin \ +" +FILES_${PN}-usb8997 = " \ + ${nonarch_base_libdir}/firmware/mrvl/usbusb8997_combo_v4.bin \ +" + +RDEPENDS_${PN}-sd8686 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8688 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8787 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8797 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8801 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8887 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8897 += "${PN}-marvell-license" +RDEPENDS_${PN}-usb8997 += "${PN}-marvell-license" + +# For netronome +LICENSE_${PN}-netronome = "Firmware-netronome" + +FILES_${PN}-netronome-license = " \ + ${nonarch_base_libdir}/firmware/LICENCE.Netronome \ +" +FILES_${PN}-netronome = " \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0081*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0096*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0097*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0099*.nffw \ +" + 
+RDEPENDS_${PN}-netronome += "${PN}-netronome-license" + +# For rtl +LICENSE_${PN}-rtl8188 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192cu = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8168 = "WHENCE" + +FILES_${PN}-rtl-license = " \ + ${nonarch_base_libdir}/firmware/LICENCE.rtlwifi_firmware.txt \ +" +FILES_${PN}-rtl8188 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8188*.bin \ +" +FILES_${PN}-rtl8192cu = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cufw*.bin \ +" +FILES_${PN}-rtl8192ce = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cfw*.bin \ +" +FILES_${PN}-rtl8192su = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8712u.bin \ +" +FILES_${PN}-rtl8723 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8723*.bin \ +" +FILES_${PN}-rtl8821 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8821*.bin \ +" +FILES_${PN}-rtl8168 = " \ + ${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \ +" + +RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8168 += "${PN}-whence-license" + +# For ti-connectivity +LICENSE_${PN}-wlcommon = "Firmware-ti-connectivity" +LICENSE_${PN}-wl12xx = "Firmware-ti-connectivity" +LICENSE_${PN}-wl18xx = "Firmware-ti-connectivity" +LICENSE_${PN}-ti-connectivity-license = "Firmware-ti-connectivity" + +FILES_${PN}-ti-connectivity-license = "${nonarch_base_libdir}/firmware/LICENCE.ti-connectivity" +# wl18xx optionally needs wl1271-nvs.bin (which itself is a symlink to +# wl127x-nvs.bin) - see linux/drivers/net/wireless/ti/wlcore/sdio.c +# and drivers/net/wireless/ti/wlcore/spi.c. +# While they're optional and actually only used to override the MAC +# address on wl18xx, driver loading will delay (by udev timout - 60s) +# if not there. So let's make it available always. Because it's a +# symlink, both need to go to wlcommon. 
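As the comment above explains, wl1271-nvs.bin is shipped as a symlink to wl127x-nvs.bin, so both names have to travel together in ${PN}-wlcommon (packaged just below) or the link would dangle on target and wl18xx probing would stall for the udev timeout. A quick, hypothetical sanity check against an installed firmware tree, again not part of the recipe, might be:

    # Hypothetical sanity check: confirm wl1271-nvs.bin is a symlink and
    # that its target is installed next to it, so the wl18xx driver does
    # not wait out the udev timeout on a dangling link.
    import os

    def check_nvs(firmware_dir="/lib/firmware/ti-connectivity"):
        link = os.path.join(firmware_dir, "wl1271-nvs.bin")
        if not os.path.islink(link):
            return f"{link} is missing or not a symlink"
        target = os.path.join(firmware_dir, os.readlink(link))
        if not os.path.exists(target):
            return f"dangling symlink: {link} -> {os.readlink(link)}"
        return f"ok: wl1271-nvs.bin -> {os.readlink(link)}"

    print(check_nvs())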
+FILES_${PN}-wlcommon = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/TI* \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl127x-nvs.bin \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl1271-nvs.bin \ +" +FILES_${PN}-wl12xx = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl12* \ +" +FILES_${PN}-wl18xx = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl18* \ +" + +RDEPENDS_${PN}-wl12xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" +RDEPENDS_${PN}-wl18xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" + +# For vt6656 +LICENSE_${PN}-vt6656 = "Firmware-via_vt6656" +LICENSE_${PN}-vt6656-license = "Firmware-via_vt6656" + +FILES_${PN}-vt6656-license = "${nonarch_base_libdir}/firmware/LICENCE.via_vt6656" +FILES_${PN}-vt6656 = " \ + ${nonarch_base_libdir}/firmware/vntwusb.fw \ +" + +RDEPENDS_${PN}-vt6656 = "${PN}-vt6656-license" + +# For broadcom + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e " \${PN}-$pkg \\"; done | sort -u + +LICENSE_${PN}-broadcom-license = "Firmware-broadcom_bcm43xx" +FILES_${PN}-broadcom-license = "${nonarch_base_libdir}/firmware/LICENCE.broadcom_bcm43xx" + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo "$i - $pkg"; echo -e "FILES_\${PN}-$pkg = \"\${nonarch_base_libdir}/firmware/brcm/$i\""; done | grep ^FILES + +FILES_${PN}-bcm43xx = "${nonarch_base_libdir}/firmware/brcm/bcm43xx-0.fw" +FILES_${PN}-bcm43xx-hdr = "${nonarch_base_libdir}/firmware/brcm/bcm43xx_hdr-0.fw" +FILES_${PN}-bcm4329-fullmac = "${nonarch_base_libdir}/firmware/brcm/bcm4329-fullmac-4.bin" +FILES_${PN}-bcm43236b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43236b.bin" +FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bin" +FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*" +FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin" +FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin" +FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin" +FILES_${PN}-bcm43241b0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b0-sdio.bin" +FILES_${PN}-bcm43241b4 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b4-sdio.bin" +FILES_${PN}-bcm43241b5 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b5-sdio.bin" +FILES_${PN}-bcm43242a = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43242a.bin" +FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \ + ${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \ +" +FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*" +FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*" +FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin" +FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin" +FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin" +FILES_${PN}-bcm43569 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43569.bin" +FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin" +FILES_${PN}-bcm4358 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4358-pcie.bin" +FILES_${PN}-bcm43602 = 
"${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.bin \ + ${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \ +" +FILES_${PN}-bcm4366b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366b-pcie.bin" +FILES_${PN}-bcm4366c = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366c-pcie.bin" +FILES_${PN}-bcm4371 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4371-pcie.bin" + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e "LICENSE_\${PN}-$pkg = \"Firmware-broadcom_bcm43xx\"\nRDEPENDS_\${PN}-$pkg += \"\${PN}-broadcom-license\""; done +# Currently 1st one and last 6 have cypress LICENSE + +LICENSE_${PN}-bcm43xx = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43xx += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43xx-hdr = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43xx-hdr += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4329-fullmac = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4329-fullmac += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43236b = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43236b += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4329 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4329 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4330 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4330 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4334 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4334 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4335 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4335 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4339 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4339 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b0 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b0 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b4 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b4 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b5 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b5 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43242a = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43242a += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43143 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43143 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43430a0 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43430a0 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43455 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43455 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4350c2 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4350c2 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4350 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4350 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4356 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4356 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43569 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43569 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43570 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43570 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4358 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4358 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43602 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43602 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4366b = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4366b += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4366c = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4366c += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4371 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4371 += "${PN}-broadcom-license" + +# 
For broadcom cypress + +LICENSE_${PN}-cypress-license = "Firmware-cypress" +FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress" + +FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd" +FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*" +FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*" +FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*" +FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin" +FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*" +FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \ + ${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \ +" + +LICENSE_${PN}-bcm-0bb4-0306 = "Firmware-cypress" +RDEPENDS_${PN}-bcm-0bb4-0306 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43340 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43340 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43362 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43362 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43430 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43430 += "${PN}-cypress-license" +LICENSE_${PN}-bcm4354 = "Firmware-cypress" +RDEPENDS_${PN}-bcm4354 += "${PN}-cypress-license" +LICENSE_${PN}-bcm4356-pcie = "Firmware-cypress" +RDEPENDS_${PN}-bcm4356-pcie += "${PN}-cypress-license" +LICENSE_${PN}-bcm4373 = "Firmware-cypress" +RDEPENDS_${PN}-bcm4373 += "${PN}-cypress-license" + +# For Broadcom bnx2-mips +# +# which is a separate case to the other Broadcom firmwares since its +# license is contained in the shared WHENCE file. + +LICENSE_${PN}-bnx2-mips = "WHENCE" +LICENSE_${PN}-whence-license = "WHENCE" + +FILES_${PN}-bnx2-mips = "${nonarch_base_libdir}/firmware/bnx2/bnx2-mips-09-6.2.1b.fw" +FILES_${PN}-whence-license = "${nonarch_base_libdir}/firmware/WHENCE" + +RDEPENDS_${PN}-bnx2-mips += "${PN}-whence-license" + +# For imx-sdma +LICENSE_${PN}-imx-sdma-imx6q = "Firmware-imx-sdma_firmware" +LICENSE_${PN}-imx-sdma-imx7d = "Firmware-imx-sdma_firmware" +LICENSE_${PN}-imx-sdma-license = "Firmware-imx-sdma_firmware" + +FILES_${PN}-imx-sdma-imx6q = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx6q.bin" + +RPROVIDES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" +RREPLACES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" +RCONFLICTS_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" + +FILES_${PN}-imx-sdma-imx7d = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx7d.bin" + +FILES_${PN}-imx-sdma-license = "${nonarch_base_libdir}/firmware/LICENSE.sdma_firmware" + +RDEPENDS_${PN}-imx-sdma-imx6q += "${PN}-imx-sdma-license" +RDEPENDS_${PN}-imx-sdma-imx7d += "${PN}-imx-sdma-license" + +# For iwlwifi +LICENSE_${PN}-iwlwifi = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-135-6 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-7 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-8 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-9 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-10 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-12 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-13 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-16 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-17 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000-4 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2a-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2a-6 = "Firmware-iwlwifi_firmware" 
+LICENSE_${PN}-iwlwifi-6000g2b-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2b-6 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6050-4 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6050-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7260 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7265 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7265d = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-8000c = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-8265 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-9000 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-misc = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-license = "Firmware-iwlwifi_firmware" + + +FILES_${PN}-iwlwifi-license = "${nonarch_base_libdir}/firmware/LICENCE.iwlwifi_firmware" +FILES_${PN}-iwlwifi-135-6 = "${nonarch_base_libdir}/firmware/iwlwifi-135-6.ucode" +FILES_${PN}-iwlwifi-3160-7 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-7.ucode" +FILES_${PN}-iwlwifi-3160-8 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-8.ucode" +FILES_${PN}-iwlwifi-3160-9 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-9.ucode" +FILES_${PN}-iwlwifi-3160-10 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-10.ucode" +FILES_${PN}-iwlwifi-3160-12 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-12.ucode" +FILES_${PN}-iwlwifi-3160-13 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-13.ucode" +FILES_${PN}-iwlwifi-3160-16 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-16.ucode" +FILES_${PN}-iwlwifi-3160-17 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-17.ucode" +FILES_${PN}-iwlwifi-6000-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6000-4.ucode" +FILES_${PN}-iwlwifi-6000g2a-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-5.ucode" +FILES_${PN}-iwlwifi-6000g2a-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-6.ucode" +FILES_${PN}-iwlwifi-6000g2b-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-5.ucode" +FILES_${PN}-iwlwifi-6000g2b-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-6.ucode" +FILES_${PN}-iwlwifi-6050-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-4.ucode" +FILES_${PN}-iwlwifi-6050-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-5.ucode" +FILES_${PN}-iwlwifi-7260 = "${nonarch_base_libdir}/firmware/iwlwifi-7260-*.ucode" +FILES_${PN}-iwlwifi-7265 = "${nonarch_base_libdir}/firmware/iwlwifi-7265-*.ucode" +FILES_${PN}-iwlwifi-7265d = "${nonarch_base_libdir}/firmware/iwlwifi-7265D-*.ucode" +FILES_${PN}-iwlwifi-8000c = "${nonarch_base_libdir}/firmware/iwlwifi-8000C-*.ucode" +FILES_${PN}-iwlwifi-8265 = "${nonarch_base_libdir}/firmware/iwlwifi-8265-*.ucode" +FILES_${PN}-iwlwifi-9000 = "${nonarch_base_libdir}/firmware/iwlwifi-9000-*.ucode" +FILES_${PN}-iwlwifi-misc = "${nonarch_base_libdir}/firmware/iwlwifi-*.ucode" + +RDEPENDS_${PN}-iwlwifi-135-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-7 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-8 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-9 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-10 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-12 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-13 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-16 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-17 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000-4 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2a-5 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2a-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2b-5 = "${PN}-iwlwifi-license" 
+RDEPENDS_${PN}-iwlwifi-6000g2b-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6050-4 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6050-5 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7265d = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-8000c = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-8265 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-9000 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-misc = "${PN}-iwlwifi-license" + +# -iwlwifi-misc is a "catch all" package that includes all the iwlwifi +# firmwares that are not already included in other -iwlwifi- packages. +# -iwlwifi is a virtual package that depends upon all iwlwifi packages. +# These are distinct in order to allow the -misc firmwares to be installed +# without pulling in every other iwlwifi package. +ALLOW_EMPTY_${PN}-iwlwifi = "1" +ALLOW_EMPTY_${PN}-iwlwifi-misc = "1" + +# Handle package updating for the newly merged iwlwifi groupings +RPROVIDES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" +RREPLACES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" +RCONFLICTS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" + +RPROVIDES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" +RREPLACES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" +RCONFLICTS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" + +# For ibt +LICENSE_${PN}-ibt-license = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-hw-37-7 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-hw-37-8 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-11-5 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-12-16 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-17 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-misc = "Firmware-ibt_firmware" + +FILES_${PN}-ibt-license = "${nonarch_base_libdir}/firmware/LICENCE.ibt_firmware" +FILES_${PN}-ibt-hw-37-7 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.7*.bseq" +FILES_${PN}-ibt-hw-37-8 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.8*.bseq" +FILES_${PN}-ibt-11-5 = "${nonarch_base_libdir}/firmware/intel/ibt-11-5.sfi ${nonarch_base_libdir}/firmware/intel/ibt-11-5.ddc" +FILES_${PN}-ibt-12-16 = "${nonarch_base_libdir}/firmware/intel/ibt-12-16.sfi ${nonarch_base_libdir}/firmware/intel/ibt-12-16.ddc" +FILES_${PN}-ibt-17 = "${nonarch_base_libdir}/firmware/intel/ibt-17-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-17-*.ddc" +FILES_${PN}-ibt-misc = "${nonarch_base_libdir}/firmware/ibt-*" + +RDEPENDS_${PN}-ibt-hw-37-7 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-hw-37.8 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-11-5 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-12-16 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-17 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-misc = "${PN}-ibt-license" + +ALLOW_EMPTY_${PN}-ibt= "1" +ALLOW_EMPTY_${PN}-ibt-misc = "1" + +LICENSE_${PN}-i915 = "Firmware-i915" +LICENSE_${PN}-i915-license = "Firmware-i915" +FILES_${PN}-i915-license = "${nonarch_base_libdir}/firmware/LICENSE.i915" +FILES_${PN}-i915 = "${nonarch_base_libdir}/firmware/i915" +RDEPENDS_${PN}-i915 = "${PN}-i915-license" + +LICENSE_${PN}-ice = "Firmware-ice" +LICENSE_${PN}-ice-license = "Firmware-ice" +FILES_${PN}-ice-license = "${nonarch_base_libdir}/firmware/LICENSE.ice" +FILES_${PN}-ice = "${nonarch_base_libdir}/firmware/intel/ice" +RDEPENDS_${PN}-ice = "${PN}-ice-license" + +FILES_${PN}-adsp-sst-license = 
"${nonarch_base_libdir}/firmware/LICENCE.adsp_sst" +LICENSE_${PN}-adsp-sst = "Firmware-adsp_sst" +LICENSE_${PN}-adsp-sst-license = "Firmware-adsp_sst" +FILES_${PN}-adsp-sst = "${nonarch_base_libdir}/firmware/intel/dsp_fw*" +RDEPENDS_${PN}-adsp-sst = "${PN}-adsp-sst-license" + +# For QAT +LICENSE_${PN}-qat = "Firmware-qat" +LICENSE_${PN}-qat-license = "Firmware-qat" +FILES_${PN}-qat-license = "${nonarch_base_libdir}/firmware/LICENCE.qat_firmware" +FILES_${PN}-qat = "${nonarch_base_libdir}/firmware/qat*.bin" +RDEPENDS_${PN}-qat = "${PN}-qat-license" + +# For QCOM VPU/GPU and SDM845 +LICENSE_${PN}-qcom-license = "Firmware-qcom" +FILES_${PN}-qcom-license = "${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt" +FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*" +FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*" +FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw" +FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*" +FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*" +FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*" +FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*" +FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn" +RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-venus-4.2 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license" + +FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio" + +# For other firmwares +# Maybe split out to separate packages when needed. 
+LICENSE_${PN} = "\ + Firmware-Abilis \ + & Firmware-agere \ + & Firmware-amdgpu \ + & Firmware-amd-ucode \ + & Firmware-atmel \ + & Firmware-ca0132 \ + & Firmware-cavium \ + & Firmware-chelsio_firmware \ + & Firmware-cw1200 \ + & Firmware-dib0700 \ + & Firmware-e100 \ + & Firmware-ene_firmware \ + & Firmware-fw_sst_0f28 \ + & Firmware-go7007 \ + & Firmware-hfi1_firmware \ + & Firmware-i2400m \ + & Firmware-ibt_firmware \ + & Firmware-it913x \ + & Firmware-IntcSST2 \ + & Firmware-kaweth \ + & Firmware-moxa \ + & Firmware-myri10ge_firmware \ + & Firmware-nvidia \ + & Firmware-OLPC \ + & Firmware-ath9k-htc \ + & Firmware-phanfw \ + & Firmware-qat \ + & Firmware-qcom \ + & Firmware-qla1280 \ + & Firmware-qla2xxx \ + & Firmware-r8a779x_usb3 \ + & Firmware-radeon \ + & Firmware-ralink_a_mediatek_company_firmware \ + & Firmware-ralink-firmware \ + & Firmware-imx-sdma_firmware \ + & Firmware-siano \ + & Firmware-tda7706-firmware \ + & Firmware-ti-connectivity \ + & Firmware-ti-keystone \ + & Firmware-ueagle-atm4-firmware \ + & Firmware-wl1251 \ + & Firmware-xc4000 \ + & Firmware-xc5000 \ + & Firmware-xc5000c \ + & WHENCE \ +" + +FILES_${PN}-license += "${nonarch_base_libdir}/firmware/LICEN*" +FILES_${PN} += "${nonarch_base_libdir}/firmware/*" +RDEPENDS_${PN} += "${PN}-license" +RDEPENDS_${PN} += "${PN}-whence-license" + +# Make linux-firmware depend on all of the split-out packages. +# Make linux-firmware-iwlwifi depend on all of the split-out iwlwifi packages. +# Make linux-firmware-ibt depend on all of the split-out ibt packages. +python populate_packages_prepend () { + firmware_pkgs = oe.utils.packages_filter_out_system(d) + d.appendVar('RRECOMMENDS_linux-firmware', ' ' + ' '.join(firmware_pkgs)) + + iwlwifi_pkgs = filter(lambda x: x.find('-iwlwifi-') != -1, firmware_pkgs) + d.appendVar('RRECOMMENDS_linux-firmware-iwlwifi', ' ' + ' '.join(iwlwifi_pkgs)) + + ibt_pkgs = filter(lambda x: x.find('-ibt-') != -1, firmware_pkgs) + d.appendVar('RRECOMMENDS_linux-firmware-ibt', ' ' + ' '.join(ibt_pkgs)) +} + +# Firmware files are generally not ran on the CPU, so they can be +# allarch despite being architecture specific +INSANE_SKIP = "arch" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb b/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb index 8a61b2276..175836ef9 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto-dev.git;branch=${KBRANCH};name SRCREV_machine ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' SRCREV_meta ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' -LINUX_VERSION ?= "5.7-rc+" +LINUX_VERSION ?= "5.8-rc+" LINUX_VERSION_EXTENSION ?= "-yoctodev-${LINUX_KERNEL_TYPE}" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb index a9213c243..a4b593b96 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "1afad41c91e13f957bbe61fa7fdc05a77e84d489" -SRCREV_meta ?= "aafb8f095e97013d6e55b09ed150369cbe0c6476" +SRCREV_machine ?= 
"6415a4e7c405526f97049ede833f52127a5ce7a1" +SRCREV_meta ?= "416566e1f01c3c02ca5b3a03d0943df387d521f0" SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" -LINUX_VERSION ?= "5.4.43" +LINUX_VERSION ?= "5.4.50" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb index aab53bed6..0622d3383 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb @@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "5.4.43" +LINUX_VERSION ?= "5.4.50" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" @@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine_qemuarm ?= "c3864bf2906c5075305bf675c27788505ea93047" -SRCREV_machine ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_meta ?= "aafb8f095e97013d6e55b09ed150369cbe0c6476" +SRCREV_machine_qemuarm ?= "1a8a14e45f01cfee926c5b35d2d67e6f1a7eebfc" +SRCREV_machine ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_meta ?= "416566e1f01c3c02ca5b3a03d0943df387d521f0" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb index 01795e616..2476b3502 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb @@ -12,16 +12,16 @@ KBRANCH_qemux86 ?= "v5.4/standard/base" KBRANCH_qemux86-64 ?= "v5.4/standard/base" KBRANCH_qemumips64 ?= "v5.4/standard/mti-malta64" -SRCREV_machine_qemuarm ?= "6c628db39bf48f7dd6cd95ae826bcaa18a56df1d" -SRCREV_machine_qemuarm64 ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_machine_qemumips ?= "c0cd2937ae195344ece04663b30b2049427b3c57" -SRCREV_machine_qemuppc ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_machine_qemuriscv64 ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_machine_qemux86 ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_machine_qemux86-64 ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_machine_qemumips64 ?= "6254ff1d776b75bc3d9c2c66c083fbc622091cd4" -SRCREV_machine ?= "9e1b13d7f9d84f691fb9988c5f53c5ab62b8a5e9" -SRCREV_meta ?= "aafb8f095e97013d6e55b09ed150369cbe0c6476" +SRCREV_machine_qemuarm ?= "99743105f331e90852ccb9e72ce26134dbcafec6" +SRCREV_machine_qemuarm64 ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_machine_qemumips ?= "886870e5abaeaaf753fb50a1e5be56336c44c642" +SRCREV_machine_qemuppc ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_machine_qemuriscv64 ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_machine_qemux86 ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_machine_qemux86-64 ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_machine_qemumips64 ?= "7548abbb409eeef6f0575eed25231090f902559e" +SRCREV_machine ?= "94667198aabf869571bdff5291a24956796faddf" +SRCREV_meta ?= "416566e1f01c3c02ca5b3a03d0943df387d521f0" # remap qemuarm to qemuarma15 for the 5.4 kernel # KMACHINE_qemuarm ?= "qemuarma15" @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA 
git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" -LINUX_VERSION ?= "5.4.43" +LINUX_VERSION ?= "5.4.50" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" DEPENDS += "openssl-native util-linux-native" diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb index 8d3fb475c..c0df0cab3 100644 --- a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb +++ b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb @@ -32,11 +32,11 @@ python do_package_prepend() { BBCLASSEXTEND = "devupstream:target" LIC_FILES_CHKSUM_class-devupstream = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" DEFAULT_PREFERENCE_class-devupstream = "-1" -SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.11 \ +SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.12 \ file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ " -SRCREV_class-devupstream = "17c413953603f063f2a9d6c3788bec914ce6f955" -PV_class-devupstream = "2.11.2+git${SRCPV}" +SRCREV_class-devupstream = "11441f8f17f7825f529e2f6c54d3605771709260" +PV_class-devupstream = "2.12.1+git${SRCPV}" S_class-devupstream = "${WORKDIR}/git" SRCREV_FORMAT ?= "lttng_git" diff --git a/poky/meta/recipes-kernel/modutils-initscripts/modutils-initscripts.bb b/poky/meta/recipes-kernel/modutils-initscripts/modutils-initscripts.bb index 0f3df5577..881b7db92 100644 --- a/poky/meta/recipes-kernel/modutils-initscripts/modutils-initscripts.bb +++ b/poky/meta/recipes-kernel/modutils-initscripts/modutils-initscripts.bb @@ -24,7 +24,7 @@ do_install () { PACKAGE_WRITE_DEPS_append = " ${@bb.utils.contains('DISTRO_FEATURES','systemd','systemd-systemctl-native','',d)}" pkg_postinst_${PN} () { - if ${@bb.utils.contains('DISTRO_FEATURES','systemd','true','false',d)}; then + if type systemctl >/dev/null 2>/dev/null; then if [ -n "$D" ]; then OPTS="--root=$D" fi diff --git a/poky/meta/recipes-kernel/systemtap/systemtap_git.inc b/poky/meta/recipes-kernel/systemtap/systemtap_git.inc index 116e83fe0..4ec0703f2 100644 --- a/poky/meta/recipes-kernel/systemtap/systemtap_git.inc +++ b/poky/meta/recipes-kernel/systemtap/systemtap_git.inc @@ -1,7 +1,7 @@ LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" -SRCREV = "044a0640985ef007c0b2fb6eaf660d9d51800cda" -PV = "4.2" +SRCREV = "c9c23c987d819d07c6b96b54f8e03188fecd9e46" +PV = "4.3" SRC_URI = "git://sourceware.org/git/systemtap.git \ file://0001-Do-not-let-configure-write-a-python-location-into-th.patch \ diff --git a/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.2.bb deleted file mode 100644 index 9a4082ff9..000000000 --- a/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.2.bb +++ /dev/null @@ -1,43 +0,0 @@ -SUMMARY = "ALSA sound library" -HOMEPAGE = "http://www.alsa-project.org" -BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking" -SECTION = "libs/multimedia" -LICENSE = "LGPLv2.1 & GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=a916467b91076e631dd8edb7424769c7 \ - file://src/socket.c;md5=285675b45e83f571c6a957fe4ab79c93;beginline=9;endline=24 \ - " - -SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" -SRC_URI[md5sum] = "82cdc23a5233d5ed319d2cbc89af5ca5" -SRC_URI[sha256sum] = 
"d8e853d8805574777bbe40937812ad1419c9ea7210e176f0def3e6ed255ab3ec" - -inherit autotools pkgconfig - -EXTRA_OECONF += " \ - ${@bb.utils.contains('TARGET_FPU', 'soft', '--with-softfloat', '', d)} \ - --disable-python \ -" - -PACKAGES =+ "alsa-server alsa-conf libatopology" - -FILES_alsa-server = "${bindir}/*" -FILES_alsa-conf = "${datadir}/alsa/" -FILES_libatopology = "${libdir}/libatopology.so.*" - -RDEPENDS_${PN}_class-target = "alsa-conf alsa-ucm-conf" -RDEPENDS_libatopology_class-target = "alsa-topology-conf" - -# upgrade path -RPROVIDES_${PN} = "libasound" -RREPLACES_${PN} = "libasound" -RCONFLICTS_${PN} = "libasound" - -RPROVIDES_${PN}-dev = "alsa-dev" -RREPLACES_${PN}-dev = "alsa-dev" -RCONFLICTS_${PN}-dev = "alsa-dev" - -RPROVIDES_alsa-conf = "alsa-conf-base" -RREPLACES_alsa-conf = "alsa-conf-base" -RCONFLICTS_alsa-conf = "alsa-conf-base" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.3.1.bb b/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.3.1.bb new file mode 100644 index 000000000..1eb56f405 --- /dev/null +++ b/poky/meta/recipes-multimedia/alsa/alsa-lib_1.2.3.1.bb @@ -0,0 +1,42 @@ +SUMMARY = "ALSA sound library" +HOMEPAGE = "http://www.alsa-project.org" +BUGTRACKER = "http://alsa-project.org/main/index.php/Bug_Tracking" +SECTION = "libs/multimedia" +LICENSE = "LGPLv2.1 & GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=a916467b91076e631dd8edb7424769c7 \ + file://src/socket.c;md5=285675b45e83f571c6a957fe4ab79c93;beginline=9;endline=24 \ + " + +SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" +SRC_URI[sha256sum] = "1244585515fbebce7d0e53656c1fb614f99accb8413115ce3efb9a9309c4c892" + +inherit autotools pkgconfig + +EXTRA_OECONF += " \ + ${@bb.utils.contains('TARGET_FPU', 'soft', '--with-softfloat', '', d)} \ + --disable-python \ +" + +PACKAGES =+ "alsa-server alsa-conf libatopology" + +FILES_alsa-server = "${bindir}/*" +FILES_alsa-conf = "${datadir}/alsa/" +FILES_libatopology = "${libdir}/libatopology.so.*" + +RDEPENDS_${PN}_class-target = "alsa-conf alsa-ucm-conf" +RDEPENDS_libatopology_class-target = "alsa-topology-conf" + +# upgrade path +RPROVIDES_${PN} = "libasound" +RREPLACES_${PN} = "libasound" +RCONFLICTS_${PN} = "libasound" + +RPROVIDES_${PN}-dev = "alsa-dev" +RREPLACES_${PN}-dev = "alsa-dev" +RCONFLICTS_${PN}-dev = "alsa-dev" + +RPROVIDES_alsa-conf = "alsa-conf-base" +RREPLACES_alsa-conf = "alsa-conf-base" +RCONFLICTS_alsa-conf = "alsa-conf-base" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.2.bb deleted file mode 100644 index 2901794cb..000000000 --- a/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.2.bb +++ /dev/null @@ -1,19 +0,0 @@ -SUMMARY = "ALSA topology configuration files" -HOMEPAGE = "https://alsa-project.org" -BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking" -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=20d74d74db9741697903372ad001d3b4" - -SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" -SRC_URI[sha256sum] = "b472d6b567c78173bd69543d9cffc9e379c80eb763c3afc8d5b24d5610d19425" - -inherit allarch - -do_install() { - install -d ${D}/usr/share/alsa - cp -r ${S}/topology ${D}/usr/share/alsa -} - -PACKAGES = "${PN}" - -FILES_${PN} = "*" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb new file mode 100644 index 000000000..fd949be8a 
--- /dev/null +++ b/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb @@ -0,0 +1,19 @@ +SUMMARY = "ALSA topology configuration files" +HOMEPAGE = "https://alsa-project.org" +BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking" +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=20d74d74db9741697903372ad001d3b4" + +SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" +SRC_URI[sha256sum] = "833f99b2cbda34e0cfef867ef1d2e6a74fe276bb7fc525a573be32077f629dff" + +inherit allarch + +do_install() { + install -d ${D}/usr/share/alsa + cp -r ${S}/topology ${D}/usr/share/alsa +} + +PACKAGES = "${PN}" + +FILES_${PN} = "*" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.2.bb deleted file mode 100644 index 1a524d49b..000000000 --- a/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.2.bb +++ /dev/null @@ -1,20 +0,0 @@ -SUMMARY = "ALSA Use Case Manager configuration" -HOMEPAGE = "https://alsa-project.org" -BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking" -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=20d74d74db9741697903372ad001d3b4" - -SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" -SRC_URI[sha256sum] = "7ebfd929bc85a51f16fa3c8c4db13faa2ea6ff2b2266fc36d6198bdafe73c40c" - -inherit allarch - -do_install() { - install -d ${D}/usr/share/alsa - cp -r ${S}/ucm ${D}/usr/share/alsa - cp -r ${S}/ucm2 ${D}/usr/share/alsa -} - -PACKAGES = "${PN}" - -FILES_${PN} = "*" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb new file mode 100644 index 000000000..19eeabff7 --- /dev/null +++ b/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb @@ -0,0 +1,20 @@ +SUMMARY = "ALSA Use Case Manager configuration" +HOMEPAGE = "https://alsa-project.org" +BUGTRACKER = "https://alsa-project.org/wiki/Bug_Tracking" +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=20d74d74db9741697903372ad001d3b4" + +SRC_URI = "https://www.alsa-project.org/files/pub/lib/${BP}.tar.bz2" +SRC_URI[sha256sum] = "1bc24da04bb27a75e323c9f0fb03e44705b6bb8a8baf255b94b41d457d590d00" + +inherit allarch + +do_install() { + install -d ${D}/usr/share/alsa + cp -r ${S}/ucm ${D}/usr/share/alsa + cp -r ${S}/ucm2 ${D}/usr/share/alsa +} + +PACKAGES = "${PN}" + +FILES_${PN} = "*" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.2.bb deleted file mode 100644 index 048fef68a..000000000 --- a/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.2.bb +++ /dev/null @@ -1,25 +0,0 @@ -require alsa-utils.inc - -SUMMARY = "Shell scripts that show help info and create ALSA configuration files" -PROVIDES = "alsa-utils-alsaconf" - -FILESEXTRAPATHS_prepend := "${THISDIR}/alsa-utils:" - -PACKAGES = "${PN}" -RDEPENDS_${PN} += "bash" - -FILES_${PN} = "${sbindir}/alsaconf \ - ${sbindir}/alsa-info.sh \ - ${sbindir}/alsabat-test.sh \ - " - -S = "${WORKDIR}/alsa-utils-${PV}" - -do_install() { - install -d ${D}${sbindir} - install -m 0755 ${B}/alsaconf/alsaconf ${D}${sbindir}/ - install -m 0755 ${S}/alsa-info/alsa-info.sh ${D}${sbindir}/ - if ${@bb.utils.contains('PACKAGECONFIG', 'bat', 'true', 'false', d)}; then - install -m 0755 ${S}/bat/alsabat-test.sh ${D}${sbindir}/ - fi -} diff --git a/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.3.bb new file mode 100644 
index 000000000..048fef68a --- /dev/null +++ b/poky/meta/recipes-multimedia/alsa/alsa-utils-scripts_1.2.3.bb @@ -0,0 +1,25 @@ +require alsa-utils.inc + +SUMMARY = "Shell scripts that show help info and create ALSA configuration files" +PROVIDES = "alsa-utils-alsaconf" + +FILESEXTRAPATHS_prepend := "${THISDIR}/alsa-utils:" + +PACKAGES = "${PN}" +RDEPENDS_${PN} += "bash" + +FILES_${PN} = "${sbindir}/alsaconf \ + ${sbindir}/alsa-info.sh \ + ${sbindir}/alsabat-test.sh \ + " + +S = "${WORKDIR}/alsa-utils-${PV}" + +do_install() { + install -d ${D}${sbindir} + install -m 0755 ${B}/alsaconf/alsaconf ${D}${sbindir}/ + install -m 0755 ${S}/alsa-info/alsa-info.sh ${D}${sbindir}/ + if ${@bb.utils.contains('PACKAGECONFIG', 'bat', 'true', 'false', d)}; then + install -m 0755 ${S}/bat/alsabat-test.sh ${D}${sbindir}/ + fi +} diff --git a/poky/meta/recipes-multimedia/alsa/alsa-utils.inc b/poky/meta/recipes-multimedia/alsa/alsa-utils.inc index 8bbc5d3ef..b523a5a83 100644 --- a/poky/meta/recipes-multimedia/alsa/alsa-utils.inc +++ b/poky/meta/recipes-multimedia/alsa/alsa-utils.inc @@ -21,8 +21,7 @@ PACKAGECONFIG[manpages] = "--enable-xmlto, --disable-xmlto, xmlto-native docbook # alsa-utils specified in SRC_URI due to alsa-utils-scripts recipe SRC_URI = "https://www.alsa-project.org/files/pub/utils/alsa-utils-${PV}.tar.bz2" -SRC_URI[md5sum] = "00612234ff4722c8f7f8f7a83ff9bc63" -SRC_URI[sha256sum] = "44807bd578c5f6df6e91a11b8d37e546424a5a1ea8d8e659ee359fe01730e4f3" +SRC_URI[sha256sum] = "ff19ae48c22938de7a491bdb39db74a2eee2546013f39bf1a86185e426f921aa" # On build machines with python-docutils (not python3-docutils !!) installed # rst2man (not rst2man.py) is detected and compile fails with diff --git a/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.2.bb deleted file mode 100644 index ff8945e5c..000000000 --- a/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.2.bb +++ /dev/null @@ -1 +0,0 @@ -require alsa-utils.inc diff --git a/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.3.bb new file mode 100644 index 000000000..3430288da --- /dev/null +++ b/poky/meta/recipes-multimedia/alsa/alsa-utils_1.2.3.bb @@ -0,0 +1,2 @@ +require alsa-utils.inc + diff --git a/poky/meta/recipes-multimedia/ffmpeg/ffmpeg_4.3.bb b/poky/meta/recipes-multimedia/ffmpeg/ffmpeg_4.3.bb index 2b0c38d78..b4cf278cb 100644 --- a/poky/meta/recipes-multimedia/ffmpeg/ffmpeg_4.3.bb +++ b/poky/meta/recipes-multimedia/ffmpeg/ffmpeg_4.3.bb @@ -57,6 +57,7 @@ PACKAGECONFIG[avresample] = "--enable-avresample,--disable-avresample" # features to support PACKAGECONFIG[alsa] = "--enable-alsa,--disable-alsa,alsa-lib" +PACKAGECONFIG[altivec] = "--enable-altivec,--disable-altivec," PACKAGECONFIG[bzlib] = "--enable-bzlib,--disable-bzlib,bzip2" PACKAGECONFIG[fdk-aac] = "--enable-libfdk-aac --enable-nonfree,--disable-libfdk-aac,fdk-aac" PACKAGECONFIG[gpl] = "--enable-gpl,--disable-gpl" diff --git a/poky/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb b/poky/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb index cc7a7e78e..e23d23d9c 100644 --- a/poky/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb +++ b/poky/meta/recipes-multimedia/gstreamer/gst-examples_1.16.0.bb @@ -15,6 +15,7 @@ S = "${WORKDIR}/git" inherit meson pkgconfig features_check +UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+\.(\d*[02468])+(\.\d+)+)" ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" diff --git a/poky/meta/recipes-sato/puzzles/puzzles_git.bb 
b/poky/meta/recipes-sato/puzzles/puzzles_git.bb index 1e530519f..a0f3b5d9c 100644 --- a/poky/meta/recipes-sato/puzzles/puzzles_git.bb +++ b/poky/meta/recipes-sato/puzzles/puzzles_git.bb @@ -17,7 +17,7 @@ SRC_URI = "git://git.tartarus.org/simon/puzzles.git \ " UPSTREAM_CHECK_COMMITS = "1" -SRCREV = "66b9e8c7de0eecb3d85d9a1766fab1082848448b" +SRCREV = "9aa7b7cdfb2bcd200f45941a58d6ae698882a2d4" PE = "2" PV = "0.0+git${SRCPV}" diff --git a/poky/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc b/poky/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc index b568f0458..b064a63ca 100644 --- a/poky/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc +++ b/poky/meta/recipes-sato/rxvt-unicode/rxvt-unicode.inc @@ -30,6 +30,8 @@ EXTRA_OECONF = "--enable-xim \ --enable-combining --disable-perl \ --with-x=${STAGING_DIR_HOST}${prefix}" +EXTRA_OECONF_append_libc-musl = " --disable-wtmp" + PACKAGECONFIG ??= "" PACKAGECONFIG[startup] = "--enable-startup-notification,--disable-startup-notification,startup-notification," diff --git a/poky/meta/recipes-support/curl/curl_7.71.0.bb b/poky/meta/recipes-support/curl/curl_7.71.0.bb deleted file mode 100644 index c1abe0138..000000000 --- a/poky/meta/recipes-support/curl/curl_7.71.0.bb +++ /dev/null @@ -1,83 +0,0 @@ -SUMMARY = "Command line tool and library for client-side URL transfers" -HOMEPAGE = "http://curl.haxx.se/" -BUGTRACKER = "http://curl.haxx.se/mail/list.cgi?list=curl-tracker" -SECTION = "console/network" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=2e9fb35867314fe31c6a4977ef7dd531" - -SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \ - file://0001-replace-krb5-config-with-pkg-config.patch \ -" - -SRC_URI[sha256sum] = "600f00ac2481a89548a4141ddf983fd9386165e1960bac91d0a1c81dca5dd341" - -CVE_PRODUCT = "curl libcurl" -inherit autotools pkgconfig binconfig multilib_header - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib" -PACKAGECONFIG_class-native = "ipv6 proxy ssl threaded-resolver verbose zlib" -PACKAGECONFIG_class-nativesdk = "ipv6 proxy ssl threaded-resolver verbose zlib" - -# 'ares' and 'threaded-resolver' are mutually exclusive -PACKAGECONFIG[ares] = "--enable-ares,--disable-ares,c-ares,,,threaded-resolver" -PACKAGECONFIG[brotli] = "--with-brotli,--without-brotli,brotli" -PACKAGECONFIG[builtinmanual] = "--enable-manual,--disable-manual" -PACKAGECONFIG[dict] = "--enable-dict,--disable-dict," -PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls" -PACKAGECONFIG[gopher] = "--enable-gopher,--disable-gopher," -PACKAGECONFIG[imap] = "--enable-imap,--disable-imap," -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," -PACKAGECONFIG[krb5] = "--with-gssapi,--without-gssapi,krb5" -PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap," -PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps," -PACKAGECONFIG[libidn] = "--with-libidn2,--without-libidn2,libidn2" -PACKAGECONFIG[libssh2] = "--with-libssh2,--without-libssh2,libssh2" -PACKAGECONFIG[mbedtls] = "--with-mbedtls=${STAGING_DIR_TARGET},--without-mbedtls,mbedtls" -PACKAGECONFIG[mqtt] = "--enable-mqtt,--disable-mqtt," -PACKAGECONFIG[nghttp2] = "--with-nghttp2,--without-nghttp2,nghttp2" -PACKAGECONFIG[pop3] = "--enable-pop3,--disable-pop3," -PACKAGECONFIG[proxy] = "--enable-proxy,--disable-proxy," -PACKAGECONFIG[rtmpdump] = "--with-librtmp,--without-librtmp,rtmpdump" -PACKAGECONFIG[rtsp] = "--enable-rtsp,--disable-rtsp," -PACKAGECONFIG[smb] = "--enable-smb,--disable-smb," -PACKAGECONFIG[smtp] = 
"--enable-smtp,--disable-smtp," -PACKAGECONFIG[ssl] = "--with-ssl --with-random=/dev/urandom,--without-ssl,openssl" -PACKAGECONFIG[nss] = "--with-nss,--without-nss,nss" -PACKAGECONFIG[telnet] = "--enable-telnet,--disable-telnet," -PACKAGECONFIG[tftp] = "--enable-tftp,--disable-tftp," -PACKAGECONFIG[threaded-resolver] = "--enable-threaded-resolver,--disable-threaded-resolver,,,,ares" -PACKAGECONFIG[verbose] = "--enable-verbose,--disable-verbose" -PACKAGECONFIG[zlib] = "--with-zlib=${STAGING_LIBDIR}/../,--without-zlib,zlib" - -EXTRA_OECONF = " \ - --disable-libcurl-option \ - --disable-ntlm-wb \ - --enable-crypto-auth \ - --with-ca-bundle=${sysconfdir}/ssl/certs/ca-certificates.crt \ - --without-libmetalink \ - --without-libpsl \ - --enable-debug \ - --enable-optimize \ - --disable-curldebug \ -" - -do_install_append_class-target() { - # cleanup buildpaths from curl-config - sed -i \ - -e 's,--sysroot=${STAGING_DIR_TARGET},,g' \ - -e 's,--with-libtool-sysroot=${STAGING_DIR_TARGET},,g' \ - -e 's|${DEBUG_PREFIX_MAP}||g' \ - ${D}${bindir}/curl-config -} - -PACKAGES =+ "lib${BPN}" - -FILES_lib${BPN} = "${libdir}/lib*.so.*" -RRECOMMENDS_lib${BPN} += "ca-certificates" - -FILES_${PN} += "${datadir}/zsh" - -inherit multilib_script -MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/curl-config" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/curl/curl_7.71.1.bb b/poky/meta/recipes-support/curl/curl_7.71.1.bb new file mode 100644 index 000000000..f028f1fdd --- /dev/null +++ b/poky/meta/recipes-support/curl/curl_7.71.1.bb @@ -0,0 +1,83 @@ +SUMMARY = "Command line tool and library for client-side URL transfers" +HOMEPAGE = "http://curl.haxx.se/" +BUGTRACKER = "http://curl.haxx.se/mail/list.cgi?list=curl-tracker" +SECTION = "console/network" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=2e9fb35867314fe31c6a4977ef7dd531" + +SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \ + file://0001-replace-krb5-config-with-pkg-config.patch \ +" + +SRC_URI[sha256sum] = "9d52a4d80554f9b0d460ea2be5d7be99897a1a9f681ffafe739169afd6b4f224" + +CVE_PRODUCT = "curl libcurl" +inherit autotools pkgconfig binconfig multilib_header + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib" +PACKAGECONFIG_class-native = "ipv6 proxy ssl threaded-resolver verbose zlib" +PACKAGECONFIG_class-nativesdk = "ipv6 proxy ssl threaded-resolver verbose zlib" + +# 'ares' and 'threaded-resolver' are mutually exclusive +PACKAGECONFIG[ares] = "--enable-ares,--disable-ares,c-ares,,,threaded-resolver" +PACKAGECONFIG[brotli] = "--with-brotli,--without-brotli,brotli" +PACKAGECONFIG[builtinmanual] = "--enable-manual,--disable-manual" +PACKAGECONFIG[dict] = "--enable-dict,--disable-dict," +PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls" +PACKAGECONFIG[gopher] = "--enable-gopher,--disable-gopher," +PACKAGECONFIG[imap] = "--enable-imap,--disable-imap," +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +PACKAGECONFIG[krb5] = "--with-gssapi,--without-gssapi,krb5" +PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap," +PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps," +PACKAGECONFIG[libidn] = "--with-libidn2,--without-libidn2,libidn2" +PACKAGECONFIG[libssh2] = "--with-libssh2,--without-libssh2,libssh2" +PACKAGECONFIG[mbedtls] = "--with-mbedtls=${STAGING_DIR_TARGET},--without-mbedtls,mbedtls" +PACKAGECONFIG[mqtt] = "--enable-mqtt,--disable-mqtt," +PACKAGECONFIG[nghttp2] = "--with-nghttp2,--without-nghttp2,nghttp2" 
+PACKAGECONFIG[pop3] = "--enable-pop3,--disable-pop3," +PACKAGECONFIG[proxy] = "--enable-proxy,--disable-proxy," +PACKAGECONFIG[rtmpdump] = "--with-librtmp,--without-librtmp,rtmpdump" +PACKAGECONFIG[rtsp] = "--enable-rtsp,--disable-rtsp," +PACKAGECONFIG[smb] = "--enable-smb,--disable-smb," +PACKAGECONFIG[smtp] = "--enable-smtp,--disable-smtp," +PACKAGECONFIG[ssl] = "--with-ssl --with-random=/dev/urandom,--without-ssl,openssl" +PACKAGECONFIG[nss] = "--with-nss,--without-nss,nss" +PACKAGECONFIG[telnet] = "--enable-telnet,--disable-telnet," +PACKAGECONFIG[tftp] = "--enable-tftp,--disable-tftp," +PACKAGECONFIG[threaded-resolver] = "--enable-threaded-resolver,--disable-threaded-resolver,,,,ares" +PACKAGECONFIG[verbose] = "--enable-verbose,--disable-verbose" +PACKAGECONFIG[zlib] = "--with-zlib=${STAGING_LIBDIR}/../,--without-zlib,zlib" + +EXTRA_OECONF = " \ + --disable-libcurl-option \ + --disable-ntlm-wb \ + --enable-crypto-auth \ + --with-ca-bundle=${sysconfdir}/ssl/certs/ca-certificates.crt \ + --without-libmetalink \ + --without-libpsl \ + --enable-debug \ + --enable-optimize \ + --disable-curldebug \ +" + +do_install_append_class-target() { + # cleanup buildpaths from curl-config + sed -i \ + -e 's,--sysroot=${STAGING_DIR_TARGET},,g' \ + -e 's,--with-libtool-sysroot=${STAGING_DIR_TARGET},,g' \ + -e 's|${DEBUG_PREFIX_MAP}||g' \ + ${D}${bindir}/curl-config +} + +PACKAGES =+ "lib${BPN}" + +FILES_lib${BPN} = "${libdir}/lib*.so.*" +RRECOMMENDS_lib${BPN} += "ca-certificates" + +FILES_${PN} += "${datadir}/zsh" + +inherit multilib_script +MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/curl-config" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_147.bb b/poky/meta/recipes-support/diffoscope/diffoscope_147.bb deleted file mode 100644 index 3db38fa32..000000000 --- a/poky/meta/recipes-support/diffoscope/diffoscope_147.bb +++ /dev/null @@ -1,17 +0,0 @@ -SUMMARY = "in-depth comparison of files, archives, and directories" -HOMEPAGE = "https://diffoscope.org/" -LICENSE = "GPL-3.0+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -PYPI_PACKAGE = "diffoscope" - -inherit pypi setuptools3 - -SRC_URI[sha256sum] = "5b5fdaa4d900c891b319e4b9a006eb16824a0b61fdbe15e75acbbe35602e2da6" - -RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" - -# Dependencies don't build for musl -COMPATIBLE_HOST_libc-musl = 'null' - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_150.bb b/poky/meta/recipes-support/diffoscope/diffoscope_150.bb new file mode 100644 index 000000000..58bd60a2c --- /dev/null +++ b/poky/meta/recipes-support/diffoscope/diffoscope_150.bb @@ -0,0 +1,17 @@ +SUMMARY = "in-depth comparison of files, archives, and directories" +HOMEPAGE = "https://diffoscope.org/" +LICENSE = "GPL-3.0+" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +PYPI_PACKAGE = "diffoscope" + +inherit pypi setuptools3 + +SRC_URI[sha256sum] = "0b9e4ae401fe4cbb8ce89b0bcabe608581d0ed53b91a28f18337179c7494af57" + +RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" + +# Dependencies don't build for musl +COMPATIBLE_HOST_libc-musl = 'null' + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/iso-codes/iso-codes_4.5.0.bb b/poky/meta/recipes-support/iso-codes/iso-codes_4.5.0.bb index 6f9868ffe..9d02f5c79 100644 --- a/poky/meta/recipes-support/iso-codes/iso-codes_4.5.0.bb +++ b/poky/meta/recipes-support/iso-codes/iso-codes_4.5.0.bb @@ -5,7 
+5,7 @@ BUGTRACKER = "https://salsa.debian.org/iso-codes-team/iso-codes/issues" LICENSE = "LGPLv2.1" LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" -SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=http" +SRC_URI = "git://salsa.debian.org/iso-codes-team/iso-codes.git;protocol=http;branch=main;" SRCREV = "a36019e5014bff251f83d522ddcfebaecf52afd3" # inherit gettext cannot be used, because it adds gettext-native to BASEDEPENDS which diff --git a/poky/meta/recipes-support/libcheck/libcheck_0.14.0.bb b/poky/meta/recipes-support/libcheck/libcheck_0.14.0.bb deleted file mode 100644 index a88f009cd..000000000 --- a/poky/meta/recipes-support/libcheck/libcheck_0.14.0.bb +++ /dev/null @@ -1,29 +0,0 @@ -SUMMARY = "Check - unit testing framework for C code" -HOMEPAGE = "https://libcheck.github.io/check/" -SECTION = "devel" - -LICENSE = "LGPLv2.1+" -LIC_FILES_CHKSUM = "file://COPYING.LESSER;md5=2d5025d4aa3495befef8f17206a5b0a1" - -SRC_URI = "https://github.com/${BPN}/check/releases/download/${PV}/check-${PV}.tar.gz \ - file://not-echo-compiler-info-to-check_stdint.h.patch" -SRC_URI[md5sum] = "270e82a445be6026040267a5e11cc94b" -SRC_URI[sha256sum] = "bd0f0ca1be65b70238b32f8e9fe5d36dc2fbf7a759b7edf28e75323a7d74f30b" -UPSTREAM_CHECK_URI = "https://github.com/libcheck/check/releases/" - -S = "${WORKDIR}/check-${PV}" - -inherit autotools pkgconfig texinfo - -CACHED_CONFIGUREVARS += "ac_cv_path_AWK_PATH=${bindir}/gawk" - -RREPLACES_${PN} = "check (<= 0.9.5)" - -BBCLASSEXTEND = "native nativesdk" - -PACKAGES =+ "checkmk" - -FILES_checkmk = "${bindir}/checkmk" - -RDEPENDS_checkmk = "gawk" - diff --git a/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb b/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb new file mode 100644 index 000000000..177c0edc8 --- /dev/null +++ b/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb @@ -0,0 +1,28 @@ +SUMMARY = "Check - unit testing framework for C code" +HOMEPAGE = "https://libcheck.github.io/check/" +SECTION = "devel" + +LICENSE = "LGPLv2.1+" +LIC_FILES_CHKSUM = "file://COPYING.LESSER;md5=2d5025d4aa3495befef8f17206a5b0a1" + +SRC_URI = "https://github.com/${BPN}/check/releases/download/${PV}/check-${PV}.tar.gz \ + file://not-echo-compiler-info-to-check_stdint.h.patch" +SRC_URI[sha256sum] = "aea2e3c68fa6e1e92378e744b1c0db350ccda4b6bd0d19530d0ae185b3d1ac60" +UPSTREAM_CHECK_URI = "https://github.com/libcheck/check/releases/" + +S = "${WORKDIR}/check-${PV}" + +inherit autotools pkgconfig texinfo + +CACHED_CONFIGUREVARS += "ac_cv_path_AWK_PATH=${bindir}/gawk" + +RREPLACES_${PN} = "check (<= 0.9.5)" + +BBCLASSEXTEND = "native nativesdk" + +PACKAGES =+ "checkmk" + +FILES_checkmk = "${bindir}/checkmk" + +RDEPENDS_checkmk = "gawk" + diff --git a/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch b/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch new file mode 100644 index 000000000..20610bee5 --- /dev/null +++ b/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch @@ -0,0 +1,43 @@ +From 6efe006e99a7e739afbf7fe8937445c82630fc8f Mon Sep 17 00:00:00 2001 +From: David Michael +Date: Mon, 1 Jun 2020 10:24:53 -0400 +Subject: [PATCH] build: Fix cross-compiling into a separate build dir. + +* configure.ac: Create the src directory before writing into it. +* src/Makefile.am (EXTRA_DIST): Add gen-lock-obj.sh. 
+ +-- + +Upstream-Status: Backport +Signed-off-by: David Michael +Signed-off-by: Alexander Kanavin +--- + configure.ac | 1 + + src/Makefile.am | 2 +- + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/configure.ac b/configure.ac +index def8bba..8c0d845 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -598,6 +598,7 @@ if test x$cross_compiling = xyes; then + case $host in + *-*-linux*) + lock_obj_h_generated=yes ++ mkdir src + LOCK_ABI_VERSION=1 host=$host host_alias=$host_alias \ + CC=$CC OBJDUMP=$host_alias-objdump \ + ac_ext=$ac_ext ac_objext=$ac_objext \ +diff --git a/src/Makefile.am b/src/Makefile.am +index 2fb83c0..d773877 100644 +--- a/src/Makefile.am ++++ b/src/Makefile.am +@@ -102,7 +102,7 @@ EXTRA_DIST = mkstrtable.awk err-sources.h.in err-codes.h.in \ + gpg-error.vers gpg-error.def.in \ + versioninfo.rc.in gpg-error.w32-manifest.in \ + gpg-error-config-test.sh gpg-error.pc.in \ +- $(lock_obj_pub) ++ gen-lock-obj.sh $(lock_obj_pub) + + BUILT_SOURCES = $(srcdir)/err-sources.h $(srcdir)/err-codes.h \ + code-to-errno.h code-from-errno.h \ diff --git a/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch b/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch new file mode 100644 index 000000000..e6f6c09ba --- /dev/null +++ b/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch @@ -0,0 +1,134 @@ +From fcb414abb62223e66dba413d0ca86eab3ea5bbc3 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Sun, 21 Jun 2020 13:54:47 +0000 +Subject: [PATCH] src-gen-lock-obj.sh: add a file + +This is erroneously missing from the tarball; it will show +up in the next release tarball, as upstream has fixed the +packaging in master. + +Upstream-Status: Inappropriate +Signed-off-by: Alexander Kanavin +--- + src/gen-lock-obj.sh | 112 ++++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 112 insertions(+) + create mode 100755 src/gen-lock-obj.sh + +diff --git a/src/gen-lock-obj.sh b/src/gen-lock-obj.sh +new file mode 100755 +index 0000000..13858cf +--- /dev/null ++++ b/src/gen-lock-obj.sh +@@ -0,0 +1,112 @@ ++#! /bin/sh ++# ++# gen-lock-obj.sh - Build tool to construct the lock object. ++# ++# Copyright (C) 2020 g10 Code GmbH ++# ++# This file is part of libgpg-error. ++# ++# libgpg-error is free software; you can redistribute it and/or ++# modify it under the terms of the GNU Lesser General Public License ++# as published by the Free Software Foundation; either version 2.1 of ++# the License, or (at your option) any later version. ++# ++# libgpg-error is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# Lesser General Public License for more details. ++# ++# You should have received a copy of the GNU Lesser General Public ++# License along with this program; if not, see . ++# ++ ++# ++# Following variables should be defined to invoke this script ++# ++# CC ++# OBJDUMP ++# AWK ++# ac_ext ++# ac_object ++# host ++# LOCK_ABI_VERSION ++# ++# An example: ++# ++# LOCK_ABI_VERSION=1 host=x86_64-pc-linux-gnu host_alias=x86_64-linux-gnu \ ++# CC=$host_alias-gcc OBJDUMP=$host_alias-objdump ac_ext=c ac_objext=o \ ++# AWK=gawk ./gen-lock-obj.sh ++# ++ ++AWK_VERSION_OUTPUT=$($AWK 'BEGIN { print PROCINFO["version"] }') ++if test -n "$AWK_VERSION_OUTPUT"; then ++ # It's GNU awk, which supports PROCINFO. 
++ AWK_OPTION=--non-decimal-data ++fi ++ ++cat <<'EOF' >conftest.$ac_ext ++#include ++pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; ++EOF ++ ++if $CC -c conftest.$ac_ext; then : ++ ac_mtx_size=$($OBJDUMP -j .bss -t conftest.$ac_objext \ ++ | $AWK $AWK_OPTION ' ++/mtx$/ { mtx_size = int("0x" $5) } ++END { print mtx_size }') ++else ++ echo "Can't determine mutex size" ++ exit 1 ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ ++cat < Date: Fri, 21 Aug 2020 15:58:33 -0500 Subject: poky: subtree update:23deb29c1b..c67f57c09e Adrian Bunk (1): librsvg: Upgrade 2.40.20 -> 2.40.21 Alejandro Hernandez (1): musl: Upgrade to latest release 1.2.1 Alex Kiernan (8): systemd: Upgrade v245.6 -> v246 systemd: Move musl patches to SRC_URI_MUSL systemd: Fix path to modules-load.d et al nfs-utils: Drop StandardError=syslog from systemd unit openssh: Drop StandardError=syslog from systemd unit volatile-binds: Drop StandardOutput=syslog from systemd unit systemd: Upgrade v246 -> v246.1 systemd: Upgrade v246.1 -> v246.2 Alexander Kanavin (16): sysvinit: update 2.96 -> 2.97 kbd: update 2.2.0 -> 2.3.0 gnu-config: update to latest revision go: update 1.14.4 -> 1.14.6 meson: update 0.54.3 -> 0.55.0 nasm: update 2.14.02 -> 2.15.03 glib-2.0: correct build with latest meson rsync: update 3.2.1 -> 3.2.2 vala: update 0.48.6 -> 0.48.7 logrotate: update 3.16.0 -> 3.17.0 mesa: update 20.1.2 -> 20.1.4 libcap: update 2.36 -> 2.41 net-tools: fix upstream version check meson.bbclass: add a cups-config entry oeqa: write @OETestTag content into json test reports for each case libhandy: upstream has moved to gnome Alistair Francis (1): binutils: Remove RISC-V PIE patch Andrei Gherzan (2): initscripts: Fix various shellcheck warnings in populate-volatile.sh initscripts: Fix populate-volatile.sh bug when file/dir exists Anuj Mittal (4): harfbuzz: upgrade 2.6.8 -> 2.7.1 sqlite3: upgrade 3.32.3 -> 3.33.0 stress-ng: upgrade 0.11.17 -> 0.11.18 x264: upgrade to latest revision Armin Kuster (1): glibc: Secruity fix for CVE-2020-6096 Bruce Ashfield (25): linux-yocto/5.4: update to v5.4.53 linux-yocto/5.4: fix perf build with binutils 2.35 kernel/yocto: allow dangling KERNEL_FEATURES linux-yocto/5.4: update to v5.4.54 systemtap: update to 4.3 latest kernel-devsrc: fix x86 (32bit) on target module build lttng-modules: update to 2.12.2 (fixes v5.8+ builds) yocto-bsps: update reference BSPs to 5.4.54 kernel-yocto: enhance configuration queue analysis capabilities strace: update to 5.8 (fix build against v5.8 uapi headers) linux-yocto-rt/5.4: update to rt32 linux-yocto/5.4: update to v5.4.56 linux-yocto/5.4: update to v5.4.57 kernel-yocto: set cwd before querying the meta data dir kernel-yocto: make # is not set matching more precise kernel-yocto: split meta data gathering into patch and config phases make-mod-scripts: add HOSTCXX definitions and gmp-native dependency kernel-devsrc: fix on target modules prepare for ARM kernel-devsrc: 5.8 + gcc10 require gcc-plugins + libmpc-dev linux-yocto/5.4: update to v5.4.58 linux-yocto/5.4: perf cs-etm: Move definition of 'traceid_list' global variable from header file libc-headers: update to v5.8 linux-yocto: introduce 5.8 reference kernel kernel-yocto/5.8: add gmp-native dependency linux-yocto/5.8: update to v5.8.1 Chandana kalluri (1): qemu.inc: Use virtual/libgl instead of mesa Changhyeok Bae (2): iproute2: upgrade 5.7.0 -> 5.8.0 ethtool: upgrade 5.7 -> 5.8 Changqing Li (5): layer.conf: fix adwaita-icon-theme signature change problem gtk-icon-cache.bbclass: add features_check 
gcc-runtime.inc: fix m32 compile fail with x86-64 compiler libffi: fix multilib header conflict gpgme: fix multilib header conflict Chen Qi (3): grub: set CVE_PRODUCT to grub2 runqemu: fix permission check of /dev/vhost-net fribidi: extend CVE_PRODUCT to include fribidi Chris Laplante (11): lib/oe/log_colorizer.py: add LogColorizerProxyProgressHandler bitbake: build: print traceback if progress handler can't be created bitbake: build: create_progress_handler: fix calling 'get' on NoneType bitbake: progress: modernize syntax, format bitbake: progress: fix hypothetical NameError if 'progress' isn't set bitbake: progress: filter ANSI escape codes before looking for progress text bitbake: tests/color: add test suite for ANSI color code filtering bitbake: data: emit filename/lineno information for shell functions bitbake: build: print a backtrace when a Bash shell function fails bitbake: build: print a backtrace with the original metadata locations of Bash shell funcs bitbake: build: make shell traps less chatty when 'bitbake -v' is used Dan Callaghan (1): stress-ng: create a symlink for /usr/bin/stress Daniel Ammann (1): wic: fix typo Daniel Gomez (1): allarch: Add missing allarch ttf-bitstream-vera Diego Sueiro (1): cml1: Add the option to choose the .config root dir Dmitry Baryshkov (3): mesa: enable freedreno Vulkan driver if freedreno is enabled arch-armv8-2a.inc: add tune include for armv8.2a tune-cortexa55.inc: switch to using armv8.2a include file Fredrik Gustafsson (13): package_manager: Move to package_manager/__init__.py rpm: Move manifest to its own subdir ipk: Move ipk manifest to its own subdir deb: Move deb manifest to its own subdir rpm: Move rootfs to its own dir ipk: Move rootfs to its own dir deb: Move rootfs to its own dir rpm: Move sdk to its own dir ipk: Move sdk to its own dir deb: Move sdk to its own dir rpm: Move package manager to its own dir ipk: Move package manager to its own dir deb: Move package manager to its own dir Guillaume Champagne (1): weston: add missing packageconfigs Jeremy Puhlman (1): gobject-introspection: disable scanner caching in install Joe Slater (3): libdnf: allow reproducible binary builds gconf: use python3 gcr: make sure gcr-oids.h is generated Jonathan Richardson (1): cortex-m0plus.inc: Add tuning for cortex M0 plus Joshua Watt (3): bitbake: bitbake: command: Handle multiconfig in findSigInfo lib/oe/reproducible.py: Fix git HEAD check perl: Add check for non-arch Storable.pm file Khasim Mohammed (2): wic/bootimg-efi: Add support for IMAGE_BOOT_FILES wic/bootimg-efi: Update docs for IMAGE_BOOT_FILES support in bootimg-efi Khem Raj (23): qemumips: Use 34Kf CPU emulation libunwind: Backport a fix for -fno-common option to compile dhcp: Use -fcommon compiler option inetutils: Fix build with -fno-common libomxil: Use -fcommon compiler option kexec-tools: Fix build with -fno-common distcc: Fix build with -fno-common libacpi: Fix build with -fno-common minicom: Fix build when using -fno-common binutils: Upgrade to 2.35 release xf86-video-intel: Fix build with -fno-common glibc: Upgrade to 2.32 release go: Upgrade to 1.14.7 webkitgtk: Upgrade to 2.28.4 kexec-tools: Fix additional duplicate symbols on aarch64/x86_64 builds gcc: Upgrade to 10.2.0 buildcpio.py: Apply patch to fix build with -fno-common buildgalculator: Patch to fix build with -fno-common localedef: Update to include floatn.h fix xserver-xorg: Fix build with -fno-common/mips binutils: Let crosssdk gold linker generate 4096 btyes long .interp section gcc-cross-canadian: Correct the 
regexp to delete versioned gcc binary curl: Upgrade to 7.72.0 Konrad Weihmann (2): rootfs-post: remove traling blanks from tasks cve-update: handle baseMetricV2 as optional Lee Chee Yang (4): buildhistory: use pid for temporary txt file name checklayer: check layer in BBLAYERS before test ghostscript: fix CVE-2020-15900 qemu : fix CVE-2020-15863 Mark Hatle (1): package.bbclass: Sort shlib2 output for hash equivalency Martin Jansa (2): net-tools: upgrade to latest revision in upstream repo instead of old debian snapshot perf: backport a fix for confusing non-fatal error Matt Madison (1): cogl-1.0: correct X11 dependencies Matthew (3): ltp: remove --with-power-management-testsuite from EXTRA_OECONF ltp: remove OOM tests from runtest/mm ltp: make copyFrom scp command non-fatal Mikko Rapeli (2): alsa-topology-conf: use ${datadir} in do_install() alsa-ucm-conf: use ${datadir} in do_install() Ming Liu (3): conf/machine: set UBOOT_MACHINE for qemumips and qemumips64 multilib.conf: add u-boot to NON_MULTILIB_RECIPES libubootenv: uprev to v0.3 Mingli Yu (2): ccache: Upgrade to 3.7.11 Revert "python3: define a profile directory path" Naoto Yamaguchi (1): patch.py: Change to more strictly fuzz detection Nathan Rossi (4): libexif: Enable native and nativesdk cmake.bbclass: Rework compiler program variables for allarch python3: Improve handling of python3 manifest generation python3-manifest.json: Updates Oleksandr Kravchuk (9): python3-setuptools: update to 49.2.0 bash-completion: update to 2.11 python3: update to 3.8.5 re2c: update to 2.0 diffoscope: update to 153 json-c: update to 0.15 git: update 2.28.0 libwpe: update to 1.7.1 python3-setuptools: update to 49.3.1 Richard Purdie (20): perl: Avoid race continually rebuilding miniperl gcc: Fix mangled patch bitbake: server/process: Fix UI first connection tracking bitbake: server/process: Account for xmlrpc connections Revert "lib/oe/log_colorizer.py: add LogColorizerProxyProgressHandler" lib/package_manager: Fix missing imports populate_sdk_ext: Ensure buildtools doesn't corrupt OECORE_NATIVE_SYSROOT buildtools: Handle generic environment setup injection uninative: Handle PREMIRRORS generically maintainers: Update entries for Mark Hatle gcr: Fix patch Upstream-Status from v2 patch bitbake: server/process: Remove pointless process forking bitbake: server/process: Simplfy idle callback handler function bitbake: server/process: Pass timeout/xmlrpc parameters directly to the server bitbake: server/process: Add extra logfile flushing packagefeed-stability: Remove as obsolete build-compare: Drop recipe qemu: Upgrade 5.0.0 -> 5.1.0 selftest/tinfoil: Increase wait event timeout lttng-tools: upgrade 2.12.1 -> 2.12.2 Ross Burton (3): popt: upgrade to 1.18 conf/machine: set UBOOT_MACHINE for qemuarm and qemuarm64 gcc: backport a fix for out-of-line atomics on aarch64 TeohJayShen (2): oeqa/manual/bsp-hw.json : remove shutdown_system test oeqa/manual/bsp-hw.json : remove X_server_can_start_up_with_runlevel_5_boot test Trevor Gamblin (1): llvm: upgrade 9.0.1 -> 10.0.1 Tyler Hicks (1): kernel-devicetree: Fix intermittent build failures caused by DTB builds Usama Arif (3): kernel-fitimage: build configuration for image tree when dtb is not present oeqa/selftest/imagefeatures: Add testcase for fitImage ref-manual: Add documentation for kernel-fitimage Vasyl Vavrychuk (1): runqemu: Check gtk or sdl option is passed together with gl or gl-es options. 
Yi Zhao (1): pbzip2: extend for nativesdk Zhang Qiang (1): kernel.bbclass: Configuration for environment with HOSTCXX hongxu (1): nativesdk-rpm: adjust RPM_CONFIGDIR paths dynamically zangrc (8): libevdev:upgrade 1.9.0 -> 1.9.1 mpg123:upgrade 1.26.2 -> 1.26.3 flex: Refresh patch stress-ng:upgrade 0.11.15 -> 0.11.17 sudo:upgrade 1.9.1 -> 1.9.2 libcap: Upgrade 2.41 -> 2.42 libinput: Upgrade 1.15.6 -> 1.16.0 python3-setuptools: Upgrade 49.2.0 -> 49.2.1 Signed-off-by: Andrew Geissler Change-Id: Ic7fa1e8484c1c7722a70c75608aa4ab21fa7d755 --- poky/bitbake/bin/bitbake-selftest | 1 + poky/bitbake/lib/bb/build.py | 130 +- poky/bitbake/lib/bb/command.py | 8 +- poky/bitbake/lib/bb/cooker.py | 10 +- poky/bitbake/lib/bb/cookerdata.py | 8 +- poky/bitbake/lib/bb/data.py | 6 + poky/bitbake/lib/bb/process.py | 3 +- poky/bitbake/lib/bb/progress.py | 60 +- poky/bitbake/lib/bb/server/process.py | 50 +- poky/bitbake/lib/bb/tests/color.py | 95 + poky/documentation/bsp-guide/bsp.xml | 2 +- poky/documentation/ref-manual/ref-classes.xml | 76 +- poky/documentation/ref-manual/ref-variables.xml | 144 +- .../recipes-kernel/linux/linux-yocto_5.4.bbappend | 16 +- poky/meta/classes/buildhistory.bbclass | 11 +- poky/meta/classes/cmake.bbclass | 36 +- poky/meta/classes/cml1.bbclass | 18 +- poky/meta/classes/gtk-icon-cache.bbclass | 5 + poky/meta/classes/kernel-devicetree.bbclass | 2 +- poky/meta/classes/kernel-fitimage.bbclass | 29 +- poky/meta/classes/kernel-yocto.bbclass | 256 +- poky/meta/classes/kernel.bbclass | 2 + poky/meta/classes/meson.bbclass | 1 + poky/meta/classes/package.bbclass | 2 +- poky/meta/classes/packagefeed-stability.bbclass | 252 - poky/meta/classes/populate_sdk_ext.bbclass | 3 + poky/meta/classes/rootfs-postcommands.bbclass | 6 +- poky/meta/classes/rootfsdebugfiles.bbclass | 2 +- poky/meta/classes/uninative.bbclass | 13 +- poky/meta/conf/distro/include/distro_alias.inc | 1 - poky/meta/conf/distro/include/maintainers.inc | 13 +- .../conf/distro/include/ptest-packagelists.inc | 1 - poky/meta/conf/distro/include/tcmode-default.inc | 10 +- poky/meta/conf/layer.conf | 3 + poky/meta/conf/machine/include/arm/arch-armv6m.inc | 19 + .../conf/machine/include/arm/arch-armv8-2a.inc | 19 + .../conf/machine/include/tune-cortex-m0plus.inc | 11 + poky/meta/conf/machine/include/tune-cortexa55.inc | 2 +- poky/meta/conf/machine/qemuarm.conf | 2 + poky/meta/conf/machine/qemuarm64.conf | 2 + poky/meta/conf/machine/qemumips.conf | 4 + poky/meta/conf/machine/qemumips64.conf | 2 + poky/meta/conf/multilib.conf | 2 +- poky/meta/lib/oe/manifest.py | 144 +- poky/meta/lib/oe/package_manager.py | 1863 -- poky/meta/lib/oe/package_manager/__init__.py | 550 + poky/meta/lib/oe/package_manager/deb/__init__.py | 492 + poky/meta/lib/oe/package_manager/deb/manifest.py | 26 + poky/meta/lib/oe/package_manager/deb/rootfs.py | 210 + poky/meta/lib/oe/package_manager/deb/sdk.py | 96 + poky/meta/lib/oe/package_manager/ipk/__init__.py | 507 + poky/meta/lib/oe/package_manager/ipk/manifest.py | 73 + poky/meta/lib/oe/package_manager/ipk/rootfs.py | 387 + poky/meta/lib/oe/package_manager/ipk/sdk.py | 96 + poky/meta/lib/oe/package_manager/rpm/__init__.py | 404 + poky/meta/lib/oe/package_manager/rpm/manifest.py | 54 + poky/meta/lib/oe/package_manager/rpm/rootfs.py | 148 + poky/meta/lib/oe/package_manager/rpm/sdk.py | 114 + poky/meta/lib/oe/patch.py | 2 +- poky/meta/lib/oe/reproducible.py | 4 +- poky/meta/lib/oe/rootfs.py | 618 +- poky/meta/lib/oe/sdk.py | 285 +- poky/meta/lib/oeqa/core/runner.py | 14 + poky/meta/lib/oeqa/core/target/ssh.py | 7 +- 
poky/meta/lib/oeqa/manual/bsp-hw.json | 44 - poky/meta/lib/oeqa/runtime/cases/buildcpio.py | 3 +- poky/meta/lib/oeqa/runtime/cases/ltp.py | 5 +- poky/meta/lib/oeqa/sdk/cases/buildcpio.py | 1 + poky/meta/lib/oeqa/sdk/cases/buildgalculator.py | 2 +- poky/meta/lib/oeqa/selftest/cases/imagefeatures.py | 74 + poky/meta/lib/oeqa/selftest/cases/tinfoil.py | 5 +- poky/meta/lib/oeqa/utils/package_manager.py | 4 +- poky/meta/recipes-bsp/grub/grub2.inc | 2 + .../0001-libacpi-Fix-build-witth-fno-commom.patch | 68 + poky/meta/recipes-bsp/libacpi/libacpi_0.2.bb | 4 +- poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb | 27 - poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb | 30 + poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb | 2 + ...d-Fix-multiple-definitions-of-errcatch-an.patch | 58 + .../inetutils/inetutils_1.9.4.bb | 1 + .../0001-devlink.c-add-missing-include.patch | 13 +- .../0001-libc-compat.h-add-musl-workaround.patch | 10 +- .../iproute2/iproute2_5.7.0.bb | 12 - .../iproute2/iproute2_5.8.0.bb | 12 + .../nfs-utils/nfs-utils/nfs-server.service | 1 - .../openssh/openssh/sshd@.service | 1 - .../glib-2.0/glib-2.0/meson.cross.d/common-linux | 2 +- .../glibc/cross-localedef-native_2.31.bb | 52 - .../glibc/cross-localedef-native_2.32.bb | 50 + poky/meta/recipes-core/glibc/glibc-common.inc | 2 +- poky/meta/recipes-core/glibc/glibc-locale_2.31.bb | 1 - poky/meta/recipes-core/glibc/glibc-locale_2.32.bb | 1 + poky/meta/recipes-core/glibc/glibc-mtrace_2.31.bb | 1 - poky/meta/recipes-core/glibc/glibc-mtrace_2.32.bb | 1 + poky/meta/recipes-core/glibc/glibc-scripts_2.31.bb | 1 - poky/meta/recipes-core/glibc/glibc-scripts_2.32.bb | 1 + .../recipes-core/glibc/glibc-testsuite_2.31.bb | 63 - .../recipes-core/glibc/glibc-testsuite_2.32.bb | 63 + poky/meta/recipes-core/glibc/glibc-version.inc | 8 +- ...def-Add-hardlink-resolver-from-util-linux.patch | 7 +- ...-localedef-Add-hardlink-resolver-to-build.patch | 52 - ...ledef-fix-ups-hardlink-to-make-it-compile.patch | 7 +- ...libc-Look-for-host-system-ld.so.cache-as-.patch | 14 +- ...libc-Fix-buffer-overrun-with-a-relocated-.patch | 12 +- ...libc-Raise-the-size-of-arrays-containing-.patch | 28 +- ...ivesdk-glibc-Allow-64-bit-atomics-for-x86.patch | 7 +- ...libc-Make-relocatable-install-for-locales.patch | 60 +- ...500-e5500-e6500-603e-fsqrt-implementation.patch | 7 +- ...-Fix-undefined-reference-to-__sqrt_finite.patch | 208 + ...-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch | 29 - ...qrt-f-are-now-inline-functions-and-call-o.patch | 387 + ...-Fix-undefined-reference-to-__sqrt_finite.patch | 205 - ...bug-1443-which-explains-what-the-patch-do.patch | 62 + ...qrt-f-are-now-inline-functions-and-call-o.patch | 384 - ...bug-1443-which-explains-what-the-patch-do.patch | 58 - ...n-libm-err-tab.pl-with-specific-dirs-in-S.patch | 36 + ...qrt-f-are-now-inline-functions-and-call-o.patch | 61 + ...n-libm-err-tab.pl-with-specific-dirs-in-S.patch | 33 - ...qrt-f-are-now-inline-functions-and-call-o.patch | 58 - ...-configure.ac-handle-correctly-libc_cv_ro.patch | 42 + ...-configure.ac-handle-correctly-libc_cv_ro.patch | 39 - ...thin-the-path-sets-wrong-config-variables.patch | 263 + .../glibc/glibc/0016-Add-unused-attribute.patch | 31 - ...-timezone-re-written-tzselect-as-posix-sh.patch | 45 + ...move-bash-dependency-for-nscd-init-script.patch | 75 + ...thin-the-path-sets-wrong-config-variables.patch | 260 - ...c-Cross-building-and-testing-instructions.patch | 619 + ...-timezone-re-written-tzselect-as-posix-sh.patch | 42 - ...move-bash-dependency-for-nscd-init-script.patch | 72 - 
...019-eglibc-Help-bootstrap-cross-toolchain.patch | 100 + ...c-Cross-building-and-testing-instructions.patch | 616 - ...0020-eglibc-Resolve-__fpscr_values-on-SH4.patch | 56 + ...ward-port-cross-locale-generation-support.patch | 563 + ...021-eglibc-Help-bootstrap-cross-toolchain.patch | 97 - ...0022-Define-DUMMY_LOCALE_T-if-not-defined.patch | 32 + ...0022-eglibc-Resolve-__fpscr_values-on-SH4.patch | 53 - ...ward-port-cross-locale-generation-support.patch | 560 - ...dd-to-archive-uses-a-hard-coded-locale-pa.patch | 84 + ...0024-Define-DUMMY_LOCALE_T-if-not-defined.patch | 29 - ....c-Make-_dl_build_local_scope-breadth-fir.patch | 56 + ...tl-Emit-no-lines-in-bison-generated-files.patch | 34 + ...dd-to-archive-uses-a-hard-coded-locale-pa.patch | 80 - ....c-Make-_dl_build_local_scope-breadth-fir.patch | 53 - .../0026-inject-file-assembly-directives.patch | 240 + ...tl-Emit-no-lines-in-bison-generated-files.patch | 31 - ...ent-maybe-uninitialized-errors-with-Os-BZ.patch | 56 + .../0028-inject-file-assembly-directives.patch | 301 - ...-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch | 33 + ...ent-maybe-uninitialized-errors-with-Os-BZ.patch | 53 - ...-Unify-the-header-between-arm-and-aarch64.patch | 70 + ...erpc-Do-not-ask-compiler-for-finding-arch.patch | 51 + ...-Unify-the-header-between-arm-and-aarch64.patch | 67 - poky/meta/recipes-core/glibc/glibc_2.31.bb | 113 - poky/meta/recipes-core/glibc/glibc_2.32.bb | 113 + .../initscripts-1.0/populate-volatile.sh | 80 +- ...-and-append-i386-to-fix-libkbdfile-test08.patch | 45 - .../0001-analyze.l-add-missing-string-format.patch | 24 - poky/meta/recipes-core/kbd/kbd/fix_cflags.patch | 25 - poky/meta/recipes-core/kbd/kbd/run-ptest | 4 - .../kbd/kbd/set-proper-path-of-resources.patch | 99 - poky/meta/recipes-core/kbd/kbd_2.2.0.bb | 72 - poky/meta/recipes-core/kbd/kbd_2.3.0.bb | 38 + poky/meta/recipes-core/meta/buildtools-tarball.bb | 19 +- .../meta/recipes-core/meta/cve-update-db-native.bb | 13 +- poky/meta/recipes-core/musl/musl_git.bb | 4 +- .../recipes-core/systemd/systemd-boot_245.6.bb | 70 - .../recipes-core/systemd/systemd-boot_246.1.bb | 70 + .../recipes-core/systemd/systemd-conf_245.6.bb | 38 - .../recipes-core/systemd/systemd-conf_246.1.bb | 38 + poky/meta/recipes-core/systemd/systemd.inc | 4 +- .../systemd/0001-Handle-missing-gshadow.patch | 73 +- .../0001-Use-PREFIX-ROOTPREFIX-correctly.patch | 81 + ...t-install-dependency-links-at-install-tim.patch | 34 +- ...01-do-not-disable-buffer-in-writing-files.patch | 198 +- .../0002-don-t-use-glibc-specific-qsort_r.patch | 40 +- ...llback-parse_printf_format-implementation.patch | 52 +- ...asic-missing.h-check-for-missing-strndupa.patch | 175 +- .../systemd/0006-Include-netinet-if_ether.h.patch | 166 +- ...-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch | 32 +- ...-of-__register_atfork-for-non-glibc-build.patch | 10 +- .../0011-Use-uintmax_t-for-handling-rlim_t.patch | 30 +- ...ass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch | 26 +- ...c-compatible-basename-for-non-glibc-syste.patch | 10 +- ...ble-buffering-when-writing-to-oom_score_a.patch | 10 +- ...-XSI-compliant-strerror_r-from-GNU-specif.patch | 20 +- .../systemd/0021-Handle-missing-LOCK_EX.patch | 23 + ...ompatible-pointer-type-struct-sockaddr_un.patch | 37 + ...fi_loader_entry_one_shot_stat-has-incompl.patch | 31 + .../systemd/systemd/CVE-2020-13776.patch | 96 - poky/meta/recipes-core/systemd/systemd_245.6.bb | 715 - poky/meta/recipes-core/systemd/systemd_246.2.bb | 719 + .../recipes-core/sysvinit/sysvinit/crypt-lib.patch | 16 +- 
.../recipes-core/sysvinit/sysvinit/install.patch | 7 +- poky/meta/recipes-core/sysvinit/sysvinit_2.96.bb | 114 - poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb | 113 + .../volatile-binds/files/volatile-binds.service.in | 1 - .../recipes-devtools/binutils/binutils-2.34.inc | 47 - .../recipes-devtools/binutils/binutils-2.35.inc | 45 + .../binutils/binutils-cross-canadian_2.34.bb | 3 - .../binutils/binutils-cross-canadian_2.35.bb | 3 + .../binutils/binutils-cross-testsuite_2.34.bb | 85 - .../binutils/binutils-cross-testsuite_2.35.bb | 85 + .../binutils/binutils-cross_2.34.bb | 3 - .../binutils/binutils-cross_2.35.bb | 3 + .../binutils/binutils-crosssdk_2.34.bb | 13 - .../binutils/binutils-crosssdk_2.35.bb | 13 + ...nutils-crosssdk-Generate-relocatable-SDKs.patch | 31 +- ...oss-Do-not-generate-linker-script-directo.patch | 10 +- ...tivesdk-Search-for-alternative-ld.so.conf.patch | 29 +- ...ure-widen-the-regexp-for-SH-architectures.patch | 15 +- .../0005-Point-scripts-location-to-libdir.patch | 15 +- ...te-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch | 14 +- .../binutils/binutils/0007-Use-libtool-2.4.patch | 21234 ----------------- ...he-distro-compiler-point-to-the-wrong-ins.patch | 35 + ...he-distro-compiler-point-to-the-wrong-ins.patch | 32 - ...es-of-system-directories-when-cross-linki.patch | 287 + ...Change-default-emulation-for-mips64-linux.patch | 60 + ...es-of-system-directories-when-cross-linki.patch | 269 - .../0010-Add-support-for-Netlogic-XLP.patch | 409 + ...-rpath-in-libtool-when-sysroot-is-enabled.patch | 49 - ...Change-default-emulation-for-mips64-linux.patch | 57 - ...ncorrect-assembling-for-ppc-wait-mnemonic.patch | 37 + .../0012-Add-support-for-Netlogic-XLP.patch | 406 - .../binutils/0012-Detect-64-bit-MIPS-targets.patch | 50 + .../binutils/binutils/0013-Use-libtool-2.4.patch | 23153 +++++++++++++++++++ ...ncorrect-assembling-for-ppc-wait-mnemonic.patch | 33 - .../binutils/0014-Detect-64-bit-MIPS-targets.patch | 47 - ...-rpath-in-libtool-when-sysroot-is-enabled.patch | 52 + .../0015-sync-with-OE-libtool-changes.patch | 9 +- ...eck-for-clang-before-checking-gcc-version.patch | 14 +- ...op-redundant-program_name-definition-fno-.patch | 61 - .../binutils/binutils/CVE-2020-0551.patch | 549 - .../recipes-devtools/binutils/binutils_2.34.bb | 65 - .../recipes-devtools/binutils/binutils_2.35.bb | 65 + .../build-compare/build-compare_git.bb | 29 - poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb | 32 - poky/meta/recipes-devtools/ccache/ccache_3.7.11.bb | 32 + poky/meta/recipes-devtools/distcc/distcc_3.3.3.bb | 1 + ...ith-gcc-10-which-defaults-to-fno-common-c.patch | 34 + .../recipes-devtools/flex/flex/check-funcs.patch | 36 +- poky/meta/recipes-devtools/gcc/gcc-10.1.inc | 121 - ...-Straight-Line-Speculation-SLS-mitigation.patch | 202 - .../0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch | 39 - ...roduce-SLS-mitigation-for-RET-and-BR-inst.patch | 607 - .../0002-gcc-poison-system-directories.patch | 200 - ...-aarch64-Mitigate-SLS-for-BLR-instruction.patch | 658 - .../0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch | 70 - .../gcc/gcc-10.1/0004-64-bit-multilib-hack.patch | 116 - .../gcc/gcc-10.1/0005-optional-libstdc.patch | 122 - .../gcc/gcc-10.1/0006-COLLECT_GCC_OPTIONS.patch | 35 - ...efaults.h-in-B-instead-of-S-and-t-oe-in-B.patch | 92 - .../gcc-10.1/0008-fortran-cross-compile-hack.patch | 43 - .../gcc/gcc-10.1/0009-cpp-honor-sysroot.patch | 51 - .../gcc-10.1/0010-MIPS64-Default-to-N64-ABI.patch | 54 - ...C_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch | 243 - 
...0012-gcc-Fix-argument-list-too-long-error.patch | 37 - .../gcc/gcc-10.1/0013-Disable-sdt.patch | 110 - .../gcc/gcc-10.1/0014-libtool.patch | 39 - ...4-pass-fix-v4bx-to-linker-to-support-EABI.patch | 40 - ...tilib-config-files-from-B-instead-of-usin.patch | 99 - ...-libdir-from-.la-which-usually-points-to-.patch | 28 - .../gcc/gcc-10.1/0018-export-CPP.patch | 50 - ...Ensure-target-gcc-headers-can-be-included.patch | 57 - ...h-host-directory-during-relink-if-inst_pr.patch | 35 - ...IBS_DIR-replacement-instead-of-hardcoding.patch | 26 - .../0022-aarch64-Add-support-for-musl-ldso.patch | 25 - ...ibcc1-fix-libcc1-s-install-path-and-rpath.patch | 51 - ...-handle-sysroot-support-for-nativesdk-gcc.patch | 346 - ...et-sysroot-gcc-version-specific-dirs-with.patch | 99 - ...-various-_FOR_BUILD-and-related-variables.patch | 134 - .../0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch | 25 - ...shared-to-link-commandline-for-musl-targe.patch | 84 - ...bgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch | 26 - .../0030-sync-gcc-stddef.h-with-musl.patch | 88 - ...ation-fault-in-precompiled-header-generat.patch | 57 - .../gcc-10.1/0032-Fix-for-testsuite-failure.patch | 255 - ...0033-Re-introduce-spe-commandline-options.patch | 38 - ...e-alias-for-__cpu_indicator_init-instead-.patch | 83 - ...nmodes-Do-not-use-__LINE__-for-maintainin.patch | 182 - ...-Enable-CET-in-cross-compiler-if-possible.patch | 150 - ...37-mingw32-Enable-operation_not_supported.patch | 26 - ...libatomic-Do-not-enforce-march-on-aarch64.patch | 42 - .../recipes-devtools/gcc/gcc-10.1/pr96130.patch | 106 - poky/meta/recipes-devtools/gcc/gcc-10.2.inc | 120 + .../recipes-devtools/gcc/gcc-cross-canadian.inc | 2 +- .../gcc/gcc-cross-canadian_10.1.bb | 5 - .../gcc/gcc-cross-canadian_10.2.bb | 5 + poky/meta/recipes-devtools/gcc/gcc-cross_10.1.bb | 3 - poky/meta/recipes-devtools/gcc/gcc-cross_10.2.bb | 3 + .../meta/recipes-devtools/gcc/gcc-crosssdk_10.1.bb | 2 - .../meta/recipes-devtools/gcc/gcc-crosssdk_10.2.bb | 2 + poky/meta/recipes-devtools/gcc/gcc-runtime.inc | 4 + poky/meta/recipes-devtools/gcc/gcc-runtime_10.1.bb | 2 - poky/meta/recipes-devtools/gcc/gcc-runtime_10.2.bb | 2 + .../recipes-devtools/gcc/gcc-sanitizers_10.1.bb | 7 - .../recipes-devtools/gcc/gcc-sanitizers_10.2.bb | 7 + poky/meta/recipes-devtools/gcc/gcc-source_10.1.bb | 4 - poky/meta/recipes-devtools/gcc/gcc-source_10.2.bb | 4 + ...4-Fix-up-__aarch64_cas16_acq_rel-fallback.patch | 66 + ...-Straight-Line-Speculation-SLS-mitigation.patch | 202 + .../gcc/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch | 39 + ...roduce-SLS-mitigation-for-RET-and-BR-inst.patch | 607 + .../gcc/0002-gcc-poison-system-directories.patch | 200 + ...-aarch64-Mitigate-SLS-for-BLR-instruction.patch | 658 + .../0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch | 70 + .../gcc/gcc/0004-64-bit-multilib-hack.patch | 116 + .../gcc/gcc/0005-optional-libstdc.patch | 122 + .../gcc/gcc/0006-COLLECT_GCC_OPTIONS.patch | 35 + ...efaults.h-in-B-instead-of-S-and-t-oe-in-B.patch | 92 + .../gcc/gcc/0008-fortran-cross-compile-hack.patch | 43 + .../gcc/gcc/0009-cpp-honor-sysroot.patch | 51 + .../gcc/gcc/0010-MIPS64-Default-to-N64-ABI.patch | 54 + ...C_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch | 243 + ...0012-gcc-Fix-argument-list-too-long-error.patch | 41 + .../gcc/gcc/0013-Disable-sdt.patch | 110 + .../recipes-devtools/gcc/gcc/0014-libtool.patch | 39 + ...4-pass-fix-v4bx-to-linker-to-support-EABI.patch | 40 + ...tilib-config-files-from-B-instead-of-usin.patch | 99 + ...-libdir-from-.la-which-usually-points-to-.patch | 28 + 
.../recipes-devtools/gcc/gcc/0018-export-CPP.patch | 50 + ...Ensure-target-gcc-headers-can-be-included.patch | 57 + ...h-host-directory-during-relink-if-inst_pr.patch | 35 + ...IBS_DIR-replacement-instead-of-hardcoding.patch | 26 + .../0022-aarch64-Add-support-for-musl-ldso.patch | 25 + ...ibcc1-fix-libcc1-s-install-path-and-rpath.patch | 51 + ...-handle-sysroot-support-for-nativesdk-gcc.patch | 346 + ...et-sysroot-gcc-version-specific-dirs-with.patch | 99 + ...-various-_FOR_BUILD-and-related-variables.patch | 134 + .../0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch | 25 + ...shared-to-link-commandline-for-musl-targe.patch | 84 + ...bgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch | 26 + .../gcc/gcc/0030-sync-gcc-stddef.h-with-musl.patch | 88 + ...ation-fault-in-precompiled-header-generat.patch | 57 + .../gcc/gcc/0032-Fix-for-testsuite-failure.patch | 255 + ...0033-Re-introduce-spe-commandline-options.patch | 38 + ...e-alias-for-__cpu_indicator_init-instead-.patch | 83 + ...nmodes-Do-not-use-__LINE__-for-maintainin.patch | 182 + ...36-mingw32-Enable-operation_not_supported.patch | 26 + ...libatomic-Do-not-enforce-march-on-aarch64.patch | 42 + poky/meta/recipes-devtools/gcc/gcc_10.1.bb | 14 - poky/meta/recipes-devtools/gcc/gcc_10.2.bb | 14 + .../recipes-devtools/gcc/libgcc-initial_10.1.bb | 5 - .../recipes-devtools/gcc/libgcc-initial_10.2.bb | 5 + poky/meta/recipes-devtools/gcc/libgcc_10.1.bb | 5 - poky/meta/recipes-devtools/gcc/libgcc_10.2.bb | 5 + poky/meta/recipes-devtools/gcc/libgfortran_10.1.bb | 3 - poky/meta/recipes-devtools/gcc/libgfortran_10.2.bb | 3 + poky/meta/recipes-devtools/git/git_2.27.0.bb | 10 - poky/meta/recipes-devtools/git/git_2.28.0.bb | 9 + .../recipes-devtools/gnu-config/gnu-config_git.bb | 4 +- poky/meta/recipes-devtools/go/go-1.14.inc | 4 +- .../recipes-devtools/go/go-binary-native_1.14.4.bb | 46 - .../recipes-devtools/go/go-binary-native_1.14.7.bb | 46 + .../json-c/json-c/CVE-2020-12762.patch | 160 - poky/meta/recipes-devtools/json-c/json-c_0.14.bb | 20 - poky/meta/recipes-devtools/json-c/json-c_0.15.bb | 18 + .../libdnf/libdnf/enable_test_data_dir_set.patch | 26 + poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb | 1 + poky/meta/recipes-devtools/llvm/llvm_git.bb | 4 +- poky/meta/recipes-devtools/meson/meson.inc | 2 +- .../0001-Make-CPU-family-warnings-fatal.patch | 19 +- ...efix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch | 48 +- ...hon.py-do-not-substitute-python-s-install.patch | 11 +- ...02-Support-building-allarch-recipes-again.patch | 4 +- .../meson/meson/disable-rpath-handling.patch | 10 +- poky/meta/recipes-devtools/meson/meson_0.54.3.bb | 4 - poky/meta/recipes-devtools/meson/meson_0.55.0.bb | 4 + .../meson/nativesdk-meson_0.54.3.bb | 65 - .../meson/nativesdk-meson_0.55.0.bb | 65 + ...headers.m4-for-development-versions-of-au.patch | 28 - .../nasm/nasm/0001-stdlib-Add-strlcat.patch | 28 +- .../nasm/0002-Add-debug-prefix-map-option.patch | 129 +- .../nasm/nasm/CVE-2018-19755.patch | 116 - .../nasm/nasm/CVE-2019-14248.patch | 43 - poky/meta/recipes-devtools/nasm/nasm_2.14.02.bb | 26 - poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb | 21 + .../perl/files/perl-cross-makefile.patch | 29 + poky/meta/recipes-devtools/perl/perl_5.32.0.bb | 6 + ...ionally-do-not-fetch-code-by-easy_install.patch | 16 +- .../recipes-devtools/python/python-setuptools.inc | 60 - .../python/python3-setuptools_47.3.1.bb | 6 - .../python/python3-setuptools_49.3.1.bb | 65 + ...nfigure.ac-define-a-path-for-profile-data.patch | 42 - .../python/python3/get_module_deps3.py | 20 +- 
.../python/python3/python3-manifest.json | 68 +- poky/meta/recipes-devtools/python/python3_3.8.3.bb | 362 - poky/meta/recipes-devtools/python/python3_3.8.5.bb | 361 + poky/meta/recipes-devtools/qemu/qemu-native.inc | 4 - .../recipes-devtools/qemu/qemu-native_5.0.0.bb | 9 - .../recipes-devtools/qemu/qemu-native_5.1.0.bb | 9 + .../qemu/qemu-system-native_5.0.0.bb | 26 - .../qemu/qemu-system-native_5.1.0.bb | 26 + poky/meta/recipes-devtools/qemu/qemu.inc | 12 +- .../qemu/qemu/0001-Add-enable-disable-udev.patch | 15 +- ...001-qemu-Add-missing-wacom-HID-descriptor.patch | 17 +- ...01-qemu-Do-not-include-file-if-not-exists.patch | 13 +- ...age-ptest-which-runs-all-unit-test-cases-.patch | 13 +- ...dition-environment-space-to-boot-loader-q.patch | 10 +- .../qemu/qemu/0004-qemu-disable-Valgrind.patch | 10 +- ...-set-ld.bfd-fix-cflags-and-set-some-envir.patch | 13 +- ...ardev-connect-socket-to-a-spawned-command.patch | 56 +- .../qemu/0007-apic-fixup-fallthrough-to-PIC.patch | 10 +- ...-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch | 10 +- .../qemu/qemu/0009-Fix-webkitgtk-builds.patch | 73 +- ...ure-Add-pkg-config-handling-for-libgcrypt.patch | 14 +- ...error-messages-when-qemi_cpu_kick_thread-.patch | 74 - .../qemu/qemu/CVE-2020-10761.patch | 151 - .../qemu/qemu/CVE-2020-13361.patch | 61 - .../qemu/qemu/CVE-2020-13362.patch | 55 - .../qemu/qemu/CVE-2020-13659.patch | 58 - .../qemu/qemu/CVE-2020-13791.patch | 53 - .../qemu/qemu/CVE-2020-13800.patch | 63 - .../recipes-devtools/qemu/qemu/find_datadir.patch | 14 +- poky/meta/recipes-devtools/qemu/qemu_5.0.0.bb | 33 - poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb | 33 + .../rpm/files/environment.d-rpm.sh | 1 + poky/meta/recipes-devtools/rpm/rpm_4.15.1.bb | 5 + .../rsync/files/makefile-no-rebuild.patch | 14 +- poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb | 58 - poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb | 58 + poky/meta/recipes-devtools/strace/strace_5.7.bb | 55 - poky/meta/recipes-devtools/strace/strace_5.8.bb | 55 + ...-gen-don-t-append-dirty-if-we-re-not-in-g.patch | 55 - poky/meta/recipes-devtools/vala/vala_0.48.6.bb | 7 - poky/meta/recipes-devtools/vala/vala_0.48.7.bb | 5 + .../ethtool/ethtool/avoid_parallel_tests.patch | 6 +- poky/meta/recipes-extended/ethtool/ethtool_5.7.bb | 35 - poky/meta/recipes-extended/ethtool/ethtool_5.8.bb | 37 + .../ghostscript/ghostscript/CVE-2020-15900.patch | 54 + .../ghostscript/ghostscript_9.52.bb | 1 + .../logrotate/act-as-mv-when-rotate.patch | 22 +- .../disable-check-different-filesystems.patch | 8 +- .../recipes-extended/logrotate/logrotate_3.16.0.bb | 92 - .../recipes-extended/logrotate/logrotate_3.17.0.bb | 91 + .../0001-Remove-OOM-tests-from-runtest-mm.patch | 34 + poky/meta/recipes-extended/ltp/ltp_20200515.bb | 3 +- ...p-superfluous-global-variable-definitions.patch | 35 + ...p-superfluous-global-variable-definitions.patch | 37 + ...p-superfluous-global-variable-definitions.patch | 42 + .../meta/recipes-extended/minicom/minicom_2.7.1.bb | 3 + ...refix-to-unsigned-long-long-constants-to-.patch | 381 - ...-lib-inet6.c-INET6_rresolve-various-fixes.patch | 87 - .../net-tools/net-tools/Add_missing_headers.patch | 15 + ...g_443075-ifconfig.c-pointtopoint_spelling.patch | 26 + .../Bug_541172-netstat.c-exit-codes.patch | 22 + ...terface-0-del-IP-will-remove-the-aliased-.patch | 32 - .../net-tools/net-tools/musl-fixes.patch | 100 - .../net-tools/net-tools/net-tools-1.60-sctp1.patch | 635 - .../net-tools/net-tools-1.60-sctp2-quiet.patch | 28 - .../net-tools/net-tools-1.60-sctp3-addrs.patch | 363 - 
.../net-tools-fix-building-with-linux-4.8.patch | 52 - .../net-tools/net-tools_1.60-20181103.bb | 110 + .../net-tools/net-tools_1.60-26.bb | 132 - poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb | 2 +- .../stress-ng/stress-ng_0.11.15.bb | 26 - .../stress-ng/stress-ng_0.11.18.bb | 27 + poky/meta/recipes-extended/sudo/sudo_1.9.1.bb | 47 - poky/meta/recipes-extended/sudo/sudo_1.9.2.bb | 47 + .../0001-meson-Make-sure-gcr-oids.h-is-built.patch | 36 + poky/meta/recipes-gnome/gcr/gcr_3.36.0.bb | 2 + poky/meta/recipes-gnome/gnome/gconf/python3.patch | 60 + poky/meta/recipes-gnome/gnome/gconf_3.2.6.bb | 3 + .../gobject-introspection_1.64.1.bb | 5 + poky/meta/recipes-gnome/libhandy/libhandy_git.bb | 2 +- poky/meta/recipes-gnome/librsvg/librsvg_2.40.20.bb | 50 - poky/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb | 49 + poky/meta/recipes-graphics/cogl/cogl-1.0.inc | 2 +- .../recipes-graphics/harfbuzz/harfbuzz_2.6.8.bb | 44 - .../recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb | 43 + .../0002-meson.build-make-TLS-ELF-optional.patch | 8 +- poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb | 15 - poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb | 15 + poky/meta/recipes-graphics/mesa/mesa.inc | 3 +- poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb | 2 - poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb | 2 + .../ttf-fonts/ttf-bitstream-vera_1.10.bb | 2 +- .../virglrenderer/virglrenderer_0.8.2.bb | 2 +- .../recipes-graphics/wayland/libinput_1.15.6.bb | 50 - .../recipes-graphics/wayland/libinput_1.16.0.bb | 50 + poky/meta/recipes-graphics/wayland/weston_8.0.0.bb | 20 +- ...id-duplicate-definition-of-I810PatternROP.patch | 27 + .../xorg-driver/xf86-video-intel_git.bb | 1 + ...Avoid-duplicate-definitions-of-IOPortBase.patch | 45 + .../xorg-xserver/xserver-xorg_1.20.8.bb | 1 + .../kern-tools/kern-tools-native_git.bb | 2 +- .../0001-kexec-Fix-build-with-fno-common.patch | 68 + .../recipes-kernel/kexec/kexec-tools_2.0.20.bb | 1 + .../linux-libc-headers/linux-libc-headers.inc | 16 +- ...lude-linux-stddef.h-in-swab.h-uapi-header.patch | 10 +- .../linux-libc-headers/linux-libc-headers_5.4.bb | 16 - .../linux-libc-headers/linux-libc-headers_5.8.bb | 18 + poky/meta/recipes-kernel/linux/kernel-devsrc.bb | 8 +- .../recipes-kernel/linux/linux-yocto-rt_5.4.bb | 6 +- .../recipes-kernel/linux/linux-yocto-rt_5.8.bb | 44 + .../recipes-kernel/linux/linux-yocto-tiny_5.4.bb | 8 +- .../recipes-kernel/linux/linux-yocto-tiny_5.8.bb | 32 + poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb | 24 +- poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb | 55 + .../recipes-kernel/lttng/lttng-modules_2.12.1.bb | 42 - .../recipes-kernel/lttng/lttng-modules_2.12.2.bb | 42 + ...st-events-ns-tp.h-Fix-build-with-musl-lib.patch | 43 - .../recipes-kernel/lttng/lttng-tools_2.12.1.bb | 165 - .../recipes-kernel/lttng/lttng-tools_2.12.2.bb | 164 + .../make-mod-scripts/make-mod-scripts_1.0.bb | 2 + poky/meta/recipes-kernel/perf/perf.bb | 4 + .../recipes-kernel/systemtap/systemtap_git.inc | 2 +- .../alsa/alsa-topology-conf_1.2.3.bb | 4 +- .../recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb | 6 +- .../recipes-multimedia/libomxil/libomxil_0.9.3.bb | 2 + .../recipes-multimedia/mpg123/mpg123_1.26.2.bb | 51 - .../recipes-multimedia/mpg123/mpg123_1.26.3.bb | 51 + poky/meta/recipes-multimedia/x264/x264_git.bb | 4 +- poky/meta/recipes-sato/webkit/libwpe_1.6.0.bb | 18 - poky/meta/recipes-sato/webkit/libwpe_1.7.1.bb | 17 + ...-fix-build-errors-due-to-WWc-11-narrowing.patch | 66 + poky/meta/recipes-sato/webkit/webkitgtk_2.28.3.bb | 131 - 
poky/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb | 132 + .../bash-completion/bash-completion_2.10.bb | 37 - .../bash-completion/bash-completion_2.11.bb | 37 + poky/meta/recipes-support/curl/curl_7.71.1.bb | 83 - poky/meta/recipes-support/curl/curl_7.72.0.bb | 83 + .../recipes-support/diffoscope/diffoscope_151.bb | 17 - .../recipes-support/diffoscope/diffoscope_153.bb | 17 + .../meta/recipes-support/fribidi/fribidi_1.0.10.bb | 2 +- poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb | 6 +- .../0001-tests-do-not-statically-link-a-test.patch | 8 +- .../0002-tests-do-not-run-target-executables.patch | 15 +- poky/meta/recipes-support/libcap/libcap_2.36.bb | 76 - poky/meta/recipes-support/libcap/libcap_2.42.bb | 76 + .../recipes-support/libevdev/libevdev_1.9.0.bb | 16 - .../recipes-support/libevdev/libevdev_1.9.1.bb | 16 + .../meta/recipes-support/libexif/libexif_0.6.22.bb | 2 + poky/meta/recipes-support/libffi/libffi_3.3.bb | 2 +- .../0001-Fix-compilation-with-fno-common.patch | 448 + .../recipes-support/libunwind/libunwind_1.4.0.bb | 1 + .../recipes-support/popt/popt/disable_tests.patch | 21 - .../recipes-support/popt/popt/pkgconfig_fix.patch | 15 - .../popt/popt/popt_fix_for_automake-1.12.patch | 21 - poky/meta/recipes-support/popt/popt_1.16.bb | 22 - poky/meta/recipes-support/popt/popt_1.18.bb | 15 + .../recipes-support/re2c/re2c/CVE-2020-11958.patch | 41 - poky/meta/recipes-support/re2c/re2c_1.3.bb | 16 - poky/meta/recipes-support/re2c/re2c_2.0.bb | 14 + poky/meta/recipes-support/sqlite/sqlite3_3.32.3.bb | 11 - poky/meta/recipes-support/sqlite/sqlite3_3.33.0.bb | 10 + poky/scripts/lib/checklayer/__init__.py | 14 + poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 60 + .../lib/wic/plugins/source/bootimg-partition.py | 2 +- poky/scripts/runqemu | 7 +- poky/scripts/yocto-check-layer | 9 +- 550 files changed, 42564 insertions(+), 42060 deletions(-) create mode 100644 poky/bitbake/lib/bb/tests/color.py delete mode 100644 poky/meta/classes/packagefeed-stability.bbclass create mode 100755 poky/meta/conf/machine/include/arm/arch-armv6m.inc create mode 100644 poky/meta/conf/machine/include/arm/arch-armv8-2a.inc create mode 100755 poky/meta/conf/machine/include/tune-cortex-m0plus.inc delete mode 100644 poky/meta/lib/oe/package_manager.py create mode 100644 poky/meta/lib/oe/package_manager/__init__.py create mode 100644 poky/meta/lib/oe/package_manager/deb/__init__.py create mode 100644 poky/meta/lib/oe/package_manager/deb/manifest.py create mode 100644 poky/meta/lib/oe/package_manager/deb/rootfs.py create mode 100644 poky/meta/lib/oe/package_manager/deb/sdk.py create mode 100644 poky/meta/lib/oe/package_manager/ipk/__init__.py create mode 100644 poky/meta/lib/oe/package_manager/ipk/manifest.py create mode 100644 poky/meta/lib/oe/package_manager/ipk/rootfs.py create mode 100644 poky/meta/lib/oe/package_manager/ipk/sdk.py create mode 100644 poky/meta/lib/oe/package_manager/rpm/__init__.py create mode 100644 poky/meta/lib/oe/package_manager/rpm/manifest.py create mode 100644 poky/meta/lib/oe/package_manager/rpm/rootfs.py create mode 100644 poky/meta/lib/oe/package_manager/rpm/sdk.py create mode 100644 poky/meta/recipes-bsp/libacpi/files/0001-libacpi-Fix-build-witth-fno-commom.patch delete mode 100644 poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb create mode 100644 poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb create mode 100644 poky/meta/recipes-connectivity/inetutils/inetutils/0001-ftpd-telnetd-Fix-multiple-definitions-of-errcatch-an.patch delete mode 100644 
poky/meta/recipes-connectivity/iproute2/iproute2_5.7.0.bb create mode 100644 poky/meta/recipes-connectivity/iproute2/iproute2_5.8.0.bb delete mode 100644 poky/meta/recipes-core/glibc/cross-localedef-native_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/cross-localedef-native_2.32.bb delete mode 100644 poky/meta/recipes-core/glibc/glibc-locale_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/glibc-locale_2.32.bb delete mode 100644 poky/meta/recipes-core/glibc/glibc-mtrace_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/glibc-mtrace_2.32.bb delete mode 100644 poky/meta/recipes-core/glibc/glibc-scripts_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/glibc-scripts_2.32.bb delete mode 100644 poky/meta/recipes-core/glibc/glibc-testsuite_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/glibc-testsuite_2.32.bb delete mode 100644 poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-to-build.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0009-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0009-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0010-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0010-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0011-Quote-from-bug-1443-which-explains-what-the-patch-do.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0011-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0012-Quote-from-bug-1443-which-explains-what-the-patch-do.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0012-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0013-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0013-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0014-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0014-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0015-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0015-yes-within-the-path-sets-wrong-config-variables.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0016-Add-unused-attribute.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0016-timezone-re-written-tzselect-as-posix-sh.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0017-Remove-bash-dependency-for-nscd-init-script.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0017-yes-within-the-path-sets-wrong-config-variables.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0018-eglibc-Cross-building-and-testing-instructions.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0018-timezone-re-written-tzselect-as-posix-sh.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0019-Remove-bash-dependency-for-nscd-init-script.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0019-eglibc-Help-bootstrap-cross-toolchain.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0020-eglibc-Cross-building-and-testing-instructions.patch 
create mode 100644 poky/meta/recipes-core/glibc/glibc/0020-eglibc-Resolve-__fpscr_values-on-SH4.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0021-eglibc-Forward-port-cross-locale-generation-support.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0021-eglibc-Help-bootstrap-cross-toolchain.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0022-Define-DUMMY_LOCALE_T-if-not-defined.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0022-eglibc-Resolve-__fpscr_values-on-SH4.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0023-eglibc-Forward-port-cross-locale-generation-support.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0024-Define-DUMMY_LOCALE_T-if-not-defined.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0024-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0025-intl-Emit-no-lines-in-bison-generated-files.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0026-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0026-inject-file-assembly-directives.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0027-intl-Emit-no-lines-in-bison-generated-files.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0027-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0028-inject-file-assembly-directives.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0028-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0029-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch create mode 100644 poky/meta/recipes-core/glibc/glibc/0030-powerpc-Do-not-ask-compiler-for-finding-arch.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc/0030-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch delete mode 100644 poky/meta/recipes-core/glibc/glibc_2.31.bb create mode 100644 poky/meta/recipes-core/glibc/glibc_2.32.bb delete mode 100644 poky/meta/recipes-core/kbd/kbd/0001-Use-DATADIR-and-append-i386-to-fix-libkbdfile-test08.patch delete mode 100644 poky/meta/recipes-core/kbd/kbd/0001-analyze.l-add-missing-string-format.patch delete mode 100644 poky/meta/recipes-core/kbd/kbd/fix_cflags.patch delete mode 100644 poky/meta/recipes-core/kbd/kbd/run-ptest delete mode 100644 poky/meta/recipes-core/kbd/kbd/set-proper-path-of-resources.patch delete mode 100644 poky/meta/recipes-core/kbd/kbd_2.2.0.bb create mode 100644 poky/meta/recipes-core/kbd/kbd_2.3.0.bb delete mode 100644 poky/meta/recipes-core/systemd/systemd-boot_245.6.bb create mode 100644 poky/meta/recipes-core/systemd/systemd-boot_246.1.bb delete mode 100644 poky/meta/recipes-core/systemd/systemd-conf_245.6.bb create mode 100644 poky/meta/recipes-core/systemd/systemd-conf_246.1.bb create mode 100644 poky/meta/recipes-core/systemd/systemd/0001-Use-PREFIX-ROOTPREFIX-correctly.patch create mode 100644 poky/meta/recipes-core/systemd/systemd/0021-Handle-missing-LOCK_EX.patch create mode 100644 
poky/meta/recipes-core/systemd/systemd/0022-Fix-incompatible-pointer-type-struct-sockaddr_un.patch create mode 100644 poky/meta/recipes-core/systemd/systemd/0023-Fix-field-efi_loader_entry_one_shot_stat-has-incompl.patch delete mode 100644 poky/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch delete mode 100644 poky/meta/recipes-core/systemd/systemd_245.6.bb create mode 100644 poky/meta/recipes-core/systemd/systemd_246.2.bb delete mode 100644 poky/meta/recipes-core/sysvinit/sysvinit_2.96.bb create mode 100644 poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb delete mode 100644 poky/meta/recipes-devtools/binutils/binutils-2.34.inc create mode 100644 poky/meta/recipes-devtools/binutils/binutils-2.35.inc delete mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.34.bb create mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.35.bb delete mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.34.bb create mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.35.bb delete mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross_2.34.bb create mode 100644 poky/meta/recipes-devtools/binutils/binutils-cross_2.35.bb delete mode 100644 poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.34.bb create mode 100644 poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.35.bb delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0007-Use-libtool-2.4.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0007-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0008-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0008-warn-for-uses-of-system-directories-when-cross-linki.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0009-Change-default-emulation-for-mips64-linux.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0010-Add-support-for-Netlogic-XLP.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0010-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0011-Change-default-emulation-for-mips64-linux.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0011-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0012-Add-support-for-Netlogic-XLP.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0012-Detect-64-bit-MIPS-targets.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0013-Use-libtool-2.4.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0013-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0014-Detect-64-bit-MIPS-targets.patch create mode 100644 poky/meta/recipes-devtools/binutils/binutils/0014-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/0017-binutils-drop-redundant-program_name-definition-fno-.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils/CVE-2020-0551.patch delete mode 100644 poky/meta/recipes-devtools/binutils/binutils_2.34.bb create mode 100644 
poky/meta/recipes-devtools/binutils/binutils_2.35.bb delete mode 100644 poky/meta/recipes-devtools/build-compare/build-compare_git.bb delete mode 100644 poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb create mode 100644 poky/meta/recipes-devtools/ccache/ccache_3.7.11.bb create mode 100644 poky/meta/recipes-devtools/distcc/files/0001-Fix-build-with-gcc-10-which-defaults-to-fno-common-c.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1.inc delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0002-gcc-poison-system-directories.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0004-64-bit-multilib-hack.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0005-optional-libstdc.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0006-COLLECT_GCC_OPTIONS.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0008-fortran-cross-compile-hack.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0009-cpp-honor-sysroot.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0010-MIPS64-Default-to-N64-ABI.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0012-gcc-Fix-argument-list-too-long-error.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0013-Disable-sdt.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0014-libtool.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0018-export-CPP.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0019-Ensure-target-gcc-headers-can-be-included.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0022-aarch64-Add-support-for-musl-ldso.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0024-handle-sysroot-support-for-nativesdk-gcc.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch delete mode 100644 
poky/meta/recipes-devtools/gcc/gcc-10.1/0026-Fix-various-_FOR_BUILD-and-related-variables.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0030-sync-gcc-stddef.h-with-musl.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0031-fix-segmentation-fault-in-precompiled-header-generat.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0032-Fix-for-testsuite-failure.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0033-Re-introduce-spe-commandline-options.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0036-Enable-CET-in-cross-compiler-if-possible.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0037-mingw32-Enable-operation_not_supported.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/0038-libatomic-Do-not-enforce-march-on-aarch64.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.1/pr96130.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc-10.2.inc delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-cross_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-cross_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-runtime_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-runtime_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/gcc-source_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc-source_10.2.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-Fix-up-__aarch64_cas16_acq_rel-fallback.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0002-gcc-poison-system-directories.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0004-64-bit-multilib-hack.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0005-optional-libstdc.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0006-COLLECT_GCC_OPTIONS.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch 
create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0008-fortran-cross-compile-hack.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0009-cpp-honor-sysroot.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0010-MIPS64-Default-to-N64-ABI.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0012-gcc-Fix-argument-list-too-long-error.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0013-Disable-sdt.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0014-libtool.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0018-export-CPP.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0019-Ensure-target-gcc-headers-can-be-included.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0022-aarch64-Add-support-for-musl-ldso.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0024-handle-sysroot-support-for-nativesdk-gcc.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0026-Fix-various-_FOR_BUILD-and-related-variables.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0030-sync-gcc-stddef.h-with-musl.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0031-fix-segmentation-fault-in-precompiled-header-generat.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0032-Fix-for-testsuite-failure.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0033-Re-introduce-spe-commandline-options.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0036-mingw32-Enable-operation_not_supported.patch create mode 100644 poky/meta/recipes-devtools/gcc/gcc/0037-libatomic-Do-not-enforce-march-on-aarch64.patch delete mode 100644 poky/meta/recipes-devtools/gcc/gcc_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/gcc_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/libgcc-initial_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/libgcc-initial_10.2.bb delete mode 100644 poky/meta/recipes-devtools/gcc/libgcc_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/libgcc_10.2.bb delete mode 100644 
poky/meta/recipes-devtools/gcc/libgfortran_10.1.bb create mode 100644 poky/meta/recipes-devtools/gcc/libgfortran_10.2.bb delete mode 100644 poky/meta/recipes-devtools/git/git_2.27.0.bb create mode 100644 poky/meta/recipes-devtools/git/git_2.28.0.bb delete mode 100644 poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb create mode 100644 poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb delete mode 100644 poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch delete mode 100644 poky/meta/recipes-devtools/json-c/json-c_0.14.bb create mode 100644 poky/meta/recipes-devtools/json-c/json-c_0.15.bb create mode 100644 poky/meta/recipes-devtools/libdnf/libdnf/enable_test_data_dir_set.patch delete mode 100644 poky/meta/recipes-devtools/meson/meson_0.54.3.bb create mode 100644 poky/meta/recipes-devtools/meson/meson_0.55.0.bb delete mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.3.bb create mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.0.bb delete mode 100644 poky/meta/recipes-devtools/nasm/nasm/0001-fix-pa_add_headers.m4-for-development-versions-of-au.patch delete mode 100644 poky/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch delete mode 100644 poky/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch delete mode 100644 poky/meta/recipes-devtools/nasm/nasm_2.14.02.bb create mode 100644 poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb create mode 100644 poky/meta/recipes-devtools/perl/files/perl-cross-makefile.patch delete mode 100644 poky/meta/recipes-devtools/python/python-setuptools.inc delete mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb create mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb delete mode 100644 poky/meta/recipes-devtools/python/python3/0001-configure.ac-define-a-path-for-profile-data.patch delete mode 100644 poky/meta/recipes-devtools/python/python3_3.8.3.bb create mode 100644 poky/meta/recipes-devtools/python/python3_3.8.5.bb delete mode 100644 poky/meta/recipes-devtools/qemu/qemu-native_5.0.0.bb create mode 100644 poky/meta/recipes-devtools/qemu/qemu-native_5.1.0.bb delete mode 100644 poky/meta/recipes-devtools/qemu/qemu-system-native_5.0.0.bb create mode 100644 poky/meta/recipes-devtools/qemu/qemu-system-native_5.1.0.bb delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/0013-cpus.c-Add-error-messages-when-qemi_cpu_kick_thread-.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13361.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13362.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13659.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13800.patch delete mode 100644 poky/meta/recipes-devtools/qemu/qemu_5.0.0.bb create mode 100644 poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb create mode 100644 poky/meta/recipes-devtools/rpm/files/environment.d-rpm.sh delete mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb create mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb delete mode 100644 poky/meta/recipes-devtools/strace/strace_5.7.bb create mode 100644 poky/meta/recipes-devtools/strace/strace_5.8.bb delete mode 100644 poky/meta/recipes-devtools/vala/vala/0001-git-version-gen-don-t-append-dirty-if-we-re-not-in-g.patch delete mode 100644 poky/meta/recipes-devtools/vala/vala_0.48.6.bb create mode 100644 
poky/meta/recipes-devtools/vala/vala_0.48.7.bb delete mode 100644 poky/meta/recipes-extended/ethtool/ethtool_5.7.bb create mode 100644 poky/meta/recipes-extended/ethtool/ethtool_5.8.bb create mode 100644 poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-15900.patch delete mode 100644 poky/meta/recipes-extended/logrotate/logrotate_3.16.0.bb create mode 100644 poky/meta/recipes-extended/logrotate/logrotate_3.17.0.bb create mode 100644 poky/meta/recipes-extended/ltp/ltp/0001-Remove-OOM-tests-from-runtest-mm.patch create mode 100644 poky/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch create mode 100644 poky/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch create mode 100644 poky/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/0001-added-ull-prefix-to-unsigned-long-long-constants-to-.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/0001-lib-inet6.c-INET6_rresolve-various-fixes.patch create mode 100644 poky/meta/recipes-extended/net-tools/net-tools/Add_missing_headers.patch create mode 100644 poky/meta/recipes-extended/net-tools/net-tools/Bug_443075-ifconfig.c-pointtopoint_spelling.patch create mode 100644 poky/meta/recipes-extended/net-tools/net-tools/Bug_541172-netstat.c-exit-codes.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/ifconfig-interface-0-del-IP-will-remove-the-aliased-.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/musl-fixes.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp1.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp2-quiet.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp3-addrs.patch delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools/net-tools-fix-building-with-linux-4.8.patch create mode 100644 poky/meta/recipes-extended/net-tools/net-tools_1.60-20181103.bb delete mode 100644 poky/meta/recipes-extended/net-tools/net-tools_1.60-26.bb delete mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.15.bb create mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb delete mode 100644 poky/meta/recipes-extended/sudo/sudo_1.9.1.bb create mode 100644 poky/meta/recipes-extended/sudo/sudo_1.9.2.bb create mode 100644 poky/meta/recipes-gnome/gcr/gcr/0001-meson-Make-sure-gcr-oids.h-is-built.patch create mode 100644 poky/meta/recipes-gnome/gnome/gconf/python3.patch delete mode 100644 poky/meta/recipes-gnome/librsvg/librsvg_2.40.20.bb create mode 100644 poky/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb delete mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.8.bb create mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb delete mode 100644 poky/meta/recipes-graphics/wayland/libinput_1.15.6.bb create mode 100644 poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb create mode 100644 poky/meta/recipes-graphics/xorg-driver/xf86-video-intel/0001-i810-Avoid-duplicate-definition-of-I810PatternROP.patch create mode 100644 
poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Avoid-duplicate-definitions-of-IOPortBase.patch create mode 100644 poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch delete mode 100644 poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.4.bb create mode 100644 poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.8.bb create mode 100644 poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb create mode 100644 poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb create mode 100644 poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb delete mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb delete mode 100644 poky/meta/recipes-kernel/lttng/lttng-tools/0001-tests-gen-ust-events-ns-tp.h-Fix-build-with-musl-lib.patch delete mode 100644 poky/meta/recipes-kernel/lttng/lttng-tools_2.12.1.bb create mode 100644 poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb delete mode 100644 poky/meta/recipes-multimedia/mpg123/mpg123_1.26.2.bb create mode 100644 poky/meta/recipes-multimedia/mpg123/mpg123_1.26.3.bb delete mode 100644 poky/meta/recipes-sato/webkit/libwpe_1.6.0.bb create mode 100644 poky/meta/recipes-sato/webkit/libwpe_1.7.1.bb create mode 100644 poky/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch delete mode 100644 poky/meta/recipes-sato/webkit/webkitgtk_2.28.3.bb create mode 100644 poky/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb delete mode 100644 poky/meta/recipes-support/bash-completion/bash-completion_2.10.bb create mode 100644 poky/meta/recipes-support/bash-completion/bash-completion_2.11.bb delete mode 100644 poky/meta/recipes-support/curl/curl_7.71.1.bb create mode 100644 poky/meta/recipes-support/curl/curl_7.72.0.bb delete mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_151.bb create mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_153.bb delete mode 100644 poky/meta/recipes-support/libcap/libcap_2.36.bb create mode 100644 poky/meta/recipes-support/libcap/libcap_2.42.bb delete mode 100644 poky/meta/recipes-support/libevdev/libevdev_1.9.0.bb create mode 100644 poky/meta/recipes-support/libevdev/libevdev_1.9.1.bb create mode 100644 poky/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch delete mode 100644 poky/meta/recipes-support/popt/popt/disable_tests.patch delete mode 100644 poky/meta/recipes-support/popt/popt/pkgconfig_fix.patch delete mode 100644 poky/meta/recipes-support/popt/popt/popt_fix_for_automake-1.12.patch delete mode 100644 poky/meta/recipes-support/popt/popt_1.16.bb create mode 100644 poky/meta/recipes-support/popt/popt_1.18.bb delete mode 100644 poky/meta/recipes-support/re2c/re2c/CVE-2020-11958.patch delete mode 100644 poky/meta/recipes-support/re2c/re2c_1.3.bb create mode 100644 poky/meta/recipes-support/re2c/re2c_2.0.bb delete mode 100644 poky/meta/recipes-support/sqlite/sqlite3_3.32.3.bb create mode 100644 poky/meta/recipes-support/sqlite/sqlite3_3.33.0.bb (limited to 'poky/meta/recipes-devtools/meson/meson.inc') diff --git a/poky/bitbake/bin/bitbake-selftest b/poky/bitbake/bin/bitbake-selftest index 041a2719f..e84d6a559 100755 --- a/poky/bitbake/bin/bitbake-selftest +++ b/poky/bitbake/bin/bitbake-selftest @@ -18,6 +18,7 @@ except RuntimeError as exc: sys.exit(str(exc)) tests = ["bb.tests.codeparser", + "bb.tests.color", "bb.tests.cooker", "bb.tests.cow", "bb.tests.data", diff --git 
a/poky/bitbake/lib/bb/build.py b/poky/bitbake/lib/bb/build.py index 23b6ee455..94f9cb371 100644 --- a/poky/bitbake/lib/bb/build.py +++ b/poky/bitbake/lib/bb/build.py @@ -16,7 +16,9 @@ import os import sys import logging import glob +import itertools import time +import re import stat import bb import bb.msg @@ -303,20 +305,60 @@ def exec_func_python(func, d, runfile, cwd=None): def shell_trap_code(): return '''#!/bin/sh\n +__BITBAKE_LAST_LINE=0 + # Emit a useful diagnostic if something fails: -bb_exit_handler() { +bb_sh_exit_handler() { + ret=$? + if [ "$ret" != 0 ]; then + echo "WARNING: exit code $ret from a shell command." + fi + exit $ret +} + +bb_bash_exit_handler() { ret=$? - case $ret in - 0) ;; - *) case $BASH_VERSION in - "") echo "WARNING: exit code $ret from a shell command.";; - *) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from '$BASH_COMMAND'";; - esac - exit $ret - esac + { set +x; } > /dev/null + trap "" DEBUG + if [ "$ret" != 0 ]; then + echo "WARNING: ${BASH_SOURCE[0]}:${__BITBAKE_LAST_LINE} exit $ret from '$1'" + + echo "WARNING: Backtrace (BB generated script): " + for i in $(seq 1 $((${#FUNCNAME[@]} - 1))); do + if [ "$i" -eq 1 ]; then + echo -e "\t#$((i)): ${FUNCNAME[$i]}, ${BASH_SOURCE[$((i-1))]}, line ${__BITBAKE_LAST_LINE}" + else + echo -e "\t#$((i)): ${FUNCNAME[$i]}, ${BASH_SOURCE[$((i-1))]}, line ${BASH_LINENO[$((i-1))]}" + fi + done + fi + exit $ret +} + +bb_bash_debug_handler() { + local line=${BASH_LINENO[0]} + # For some reason the DEBUG trap trips with lineno=1 when scripts exit; ignore it + if [ "$line" -eq 1 ]; then + return + fi + + # Track the line number of commands as they execute. This is so we can have access to the failing line number + # in the EXIT trap. See http://gnu-bash.2382.n7.nabble.com/trap-echo-quot-trap-exit-on-LINENO-quot-EXIT-gt-wrong-linenumber-td3666.html + if [ "${FUNCNAME[1]}" != "bb_bash_exit_handler" ]; then + __BITBAKE_LAST_LINE=$line + fi } -trap 'bb_exit_handler' 0 -set -e + +case $BASH_VERSION in +"") trap 'bb_sh_exit_handler' 0 + set -e + ;; +*) trap 'bb_bash_exit_handler "$BASH_COMMAND"' 0 + trap '{ bb_bash_debug_handler; } 2>/dev/null' DEBUG + set -e + shopt -s extdebug + ;; +esac ''' def create_progress_handler(func, progress, logfile, d): @@ -346,7 +388,7 @@ def create_progress_handler(func, progress, logfile, d): cls_obj = functools.reduce(resolve, cls.split("."), bb.utils._context) if not cls_obj: # Fall-back on __builtins__ - cls_obj = functools.reduce(lambda x, y: x.get(y), cls.split("."), __builtins__) + cls_obj = functools.reduce(resolve, cls.split("."), __builtins__) if cls_obj: return cls_obj(d, outfile=logfile, otherargs=otherargs) bb.warn('%s: unknown custom progress handler in task progress varflag value "%s", ignoring' % (func, cls)) @@ -398,7 +440,13 @@ exit $ret progress = d.getVarFlag(func, 'progress') if progress: - logfile = create_progress_handler(func, progress, logfile, d) + try: + logfile = create_progress_handler(func, progress, logfile, d) + except: + from traceback import format_exc + logger.error("Failed to create progress handler") + logger.error(format_exc()) + raise fifobuffer = bytearray() def readfifo(data): @@ -450,6 +498,62 @@ exit $ret bb.debug(2, "Executing shell function %s" % func) with open(os.devnull, 'r+') as stdin, logfile: bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)]) + except bb.process.ExecutionError as exe: + # Find the backtrace that the shell trap generated + backtrace_marker_regex = re.compile(r"WARNING: Backtrace 
\(BB generated script\)") + stdout_lines = (exe.stdout or "").split("\n") + backtrace_start_line = None + for i, line in enumerate(reversed(stdout_lines)): + if backtrace_marker_regex.search(line): + backtrace_start_line = len(stdout_lines) - i + break + + # Read the backtrace frames, starting at the location we just found + backtrace_entry_regex = re.compile(r"#(?P<frameno>\d+): (?P<funcname>[^\s]+), (?P<file>.+?), line (?P<lineno>\d+)") + backtrace_frames = [] + if backtrace_start_line: + for line in itertools.islice(stdout_lines, backtrace_start_line, None): + match = backtrace_entry_regex.search(line) + if match: + backtrace_frames.append(match.groupdict()) + + with open(runfile, "r") as script: + script_lines = [line.rstrip() for line in script.readlines()] + + # For each backtrace frame, search backwards in the script (from the line number called out by the frame), + # to find the comment that emit_vars injected when it wrote the script. This will give us the metadata + # filename (e.g. .bb or .bbclass) and line number where the shell function was originally defined. + script_metadata_comment_regex = re.compile(r"# line: (?P<lineno>\d+), file: (?P<file>.+)") + better_frames = [] + # Skip the very last frame since it's just the call to the shell task in the body of the script + for frame in backtrace_frames[:-1]: + # Check whether the frame corresponds to a function defined in the script vs external script. + if os.path.samefile(frame["file"], runfile): + # Search backwards from the frame lineno to locate the comment that BB injected + i = int(frame["lineno"]) - 1 + while i >= 0: + match = script_metadata_comment_regex.match(script_lines[i]) + if match: + # Calculate the relative line in the function itself + relative_line_in_function = int(frame["lineno"]) - i - 2 + # Calculate line in the function as declared in the metadata + metadata_function_line = relative_line_in_function + int(match["lineno"]) + better_frames.append("#{frameno}: {funcname}, {file}, line {lineno}".format( + frameno=frame["frameno"], + funcname=frame["funcname"], + file=match["file"], + lineno=metadata_function_line + )) + break + i -= 1 + else: + better_frames.append("#{frameno}: {funcname}, {file}, line {lineno}".format(**frame)) + + if better_frames: + better_frames = ("\t{0}".format(frame) for frame in better_frames) + exe.extra_message = "\nBacktrace (metadata-relative locations):\n{0}".format("\n".join(better_frames)) + raise finally: os.unlink(fifopath) diff --git a/poky/bitbake/lib/bb/command.py b/poky/bitbake/lib/bb/command.py index 805ed9216..4d152ff4c 100644 --- a/poky/bitbake/lib/bb/command.py +++ b/poky/bitbake/lib/bb/command.py @@ -84,7 +84,7 @@ class Command: if command not in CommandsAsync.__dict__: return None, "No such command" self.currentAsyncCommand = (command, commandline) - self.cooker.configuration.server_register_idlecallback(self.cooker.runCommands, self.cooker) + self.cooker.idleCallBackRegister(self.cooker.runCommands, self.cooker) return True, None def runAsyncCommand(self): @@ -723,10 +723,10 @@ class CommandsAsync: """ Find signature info files via the signature generator """ - pn = params[0] + (mc, pn) = bb.runqueue.split_mc(params[0]) taskname = params[1] sigs = params[2] - res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data) - bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data) + res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.databuilder.mcdata[mc]) + bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.databuilder.mcdata[mc]) command.finishAsyncCommand()
findSigInfo.needcache = False diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py index f6abc6348..912360546 100644 --- a/poky/bitbake/lib/bb/cooker.py +++ b/poky/bitbake/lib/bb/cooker.py @@ -148,7 +148,7 @@ class BBCooker: Manages one bitbake build run """ - def __init__(self, configuration, featureSet=None): + def __init__(self, configuration, featureSet=None, idleCallBackRegister=None): self.recipecaches = None self.skiplist = {} self.featureset = CookerFeatures() @@ -158,6 +158,8 @@ class BBCooker: self.configuration = configuration + self.idleCallBackRegister = idleCallBackRegister + bb.debug(1, "BBCooker starting %s" % time.time()) sys.stdout.flush() @@ -210,7 +212,7 @@ class BBCooker: cooker.process_inotify_updates() return 1.0 - self.configuration.server_register_idlecallback(_process_inotify_updates, self) + self.idleCallBackRegister(_process_inotify_updates, self) # TOSTOP must not be set or our children will hang when they output try: @@ -1423,7 +1425,7 @@ class BBCooker: return True return retval - self.configuration.server_register_idlecallback(buildFileIdle, rq) + self.idleCallBackRegister(buildFileIdle, rq) def buildTargets(self, targets, task): """ @@ -1494,7 +1496,7 @@ class BBCooker: if 'universe' in targets: rq.rqdata.warn_multi_bb = True - self.configuration.server_register_idlecallback(buildTargetsIdle, rq) + self.idleCallBackRegister(buildTargetsIdle, rq) def getAllKeysWithFlags(self, flaglist): diff --git a/poky/bitbake/lib/bb/cookerdata.py b/poky/bitbake/lib/bb/cookerdata.py index 24bf09c56..b86e7d446 100644 --- a/poky/bitbake/lib/bb/cookerdata.py +++ b/poky/bitbake/lib/bb/cookerdata.py @@ -143,16 +143,10 @@ class CookerConfiguration(object): setattr(self, key, parameters.options.__dict__[key]) self.env = parameters.environment.copy() - def setServerRegIdleCallback(self, srcb): - self.server_register_idlecallback = srcb - def __getstate__(self): state = {} for key in self.__dict__.keys(): - if key == "server_register_idlecallback": - state[key] = None - else: - state[key] = getattr(self, key) + state[key] = getattr(self, key) return state def __setstate__(self,state): diff --git a/poky/bitbake/lib/bb/data.py b/poky/bitbake/lib/bb/data.py index b0683c518..97022853c 100644 --- a/poky/bitbake/lib/bb/data.py +++ b/poky/bitbake/lib/bb/data.py @@ -161,6 +161,12 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False): return True if func: + # Write a comment indicating where the shell function came from (line number and filename) to make it easier + # for the user to diagnose task failures. This comment is also used by build.py to determine the metadata + # location of shell functions. 
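The marker comment written by emit_var() here and the backtrace post-processing added to bb/build.py earlier in this patch work as a pair: the exception handler walks backwards from a failing script line to the nearest "# line: N, file: F" comment and rewrites the frame in terms of the original .bb/.bbclass location. The following standalone sketch mirrors that arithmetic; the script content, recipe path and line numbers are hypothetical and are not taken from the patch.

import re

# Hypothetical generated run-script content; real scripts are written by emit_var().
script_lines = [
    "# line: 21, file: /work/meta-example/recipes-example/example_1.0.bb",
    "do_compile() {",
    "    oe_runmake",
    "    false",
    "}",
]

marker = re.compile(r"# line: (?P<lineno>\d+), file: (?P<file>.+)")

def metadata_location(script_lineno):
    # Walk backwards from the failing script line (1-based) to the nearest marker
    # comment, then apply the same offset arithmetic as the handler in bb/build.py.
    i = script_lineno - 1
    while i >= 0:
        m = marker.match(script_lines[i])
        if m:
            relative_line_in_function = script_lineno - i - 2
            return m["file"], int(m["lineno"]) + relative_line_in_function
        i -= 1
    return None

print(metadata_location(4))  # ('/work/meta-example/recipes-example/example_1.0.bb', 23)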
+ o.write("# line: {0}, file: {1}\n".format( + d.getVarFlag(var, "lineno", False), + d.getVarFlag(var, "filename", False))) # NOTE: should probably check for unbalanced {} within the var val = val.rstrip('\n') o.write("%s() {\n%s\n}\n" % (varExpanded, val)) diff --git a/poky/bitbake/lib/bb/process.py b/poky/bitbake/lib/bb/process.py index 2dc472a86..f36c929d2 100644 --- a/poky/bitbake/lib/bb/process.py +++ b/poky/bitbake/lib/bb/process.py @@ -41,6 +41,7 @@ class ExecutionError(CmdError): self.exitcode = exitcode self.stdout = stdout self.stderr = stderr + self.extra_message = None def __str__(self): message = "" @@ -51,7 +52,7 @@ class ExecutionError(CmdError): if message: message = ":\n" + message return (CmdError.__str__(self) + - " with exit code %s" % self.exitcode + message) + " with exit code %s" % self.exitcode + message + (self.extra_message or "")) class Popen(subprocess.Popen): defaults = { diff --git a/poky/bitbake/lib/bb/progress.py b/poky/bitbake/lib/bb/progress.py index 9c755b7f7..d051ba019 100644 --- a/poky/bitbake/lib/bb/progress.py +++ b/poky/bitbake/lib/bb/progress.py @@ -14,7 +14,27 @@ import bb.event import bb.build from bb.build import StdoutNoopContextManager -class ProgressHandler(object): + +# from https://stackoverflow.com/a/14693789/221061 +ANSI_ESCAPE_REGEX = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]') + + +def filter_color(string): + """ + Filter ANSI escape codes out of |string|, return new string + """ + return ANSI_ESCAPE_REGEX.sub('', string) + + +def filter_color_n(string): + """ + Filter ANSI escape codes out of |string|, returns tuple of + (new string, # of ANSI codes removed) + """ + return ANSI_ESCAPE_REGEX.subn('', string) + + +class ProgressHandler: """ Base class that can pretend to be a file object well enough to be used to build objects to intercept console output and determine the @@ -55,6 +75,7 @@ class ProgressHandler(object): self._lastevent = ts self._progress = progress + class LineFilterProgressHandler(ProgressHandler): """ A ProgressHandler variant that provides the ability to filter out @@ -66,7 +87,7 @@ class LineFilterProgressHandler(ProgressHandler): """ def __init__(self, d, outfile=None): self._linebuffer = '' - super(LineFilterProgressHandler, self).__init__(d, outfile) + super().__init__(d, outfile) def write(self, string): self._linebuffer += string @@ -80,41 +101,44 @@ class LineFilterProgressHandler(ProgressHandler): lbreakpos = line.rfind('\r') + 1 if lbreakpos: line = line[lbreakpos:] - if self.writeline(line): - super(LineFilterProgressHandler, self).write(line) + if self.writeline(filter_color(line)): + super().write(line) def writeline(self, line): return True + class BasicProgressHandler(ProgressHandler): def __init__(self, d, regex=r'(\d+)%', outfile=None): - super(BasicProgressHandler, self).__init__(d, outfile) + super().__init__(d, outfile) self._regex = re.compile(regex) # Send an initial progress event so the bar gets shown self._fire_progress(0) def write(self, string): - percs = self._regex.findall(string) + percs = self._regex.findall(filter_color(string)) if percs: progress = int(percs[-1]) self.update(progress) - super(BasicProgressHandler, self).write(string) + super().write(string) + class OutOfProgressHandler(ProgressHandler): def __init__(self, d, regex, outfile=None): - super(OutOfProgressHandler, self).__init__(d, outfile) + super().__init__(d, outfile) self._regex = re.compile(regex) # Send an initial progress event so the bar gets shown self._fire_progress(0) def write(self, string): - nums = 
self._regex.findall(string) + nums = self._regex.findall(filter_color(string)) if nums: progress = (float(nums[-1][0]) / float(nums[-1][1])) * 100 self.update(progress) - super(OutOfProgressHandler, self).write(string) + super().write(string) + -class MultiStageProgressReporter(object): +class MultiStageProgressReporter: """ Class which allows reporting progress without the caller having to know where they are in the overall sequence. Useful @@ -199,6 +223,7 @@ class MultiStageProgressReporter(object): value is considered to be out of stage_total, otherwise it should be a percentage value from 0 to 100. """ + progress = None if self._stage_total: stage_progress = (float(stage_progress) / self._stage_total) * 100 if self._stage < 0: @@ -207,9 +232,10 @@ class MultiStageProgressReporter(object): progress = self._base_progress + (stage_progress * self._stage_weights[self._stage]) else: progress = self._base_progress - if progress > 100: - progress = 100 - self._fire_progress(progress) + if progress: + if progress > 100: + progress = 100 + self._fire_progress(progress) def finish(self): if self._finished: @@ -230,6 +256,7 @@ class MultiStageProgressReporter(object): out.append('Up to finish: %d' % stage_weight) bb.warn('Stage times:\n %s' % '\n '.join(out)) + class MultiStageProcessProgressReporter(MultiStageProgressReporter): """ Version of MultiStageProgressReporter intended for use with @@ -238,7 +265,7 @@ class MultiStageProcessProgressReporter(MultiStageProgressReporter): def __init__(self, d, processname, stage_weights, debug=False): self._processname = processname self._started = False - MultiStageProgressReporter.__init__(self, d, stage_weights, debug) + super().__init__(d, stage_weights, debug) def start(self): if not self._started: @@ -255,13 +282,14 @@ class MultiStageProcessProgressReporter(MultiStageProgressReporter): MultiStageProgressReporter.finish(self) bb.event.fire(bb.event.ProcessFinished(self._processname), self._data) + class DummyMultiStageProcessProgressReporter(MultiStageProgressReporter): """ MultiStageProcessProgressReporter that takes the calls and does nothing with them (to avoid a bunch of "if progress_reporter:" checks) """ def __init__(self): - MultiStageProcessProgressReporter.__init__(self, "", None, []) + super().__init__(None, []) def _fire_progress(self, taskprogress, rate=None): pass diff --git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py index 9ec79f5b6..65e1eab52 100644 --- a/poky/bitbake/lib/bb/server/process.py +++ b/poky/bitbake/lib/bb/server/process.py @@ -34,12 +34,11 @@ logger = logging.getLogger('BitBake') class ProcessTimeout(SystemExit): pass -class ProcessServer(multiprocessing.Process): +class ProcessServer(): profile_filename = "profile.log" profile_processed_filename = "profile.log.processed" - def __init__(self, lock, sock, sockname): - multiprocessing.Process.__init__(self) + def __init__(self, lock, sock, sockname, server_timeout, xmlrpcinterface): self.command_channel = False self.command_channel_reply = False self.quit = False @@ -47,6 +46,7 @@ class ProcessServer(multiprocessing.Process): self.next_heartbeat = time.time() self.event_handle = None + self.hadanyui = False self.haveui = False self.maxuiwait = 30 self.xmlrpc = False @@ -57,6 +57,9 @@ class ProcessServer(multiprocessing.Process): self.sock = sock self.sockname = sockname + self.server_timeout = server_timeout + self.xmlrpcinterface = xmlrpcinterface + def register_idle_function(self, function, data): """Register a function to be called 
while the server is idle""" assert hasattr(function, '__call__') @@ -188,6 +191,7 @@ class ProcessServer(multiprocessing.Process): self.command_channel_reply = writer self.haveui = True + self.hadanyui = True except (EOFError, OSError): disconnect_client(self, fds) @@ -200,7 +204,7 @@ class ProcessServer(multiprocessing.Process): # If we don't see a UI connection within maxuiwait, its unlikely we're going to see # one. We have had issue with processes hanging indefinitely so timing out UI-less # servers is useful. - if not self.haveui and not self.timeout and (self.lastui + self.maxuiwait) < time.time(): + if not self.hadanyui and not self.xmlrpc and not self.timeout and (self.lastui + self.maxuiwait) < time.time(): print("No UI connection within max timeout, exiting to avoid infinite loop.") self.quit = True @@ -243,6 +247,10 @@ class ProcessServer(multiprocessing.Process): self.cooker.post_serve() + # Flush logs before we release the lock + sys.stdout.flush() + sys.stderr.flush() + # Finally release the lockfile but warn about other processes holding it open lock = self.bitbake_lock lockfile = lock.name @@ -465,23 +473,25 @@ class BitBakeServer(object): print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format))) sys.stdout.flush() - server = ProcessServer(self.bitbake_lock, self.sock, self.sockname) - self.configuration.setServerRegIdleCallback(server.register_idle_function) - os.close(self.readypipe) - writer = ConnectionWriter(self.readypipein) try: - self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset) - except bb.BBHandledException: - return None - writer.send("r") - writer.close() - server.cooker = self.cooker - server.server_timeout = self.configuration.server_timeout - server.xmlrpcinterface = self.configuration.xmlrpcinterface - print("Started bitbake server pid %d" % os.getpid()) - sys.stdout.flush() - - server.start() + server = ProcessServer(self.bitbake_lock, self.sock, self.sockname, self.configuration.server_timeout, self.configuration.xmlrpcinterface) + os.close(self.readypipe) + writer = ConnectionWriter(self.readypipein) + try: + self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset, server.register_idle_function) + except bb.BBHandledException: + return None + writer.send("r") + writer.close() + server.cooker = self.cooker + print("Started bitbake server pid %d" % os.getpid()) + sys.stdout.flush() + + server.run() + finally: + # Flush any messages/errors to the logfile before exit + sys.stdout.flush() + sys.stderr.flush() def connectProcessServer(sockname, featureset): # Connect to socket diff --git a/poky/bitbake/lib/bb/tests/color.py b/poky/bitbake/lib/bb/tests/color.py new file mode 100644 index 000000000..bf03750c6 --- /dev/null +++ b/poky/bitbake/lib/bb/tests/color.py @@ -0,0 +1,95 @@ +# +# BitBake Test for ANSI color code filtering +# +# Copyright (C) 2020 Agilent Technologies, Inc.
+# Author: Chris Laplante +# +# SPDX-License-Identifier: MIT +# + +import unittest +import bb.progress +import bb.data +import bb.event +from bb.progress import filter_color, filter_color_n +import io +import re + + +class ProgressWatcher: + def __init__(self): + self._reports = [] + + def handle_event(self, event): + self._reports.append((event.progress, event.rate)) + + def reports(self): + return self._reports + + +class ColorCodeTests(unittest.TestCase): + def setUp(self): + self.d = bb.data.init() + self._progress_watcher = ProgressWatcher() + bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event) + + def tearDown(self): + bb.event.remove("bb.build.TaskProgress", None) + + def test_filter_color(self): + input_string = "~~~~~~~~~~~~^~~~~~~~" + filtered = filter_color(input_string) + self.assertEqual(filtered, "~~~~~~~~~~~~^~~~~~~~") + + def test_filter_color_n(self): + input_string = "~~~~~~~~~~~~^~~~~~~~" + filtered, code_count = filter_color_n(input_string) + self.assertEqual(filtered, "~~~~~~~~~~~~^~~~~~~~") + self.assertEqual(code_count, 4) + + def test_LineFilterProgressHandler_color_filtering(self): + class CustomProgressHandler(bb.progress.LineFilterProgressHandler): + PROGRESS_REGEX = re.compile(r"Progress: (?P<progress>\d+)%") + + def writeline(self, line): + match = self.PROGRESS_REGEX.match(line) + if match: + self.update(int(match.group("progress"))) + return False + return True + + buffer = io.StringIO() + handler = CustomProgressHandler(self.d, buffer) + handler.write("Program output!\n") + handler.write("More output!\n") + handler.write("Progress: 10%\n") # 10% + handler.write("Even more\n") + handler.write("Progress: 50%\n") # 50% + handler.write("Progress: 60%\n") # 60% + handler.write("Progress: 100%\n") # 100% + + expected = [(10, None), (50, None), (60, None), (100, None)] + self.assertEqual(self._progress_watcher.reports(), expected) + + self.assertEqual(buffer.getvalue(), "Program output!\nMore output!\nEven more\n") + + def test_BasicProgressHandler_color_filtering(self): + buffer = io.StringIO() + handler = bb.progress.BasicProgressHandler(self.d, outfile=buffer) + handler.write("1%\n") # 1% + handler.write("2%\n") # 2% + handler.write("10%\n") # 10% + handler.write("100%\n") # 100% + + expected = [(0, None), (1, None), (2, None), (10, None), (100, None)] + self.assertListEqual(self._progress_watcher.reports(), expected) + + def test_OutOfProgressHandler_color_filtering(self): + buffer = io.StringIO() + handler = bb.progress.OutOfProgressHandler(self.d, r'(\d+) of (\d+)', outfile=buffer) + handler.write("Text text 1 of 5") # 1/5 + handler.write("Text text 3 of 5") # 3/5 + handler.write("Text text 5 of 5") # 5/5 + + expected = [(0, None), (20.0, None), (60.0, None), (100.0, None)] + self.assertListEqual(self._progress_watcher.reports(), expected) diff --git a/poky/documentation/bsp-guide/bsp.xml b/poky/documentation/bsp-guide/bsp.xml index 72a077e80..f5c3f31fa 100644 --- a/poky/documentation/bsp-guide/bsp.xml +++ b/poky/documentation/bsp-guide/bsp.xml @@ -2158,7 +2158,7 @@ IMAGE_BOOT_FILES: Files installed into the device's boot partition when preparing the image using the Wic tool - with the bootimg-partition + with the bootimg-partition or bootimg-efi source plugin.
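Because the progress handlers above now run program output through filter_color() before applying their regexes, progress percentages that a tool prints wrapped in colour escape codes are still recognised, while the codes themselves are counted and stripped. A minimal standalone sketch of the filtering, reusing the same pattern as bb.progress; the sample escape sequences are illustrative and not taken from the patch.

import re

ANSI_ESCAPE_REGEX = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')  # same pattern as bb.progress

def filter_color_n(string):
    # Return (filtered string, number of ANSI escape sequences removed)
    return ANSI_ESCAPE_REGEX.subn('', string)

cleaned, count = filter_color_n("\x1b[32mProgress: 75%\x1b[0m")
print(cleaned)  # Progress: 75%
print(count)    # 2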
diff --git a/poky/documentation/ref-manual/ref-classes.xml b/poky/documentation/ref-manual/ref-classes.xml index ab12373ae..1dcd5fdd0 100644 --- a/poky/documentation/ref-manual/ref-classes.xml +++ b/poky/documentation/ref-manual/ref-classes.xml @@ -1880,8 +1880,82 @@ This check was removed for YP 2.3 release The kernel-fitimage class provides support to - pack zImages. + pack a kernel Image, device trees and a RAM disk into a single + FIT image. In theory, a FIT image can support any number of kernels, + RAM disks and device trees. + However, kernel-fitimage currently only supports + limited use cases: just one kernel image, an optional RAM disk, and + any number of device trees. + + + To create a FIT image, it is required that + KERNEL_CLASSES + is set to "kernel-fitimage" and + KERNEL_IMAGETYPE + is set to "fitImage". + + + + The options for the device tree compiler passed to mkimage via the -D option + when creating the FIT image are specified using the + UBOOT_MKIMAGE_DTCOPTS + variable. + + + + Only a single kernel can be added to the FIT image created by + kernel-fitimage and the kernel image in FIT is + mandatory. + The address where the kernel image is to be loaded by U-Boot is + specified by + UBOOT_LOADADDRESS + and the entrypoint by + UBOOT_ENTRYPOINT. + + + + Multiple device trees can be added to the FIT image created by + kernel-fitimage and the device trees are optional. + The address where the device tree is to be loaded by U-Boot is + specified by + UBOOT_DTBO_LOADADDRESS + for device tree overlays and by + UBOOT_DTB_LOADADDRESS + for device tree binaries. + + + + Only a single RAM disk can be added to the FIT image created by + kernel-fitimage and the RAM disk in FIT is + optional. + The address where the RAM disk image is to be loaded by U-Boot + is specified by + UBOOT_RD_LOADADDRESS + and the entrypoint by + UBOOT_RD_ENTRYPOINT. + The RAM disk is added to the FIT image when + INITRAMFS_IMAGE + is specified. + + + + The FIT image generated by the kernel-fitimage class + is signed when the variables + UBOOT_SIGN_ENABLE, + UBOOT_MKIMAGE_DTCOPTS, + UBOOT_SIGN_KEYDIR + and + UBOOT_SIGN_KEYNAME + are set appropriately. + The default values used for + FIT_HASH_ALG + and + FIT_SIGN_ALG + in kernel-fitimage are "sha256" and "rsa2048" + respectively. + +
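Pulling the variables described above together, a configuration along the following lines would be expected to produce a signed FIT image; the key directory, key name and dtc options shown here are illustrative placeholders rather than values taken from this patch.

# Build a FIT image instead of a plain kernel image
KERNEL_CLASSES += "kernel-fitimage"
KERNEL_IMAGETYPE = "fitImage"

# Optional signing (hash/signature algorithms default to "sha256"/"rsa2048")
UBOOT_SIGN_ENABLE = "1"
UBOOT_SIGN_KEYDIR = "/path/to/keys"                # hypothetical directory holding dev.key/dev.crt
UBOOT_SIGN_KEYNAME = "dev"
UBOOT_MKIMAGE_DTCOPTS = "-I dts -O dtb -p 2000"    # example dtc options passed via mkimage -D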
diff --git a/poky/documentation/ref-manual/ref-variables.xml b/poky/documentation/ref-manual/ref-variables.xml index 9fe744aff..a5064807e 100644 --- a/poky/documentation/ref-manual/ref-variables.xml +++ b/poky/documentation/ref-manual/ref-variables.xml @@ -4991,6 +4991,30 @@ + FIT_HASH_ALG + + FIT_HASH_ALG[doc] = "Specifies the hash algorithm used in creating the FIT Image." + + + + Specifies the hash algorithm used in creating the FIT Image. + For example, sha256. + + + + + FIT_SIGN_ALG + + FIT_SIGN_ALG[doc] = "Specifies the signature algorithm used in creating the FIT Image." + + + + Specifies the signature algorithm used in creating the FIT Image. + For example, rsa2048. + + + + FONT_EXTRA_RDEPENDS FONT_EXTRA_RDEPENDS[doc] = "When a recipe inherits the fontcache class, this variable specifies runtime dependencies for font packages. This variable defaults to 'fontconfig-utils'." @@ -5703,7 +5727,7 @@ A space-separated list of files installed into the boot partition when preparing an image using the Wic tool - with the bootimg-partition source + with the bootimg-partition or bootimg-efi source plugin. By default, the files are installed under the same name as the source files. @@ -15960,6 +15984,38 @@ + UBOOT_DTB_LOADADDRESS + + UBOOT_DTB_LOADADDRESS[doc] = "Specifies the load address for the dtb." + + + + Specifies the load address for the dtb image used by U-Boot. + During FIT image creation, the + UBOOT_DTB_LOADADDRESS variable is used + in kernel-fitimage class to specify the + load address to be used in creating the dtb sections of + Image Tree Source for the FIT image. + + + + + UBOOT_DTBO_LOADADDRESS + + UBOOT_DTBO_LOADADDRESS[doc] = "Specifies the load address for the dtbo." + + + + Specifies the load address for the dtbo image used by U-Boot. + During FIT image creation, the + UBOOT_DTBO_LOADADDRESS variable is used + in kernel-fitimage class to specify the + load address to be used in creating the dtbo sections of + Image Tree Source for the FIT image. + + + + UBOOT_ENTRYPOINT UBOOT_ENTRYPOINT[doc] = "Specifies the entry point for the U-Boot image." @@ -16045,6 +16101,51 @@ + UBOOT_MKIMAGE_DTCOPTS + + UBOOT_MKIMAGE_DTCOPTS[doc] = "Options for the device tree compiler passed to mkimage '-D' option." + + + + Options for the device tree compiler passed to mkimage '-D' + option while creating the FIT image in + kernel-fitimage class. + + + + + UBOOT_RD_LOADADDRESS + + UBOOT_RD_LOADADDRESS[doc] = "Specifies the load address for the ramdisk image." + + + + Specifies the load address for the RAM disk image. + During FIT image creation, the + UBOOT_RD_LOADADDRESS variable is used + in kernel-fitimage class to specify the + load address to be used in creating the Image Tree Source for + the FIT image. + + + + + UBOOT_RD_ENTRYPOINT + + UBOOT_RD_ENTRYPOINT[doc] = "Specifies the entrypoint for the ramdisk image." + + + + Specifies the entrypoint for the RAM disk image. + During FIT image creation, the + UBOOT_RD_ENTRYPOINT variable is used + in kernel-fitimage class to specify the + entrypoint to be used in creating the Image Tree Source for + the FIT image. + + + + UBOOT_SUFFIX UBOOT_SUFFIX[doc] = "Points to the generated U-Boot extension." @@ -16063,6 +16164,47 @@ + UBOOT_SIGN_ENABLE + + UBOOT_SIGN_ENABLE[doc] = "Enable signing of FIT image." + + + + Enable signing of FIT image. The default value is "0". + + + + + UBOOT_SIGN_KEYDIR + + UBOOT_SIGN_KEYDIR[doc] = "Location of the directory containing the RSA key and certificate used for signing FIT image."
+ + + + Location of the directory containing the RSA key and + certificate used for signing FIT image. + + + + + UBOOT_SIGN_KEYNAME + + UBOOT_SIGN_KEYNAME[doc] = "The name of keys used for signing U-boot FIT image" + + + + The name of keys used for signing U-boot FIT image stored in + UBOOT_SIGN_KEYDIR + directory. For e.g. dev.key key and dev.crt certificate + stored in + UBOOT_SIGN_KEYDIR + directory will have + UBOOT_SIGN_KEYNAME + set to "dev". + + + + UBOOT_TARGET UBOOT_TARGET[doc] = "Specifies the target used for building U-Boot." diff --git a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend index 626748541..2c73eb2f6 100644 --- a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend +++ b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend @@ -7,17 +7,17 @@ KMACHINE_genericx86 ?= "common-pc" KMACHINE_genericx86-64 ?= "common-pc-64" KMACHINE_beaglebone-yocto ?= "beaglebone" -SRCREV_machine_genericx86 ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" -SRCREV_machine_genericx86-64 ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" -SRCREV_machine_edgerouter ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" -SRCREV_machine_beaglebone-yocto ?= "ec485bd4afef57715eb45ba331b04b3f941e43bb" +SRCREV_machine_genericx86 ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" +SRCREV_machine_genericx86-64 ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" +SRCREV_machine_edgerouter ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" +SRCREV_machine_beaglebone-yocto ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" COMPATIBLE_MACHINE_genericx86 = "genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto" -LINUX_VERSION_genericx86 = "5.4.49" -LINUX_VERSION_genericx86-64 = "5.4.49" -LINUX_VERSION_edgerouter = "5.4.49" -LINUX_VERSION_beaglebone-yocto = "5.4.49" +LINUX_VERSION_genericx86 = "5.4.54" +LINUX_VERSION_genericx86-64 = "5.4.54" +LINUX_VERSION_edgerouter = "5.4.54" +LINUX_VERSION_beaglebone-yocto = "5.4.54" diff --git a/poky/meta/classes/buildhistory.bbclass b/poky/meta/classes/buildhistory.bbclass index a4288ef9e..805e976ac 100644 --- a/poky/meta/classes/buildhistory.bbclass +++ b/poky/meta/classes/buildhistory.bbclass @@ -429,8 +429,8 @@ def buildhistory_list_installed(d, rootfs_type="image"): from oe.sdk import sdk_list_installed_packages from oe.utils import format_pkg_list - process_list = [('file', 'bh_installed_pkgs.txt'),\ - ('deps', 'bh_installed_pkgs_deps.txt')] + process_list = [('file', 'bh_installed_pkgs_%s.txt' % os.getpid()),\ + ('deps', 'bh_installed_pkgs_deps_%s.txt' % os.getpid())] if rootfs_type == "image": pkgs = image_list_installed_packages(d) @@ -460,9 +460,10 @@ buildhistory_get_installed() { # Get list of installed packages pkgcache="$1/installed-packages.tmp" - cat ${WORKDIR}/bh_installed_pkgs.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs.txt + cat ${WORKDIR}/bh_installed_pkgs_${PID}.txt | sort > $pkgcache && rm ${WORKDIR}/bh_installed_pkgs_${PID}.txt cat $pkgcache | awk '{ print $1 }' > $1/installed-package-names.txt + if [ -s $pkgcache ] ; then cat $pkgcache | awk '{ print $2 }' | xargs -n1 basename > $1/installed-packages.txt else @@ -471,8 +472,8 @@ buildhistory_get_installed() { # Produce dependency graph # First, quote each name to handle characters that cause issues for dot - sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps.txt > $1/depends.tmp && - rm 
${WORKDIR}/bh_installed_pkgs_deps.txt + sed 's:\([^| ]*\):"\1":g' ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt > $1/depends.tmp && + rm ${WORKDIR}/bh_installed_pkgs_deps_${PID}.txt # Remove lines with rpmlib(...) and config(...) dependencies, change the # delimiter from pipe to "->", set the style for recommend lines and # turn versioned dependencies into edge labels. diff --git a/poky/meta/classes/cmake.bbclass b/poky/meta/classes/cmake.bbclass index 8243f7ce8..7c055e8a3 100644 --- a/poky/meta/classes/cmake.bbclass +++ b/poky/meta/classes/cmake.bbclass @@ -21,23 +21,6 @@ python() { d.setVarFlag("do_compile", "progress", r"outof:^\[(\d+)/(\d+)\]\s+") else: bb.fatal("Unknown CMake Generator %s" % generator) - - # C/C++ Compiler (without cpu arch/tune arguments) - if not d.getVar('OECMAKE_C_COMPILER'): - cc_list = d.getVar('CC').split() - if cc_list[0] == 'ccache': - d.setVar('OECMAKE_C_COMPILER_LAUNCHER', cc_list[0]) - d.setVar('OECMAKE_C_COMPILER', cc_list[1]) - else: - d.setVar('OECMAKE_C_COMPILER', cc_list[0]) - - if not d.getVar('OECMAKE_CXX_COMPILER'): - cxx_list = d.getVar('CXX').split() - if cxx_list[0] == 'ccache': - d.setVar('OECMAKE_CXX_COMPILER_LAUNCHER', cxx_list[0]) - d.setVar('OECMAKE_CXX_COMPILER', cxx_list[1]) - else: - d.setVar('OECMAKE_CXX_COMPILER', cxx_list[0]) } OECMAKE_AR ?= "${AR}" @@ -51,8 +34,23 @@ OECMAKE_CXX_LINK_FLAGS ?= "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS} ${CXXFLAGS} ${LD CXXFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}" CFLAGS += "${HOST_CC_ARCH} ${TOOLCHAIN_OPTIONS}" -OECMAKE_C_COMPILER_LAUNCHER ?= "" -OECMAKE_CXX_COMPILER_LAUNCHER ?= "" +def oecmake_map_compiler(compiler, d): + args = d.getVar(compiler).split() + if args[0] == "ccache": + return args[1], args[0] + return args[0], "" + +# C/C++ Compiler (without cpu arch/tune arguments) +OECMAKE_C_COMPILER ?= "${@oecmake_map_compiler('CC', d)[0]}" +OECMAKE_C_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CC', d)[1]}" +OECMAKE_CXX_COMPILER ?= "${@oecmake_map_compiler('CXX', d)[0]}" +OECMAKE_CXX_COMPILER_LAUNCHER ?= "${@oecmake_map_compiler('CXX', d)[1]}" + +# clear compiler vars for allarch to avoid sig hash difference +OECMAKE_C_COMPILER_allarch = "" +OECMAKE_C_COMPILER_LAUNCHER_allarch = "" +OECMAKE_CXX_COMPILER_allarch = "" +OECMAKE_CXX_COMPILER_LAUNCHER_allarch = "" OECMAKE_RPATH ?= "" OECMAKE_PERLNATIVE_DIR ??= "" diff --git a/poky/meta/classes/cml1.bbclass b/poky/meta/classes/cml1.bbclass index 8ab240589..9b9866f4c 100644 --- a/poky/meta/classes/cml1.bbclass +++ b/poky/meta/classes/cml1.bbclass @@ -27,12 +27,16 @@ CROSS_CURSES_INC = '-DCURSES_LOC=""' TERMINFO = "${STAGING_DATADIR_NATIVE}/terminfo" KCONFIG_CONFIG_COMMAND ??= "menuconfig" +KCONFIG_CONFIG_ROOTDIR ??= "${B}" python do_menuconfig() { import shutil + config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config") + configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig") + try: - mtime = os.path.getmtime(".config") - shutil.copy(".config", ".config.orig") + mtime = os.path.getmtime(config) + shutil.copy(config, configorig) except OSError: mtime = 0 @@ -42,7 +46,7 @@ python do_menuconfig() { # FIXME this check can be removed when the minimum bitbake version has been bumped if hasattr(bb.build, 'write_taint'): try: - newmtime = os.path.getmtime(".config") + newmtime = os.path.getmtime(config) except OSError: newmtime = 0 @@ -52,7 +56,7 @@ python do_menuconfig() { } do_menuconfig[depends] += "ncurses-native:do_populate_sysroot" do_menuconfig[nostamp] = "1" -do_menuconfig[dirs] = "${B}" +do_menuconfig[dirs] = 
"${KCONFIG_CONFIG_ROOTDIR}" addtask menuconfig after do_configure python do_diffconfig() { @@ -61,8 +65,8 @@ python do_diffconfig() { workdir = d.getVar('WORKDIR') fragment = workdir + '/fragment.cfg' - configorig = '.config.orig' - config = '.config' + configorig = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config.orig") + config = os.path.join(d.getVar('KCONFIG_CONFIG_ROOTDIR'), ".config") try: md5newconfig = bb.utils.md5_file(configorig) @@ -85,5 +89,5 @@ python do_diffconfig() { } do_diffconfig[nostamp] = "1" -do_diffconfig[dirs] = "${B}" +do_diffconfig[dirs] = "${KCONFIG_CONFIG_ROOTDIR}" addtask diffconfig diff --git a/poky/meta/classes/gtk-icon-cache.bbclass b/poky/meta/classes/gtk-icon-cache.bbclass index dd394af27..340a28385 100644 --- a/poky/meta/classes/gtk-icon-cache.bbclass +++ b/poky/meta/classes/gtk-icon-cache.bbclass @@ -1,5 +1,10 @@ FILES_${PN} += "${datadir}/icons/hicolor" +#gtk+3 reqiure GTK3DISTROFEATURES, DEPENDS on it make all the +#recipes inherit this class require GTK3DISTROFEATURES +inherit features_check +ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" + DEPENDS +=" ${@['hicolor-icon-theme', '']['${BPN}' == 'hicolor-icon-theme']} \ ${@['gdk-pixbuf', '']['${BPN}' == 'gdk-pixbuf']} \ ${@['gtk+3', '']['${BPN}' == 'gtk+3']} \ diff --git a/poky/meta/classes/kernel-devicetree.bbclass b/poky/meta/classes/kernel-devicetree.bbclass index 522c46575..81dda8003 100644 --- a/poky/meta/classes/kernel-devicetree.bbclass +++ b/poky/meta/classes/kernel-devicetree.bbclass @@ -52,7 +52,7 @@ do_configure_append() { do_compile_append() { for dtbf in ${KERNEL_DEVICETREE}; do dtb=`normalize_dtb "$dtbf"` - oe_runmake $dtb + oe_runmake $dtb CC="${KERNEL_CC} $cc_extra " LD="${KERNEL_LD}" ${KERNEL_EXTRA_ARGS} done } diff --git a/poky/meta/classes/kernel-fitimage.bbclass b/poky/meta/classes/kernel-fitimage.bbclass index 72b05ff8d..fa4ea6fee 100644 --- a/poky/meta/classes/kernel-fitimage.bbclass +++ b/poky/meta/classes/kernel-fitimage.bbclass @@ -257,12 +257,21 @@ fitimage_emit_section_config() { # Test if we have any DTBs at all sep="" conf_desc="" + conf_node="conf@" kernel_line="" fdt_line="" ramdisk_line="" setup_line="" default_line="" + # conf node name is selected based on dtb ID if it is present, + # otherwise its selected based on kernel ID + if [ -n "${3}" ]; then + conf_node=$conf_node${3} + else + conf_node=$conf_node${2} + fi + if [ -n "${2}" ]; then conf_desc="Linux kernel" sep=", " @@ -287,12 +296,18 @@ fitimage_emit_section_config() { fi if [ "${6}" = "1" ]; then - default_line="default = \"conf@${3}\";" + # default node is selected based on dtb ID if it is present, + # otherwise its selected based on kernel ID + if [ -n "${3}" ]; then + default_line="default = \"conf@${3}\";" + else + default_line="default = \"conf@${2}\";" + fi fi cat << EOF >> ${1} ${default_line} - conf@${3} { + $conf_node { description = "${6} ${conf_desc}"; ${kernel_line} ${fdt_line} @@ -434,6 +449,13 @@ fitimage_assemble() { # fitimage_emit_section_maint ${1} confstart + # kernel-fitimage.bbclass currently only supports a single kernel (no less or + # more) to be added to the FIT image along with 0 or more device trees and + # 0 or 1 ramdisk. + # If a device tree is to be part of the FIT image, then select + # the default configuration to be used is based on the dtbcount. If there is + # no dtb present than select the default configuation to be based on + # the kernelcount. 
if [ -n "${DTBS}" ]; then i=1 for DTB in ${DTBS}; do @@ -445,6 +467,9 @@ fitimage_assemble() { fi i=`expr ${i} + 1` done + else + defaultconfigcount=1 + fitimage_emit_section_config ${1} "${kernelcount}" "" "${ramdiskcount}" "${setupcount}" "${defaultconfigcount}" fi fitimage_emit_section_maint ${1} sectend diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass index 3311f6e84..96ea61225 100644 --- a/poky/meta/classes/kernel-yocto.bbclass +++ b/poky/meta/classes/kernel-yocto.bbclass @@ -87,6 +87,13 @@ def get_machine_branch(d, default): do_kernel_metadata() { set +e + + if [ -n "$1" ]; then + mode="$1" + else + mode="patch" + fi + cd ${S} export KMETA=${KMETA} @@ -120,14 +127,13 @@ do_kernel_metadata() { if [ -n "${KBUILD_DEFCONFIG}" ]; then if [ -f "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" ]; then if [ -f "${WORKDIR}/defconfig" ]; then - # If the two defconfig's are different, warn that we didn't overwrite the - # one already placed in WORKDIR by the fetcher. + # If the two defconfig's are different, warn that we overwrote the + # one already placed in WORKDIR cmp "${WORKDIR}/defconfig" "${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG}" if [ $? -ne 0 ]; then - bbwarn "defconfig detected in WORKDIR. ${KBUILD_DEFCONFIG} skipped" - else - cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig + bbdebug 1 "detected SRC_URI or unpatched defconfig in WORKDIR. ${KBUILD_DEFCONFIG} copied over it" fi + cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig else cp -f ${S}/arch/${ARCH}/configs/${KBUILD_DEFCONFIG} ${WORKDIR}/defconfig fi @@ -137,17 +143,19 @@ do_kernel_metadata() { fi fi - # was anyone trying to patch the kernel meta data ?, we need to do - # this here, since the scc commands migrate the .cfg fragments to the - # kernel source tree, where they'll be used later. - check_git_config - patches="${@" ".join(find_patches(d,'kernel-meta'))}" - for p in $patches; do - ( - cd ${WORKDIR}/kernel-meta - git am -s $p - ) - done + if [ "$mode" = "patch" ]; then + # was anyone trying to patch the kernel meta data ?, we need to do + # this here, since the scc commands migrate the .cfg fragments to the + # kernel source tree, where they'll be used later. + check_git_config + patches="${@" ".join(find_patches(d,'kernel-meta'))}" + for p in $patches; do + ( + cd ${WORKDIR}/kernel-meta + git am -s $p + ) + done + fi sccs_from_src_uri="${@" ".join(find_sccs(d))}" patches="${@" ".join(find_patches(d,''))}" @@ -212,13 +220,40 @@ do_kernel_metadata() { fi meta_dir=$(kgit --meta) - # run1: pull all the configuration fragments, no matter where they come from - elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} ${KERNEL_FEATURES}`" - if [ -n "${elements}" ]; then - echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition - scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches ${KERNEL_FEATURES} - if [ $? -ne 0 ]; then - bbfatal_log "Could not generate configuration queue for ${KMACHINE}." 
+ KERNEL_FEATURES_FINAL="" + if [ -n "${KERNEL_FEATURES}" ]; then + for feature in ${KERNEL_FEATURES}; do + feature_found=f + for d in $includes; do + path_to_check=$(echo $d | sed 's/-I//g') + if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then + feature_found=t + fi + done + if [ "$feature_found" = "f" ]; then + if [ -n "${KERNEL_DANGLING_FEATURES_WARN_ONLY}" ]; then + bbwarn "Feature '$feature' not found, but KERNEL_DANGLING_FEATURES_WARN_ONLY is set" + bbwarn "This may cause runtime issues, dropping feature and allowing configuration to continue" + else + bberror "Feature '$feature' not found, this will cause configuration failures." + bberror "Check the SRC_URI for meta-data repositories or directories that may be missing" + bbfatal_log "Set KERNEL_DANGLING_FEATURES_WARN_ONLY to ignore this issue" + fi + else + KERNEL_FEATURES_FINAL="$KERNEL_FEATURES_FINAL $feature" + fi + done + fi + + if [ "$mode" = "config" ]; then + # run1: pull all the configuration fragments, no matter where they come from + elements="`echo -n ${bsp_definition} $sccs_defconfig ${sccs} ${patches} $KERNEL_FEATURES_FINAL`" + if [ -n "${elements}" ]; then + echo "${bsp_definition}" > ${S}/${meta_dir}/bsp_definition + scc --force -o ${S}/${meta_dir}:cfg,merge,meta ${includes} $sccs_defconfig $bsp_definition $sccs $patches $KERNEL_FEATURES_FINAL + if [ $? -ne 0 ]; then + bbfatal_log "Could not generate configuration queue for ${KMACHINE}." + fi fi fi @@ -229,12 +264,14 @@ do_kernel_metadata() { sccs="${bsp_definition} ${sccs}" fi - # run2: only generate patches for elements that have been passed on the SRC_URI - elements="`echo -n ${sccs} ${patches} ${KERNEL_FEATURES}`" - if [ -n "${elements}" ]; then - scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} ${KERNEL_FEATURES} - if [ $? -ne 0 ]; then - bbfatal_log "Could not generate configuration queue for ${KMACHINE}." + if [ "$mode" = "patch" ]; then + # run2: only generate patches for elements that have been passed on the SRC_URI + elements="`echo -n ${sccs} ${patches} $KERNEL_FEATURES_FINAL`" + if [ -n "${elements}" ]; then + scc --force -o ${S}/${meta_dir}:patch --cmds patch ${includes} ${sccs} ${patches} $KERNEL_FEATURES_FINAL + if [ $? -ne 0 ]; then + bbfatal_log "Could not generate configuration queue for ${KMACHINE}." 
+ fi fi fi } @@ -338,6 +375,8 @@ do_kernel_configme[depends] += "bc-native:do_populate_sysroot bison-native:do_po do_kernel_configme[depends] += "kern-tools-native:do_populate_sysroot" do_kernel_configme[dirs] += "${S} ${B}" do_kernel_configme() { + do_kernel_metadata config + # translate the kconfig_mode into something that merge_config.sh # understands case ${KCONFIG_MODE} in @@ -380,6 +419,67 @@ do_kernel_configme() { } addtask kernel_configme before do_configure after do_patch +addtask config_analysis + +do_config_analysis[depends] = "virtual/kernel:do_configure" +do_config_analysis[depends] += "kern-tools-native:do_populate_sysroot" + +CONFIG_AUDIT_FILE ?= "${WORKDIR}/config-audit.txt" +CONFIG_ANALYSIS_FILE ?= "${WORKDIR}/config-analysis.txt" + +python do_config_analysis() { + import re, string, sys, subprocess + + s = d.getVar('S') + + env = os.environ.copy() + env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/") + env['LD'] = d.getVar('KERNEL_LD') + env['CC'] = d.getVar('KERNEL_CC') + env['ARCH'] = d.getVar('ARCH') + env['srctree'] = s + + # read specific symbols from the kernel recipe or from local.conf + # i.e.: CONFIG_ANALYSIS_pn-linux-yocto-dev = 'NF_CONNTRACK LOCALVERSION' + config = d.getVar( 'CONFIG_ANALYSIS' ) + if not config: + config = [ "" ] + else: + config = config.split() + + for c in config: + for action in ["analysis","audit"]: + if action == "analysis": + try: + analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--blame', c], cwd=s, env=env ).decode('utf-8') + except subprocess.CalledProcessError as e: + bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8')) + + outfile = d.getVar( 'CONFIG_ANALYSIS_FILE' ) + + if action == "audit": + try: + analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--summary', '--extended', '--sanity', c], cwd=s, env=env ).decode('utf-8') + except subprocess.CalledProcessError as e: + bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8')) + + outfile = d.getVar( 'CONFIG_AUDIT_FILE' ) + + if c: + outdir = os.path.dirname( outfile ) + outname = os.path.basename( outfile ) + outfile = outdir + '/'+ c + '-' + outname + + if config and os.path.isfile(outfile): + os.remove(outfile) + + with open(outfile, 'w+') as f: + f.write( analysis ) + + bb.warn( "Configuration {} executed, see: {} for details".format(action,outfile )) + if c: + bb.warn( analysis ) +} python do_kernel_configcheck() { import re, string, sys, subprocess @@ -389,57 +489,89 @@ python do_kernel_configcheck() { # meta-series for processing kmeta = d.getVar("KMETA") or "meta" if not os.path.exists(kmeta): - kmeta = "." 
+ kmeta + kmeta = subprocess.check_output(['kgit', '--meta'], cwd=d.getVar('S')).decode('utf-8').rstrip() s = d.getVar('S') env = os.environ.copy() env['PATH'] = "%s:%s%s" % (d.getVar('PATH'), s, "/scripts/util/") - env['LD'] = "${KERNEL_LD}" + env['LD'] = d.getVar('KERNEL_LD') + env['CC'] = d.getVar('KERNEL_CC') + env['ARCH'] = d.getVar('ARCH') + env['srctree'] = s try: configs = subprocess.check_output(['scc', '--configs', '-o', s + '/.kernel-meta'], env=env).decode('utf-8') except subprocess.CalledProcessError as e: bb.fatal( "Cannot gather config fragments for audit: %s" % e.output.decode("utf-8") ) - try: - subprocess.check_call(['kconf_check', '--report', '-o', - '%s/%s/cfg' % (s, kmeta), d.getVar('B') + '/.config', s, configs], cwd=s, env=env) - except subprocess.CalledProcessError: - # The configuration gathering can return different exit codes, but - # we interpret them based on the KCONF_AUDIT_LEVEL variable, so we catch - # everything here, and let the run continue. - pass - config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0) bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0) - # if config check visibility is non-zero, report dropped configuration values - mismatch_file = d.expand("${S}/%s/cfg/mismatch.txt" % kmeta) - if os.path.exists(mismatch_file): - if config_check_visibility: - with open (mismatch_file, "r") as myfile: + # if config check visibility is "1", that's the lowest level of audit. So + # we add the --classify option to the run, since classification will + # streamline the output to only report options that could be boot issues, + # or are otherwise required for proper operation. + extra_params = "" + if config_check_visibility == 1: + extra_params = "--classify" + + # category #1: mismatches + try: + analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--mismatches', extra_params], cwd=s, env=env ).decode('utf-8') + except subprocess.CalledProcessError as e: + bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8')) + + if analysis: + outfile = "{}/{}/cfg/mismatch.txt".format( s, kmeta ) + if os.path.isfile(outfile): + os.remove(outfile) + with open(outfile, 'w+') as f: + f.write( analysis ) + + if config_check_visibility and os.stat(outfile).st_size > 0: + with open (outfile, "r") as myfile: results = myfile.read() bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results) - if bsp_check_visibility: - invalid_file = d.expand("${S}/%s/cfg/invalid.cfg" % kmeta) - if os.path.exists(invalid_file) and os.stat(invalid_file).st_size > 0: - with open (invalid_file, "r") as myfile: - results = myfile.read() - bb.warn( "[kernel config]: This BSP sets config options that are not offered anywhere within this kernel:\n\n%s" % results) - errors_file = d.expand("${S}/%s/cfg/fragment_errors.txt" % kmeta) - if os.path.exists(errors_file) and os.stat(errors_file).st_size > 0: - with open (errors_file, "r") as myfile: + # category #2: invalid fragment elements + extra_params = "" + if bsp_check_visibility > 1: + extra_params = "--strict" + try: + analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--invalid', extra_params], cwd=s, env=env ).decode('utf-8') + except subprocess.CalledProcessError as e: + bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8')) + + if analysis: + outfile = "{}/{}/cfg/invalid.txt".format(s,kmeta) + if os.path.isfile(outfile): + 
os.remove(outfile) + with open(outfile, 'w+') as f: + f.write( analysis ) + + if bsp_check_visibility and os.stat(outfile).st_size > 0: + with open (outfile, "r") as myfile: results = myfile.read() - bb.warn( "[kernel config]: This BSP contains fragments with errors:\n\n%s" % results) - - # if the audit level is greater than two, we report if a fragment has overriden - # a value from a base fragment. This is really only used for new kernel introduction - if bsp_check_visibility > 2: - redefinition_file = d.expand("${S}/%s/cfg/redefinition.txt" % kmeta) - if os.path.exists(redefinition_file) and os.stat(redefinition_file).st_size > 0: - with open (redefinition_file, "r") as myfile: + bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results) + + # category #3: redefined options (this is pretty verbose and is debug only) + try: + analysis = subprocess.check_output(['symbol_why.py', '--dotconfig', '{}'.format( d.getVar('B') + '/.config' ), '--sanity'], cwd=s, env=env ).decode('utf-8') + except subprocess.CalledProcessError as e: + bb.fatal( "config analysis failed: %s" % e.output.decode('utf-8')) + + if analysis: + outfile = "{}/{}/cfg/redefinition.txt".format(s,kmeta) + if os.path.isfile(outfile): + os.remove(outfile) + with open(outfile, 'w+') as f: + f.write( analysis ) + + # if the audit level is greater than two, we report if a fragment has overriden + # a value from a base fragment. This is really only used for new kernel introduction + if bsp_check_visibility > 2 and os.stat(outfile).st_size > 0: + with open (outfile, "r") as myfile: results = myfile.read() bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results) } diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass index cf43a5d60..e2ceb6a33 100644 --- a/poky/meta/classes/kernel.bbclass +++ b/poky/meta/classes/kernel.bbclass @@ -212,6 +212,8 @@ UBOOT_LOADADDRESS ?= "${UBOOT_ENTRYPOINT}" KERNEL_EXTRA_ARGS ?= "" EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"" +EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}"" + KERNEL_ALT_IMAGETYPE ??= "" copy_initramfs() { diff --git a/poky/meta/classes/meson.bbclass b/poky/meta/classes/meson.bbclass index ff52d20e5..83aa854b7 100644 --- a/poky/meta/classes/meson.bbclass +++ b/poky/meta/classes/meson.bbclass @@ -98,6 +98,7 @@ strip = ${@meson_array('STRIP', d)} readelf = ${@meson_array('READELF', d)} pkgconfig = 'pkg-config' llvm-config = 'llvm-config${LLVMVERSION}' +cups-config = 'cups-config' [properties] needs_exe_wrapper = true diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass index f8dc1bb46..7a36262eb 100644 --- a/poky/meta/classes/package.bbclass +++ b/poky/meta/classes/package.bbclass @@ -1936,7 +1936,7 @@ python package_do_shlibs() { shlibs_file = os.path.join(shlibswork_dir, pkg + ".list") if len(sonames): with open(shlibs_file, 'w') as fd: - for s in sonames: + for s in sorted(sonames): if s[0] in shlib_provider and s[1] in shlib_provider[s[0]]: (old_pkg, old_pkgver) = shlib_provider[s[0]][s[1]] if old_pkg != pkg: diff --git a/poky/meta/classes/packagefeed-stability.bbclass b/poky/meta/classes/packagefeed-stability.bbclass deleted file mode 100644 index 564860256..000000000 --- a/poky/meta/classes/packagefeed-stability.bbclass +++ /dev/null @@ -1,252 +0,0 @@ -# Class to avoid copying packages into the feed if they haven't materially changed -# -# 
Copyright (C) 2015 Intel Corporation -# Released under the MIT license (see COPYING.MIT for details) -# -# This class effectively intercepts packages as they are written out by -# do_package_write_*, causing them to be written into a different -# directory where we can compare them to whatever older packages might -# be in the "real" package feed directory, and avoid copying the new -# package to the feed if it has not materially changed. The idea is to -# avoid unnecessary churn in the packages when dependencies trigger task -# reexecution (and thus repackaging). Enabling the class is simple: -# -# INHERIT += "packagefeed-stability" -# -# Caveats: -# 1) Latest PR values in the build system may not match those in packages -# seen on the target (naturally) -# 2) If you rebuild from sstate without the existing package feed present, -# you will lose the "state" of the package feed i.e. the preserved old -# package versions. Not the end of the world, but would negate the -# entire purpose of this class. -# -# Note that running -c cleanall on a recipe will purposely delete the old -# package files so they will definitely be copied the next time. - -python() { - if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d): - return - # Package backend agnostic intercept - # This assumes that the package_write task is called package_write_ - # and that the directory in which packages should be written is - # pointed to by the variable DEPLOY_DIR_ - for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split(): - if pkgclass.startswith('package_'): - pkgtype = pkgclass.split('_', 1)[1] - pkgwritefunc = 'do_package_write_%s' % pkgtype - sstate_outputdirs = d.getVarFlag(pkgwritefunc, 'sstate-outputdirs', False) - deploydirvar = 'DEPLOY_DIR_%s' % pkgtype.upper() - deploydirvarref = '${' + deploydirvar + '}' - pkgcomparefunc = 'do_package_compare_%s' % pkgtype - - if bb.data.inherits_class('image', d): - d.appendVarFlag('do_rootfs', 'recrdeptask', ' ' + pkgcomparefunc) - - if bb.data.inherits_class('populate_sdk_base', d): - d.appendVarFlag('do_populate_sdk', 'recrdeptask', ' ' + pkgcomparefunc) - - if bb.data.inherits_class('populate_sdk_ext', d): - d.appendVarFlag('do_populate_sdk_ext', 'recrdeptask', ' ' + pkgcomparefunc) - - d.appendVarFlag('do_build', 'recrdeptask', ' ' + pkgcomparefunc) - - if d.getVarFlag(pkgwritefunc, 'noexec') or not d.getVarFlag(pkgwritefunc, 'task'): - # Packaging is disabled for this recipe, we shouldn't do anything - continue - - if deploydirvarref in sstate_outputdirs: - deplor_dir_pkgtype = d.expand(deploydirvarref + '-prediff') - # Set intermediate output directory - d.setVarFlag(pkgwritefunc, 'sstate-outputdirs', sstate_outputdirs.replace(deploydirvarref, deplor_dir_pkgtype)) - # Update SSTATE_DUPWHITELIST to avoid shared location conflicted error - d.appendVar('SSTATE_DUPWHITELIST', ' %s' % deplor_dir_pkgtype) - - d.setVar(pkgcomparefunc, d.getVar('do_package_compare', False)) - d.setVarFlags(pkgcomparefunc, d.getVarFlags('do_package_compare', False)) - d.appendVarFlag(pkgcomparefunc, 'depends', ' build-compare-native:do_populate_sysroot') - bb.build.addtask(pkgcomparefunc, 'do_build', 'do_packagedata ' + pkgwritefunc, d) -} - -# This isn't the real task function - it's a template that we use in the -# anonymous python code above -fakeroot python do_package_compare () { - currenttask = d.getVar('BB_CURRENTTASK') - pkgtype = currenttask.rsplit('_', 1)[1] - package_compare_impl(pkgtype, d) -} - -def package_compare_impl(pkgtype, d): - import errno - 
import fnmatch - import glob - import subprocess - import oe.sstatesig - - pn = d.getVar('PN') - deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper()) - prepath = deploydir + '-prediff/' - - # Find out PKGR values are - pkgdatadir = d.getVar('PKGDATA_DIR') - packages = [] - try: - with open(os.path.join(pkgdatadir, pn), 'r') as f: - for line in f: - if line.startswith('PACKAGES:'): - packages = line.split(':', 1)[1].split() - break - except IOError as e: - if e.errno == errno.ENOENT: - pass - - if not packages: - bb.debug(2, '%s: no packages, nothing to do' % pn) - return - - pkgrvalues = {} - rpkgnames = {} - rdepends = {} - pkgvvalues = {} - for pkg in packages: - with open(os.path.join(pkgdatadir, 'runtime', pkg), 'r') as f: - for line in f: - if line.startswith('PKGR:'): - pkgrvalues[pkg] = line.split(':', 1)[1].strip() - if line.startswith('PKGV:'): - pkgvvalues[pkg] = line.split(':', 1)[1].strip() - elif line.startswith('PKG_%s:' % pkg): - rpkgnames[pkg] = line.split(':', 1)[1].strip() - elif line.startswith('RDEPENDS_%s:' % pkg): - rdepends[pkg] = line.split(':', 1)[1].strip() - - # Prepare a list of the runtime package names for packages that were - # actually produced - rpkglist = [] - for pkg, rpkg in rpkgnames.items(): - if os.path.exists(os.path.join(pkgdatadir, 'runtime', pkg + '.packaged')): - rpkglist.append((rpkg, pkg)) - rpkglist.sort(key=lambda x: len(x[0]), reverse=True) - - pvu = d.getVar('PV', False) - if '$' + '{SRCPV}' in pvu: - pvprefix = pvu.split('$' + '{SRCPV}', 1)[0] - else: - pvprefix = None - - pkgwritetask = 'package_write_%s' % pkgtype - files = [] - docopy = False - manifest, _ = oe.sstatesig.sstate_get_manifest_filename(pkgwritetask, d) - mlprefix = d.getVar('MLPREFIX') - # Copy recipe's all packages if one of the packages are different to make - # they have the same PR. 
- with open(manifest, 'r') as f: - for line in f: - if line.startswith(prepath): - srcpath = line.rstrip() - if os.path.isfile(srcpath): - destpath = os.path.join(deploydir, os.path.relpath(srcpath, prepath)) - - # This is crude but should work assuming the output - # package file name starts with the package name - # and rpkglist is sorted by length (descending) - pkgbasename = os.path.basename(destpath) - pkgname = None - for rpkg, pkg in rpkglist: - if mlprefix and pkgtype == 'rpm' and rpkg.startswith(mlprefix): - rpkg = rpkg[len(mlprefix):] - if pkgbasename.startswith(rpkg): - pkgr = pkgrvalues[pkg] - destpathspec = destpath.replace(pkgr, '*') - if pvprefix: - pkgv = pkgvvalues[pkg] - if pkgv.startswith(pvprefix): - pkgvsuffix = pkgv[len(pvprefix):] - if '+' in pkgvsuffix: - newpkgv = pvprefix + '*+' + pkgvsuffix.split('+', 1)[1] - destpathspec = destpathspec.replace(pkgv, newpkgv) - pkgname = pkg - break - else: - bb.warn('Unable to map %s back to package' % pkgbasename) - destpathspec = destpath - - oldfile = None - if not docopy: - oldfiles = glob.glob(destpathspec) - if oldfiles: - oldfile = oldfiles[-1] - result = subprocess.call(['pkg-diff.sh', oldfile, srcpath]) - if result != 0: - docopy = True - bb.note("%s and %s are different, will copy packages" % (oldfile, srcpath)) - else: - docopy = True - bb.note("No old packages found for %s, will copy packages" % pkgname) - - files.append((pkgname, pkgbasename, srcpath, destpath)) - - # Remove all the old files and copy again if docopy - if docopy: - bb.note('Copying packages for recipe %s' % pn) - pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}')) - try: - with open(pcmanifest, 'r') as f: - for line in f: - fn = line.rstrip() - if fn: - try: - os.remove(fn) - bb.note('Removed old package %s' % fn) - except OSError as e: - if e.errno == errno.ENOENT: - pass - except IOError as e: - if e.errno == errno.ENOENT: - pass - - # Create new manifest - with open(pcmanifest, 'w') as f: - for pkgname, pkgbasename, srcpath, destpath in files: - destdir = os.path.dirname(destpath) - bb.utils.mkdirhier(destdir) - # Remove allarch rpm pkg if it is already existed (for - # multilib), they're identical in theory, but sstate.bbclass - # copies it again, so keep align with that. 
- if os.path.exists(destpath) and pkgtype == 'rpm' \ - and d.getVar('PACKAGE_ARCH') == 'all': - os.unlink(destpath) - if (os.stat(srcpath).st_dev == os.stat(destdir).st_dev): - # Use a hard link to save space - os.link(srcpath, destpath) - else: - shutil.copyfile(srcpath, destpath) - f.write('%s\n' % destpath) - else: - bb.note('Not copying packages for recipe %s' % pn) - -do_cleansstate[postfuncs] += "pfs_cleanpkgs" -python pfs_cleanpkgs () { - import errno - for pkgclass in (d.getVar('PACKAGE_CLASSES') or '').split(): - if pkgclass.startswith('package_'): - pkgtype = pkgclass.split('_', 1)[1] - deploydir = d.getVar('DEPLOY_DIR_%s' % pkgtype.upper()) - prepath = deploydir + '-prediff' - pcmanifest = os.path.join(prepath, d.expand('pkg-compare-manifest-${MULTIMACH_TARGET_SYS}-${PN}')) - try: - with open(pcmanifest, 'r') as f: - for line in f: - fn = line.rstrip() - if fn: - try: - os.remove(fn) - except OSError as e: - if e.errno == errno.ENOENT: - pass - os.remove(pcmanifest) - except IOError as e: - if e.errno == errno.ENOENT: - pass -} diff --git a/poky/meta/classes/populate_sdk_ext.bbclass b/poky/meta/classes/populate_sdk_ext.bbclass index fd0da16e7..44d99cfb9 100644 --- a/poky/meta/classes/populate_sdk_ext.bbclass +++ b/poky/meta/classes/populate_sdk_ext.bbclass @@ -653,7 +653,10 @@ sdk_ext_postinst() { # Make sure when the user sets up the environment, they also get # the buildtools-tarball tools in their path. + echo "# Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it" >> $env_setup_script + echo "SAVED=\"\$OECORE_NATIVE_SYSROOT\"" >> $env_setup_script echo ". $target_sdk_dir/buildtools/environment-setup*" >> $env_setup_script + echo "OECORE_NATIVE_SYSROOT=\"\$SAVED\"" >> $env_setup_script fi # Allow bitbake environment setup to be ran as part of this sdk. 
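For reference, a minimal sketch of the fragment that sdk_ext_postinst now appends to the extensible SDK's environment setup script, assuming a hypothetical install path of /opt/esdk (the real path is substituted from $target_sdk_dir when the postinst runs):

    # /opt/esdk is a placeholder for the actual SDK install directory
    # Save and reset OECORE_NATIVE_SYSROOT as buildtools may change it
    SAVED="$OECORE_NATIVE_SYSROOT"
    . /opt/esdk/buildtools/environment-setup*
    OECORE_NATIVE_SYSROOT="$SAVED"

As the added comment notes, sourcing the buildtools environment-setup script may change OECORE_NATIVE_SYSROOT, so the value set by the extensible SDK's own setup is captured first and restored immediately afterwards.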
diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass index c43b9a982..984730ebe 100644 --- a/poky/meta/classes/rootfs-postcommands.bbclass +++ b/poky/meta/classes/rootfs-postcommands.bbclass @@ -1,6 +1,6 @@ # Zap the root password if debug-tweaks feature is not enabled -ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password ; ",d)}' +ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'empty-root-password' ], "", "zap_empty_root_password; ",d)}' # Allow dropbear/openssh to accept logins from accounts with an empty password string if debug-tweaks or allow-empty-password is enabled ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'allow-empty-password' ], "ssh_allow_empty_password; ", "",d)}' @@ -12,7 +12,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'deb ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains_any("IMAGE_FEATURES", [ 'debug-tweaks', 'post-install-logging' ], "postinst_enable_logging; ", "",d)}' # Create /etc/timestamp during image construction to give a reasonably sane default time setting -ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp ; " +ROOTFS_POSTPROCESS_COMMAND += "rootfs_update_timestamp; " # Tweak the mount options for rootfs in /etc/fstab if read-only-rootfs is enabled ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", "read_only_rootfs_hook; ", "",d)}' @@ -26,7 +26,7 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("IMAGE_FEATURES", "read-only APPEND_append = '${@bb.utils.contains("IMAGE_FEATURES", "read-only-rootfs", " ro", "", d)}' # Generates test data file with data store variables expanded in json format -ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data ; " +ROOTFS_POSTPROCESS_COMMAND += "write_image_test_data; " # Write manifest IMAGE_MANIFEST = "${IMGDEPLOYDIR}/${IMAGE_NAME}${IMAGE_NAME_SUFFIX}.manifest" diff --git a/poky/meta/classes/rootfsdebugfiles.bbclass b/poky/meta/classes/rootfsdebugfiles.bbclass index e2ba4e364..85c7ec743 100644 --- a/poky/meta/classes/rootfsdebugfiles.bbclass +++ b/poky/meta/classes/rootfsdebugfiles.bbclass @@ -28,7 +28,7 @@ ROOTFS_DEBUG_FILES ?= "" ROOTFS_DEBUG_FILES[doc] = "Lists additional files or directories to be installed with 'cp -a' in the format 'source1 target1;source2 target2;...'" -ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files ;" +ROOTFS_POSTPROCESS_COMMAND += "rootfs_debug_files;" rootfs_debug_files () { #!/bin/sh -e echo "${ROOTFS_DEBUG_FILES}" | sed -e 's/;/\n/g' | while read source target mode; do diff --git a/poky/meta/classes/uninative.bbclass b/poky/meta/classes/uninative.bbclass index 70799bbf6..316c0f061 100644 --- a/poky/meta/classes/uninative.bbclass +++ b/poky/meta/classes/uninative.bbclass @@ -56,12 +56,17 @@ python uninative_event_fetchloader() { # Our games with path manipulation of DL_DIR mean standard PREMIRRORS don't work # and we can't easily put 'chksum' into the url path from a url parameter with # the current fetcher url handling - ownmirror = d.getVar('SOURCE_MIRROR_URL') - if ownmirror: - localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} ${SOURCE_MIRROR_URL}/uninative/%s/${UNINATIVE_TARBALL}" % chksum) + premirrors = bb.fetch2.mirror_from_string(localdata.getVar("PREMIRRORS")) + for line in premirrors: + try: + (find, replace) = line + except ValueError: + 
continue + if find.startswith("http"): + localdata.appendVar("PREMIRRORS", " ${UNINATIVE_URL}${UNINATIVE_TARBALL} %s/uninative/%s/${UNINATIVE_TARBALL}" % (replace, chksum)) srcuri = d.expand("${UNINATIVE_URL}${UNINATIVE_TARBALL};sha256sum=%s" % chksum) - bb.note("Fetching uninative binary shim from %s" % srcuri) + bb.note("Fetching uninative binary shim %s (will check PREMIRRORS first)" % srcuri) fetcher = bb.fetch2.Fetch([srcuri], localdata, cache=False) fetcher.download() diff --git a/poky/meta/conf/distro/include/distro_alias.inc b/poky/meta/conf/distro/include/distro_alias.inc index 2f9e1b113..56055f779 100644 --- a/poky/meta/conf/distro/include/distro_alias.inc +++ b/poky/meta/conf/distro/include/distro_alias.inc @@ -22,7 +22,6 @@ DISTRO_PN_ALIAS_pn-bluez5 = "Fedora=bluez Opensuse=bluez" DISTRO_PN_ALIAS_pn-bootchart2 = "Fedora=bootchart2 Opensuse=bootchart" DISTRO_PN_ALIAS_pn-btrfs-tools = "Debian=btrfs-tools Fedora=btrfs-progs" DISTRO_PN_ALIAS_pn-build-appliance-image = "OSPDT" -DISTRO_PN_ALIAS_pn-build-compare = "Opensuse=build-compare Fedora=build-compare" DISTRO_PN_ALIAS_pn-builder = "OE-Core" DISTRO_PN_ALIAS_pn-buildtools-tarball = "OE-Core" DISTRO_PN_ALIAS_pn-cdrtools = "OpenSUSE=cdrtools OSPDT" diff --git a/poky/meta/conf/distro/include/maintainers.inc b/poky/meta/conf/distro/include/maintainers.inc index e8d42db12..c3a1f2733 100644 --- a/poky/meta/conf/distro/include/maintainers.inc +++ b/poky/meta/conf/distro/include/maintainers.inc @@ -80,7 +80,6 @@ RECIPE_MAINTAINER_pn-bootchart2 = "Alexander Kanavin " RECIPE_MAINTAINER_pn-bsd-headers = "Khem Raj " RECIPE_MAINTAINER_pn-btrfs-tools = "Alexander Kanavin " RECIPE_MAINTAINER_pn-build-appliance-image = "Richard Purdie " -RECIPE_MAINTAINER_pn-build-compare = "Paul Eggleton " RECIPE_MAINTAINER_pn-build-sysroots = "Richard Purdie " RECIPE_MAINTAINER_pn-builder = "Richard Purdie " RECIPE_MAINTAINER_pn-buildtools-extended-tarball = "Richard Purdie " @@ -135,14 +134,14 @@ RECIPE_MAINTAINER_pn-cups = "Chen Qi " RECIPE_MAINTAINER_pn-curl = "Armin Kuster " RECIPE_MAINTAINER_pn-cve-update-db-native = "Ross Burton " RECIPE_MAINTAINER_pn-cwautomacros = "Ross Burton " -RECIPE_MAINTAINER_pn-db = "Mark Hatle " +RECIPE_MAINTAINER_pn-db = "Unassigned " RECIPE_MAINTAINER_pn-dbus = "Chen Qi " RECIPE_MAINTAINER_pn-dbus-glib = "Chen Qi " RECIPE_MAINTAINER_pn-dbus-test = "Chen Qi " RECIPE_MAINTAINER_pn-dbus-wait = "Chen Qi " RECIPE_MAINTAINER_pn-debianutils = "Yi Zhao " RECIPE_MAINTAINER_pn-dejagnu = "Nathan Rossi " -RECIPE_MAINTAINER_pn-depmodwrapper-cross = "Mark Hatle " +RECIPE_MAINTAINER_pn-depmodwrapper-cross = "Unassigned " RECIPE_MAINTAINER_pn-desktop-file-utils = "Alexander Kanavin " RECIPE_MAINTAINER_pn-dhcp = "Hongxu Jia " RECIPE_MAINTAINER_pn-diffoscope = "Joshua Watt " @@ -192,7 +191,7 @@ RECIPE_MAINTAINER_pn-gcc-cross-canadian-${TRANSLATED_TARGET_ARCH} = "Khem Raj kbd \ connman->xl2tpd \ lttng-tools->lttng-modules \ + adwaita-icon-theme->gdk-pixbuf \ + adwaita-icon-theme->gtk+3 \ " # Avoid adding bison-native to the sysroot without a specific diff --git a/poky/meta/conf/machine/include/arm/arch-armv6m.inc b/poky/meta/conf/machine/include/arm/arch-armv6m.inc new file mode 100755 index 000000000..68768106c --- /dev/null +++ b/poky/meta/conf/machine/include/arm/arch-armv6m.inc @@ -0,0 +1,19 @@ +# Tuning for ARMV6-m defined in ARM v6-M ArchitectureReference Manual +# at https://static.docs.arm.com/ddi0419/d/DDI0419D_armv6m_arm.pdf +DEFAULTTUNE ?= "armv6m" + +TUNEVALID[armv6m] = "Enable instructions for ARMv6-m" +TUNECONFLICTS[armv6m] = 
"armv4 armv5 armv6 armv7a" + +# Use armv6s-m instead of armv6-m to avoid gcc bug "SVC is not permitted on this architecture". +# SVC is a valid instruction. +TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'armv6m', ' -march=armv6s-m', '', d)}" +MACHINEOVERRIDES =. "${@bb.utils.contains('TUNE_FEATURES', 'armv6m', 'armv6m:', '' ,d)}" + +require conf/machine/include/arm/arch-armv5.inc + +# Little Endian +AVAILTUNES += "armv6m" +ARMPKGARCH_tune-armv6m = "armv6m" +TUNE_FEATURES_tune-armv6m = "armv6m" +PACKAGE_EXTRA_ARCHS_tune-armv6m = "armv6m" diff --git a/poky/meta/conf/machine/include/arm/arch-armv8-2a.inc b/poky/meta/conf/machine/include/arm/arch-armv8-2a.inc new file mode 100644 index 000000000..1c095256d --- /dev/null +++ b/poky/meta/conf/machine/include/arm/arch-armv8-2a.inc @@ -0,0 +1,19 @@ +DEFAULTTUNE ?= "armv8-2a" + +TUNEVALID[armv8-2a] = "Enable instructions for ARMv8-a" +TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'armv8-2a', ' -march=armv8.2-a', '', d)}" +# TUNE crypto will be handled by arch-armv8a.inc below +MACHINEOVERRIDES =. "${@bb.utils.contains('TUNE_FEATURES', 'armv8-2a', 'armv8-2a:', '' ,d)}" + +require conf/machine/include/arm/arch-armv8a.inc + +# Little Endian base configs +AVAILTUNES += "armv8-2a armv8-2a-crypto" +ARMPKGARCH_tune-armv8-2a ?= "armv8-2a" +ARMPKGARCH_tune-armv8-2a-crypto ?= "armv8-2a" +TUNE_FEATURES_tune-armv8-2a = "aarch64 armv8-2a" +TUNE_FEATURES_tune-armv8-2a-crypto = "${TUNE_FEATURES_tune-armv8-2a} crypto" +PACKAGE_EXTRA_ARCHS_tune-armv8-2a = "${PACKAGE_EXTRA_ARCHS_tune-armv8a} armv8-2a" +PACKAGE_EXTRA_ARCHS_tune-armv8-2a-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8-2a} armv8-2a-crypto" +BASE_LIB_tune-armv8-2a = "lib64" +BASE_LIB_tune-armv8-2a-crypto = "lib64" diff --git a/poky/meta/conf/machine/include/tune-cortex-m0plus.inc b/poky/meta/conf/machine/include/tune-cortex-m0plus.inc new file mode 100755 index 000000000..1c7512b06 --- /dev/null +++ b/poky/meta/conf/machine/include/tune-cortex-m0plus.inc @@ -0,0 +1,11 @@ +DEFAULTTUNE ?= "cortexm0-plus" +require conf/machine/include/arm/arch-armv6m.inc + +TUNEVALID[cortexm0-plus] = "Enable Cortex-M0 Plus specific processor optimizations" +TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'cortexm0-plus', ' -mcpu=cortex-m0plus', '', d)}" +AVAILTUNES += "cortexm0-plus" + +ARMPKGARCH_tune-cortexm0-plus = "cortexm0-plus" +TUNE_FEATURES_tune-cortexm0-plus = "${TUNE_FEATURES_tune-armv6m} cortexm0-plus" + +PACKAGE_EXTRA_ARCHS_tune-cortexm0-plus = "${PACKAGE_EXTRA_ARCHS_tune-armv6m} cortexm0-plus" diff --git a/poky/meta/conf/machine/include/tune-cortexa55.inc b/poky/meta/conf/machine/include/tune-cortexa55.inc index 89032b59e..b383eb733 100644 --- a/poky/meta/conf/machine/include/tune-cortexa55.inc +++ b/poky/meta/conf/machine/include/tune-cortexa55.inc @@ -3,7 +3,7 @@ DEFAULTTUNE ?= "cortexa55" TUNEVALID[cortexa55] = "Enable Cortex-A55 specific processor optimizations" TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'cortexa55', ' -mcpu=cortex-a55', '', d)}" -require conf/machine/include/arm/arch-armv8a.inc +require conf/machine/include/arm/arch-armv8-2a.inc # Little Endian base configs AVAILTUNES += "cortexa55" diff --git a/poky/meta/conf/machine/qemuarm.conf b/poky/meta/conf/machine/qemuarm.conf index 44e73a307..3364dcf04 100644 --- a/poky/meta/conf/machine/qemuarm.conf +++ b/poky/meta/conf/machine/qemuarm.conf @@ -7,6 +7,8 @@ require conf/machine/include/qemu.inc KERNEL_IMAGETYPE = "zImage" +UBOOT_MACHINE ?= "qemu_arm_defconfig" + SERIAL_CONSOLES ?= "115200;ttyAMA0 115200;hvc0" 
SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" diff --git a/poky/meta/conf/machine/qemuarm64.conf b/poky/meta/conf/machine/qemuarm64.conf index d0d6f38e4..fdd464d70 100644 --- a/poky/meta/conf/machine/qemuarm64.conf +++ b/poky/meta/conf/machine/qemuarm64.conf @@ -7,6 +7,8 @@ require conf/machine/include/qemu.inc KERNEL_IMAGETYPE = "Image" +UBOOT_MACHINE ?= "qemu_arm64_defconfig" + SERIAL_CONSOLES ?= "115200;ttyAMA0 115200;hvc0" SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" diff --git a/poky/meta/conf/machine/qemumips.conf b/poky/meta/conf/machine/qemumips.conf index 31ad75448..1373e4cba 100644 --- a/poky/meta/conf/machine/qemumips.conf +++ b/poky/meta/conf/machine/qemumips.conf @@ -9,6 +9,10 @@ require conf/machine/include/qemuboot-mips.inc KERNEL_IMAGETYPE = "vmlinux" KERNEL_ALT_IMAGETYPE = "vmlinux.bin" +UBOOT_MACHINE ?= "qemu_mips_defconfig" + SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" QB_SYSTEM_NAME = "qemu-system-mips" + +QB_CPU = "-cpu 34Kf" diff --git a/poky/meta/conf/machine/qemumips64.conf b/poky/meta/conf/machine/qemumips64.conf index 6d5174665..1e7748649 100644 --- a/poky/meta/conf/machine/qemumips64.conf +++ b/poky/meta/conf/machine/qemumips64.conf @@ -11,6 +11,8 @@ QB_CPU = "-cpu MIPS64R2-generic" KERNEL_IMAGETYPE = "vmlinux" KERNEL_ALT_IMAGETYPE = "vmlinux.bin" +UBOOT_MACHINE ?= "qemu_mips64_defconfig" + SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyS1" QB_SYSTEM_NAME = "qemu-system-mips64" diff --git a/poky/meta/conf/multilib.conf b/poky/meta/conf/multilib.conf index 58f2ac5c8..d231107f8 100644 --- a/poky/meta/conf/multilib.conf +++ b/poky/meta/conf/multilib.conf @@ -30,4 +30,4 @@ PKG_CONFIG_PATH[vardepvalueexclude] = ":${WORKDIR}/recipe-sysroot/${datadir}/pkg # These recipes don't need multilib variants, the ${BPN} PROVDES/RPROVDES # ${MLPREFIX}${BPN} -NON_MULTILIB_RECIPES = "grub grub-efi make-mod-scripts ovmf" +NON_MULTILIB_RECIPES = "grub grub-efi make-mod-scripts ovmf u-boot" diff --git a/poky/meta/lib/oe/manifest.py b/poky/meta/lib/oe/manifest.py index f7c88f9a0..47bd62241 100644 --- a/poky/meta/lib/oe/manifest.py +++ b/poky/meta/lib/oe/manifest.py @@ -7,7 +7,6 @@ import os import re import bb - class Manifest(object, metaclass=ABCMeta): """ This is an abstract class. Do not instantiate this directly. @@ -189,149 +188,12 @@ class Manifest(object, metaclass=ABCMeta): return installed_pkgs -class RpmManifest(Manifest): - """ - Returns a dictionary object with mip and mlp packages. 
- """ - def _split_multilib(self, pkg_list): - pkgs = dict() - - for pkg in pkg_list.split(): - pkg_type = self.PKG_TYPE_MUST_INSTALL - - ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() - - for ml_variant in ml_variants: - if pkg.startswith(ml_variant + '-'): - pkg_type = self.PKG_TYPE_MULTILIB - - if not pkg_type in pkgs: - pkgs[pkg_type] = pkg - else: - pkgs[pkg_type] += " " + pkg - - return pkgs - - def create_initial(self): - pkgs = dict() - - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - if var in self.vars_to_split: - split_pkgs = self._split_multilib(self.d.getVar(var)) - if split_pkgs is not None: - pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) - else: - pkg_list = self.d.getVar(var) - if pkg_list is not None: - pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) - - for pkg_type in pkgs: - for pkg in pkgs[pkg_type].split(): - manifest.write("%s,%s\n" % (pkg_type, pkg)) - - def create_final(self): - pass - - def create_full(self, pm): - pass - - -class OpkgManifest(Manifest): - """ - Returns a dictionary object with mip and mlp packages. - """ - def _split_multilib(self, pkg_list): - pkgs = dict() - - for pkg in pkg_list.split(): - pkg_type = self.PKG_TYPE_MUST_INSTALL - - ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() - - for ml_variant in ml_variants: - if pkg.startswith(ml_variant + '-'): - pkg_type = self.PKG_TYPE_MULTILIB - - if not pkg_type in pkgs: - pkgs[pkg_type] = pkg - else: - pkgs[pkg_type] += " " + pkg - - return pkgs - - def create_initial(self): - pkgs = dict() - - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - if var in self.vars_to_split: - split_pkgs = self._split_multilib(self.d.getVar(var)) - if split_pkgs is not None: - pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) - else: - pkg_list = self.d.getVar(var) - if pkg_list is not None: - pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) - - for pkg_type in sorted(pkgs): - for pkg in sorted(pkgs[pkg_type].split()): - manifest.write("%s,%s\n" % (pkg_type, pkg)) - - def create_final(self): - pass - - def create_full(self, pm): - if not os.path.exists(self.initial_manifest): - self.create_initial() - - initial_manifest = self.parse_initial_manifest() - pkgs_to_install = list() - for pkg_type in initial_manifest: - pkgs_to_install += initial_manifest[pkg_type] - if len(pkgs_to_install) == 0: - return - - output = pm.dummy_install(pkgs_to_install) - - with open(self.full_manifest, 'w+') as manifest: - pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') - for line in set(output.split('\n')): - m = pkg_re.match(line) - if m: - manifest.write(m.group(1) + '\n') - - return - - -class DpkgManifest(Manifest): - def create_initial(self): - with open(self.initial_manifest, "w+") as manifest: - manifest.write(self.initial_manifest_file_header) - - for var in self.var_maps[self.manifest_type]: - pkg_list = self.d.getVar(var) - - if pkg_list is None: - continue - - for pkg in pkg_list.split(): - manifest.write("%s,%s\n" % - (self.var_maps[self.manifest_type][var], pkg)) - - def create_final(self): - pass - - def create_full(self, pm): - pass - def create_manifest(d, final_manifest=False, manifest_dir=None, manifest_type=Manifest.MANIFEST_TYPE_IMAGE): + from oe.package_manager.rpm.manifest import RpmManifest + from oe.package_manager.ipk.manifest 
import OpkgManifest + from oe.package_manager.deb.manifest import DpkgManifest manifest_map = {'rpm': RpmManifest, 'ipk': OpkgManifest, 'deb': DpkgManifest} diff --git a/poky/meta/lib/oe/package_manager.py b/poky/meta/lib/oe/package_manager.py deleted file mode 100644 index 35e5cff07..000000000 --- a/poky/meta/lib/oe/package_manager.py +++ /dev/null @@ -1,1863 +0,0 @@ -# -# SPDX-License-Identifier: GPL-2.0-only -# - -from abc import ABCMeta, abstractmethod -import os -import glob -import subprocess -import shutil -import re -import collections -import bb -import tempfile -import oe.utils -import oe.path -import string -from oe.gpg_sign import get_signer -import hashlib -import fnmatch - -# this can be used by all PM backends to create the index files in parallel -def create_index(arg): - index_cmd = arg - - bb.note("Executing '%s' ..." % index_cmd) - result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") - if result: - bb.note(result) - -def opkg_query(cmd_output): - """ - This method parse the output from the package managerand return - a dictionary with the information of the packages. This is used - when the packages are in deb or ipk format. - """ - verregex = re.compile(r' \([=<>]* [^ )]*\)') - output = dict() - pkg = "" - arch = "" - ver = "" - filename = "" - dep = [] - prov = [] - pkgarch = "" - for line in cmd_output.splitlines()+['']: - line = line.rstrip() - if ':' in line: - if line.startswith("Package: "): - pkg = line.split(": ")[1] - elif line.startswith("Architecture: "): - arch = line.split(": ")[1] - elif line.startswith("Version: "): - ver = line.split(": ")[1] - elif line.startswith("File: ") or line.startswith("Filename:"): - filename = line.split(": ")[1] - if "/" in filename: - filename = os.path.basename(filename) - elif line.startswith("Depends: "): - depends = verregex.sub('', line.split(": ")[1]) - for depend in depends.split(", "): - dep.append(depend) - elif line.startswith("Recommends: "): - recommends = verregex.sub('', line.split(": ")[1]) - for recommend in recommends.split(", "): - dep.append("%s [REC]" % recommend) - elif line.startswith("PackageArch: "): - pkgarch = line.split(": ")[1] - elif line.startswith("Provides: "): - provides = verregex.sub('', line.split(": ")[1]) - for provide in provides.split(", "): - prov.append(provide) - - # When there is a blank line save the package information - elif not line: - # IPK doesn't include the filename - if not filename: - filename = "%s_%s_%s.ipk" % (pkg, ver, arch) - if pkg: - output[pkg] = {"arch":arch, "ver":ver, - "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov} - pkg = "" - arch = "" - ver = "" - filename = "" - dep = [] - prov = [] - pkgarch = "" - - return output - -def failed_postinsts_abort(pkgs, log_path): - bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot, -then please place them into pkg_postinst_ontarget_${PN} (). -Deferring to first boot via 'exit 1' is no longer supported. -Details of the failure are in %s.""" %(pkgs, log_path)) - -def generate_locale_archive(d, rootfs, target_arch, localedir): - # Pretty sure we don't need this for locale archive generation but - # keeping it to be safe... 
- locale_arch_options = { \ - "arc": ["--uint32-align=4", "--little-endian"], - "arceb": ["--uint32-align=4", "--big-endian"], - "arm": ["--uint32-align=4", "--little-endian"], - "armeb": ["--uint32-align=4", "--big-endian"], - "aarch64": ["--uint32-align=4", "--little-endian"], - "aarch64_be": ["--uint32-align=4", "--big-endian"], - "sh4": ["--uint32-align=4", "--big-endian"], - "powerpc": ["--uint32-align=4", "--big-endian"], - "powerpc64": ["--uint32-align=4", "--big-endian"], - "powerpc64le": ["--uint32-align=4", "--little-endian"], - "mips": ["--uint32-align=4", "--big-endian"], - "mipsisa32r6": ["--uint32-align=4", "--big-endian"], - "mips64": ["--uint32-align=4", "--big-endian"], - "mipsisa64r6": ["--uint32-align=4", "--big-endian"], - "mipsel": ["--uint32-align=4", "--little-endian"], - "mipsisa32r6el": ["--uint32-align=4", "--little-endian"], - "mips64el": ["--uint32-align=4", "--little-endian"], - "mipsisa64r6el": ["--uint32-align=4", "--little-endian"], - "riscv64": ["--uint32-align=4", "--little-endian"], - "riscv32": ["--uint32-align=4", "--little-endian"], - "i586": ["--uint32-align=4", "--little-endian"], - "i686": ["--uint32-align=4", "--little-endian"], - "x86_64": ["--uint32-align=4", "--little-endian"] - } - if target_arch in locale_arch_options: - arch_options = locale_arch_options[target_arch] - else: - bb.error("locale_arch_options not found for target_arch=" + target_arch) - bb.fatal("unknown arch:" + target_arch + " for locale_arch_options") - - # Need to set this so cross-localedef knows where the archive is - env = dict(os.environ) - env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive") - - for name in sorted(os.listdir(localedir)): - path = os.path.join(localedir, name) - if os.path.isdir(path): - cmd = ["cross-localedef", "--verbose"] - cmd += arch_options - cmd += ["--add-to-archive", path] - subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT) - -class Indexer(object, metaclass=ABCMeta): - def __init__(self, d, deploy_dir): - self.d = d - self.deploy_dir = deploy_dir - - @abstractmethod - def write_index(self): - pass - - -class RpmIndexer(Indexer): - def write_index(self): - self.do_write_index(self.deploy_dir) - - def do_write_index(self, deploy_dir): - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) - else: - signer = None - - createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c") - result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir)) - if result: - bb.fatal(result) - - # Sign repomd - if signer: - sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') - is_ascii_sig = (sig_type.upper() != "BIN") - signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'), - self.d.getVar('PACKAGE_FEED_GPG_NAME'), - self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), - armor=is_ascii_sig) - -class RpmSubdirIndexer(RpmIndexer): - def write_index(self): - bb.note("Generating package index for %s" %(self.deploy_dir)) - self.do_write_index(self.deploy_dir) - for entry in os.walk(self.deploy_dir): - if os.path.samefile(self.deploy_dir, entry[0]): - for dir in entry[1]: - if dir != 'repodata': - dir_path = oe.path.join(self.deploy_dir, dir) - bb.note("Generating package index for %s" %(dir_path)) - self.do_write_index(dir_path) - -class OpkgIndexer(Indexer): - def write_index(self): - arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS", - "SDK_PACKAGE_ARCHS", - ] - - opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index") - if 
self.d.getVar('PACKAGE_FEED_SIGN') == '1': - signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) - else: - signer = None - - if not os.path.exists(os.path.join(self.deploy_dir, "Packages")): - open(os.path.join(self.deploy_dir, "Packages"), "w").close() - - index_cmds = set() - index_sign_files = set() - for arch_var in arch_vars: - archs = self.d.getVar(arch_var) - if archs is None: - continue - - for arch in archs.split(): - pkgs_dir = os.path.join(self.deploy_dir, arch) - pkgs_file = os.path.join(pkgs_dir, "Packages") - - if not os.path.isdir(pkgs_dir): - continue - - if not os.path.exists(pkgs_file): - open(pkgs_file, "w").close() - - index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' % - (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir)) - - index_sign_files.add(pkgs_file) - - if len(index_cmds) == 0: - bb.note("There are no packages in %s!" % self.deploy_dir) - return - - oe.utils.multiprocess_launch(create_index, index_cmds, self.d) - - if signer: - feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') - is_ascii_sig = (feed_sig_type.upper() != "BIN") - for f in index_sign_files: - signer.detach_sign(f, - self.d.getVar('PACKAGE_FEED_GPG_NAME'), - self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), - armor=is_ascii_sig) - - -class DpkgIndexer(Indexer): - def _create_configs(self): - bb.utils.mkdirhier(self.apt_conf_dir) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial")) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d")) - bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d")) - - with open(os.path.join(self.apt_conf_dir, "preferences"), - "w") as prefs_file: - pass - with open(os.path.join(self.apt_conf_dir, "sources.list"), - "w+") as sources_file: - pass - - with open(self.apt_conf_file, "w") as apt_conf: - with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"), - "apt", "apt.conf.sample")) as apt_conf_sample: - for line in apt_conf_sample.read().split("\n"): - line = re.sub(r"#ROOTFS#", "/dev/null", line) - line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) - apt_conf.write(line + "\n") - - def write_index(self): - self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"), - "apt-ftparchive") - self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") - self._create_configs() - - os.environ['APT_CONFIG'] = self.apt_conf_file - - pkg_archs = self.d.getVar('PACKAGE_ARCHS') - if pkg_archs is not None: - arch_list = pkg_archs.split() - sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS') - if sdk_pkg_archs is not None: - for a in sdk_pkg_archs.split(): - if a not in pkg_archs: - arch_list.append(a) - - all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() - arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list) - - apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive") - gzip = bb.utils.which(os.getenv('PATH'), "gzip") - - index_cmds = [] - deb_dirs_found = False - for arch in arch_list: - arch_dir = os.path.join(self.deploy_dir, arch) - if not os.path.isdir(arch_dir): - continue - - cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive) - - cmd += "%s -fcn Packages > Packages.gz;" % gzip - - with open(os.path.join(arch_dir, "Release"), "w+") as release: - release.write("Label: %s\n" % arch) - - cmd += "PSEUDO_UNLOAD=1 %s release . 
>> Release" % apt_ftparchive - - index_cmds.append(cmd) - - deb_dirs_found = True - - if not deb_dirs_found: - bb.note("There are no packages in %s" % self.deploy_dir) - return - - oe.utils.multiprocess_launch(create_index, index_cmds, self.d) - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - raise NotImplementedError('Package feed signing not implementd for dpkg') - - - -class PkgsList(object, metaclass=ABCMeta): - def __init__(self, d, rootfs_dir): - self.d = d - self.rootfs_dir = rootfs_dir - - @abstractmethod - def list_pkgs(self): - pass - -class RpmPkgsList(PkgsList): - def list_pkgs(self): - return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed() - -class OpkgPkgsList(PkgsList): - def __init__(self, d, rootfs_dir, config_file): - super(OpkgPkgsList, self).__init__(d, rootfs_dir) - - self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") - self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) - self.opkg_args += self.d.getVar("OPKG_ARGS") - - def list_pkgs(self, format=None): - cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args) - - # opkg returns success even when it printed some - # "Collected errors:" report to stderr. Mixing stderr into - # stdout then leads to random failures later on when - # parsing the output. To avoid this we need to collect both - # output streams separately and check for empty stderr. - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - cmd_output, cmd_stderr = p.communicate() - cmd_output = cmd_output.decode("utf-8") - cmd_stderr = cmd_stderr.decode("utf-8") - if p.returncode or cmd_stderr: - bb.fatal("Cannot get the installed packages list. Command '%s' " - "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr)) - - return opkg_query(cmd_output) - - -class DpkgPkgsList(PkgsList): - - def list_pkgs(self): - cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"), - "--admindir=%s/var/lib/dpkg" % self.rootfs_dir, - "-W"] - - cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n") - - try: - cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Cannot get the installed packages list. Command '%s' " - "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - return opkg_query(cmd_output) - - -class PackageManager(object, metaclass=ABCMeta): - """ - This is an abstract class. Do not instantiate this directly. - """ - - def __init__(self, d, target_rootfs): - self.d = d - self.target_rootfs = target_rootfs - self.deploy_dir = None - self.deploy_lock = None - self._initialize_intercepts() - - def _initialize_intercepts(self): - bb.note("Initializing intercept dir for %s" % self.target_rootfs) - # As there might be more than one instance of PackageManager operating at the same time - # we need to isolate the intercept_scripts directories from each other, - # hence the ugly hash digest in dir name. 
- self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" % - (hashlib.sha256(self.target_rootfs.encode()).hexdigest())) - - postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split() - if not postinst_intercepts: - postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH") - if not postinst_intercepts_path: - postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts") - postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path) - - bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts)) - bb.utils.remove(self.intercepts_dir, True) - bb.utils.mkdirhier(self.intercepts_dir) - for intercept in postinst_intercepts: - bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept))) - - @abstractmethod - def _handle_intercept_failure(self, failed_script): - pass - - def _postpone_to_first_boot(self, postinst_intercept_hook): - with open(postinst_intercept_hook) as intercept: - registered_pkgs = None - for line in intercept.read().split("\n"): - m = re.match(r"^##PKGS:(.*)", line) - if m is not None: - registered_pkgs = m.group(1).strip() - break - - if registered_pkgs is not None: - bb.note("If an image is being built, the postinstalls for the following packages " - "will be postponed for first boot: %s" % - registered_pkgs) - - # call the backend dependent handler - self._handle_intercept_failure(registered_pkgs) - - - def run_intercepts(self, populate_sdk=None): - intercepts_dir = self.intercepts_dir - - bb.note("Running intercept scripts:") - os.environ['D'] = self.target_rootfs - os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE') - for script in os.listdir(intercepts_dir): - script_full = os.path.join(intercepts_dir, script) - - if script == "postinst_intercept" or not os.access(script_full, os.X_OK): - continue - - # we do not want to run any multilib variant of this - if script.startswith("delay_to_first_boot"): - self._postpone_to_first_boot(script_full) - continue - - if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32': - bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - continue - - bb.note("> Executing %s intercept ..." % script) - - try: - output = subprocess.check_output(script_full, stderr=subprocess.STDOUT) - if output: bb.note(output.decode("utf-8")) - except subprocess.CalledProcessError as e: - bb.note("Exit code %d. 
Output:\n%s" % (e.returncode, e.output.decode("utf-8"))) - if populate_sdk == 'host': - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - elif populate_sdk == 'target': - if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): - bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - else: - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - else: - if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): - bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" - % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - self._postpone_to_first_boot(script_full) - else: - bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) - - @abstractmethod - def update(self): - """ - Update the package manager package database. - """ - pass - - @abstractmethod - def install(self, pkgs, attempt_only=False): - """ - Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is - True, installation failures are ignored. - """ - pass - - @abstractmethod - def remove(self, pkgs, with_dependencies=True): - """ - Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies' - is False, then any dependencies are left in place. - """ - pass - - @abstractmethod - def write_index(self): - """ - This function creates the index files - """ - pass - - @abstractmethod - def remove_packaging_data(self): - pass - - @abstractmethod - def list_installed(self): - pass - - @abstractmethod - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - Deleting the tmpdir is responsability of the caller. - """ - pass - - @abstractmethod - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - """ - Add remote package feeds into repository manager configuration. The parameters - for the feeds are set by feed_uris, feed_base_paths and feed_archs. - See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS - for their description. - """ - pass - - def install_glob(self, globs, sdk=False): - """ - Install all packages that match a glob. - """ - # TODO don't have sdk here but have a property on the superclass - # (and respect in install_complementary) - if sdk: - pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}") - else: - pkgdatadir = self.d.getVar("PKGDATA_DIR") - - try: - bb.note("Installing globbed packages...") - cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs] - pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") - self.install(pkgs.split(), attempt_only=True) - except subprocess.CalledProcessError as e: - # Return code 1 means no packages matched - if e.returncode != 1: - bb.fatal("Could not compute globbed packages list. Command " - "'%s' returned %d:\n%s" % - (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - def install_complementary(self, globs=None): - """ - Install complementary packages based upon the list of currently installed - packages e.g. locales, *-dev, *-dbg, etc. 
This will only attempt to install - these packages, if they don't exist then no error will occur. Note: every - backend needs to call this function explicitly after the normal package - installation - """ - if globs is None: - globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY') - split_linguas = set() - - for translation in self.d.getVar('IMAGE_LINGUAS').split(): - split_linguas.add(translation) - split_linguas.add(translation.split('-')[0]) - - split_linguas = sorted(split_linguas) - - for lang in split_linguas: - globs += " *-locale-%s" % lang - for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split(): - globs += (" " + complementary_linguas) % lang - - if globs is None: - return - - # we need to write the list of installed packages to a file because the - # oe-pkgdata-util reads it from a file - with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs: - pkgs = self.list_installed() - - provided_pkgs = set() - for pkg in pkgs.values(): - provided_pkgs |= set(pkg.get('provs', [])) - - output = oe.utils.format_pkg_list(pkgs, "arch") - installed_pkgs.write(output) - installed_pkgs.flush() - - cmd = ["oe-pkgdata-util", - "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name, - globs] - exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY') - if exclude: - cmd.extend(['--exclude=' + '|'.join(exclude.split())]) - try: - bb.note('Running %s' % cmd) - complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") - complementary_pkgs = set(complementary_pkgs.split()) - skip_pkgs = sorted(complementary_pkgs & provided_pkgs) - install_pkgs = sorted(complementary_pkgs - provided_pkgs) - bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % ( - ' '.join(install_pkgs), - ' '.join(skip_pkgs))) - self.install(install_pkgs, attempt_only=True) - except subprocess.CalledProcessError as e: - bb.fatal("Could not compute complementary packages list. 
Command " - "'%s' returned %d:\n%s" % - (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - target_arch = self.d.getVar('TARGET_ARCH') - localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale") - if os.path.exists(localedir) and os.listdir(localedir): - generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir) - # And now delete the binary locales - self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False) - - def deploy_dir_lock(self): - if self.deploy_dir is None: - raise RuntimeError("deploy_dir is not set!") - - lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") - - self.deploy_lock = bb.utils.lockfile(lock_file_name) - - def deploy_dir_unlock(self): - if self.deploy_lock is None: - return - - bb.utils.unlockfile(self.deploy_lock) - - self.deploy_lock = None - - def construct_uris(self, uris, base_paths): - """ - Construct URIs based on the following pattern: uri/base_path where 'uri' - and 'base_path' correspond to each element of the corresponding array - argument leading to len(uris) x len(base_paths) elements on the returned - array - """ - def _append(arr1, arr2, sep='/'): - res = [] - narr1 = [a.rstrip(sep) for a in arr1] - narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2] - for a1 in narr1: - if arr2: - for a2 in narr2: - res.append("%s%s%s" % (a1, sep, a2)) - else: - res.append(a1) - return res - return _append(uris, base_paths) - -def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies): - """ - Go through our do_package_write_X dependencies and hardlink the packages we depend - upon into the repo directory. This prevents us seeing other packages that may - have been built that we don't depend upon and also packages for architectures we don't - support. 
- """ - import errno - - taskdepdata = d.getVar("BB_TASKDEPDATA", False) - mytaskname = d.getVar("BB_RUNTASK") - pn = d.getVar("PN") - seendirs = set() - multilibs = {} - - bb.utils.remove(subrepo_dir, recurse=True) - bb.utils.mkdirhier(subrepo_dir) - - # Detect bitbake -b usage - nodeps = d.getVar("BB_LIMITEDDEPS") or False - if nodeps or not filterbydependencies: - oe.path.symlink(deploydir, subrepo_dir, True) - return - - start = None - for dep in taskdepdata: - data = taskdepdata[dep] - if data[1] == mytaskname and data[0] == pn: - start = dep - break - if start is None: - bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?") - pkgdeps = set() - start = [start] - seen = set(start) - # Support direct dependencies (do_rootfs -> do_package_write_X) - # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X) - while start: - next = [] - for dep2 in start: - for dep in taskdepdata[dep2][3]: - if taskdepdata[dep][0] != pn: - if "do_" + taskname in dep: - pkgdeps.add(dep) - elif dep not in seen: - next.append(dep) - seen.add(dep) - start = next - - for dep in pkgdeps: - c = taskdepdata[dep][0] - manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs) - if not manifest: - bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2])) - if not os.path.exists(manifest): - continue - with open(manifest, "r") as f: - for l in f: - l = l.strip() - deploydir = os.path.normpath(deploydir) - if bb.data.inherits_class('packagefeed-stability', d): - dest = l.replace(deploydir + "-prediff", "") - else: - dest = l.replace(deploydir, "") - dest = subrepo_dir + dest - if l.endswith("/"): - if dest not in seendirs: - bb.utils.mkdirhier(dest) - seendirs.add(dest) - continue - # Try to hardlink the file, copy if that fails - destdir = os.path.dirname(dest) - if destdir not in seendirs: - bb.utils.mkdirhier(destdir) - seendirs.add(destdir) - try: - os.link(l, dest) - except OSError as err: - if err.errno == errno.EXDEV: - bb.utils.copyfile(l, dest) - else: - raise - -class RpmPM(PackageManager): - def __init__(self, - d, - target_rootfs, - target_vendor, - task_name='target', - arch_var=None, - os_var=None, - rpm_repo_workdir="oe-rootfs-repo", - filterbydependencies=True, - needfeed=True): - super(RpmPM, self).__init__(d, target_rootfs) - self.target_vendor = target_vendor - self.task_name = task_name - if arch_var == None: - self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_") - else: - self.archs = self.d.getVar(arch_var).replace("-","_") - if task_name == "host": - self.primary_arch = self.d.getVar('SDK_ARCH') - else: - self.primary_arch = self.d.getVar('MACHINE_ARCH') - - if needfeed: - self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir) - create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies) - - self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name) - if not os.path.exists(self.d.expand('${T}/saved_packaging_data')): - bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data')) - self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf'] - self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % - self.task_name) - if not os.path.exists(self.d.expand('${T}/saved')): - bb.utils.mkdirhier(self.d.expand('${T}/saved')) - - def _configure_dnf(self): - # libsolv handles 'noarch' internally, we don't 
need to specify it explicitly - archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]] - # This prevents accidental matching against libsolv's built-in policies - if len(archs) <= 1: - archs = archs + ["bogusarch"] - # This architecture needs to be upfront so that packages using it are properly prioritized - archs = ["sdk_provides_dummy_target"] + archs - confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/") - bb.utils.mkdirhier(confdir) - open(confdir + "arch", 'w').write(":".join(archs)) - distro_codename = self.d.getVar('DISTRO_CODENAME') - open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '') - - open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("") - - - def _configure_rpm(self): - # We need to configure rpm to use our primary package architecture as the installation architecture, - # and to make it compatible with other package architectures that we use. - # Otherwise it will refuse to proceed with packages installation. - platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/") - rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/") - bb.utils.mkdirhier(platformconfdir) - open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch) - with open(rpmrcconfdir + "rpmrc", 'w') as f: - f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch)) - f.write("buildarch_compat: %s: noarch\n" % self.primary_arch) - - open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n") - if self.d.getVar('RPM_PREFER_ELF_ARCH'): - open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH'))) - - if self.d.getVar('RPM_SIGN_PACKAGES') == '1': - signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND')) - pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key') - signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME')) - rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys") - cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path] - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Importing GPG key failed. 
Command '%s' " - "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - - def create_configs(self): - self._configure_dnf() - self._configure_rpm() - - def write_index(self): - lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock" - lf = bb.utils.lockfile(lockfilename, False) - RpmIndexer(self.d, self.rpm_repo_dir).write_index() - bb.utils.unlockfile(lf) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - from urllib.parse import urlparse - - if feed_uris == "": - return - - gpg_opts = '' - if self.d.getVar('PACKAGE_FEED_SIGN') == '1': - gpg_opts += 'repo_gpgcheck=1\n' - gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME')) - - if self.d.getVar('RPM_SIGN_PACKAGES') != '1': - gpg_opts += 'gpgcheck=0\n' - - bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d")) - remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - for uri in remote_uris: - repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/")) - if feed_archs is not None: - for arch in feed_archs.split(): - repo_uri = uri + "/" + arch - repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/")) - repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/")) - open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write( - "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts)) - else: - repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/")) - repo_uri = uri - open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write( - "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts)) - - def _prepare_pkg_transaction(self): - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - - def install(self, pkgs, attempt_only = False): - if len(pkgs) == 0: - return - self._prepare_pkg_transaction() - - bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS') - package_exclude = self.d.getVar('PACKAGE_EXCLUDE') - exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else []) - - output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) + - (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) + - (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) + - (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) + - ["install"] + - pkgs) - - failed_scriptlets_pkgnames = collections.OrderedDict() - for line in output.splitlines(): - if line.startswith("Error in POSTIN scriptlet in rpm package"): - failed_scriptlets_pkgnames[line.split()[-1]] = True - - if len(failed_scriptlets_pkgnames) > 0: - failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - - def remove(self, pkgs, with_dependencies = True): - if not pkgs: - return - - self._prepare_pkg_transaction() - - if with_dependencies: - self._invoke_dnf(["remove"] + pkgs) - else: - cmd = bb.utils.which(os.getenv('PATH'), "rpm") - args = ["-e", 
"-v", "--nodeps", "--root=%s" %self.target_rootfs] - - try: - bb.note("Running %s" % ' '.join([cmd] + args + pkgs)) - output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.fatal("Could not invoke rpm. Command " - "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8"))) - - def upgrade(self): - self._prepare_pkg_transaction() - self._invoke_dnf(["upgrade"]) - - def autoremove(self): - self._prepare_pkg_transaction() - self._invoke_dnf(["autoremove"]) - - def remove_packaging_data(self): - self._invoke_dnf(["clean", "all"]) - for dir in self.packaging_data_dirs: - bb.utils.remove(oe.path.join(self.target_rootfs, dir), True) - - def backup_packaging_data(self): - # Save the packaging dirs for increment rpm image generation - if os.path.exists(self.saved_packaging_data): - bb.utils.remove(self.saved_packaging_data, True) - for i in self.packaging_data_dirs: - source_dir = oe.path.join(self.target_rootfs, i) - target_dir = oe.path.join(self.saved_packaging_data, i) - if os.path.isdir(source_dir): - shutil.copytree(source_dir, target_dir, symlinks=True) - elif os.path.isfile(source_dir): - shutil.copy2(source_dir, target_dir) - - def recovery_packaging_data(self): - # Move the rpmlib back - if os.path.exists(self.saved_packaging_data): - for i in self.packaging_data_dirs: - target_dir = oe.path.join(self.target_rootfs, i) - if os.path.exists(target_dir): - bb.utils.remove(target_dir, True) - source_dir = oe.path.join(self.saved_packaging_data, i) - if os.path.isdir(source_dir): - shutil.copytree(source_dir, target_dir, symlinks=True) - elif os.path.isfile(source_dir): - shutil.copy2(source_dir, target_dir) - - def list_installed(self): - output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"], - print_output = False) - packages = {} - current_package = None - current_deps = None - current_state = "initial" - for line in output.splitlines(): - if line.startswith("Package:"): - package_info = line.split(" ")[1:] - current_package = package_info[0] - package_arch = package_info[1] - package_version = package_info[2] - package_rpm = package_info[3] - packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm} - current_deps = [] - elif line.startswith("Dependencies:"): - current_state = "dependencies" - elif line.startswith("Recommendations"): - current_state = "recommendations" - elif line.startswith("DependenciesEndHere:"): - current_state = "initial" - packages[current_package]["deps"] = current_deps - elif len(line) > 0: - if current_state == "dependencies": - current_deps.append(line) - elif current_state == "recommendations": - current_deps.append("%s [REC]" % line) - - return packages - - def update(self): - self._invoke_dnf(["makecache", "--refresh"]) - - def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ): - os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs - - dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf") - standard_dnf_args = ["-v", "--rpmverbosity=info", "-y", - "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), - "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")), - "--installroot=%s" % (self.target_rootfs), - "--setopt=logdir=%s" % (self.d.getVar('T')) - ] - if 
hasattr(self, "rpm_repo_dir"): - standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir)) - cmd = [dnf_cmd] + standard_dnf_args + dnf_args - bb.note('Running %s' % ' '.join(cmd)) - try: - output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8") - if print_output: - bb.debug(1, output) - return output - except subprocess.CalledProcessError as e: - if print_output: - (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " - "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - else: - (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " - "'%s' returned %d:" % (' '.join(cmd), e.returncode)) - return e.output.decode("utf-8") - - def dump_install_solution(self, pkgs): - open(self.solution_manifest, 'w').write(" ".join(pkgs)) - return pkgs - - def load_old_install_solution(self): - if not os.path.exists(self.solution_manifest): - return [] - with open(self.solution_manifest, 'r') as fd: - return fd.read().split() - - def _script_num_prefix(self, path): - files = os.listdir(path) - numbers = set() - numbers.add(99) - for f in files: - numbers.add(int(f.split("-")[0])) - return max(numbers) + 1 - - def save_rpmpostinst(self, pkg): - bb.note("Saving postinstall script of %s" % (pkg)) - cmd = bb.utils.which(os.getenv('PATH'), "rpm") - args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg] - - try: - output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Could not invoke rpm. Command " - "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8"))) - - # may need to prepend #!/bin/sh to output - - target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/')) - bb.utils.mkdirhier(target_path) - num = self._script_num_prefix(target_path) - saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg)) - open(saved_script_name, 'w').write(output) - os.chmod(saved_script_name, 0o755) - - def _handle_intercept_failure(self, registered_pkgs): - rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') - bb.utils.mkdirhier(rpm_postinsts_dir) - - # Save the package postinstalls in /etc/rpm-postinsts - for pkg in registered_pkgs.split(): - self.save_rpmpostinst(pkg) - - def extract(self, pkg): - output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg]) - pkg_name = output.splitlines()[-1] - if not pkg_name.endswith(".rpm"): - bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output)) - pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name) - - cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio") - rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio") - - if not os.path.isfile(pkg_path): - bb.fatal("Unable to extract package for '%s'." - "File %s doesn't exists" % (pkg, pkg_path)) - - tmp_dir = tempfile.mkdtemp() - current_dir = os.getcwd() - os.chdir(tmp_dir) - - try: - cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd) - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8"))) - except OSError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. 
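save_rpmpostinst() above stores failed postinstall scriptlets under ${sysconfdir}/rpm-postinsts/ with an increasing numeric prefix so they run in a stable order on first boot; _script_num_prefix() picks the next number with 99 as a floor, so the first saved script becomes 100. A rough standalone sketch of just that numbering-and-save step, using a temporary directory instead of a real rootfs:

    import os, tempfile

    def next_script_prefix(path):
        # One above the highest existing prefix; 99 is the floor.
        numbers = {99}
        for name in os.listdir(path):
            numbers.add(int(name.split("-")[0]))
        return max(numbers) + 1

    def save_postinst(path, pkg, script_body):
        num = next_script_prefix(path)
        target = os.path.join(path, "%d-%s" % (num, pkg))
        with open(target, "w") as f:
            f.write(script_body)
        os.chmod(target, 0o755)
        return target

    demo = tempfile.mkdtemp()
    print(save_postinst(demo, "examplepkg", "#!/bin/sh\necho hello\n"))  # .../100-examplepkg
    print(save_postinst(demo, "otherpkg", "#!/bin/sh\ntrue\n"))          # .../101-otherpkg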
Command '%s' " - "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename)) - - bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) - os.chdir(current_dir) - - return tmp_dir - - -class OpkgDpkgPM(PackageManager): - def __init__(self, d, target_rootfs): - """ - This is an abstract class. Do not instantiate this directly. - """ - super(OpkgDpkgPM, self).__init__(d, target_rootfs) - - def package_info(self, pkg, cmd): - """ - Returns a dictionary with the package info. - - This method extracts the common parts for Opkg and Dpkg - """ - - try: - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") - except subprocess.CalledProcessError as e: - bb.fatal("Unable to list available packages. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - return opkg_query(output) - - def extract(self, pkg, pkg_info): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. - - This method extracts the common parts for Opkg and Dpkg - """ - - ar_cmd = bb.utils.which(os.getenv("PATH"), "ar") - tar_cmd = bb.utils.which(os.getenv("PATH"), "tar") - pkg_path = pkg_info[pkg]["filepath"] - - if not os.path.isfile(pkg_path): - bb.fatal("Unable to extract package for '%s'." - "File %s doesn't exists" % (pkg, pkg_path)) - - tmp_dir = tempfile.mkdtemp() - current_dir = os.getcwd() - os.chdir(tmp_dir) - data_tar = 'data.tar.xz' - - try: - cmd = [ar_cmd, 'x', pkg_path] - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - cmd = [tar_cmd, 'xf', data_tar] - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. Command '%s' " - "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8"))) - except OSError as e: - bb.utils.remove(tmp_dir, recurse=True) - bb.fatal("Unable to extract %s package. 
Command '%s' " - "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename)) - - bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) - bb.utils.remove(os.path.join(tmp_dir, "debian-binary")) - bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz")) - os.chdir(current_dir) - - return tmp_dir - - def _handle_intercept_failure(self, registered_pkgs): - self.mark_packages("unpacked", registered_pkgs.split()) - -class OpkgPM(OpkgDpkgPM): - def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True): - super(OpkgPM, self).__init__(d, target_rootfs) - - self.config_file = config_file - self.pkg_archs = archs - self.task_name = task_name - - self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir) - self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock") - self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") - self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs) - self.opkg_args += self.d.getVar("OPKG_ARGS") - - if prepare_index: - create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies) - - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - if opkg_lib_dir[0] == "/": - opkg_lib_dir = opkg_lib_dir[1:] - - self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg") - - bb.utils.mkdirhier(self.opkg_dir) - - self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name) - if not os.path.exists(self.d.expand('${T}/saved')): - bb.utils.mkdirhier(self.d.expand('${T}/saved')) - - self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1" - if self.from_feeds: - self._create_custom_config() - else: - self._create_config() - - self.indexer = OpkgIndexer(self.d, self.deploy_dir) - - def mark_packages(self, status_tag, packages=None): - """ - This function will change a package's status in /var/lib/opkg/status file. - If 'packages' is None then the new_status will be applied to all - packages - """ - status_file = os.path.join(self.opkg_dir, "status") - - with open(status_file, "r") as sf: - with open(status_file + ".tmp", "w+") as tmp_sf: - if packages is None: - tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", - r"Package: \1\n\2Status: \3%s" % status_tag, - sf.read())) - else: - if type(packages).__name__ != "list": - raise TypeError("'packages' should be a list object") - - status = sf.read() - for pkg in packages: - status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, - r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), - status) - - tmp_sf.write(status) - - os.rename(status_file + ".tmp", status_file) - - def _create_custom_config(self): - bb.note("Building from feeds activated!") - - with open(self.config_file, "w+") as config_file: - priority = 1 - for arch in self.pkg_archs.split(): - config_file.write("arch %s %d\n" % (arch, priority)) - priority += 5 - - for line in (self.d.getVar('IPK_FEED_URIS') or "").split(): - feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) - - if feed_match is not None: - feed_name = feed_match.group(1) - feed_uri = feed_match.group(2) - - bb.note("Add %s feed with URL %s" % (feed_name, feed_uri)) - - config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) - - """ - Allow to use package deploy directory contents as quick devel-testing - feed. 
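OpkgPM builds its opkg configuration from the compatible architecture list (each arch gets an increasing priority) plus one src/gz line per feed, as in _create_custom_config() above. A small sketch of generating that config text, with made-up feed names and URLs:

    def opkg_config_text(pkg_archs, feeds):
        # feeds: list of (name, uri) tuples; archs get priorities 1, 6, 11, ...
        lines = []
        priority = 1
        for arch in pkg_archs:
            lines.append("arch %s %d" % (arch, priority))
            priority += 5
        for name, uri in feeds:
            lines.append("src/gz %s %s" % (name, uri))
        return "\n".join(lines) + "\n"

    print(opkg_config_text(
        ["all", "cortexa57", "qemuarm64"],                       # illustrative arch list
        [("oe-base", "http://example.com/ipk/cortexa57")]))      # illustrative feed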
This creates individual feed configs for each arch subdir of those - specified as compatible for the current machine. - NOTE: Development-helper feature, NOT a full-fledged feed. - """ - if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "": - for arch in self.pkg_archs.split(): - cfg_file_name = os.path.join(self.target_rootfs, - self.d.getVar("sysconfdir"), - "opkg", - "local-%s-feed.conf" % arch) - - with open(cfg_file_name, "w+") as cfg_file: - cfg_file.write("src/gz local-%s %s/%s" % - (arch, - self.d.getVar('FEED_DEPLOYDIR_BASE_URI'), - arch)) - - if self.d.getVar('OPKGLIBDIR') != '/var/lib': - # There is no command line option for this anymore, we need to add - # info_dir and status_file to config file, if OPKGLIBDIR doesn't have - # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" - cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) - cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) - cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) - - - def _create_config(self): - with open(self.config_file, "w+") as config_file: - priority = 1 - for arch in self.pkg_archs.split(): - config_file.write("arch %s %d\n" % (arch, priority)) - priority += 5 - - config_file.write("src oe file:%s\n" % self.deploy_dir) - - for arch in self.pkg_archs.split(): - pkgs_dir = os.path.join(self.deploy_dir, arch) - if os.path.isdir(pkgs_dir): - config_file.write("src oe-%s file:%s\n" % - (arch, pkgs_dir)) - - if self.d.getVar('OPKGLIBDIR') != '/var/lib': - # There is no command line option for this anymore, we need to add - # info_dir and status_file to config file, if OPKGLIBDIR doesn't have - # the default value of "/var/lib" as defined in opkg: - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" - # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" - config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) - config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) - config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - if feed_uris == "": - return - - rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' - % self.target_rootfs) - - os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True) - - feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split() - - with open(rootfs_config, "w+") as config_file: - uri_iterator = 0 - for uri in feed_uris: - if archs: - for arch in archs: - if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))): - continue - bb.note('Adding opkg feed url-%s-%d (%s)' % - (arch, uri_iterator, uri)) - config_file.write("src/gz uri-%s-%d %s/%s\n" % - (arch, uri_iterator, uri, arch)) - else: - bb.note('Adding opkg feed url-%d (%s)' % - (uri_iterator, uri)) - config_file.write("src/gz 
uri-%d %s\n" % - (uri_iterator, uri)) - - uri_iterator += 1 - - def update(self): - self.deploy_dir_lock() - - cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - self.deploy_dir_unlock() - bb.fatal("Unable to update the package index files. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - self.deploy_dir_unlock() - - def install(self, pkgs, attempt_only=False): - if not pkgs: - return - - cmd = "%s %s" % (self.opkg_cmd, self.opkg_args) - for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split(): - cmd += " --add-exclude %s" % exclude - for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split(): - cmd += " --add-ignore-recommends %s" % bad_recommendation - cmd += " install " - cmd += " ".join(pkgs) - - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - try: - bb.note("Installing the following packages: %s" % ' '.join(pkgs)) - bb.note(cmd) - output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - failed_pkgs = [] - for line in output.split('\n'): - if line.endswith("configuration required on target."): - bb.warn(line) - failed_pkgs.append(line.split(".")[0]) - if failed_pkgs: - failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - except subprocess.CalledProcessError as e: - (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. " - "Command '%s' returned %d:\n%s" % - (cmd, e.returncode, e.output.decode("utf-8"))) - - def remove(self, pkgs, with_dependencies=True): - if not pkgs: - return - - if with_dependencies: - cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \ - (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) - else: - cmd = "%s %s --force-depends remove %s" % \ - (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) - - try: - bb.note(cmd) - output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to remove packages. Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - def write_index(self): - self.deploy_dir_lock() - - result = self.indexer.write_index() - - self.deploy_dir_unlock() - - if result is not None: - bb.fatal(result) - - def remove_packaging_data(self): - bb.utils.remove(self.opkg_dir, True) - # create the directory back, it's needed by PM lock - bb.utils.mkdirhier(self.opkg_dir) - - def remove_lists(self): - if not self.from_feeds: - bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True) - - def list_installed(self): - return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs() - - def dummy_install(self, pkgs): - """ - The following function dummy installs pkgs and returns the log of output. 
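OpkgPM.install() above is essentially command assembly: the base opkg invocation (config file, offline root, OPKG_ARGS), one --add-exclude per PACKAGE_EXCLUDE entry, one --add-ignore-recommends per BAD_RECOMMENDATIONS entry, then the package list. A simplified sketch of that assembly as an argv list (the temp-dir option and OPKG_ARGS are omitted; values are illustrative):

    def build_opkg_install_argv(opkg, conf, rootfs, pkgs, excludes=(), ignore_recs=()):
        argv = [opkg, "--volatile-cache", "-f", conf, "-o", rootfs]
        for pkg in excludes:
            argv += ["--add-exclude", pkg]
        for pkg in ignore_recs:
            argv += ["--add-ignore-recommends", pkg]
        return argv + ["install"] + list(pkgs)

    print(" ".join(build_opkg_install_argv(
        "opkg", "/tmp/opkg.conf", "/tmp/rootfs",
        ["busybox", "dropbear"], excludes=["systemd"], ignore_recs=["kernel-modules"])))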
- """ - if len(pkgs) == 0: - return - - # Create an temp dir as opkg root for dummy installation - temp_rootfs = self.d.expand('${T}/opkg') - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - if opkg_lib_dir[0] == "/": - opkg_lib_dir = opkg_lib_dir[1:] - temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg') - bb.utils.mkdirhier(temp_opkg_dir) - - opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) - opkg_args += self.d.getVar("OPKG_ARGS") - - cmd = "%s %s update" % (self.opkg_cmd, opkg_args) - try: - subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to update. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - # Dummy installation - cmd = "%s %s --noaction install %s " % (self.opkg_cmd, - opkg_args, - ' '.join(pkgs)) - try: - output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to dummy install packages. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - bb.utils.remove(temp_rootfs, True) - - return output - - def backup_packaging_data(self): - # Save the opkglib for increment ipk image generation - if os.path.exists(self.saved_opkg_dir): - bb.utils.remove(self.saved_opkg_dir, True) - shutil.copytree(self.opkg_dir, - self.saved_opkg_dir, - symlinks=True) - - def recover_packaging_data(self): - # Move the opkglib back - if os.path.exists(self.saved_opkg_dir): - if os.path.exists(self.opkg_dir): - bb.utils.remove(self.opkg_dir, True) - - bb.note('Recover packaging data') - shutil.copytree(self.saved_opkg_dir, - self.opkg_dir, - symlinks=True) - - def package_info(self, pkg): - """ - Returns a dictionary with the package info. - """ - cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg) - pkg_info = super(OpkgPM, self).package_info(pkg, cmd) - - pkg_arch = pkg_info[pkg]["arch"] - pkg_filename = pkg_info[pkg]["filename"] - pkg_info[pkg]["filepath"] = \ - os.path.join(self.deploy_dir, pkg_arch, pkg_filename) - - return pkg_info - - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. - """ - pkg_info = self.package_info(pkg) - if not pkg_info: - bb.fatal("Unable to get information for package '%s' while " - "trying to extract the package." 
% pkg) - - tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info) - bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) - - return tmp_dir - -class DpkgPM(OpkgDpkgPM): - def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True): - super(DpkgPM, self).__init__(d, target_rootfs) - self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir) - - create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies) - - if apt_conf_dir is None: - self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") - else: - self.apt_conf_dir = apt_conf_dir - self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") - self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") - self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache") - - self.apt_args = d.getVar("APT_ARGS") - - self.all_arch_list = archs.split() - all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() - self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list) - - self._create_configs(archs, base_archs) - - self.indexer = DpkgIndexer(self.d, self.deploy_dir) - - def mark_packages(self, status_tag, packages=None): - """ - This function will change a package's status in /var/lib/dpkg/status file. - If 'packages' is None then the new_status will be applied to all - packages - """ - status_file = self.target_rootfs + "/var/lib/dpkg/status" - - with open(status_file, "r") as sf: - with open(status_file + ".tmp", "w+") as tmp_sf: - if packages is None: - tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", - r"Package: \1\n\2Status: \3%s" % status_tag, - sf.read())) - else: - if type(packages).__name__ != "list": - raise TypeError("'packages' should be a list object") - - status = sf.read() - for pkg in packages: - status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, - r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), - status) - - tmp_sf.write(status) - - os.rename(status_file + ".tmp", status_file) - - def run_pre_post_installs(self, package_name=None): - """ - Run the pre/post installs for package "package_name". If package_name is - None, then run all pre/post install scriptlets. - """ - info_dir = self.target_rootfs + "/var/lib/dpkg/info" - ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"]) - control_scripts = [ - ControlScript(".preinst", "Preinstall", "install"), - ControlScript(".postinst", "Postinstall", "configure")] - status_file = self.target_rootfs + "/var/lib/dpkg/status" - installed_pkgs = [] - - with open(status_file, "r") as status: - for line in status.read().split('\n'): - m = re.match(r"^Package: (.*)", line) - if m is not None: - installed_pkgs.append(m.group(1)) - - if package_name is not None and not package_name in installed_pkgs: - return - - os.environ['D'] = self.target_rootfs - os.environ['OFFLINE_ROOT'] = self.target_rootfs - os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs - os.environ['INTERCEPT_DIR'] = self.intercepts_dir - os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') - - for pkg_name in installed_pkgs: - for control_script in control_scripts: - p_full = os.path.join(info_dir, pkg_name + control_script.suffix) - if os.path.exists(p_full): - try: - bb.note("Executing %s for package: %s ..." 
% - (control_script.name.lower(), pkg_name)) - output = subprocess.check_output([p_full, control_script.argument], - stderr=subprocess.STDOUT).decode("utf-8") - bb.note(output) - except subprocess.CalledProcessError as e: - bb.warn("%s for package %s failed with %d:\n%s" % - (control_script.name, pkg_name, e.returncode, - e.output.decode("utf-8"))) - failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) - - def update(self): - os.environ['APT_CONFIG'] = self.apt_conf_file - - self.deploy_dir_lock() - - cmd = "%s update" % self.apt_get_cmd - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to update the package index files. Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - self.deploy_dir_unlock() - - def install(self, pkgs, attempt_only=False): - if attempt_only and len(pkgs) == 0: - return - - os.environ['APT_CONFIG'] = self.apt_conf_file - - cmd = "%s %s install --force-yes --allow-unauthenticated --no-remove %s" % \ - (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) - - try: - bb.note("Installing the following packages: %s" % ' '.join(pkgs)) - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. " - "Command '%s' returned %d:\n%s" % - (cmd, e.returncode, e.output.decode("utf-8"))) - - # rename *.dpkg-new files/dirs - for root, dirs, files in os.walk(self.target_rootfs): - for dir in dirs: - new_dir = re.sub(r"\.dpkg-new", "", dir) - if dir != new_dir: - os.rename(os.path.join(root, dir), - os.path.join(root, new_dir)) - - for file in files: - new_file = re.sub(r"\.dpkg-new", "", file) - if file != new_file: - os.rename(os.path.join(root, file), - os.path.join(root, new_file)) - - - def remove(self, pkgs, with_dependencies=True): - if not pkgs: - return - - if with_dependencies: - os.environ['APT_CONFIG'] = self.apt_conf_file - cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs)) - else: - cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ - " -P --force-depends %s" % \ - (bb.utils.which(os.getenv('PATH'), "dpkg"), - self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Unable to remove packages. 
Command '%s' " - "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) - - def write_index(self): - self.deploy_dir_lock() - - result = self.indexer.write_index() - - self.deploy_dir_unlock() - - if result is not None: - bb.fatal(result) - - def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): - if feed_uris == "": - return - - sources_conf = os.path.join("%s/etc/apt/sources.list" - % self.target_rootfs) - arch_list = [] - - if feed_archs is None: - for arch in self.all_arch_list: - if not os.path.exists(os.path.join(self.deploy_dir, arch)): - continue - arch_list.append(arch) - else: - arch_list = feed_archs.split() - - feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) - - with open(sources_conf, "w+") as sources_file: - for uri in feed_uris: - if arch_list: - for arch in arch_list: - bb.note('Adding dpkg channel at (%s)' % uri) - sources_file.write("deb %s/%s ./\n" % - (uri, arch)) - else: - bb.note('Adding dpkg channel at (%s)' % uri) - sources_file.write("deb %s ./\n" % uri) - - def _create_configs(self, archs, base_archs): - base_archs = re.sub(r"_", r"-", base_archs) - - if os.path.exists(self.apt_conf_dir): - bb.utils.remove(self.apt_conf_dir, True) - - bb.utils.mkdirhier(self.apt_conf_dir) - bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/") - bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/") - bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/") - - arch_list = [] - for arch in self.all_arch_list: - if not os.path.exists(os.path.join(self.deploy_dir, arch)): - continue - arch_list.append(arch) - - with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file: - priority = 801 - for arch in arch_list: - prefs_file.write( - "Package: *\n" - "Pin: release l=%s\n" - "Pin-Priority: %d\n\n" % (arch, priority)) - - priority += 5 - - pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or "" - for pkg in pkg_exclude.split(): - prefs_file.write( - "Package: %s\n" - "Pin: release *\n" - "Pin-Priority: -1\n\n" % pkg) - - arch_list.reverse() - - with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file: - for arch in arch_list: - sources_file.write("deb file:%s/ ./\n" % - os.path.join(self.deploy_dir, arch)) - - base_arch_list = base_archs.split() - multilib_variants = self.d.getVar("MULTILIB_VARIANTS"); - for variant in multilib_variants.split(): - localdata = bb.data.createCopy(self.d) - variant_tune = localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False) - orig_arch = localdata.getVar("DPKG_ARCH") - localdata.setVar("DEFAULTTUNE", variant_tune) - variant_arch = localdata.getVar("DPKG_ARCH") - if variant_arch not in base_arch_list: - base_arch_list.append(variant_arch) - - with open(self.apt_conf_file, "w+") as apt_conf: - with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample: - for line in apt_conf_sample.read().split("\n"): - match_arch = re.match(r" Architecture \".*\";$", line) - architectures = "" - if match_arch: - for base_arch in base_arch_list: - architectures += "\"%s\";" % base_arch - apt_conf.write(" Architectures {%s};\n" % architectures); - apt_conf.write(" Architecture \"%s\";\n" % base_archs) - else: - line = re.sub(r"#ROOTFS#", self.target_rootfs, line) - line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) - apt_conf.write(line + "\n") - - target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs - bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info")) - - bb.utils.mkdirhier(os.path.join(target_dpkg_dir, 
"updates")) - - if not os.path.exists(os.path.join(target_dpkg_dir, "status")): - open(os.path.join(target_dpkg_dir, "status"), "w+").close() - if not os.path.exists(os.path.join(target_dpkg_dir, "available")): - open(os.path.join(target_dpkg_dir, "available"), "w+").close() - - def remove_packaging_data(self): - bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True) - bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) - - def fix_broken_dependencies(self): - os.environ['APT_CONFIG'] = self.apt_conf_file - - cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args) - - try: - subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) - except subprocess.CalledProcessError as e: - bb.fatal("Cannot fix broken dependencies. Command '%s' " - "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) - - def list_installed(self): - return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs() - - def package_info(self, pkg): - """ - Returns a dictionary with the package info. - """ - cmd = "%s show %s" % (self.apt_cache_cmd, pkg) - pkg_info = super(DpkgPM, self).package_info(pkg, cmd) - - pkg_arch = pkg_info[pkg]["pkgarch"] - pkg_filename = pkg_info[pkg]["filename"] - pkg_info[pkg]["filepath"] = \ - os.path.join(self.deploy_dir, pkg_arch, pkg_filename) - - return pkg_info - - def extract(self, pkg): - """ - Returns the path to a tmpdir where resides the contents of a package. - - Deleting the tmpdir is responsability of the caller. - """ - pkg_info = self.package_info(pkg) - if not pkg_info: - bb.fatal("Unable to get information for package '%s' while " - "trying to extract the package." % pkg) - - tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info) - bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) - - return tmp_dir - -def generate_index_files(d): - classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split() - - indexer_map = { - "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')), - "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')), - "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB')) - } - - result = None - - for pkg_class in classes: - if not pkg_class in indexer_map: - continue - - if os.path.exists(indexer_map[pkg_class][1]): - result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() - - if result is not None: - bb.fatal(result) diff --git a/poky/meta/lib/oe/package_manager/__init__.py b/poky/meta/lib/oe/package_manager/__init__.py new file mode 100644 index 000000000..865d6f949 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/__init__.py @@ -0,0 +1,550 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from abc import ABCMeta, abstractmethod +import os +import glob +import subprocess +import shutil +import re +import collections +import bb +import tempfile +import oe.utils +import oe.path +import string +from oe.gpg_sign import get_signer +import hashlib +import fnmatch + +# this can be used by all PM backends to create the index files in parallel +def create_index(arg): + index_cmd = arg + + bb.note("Executing '%s' ..." % index_cmd) + result = subprocess.check_output(index_cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") + if result: + bb.note(result) + +def opkg_query(cmd_output): + """ + This method parse the output from the package managerand return + a dictionary with the information of the packages. This is used + when the packages are in deb or ipk format. 
+ """ + verregex = re.compile(r' \([=<>]* [^ )]*\)') + output = dict() + pkg = "" + arch = "" + ver = "" + filename = "" + dep = [] + prov = [] + pkgarch = "" + for line in cmd_output.splitlines()+['']: + line = line.rstrip() + if ':' in line: + if line.startswith("Package: "): + pkg = line.split(": ")[1] + elif line.startswith("Architecture: "): + arch = line.split(": ")[1] + elif line.startswith("Version: "): + ver = line.split(": ")[1] + elif line.startswith("File: ") or line.startswith("Filename:"): + filename = line.split(": ")[1] + if "/" in filename: + filename = os.path.basename(filename) + elif line.startswith("Depends: "): + depends = verregex.sub('', line.split(": ")[1]) + for depend in depends.split(", "): + dep.append(depend) + elif line.startswith("Recommends: "): + recommends = verregex.sub('', line.split(": ")[1]) + for recommend in recommends.split(", "): + dep.append("%s [REC]" % recommend) + elif line.startswith("PackageArch: "): + pkgarch = line.split(": ")[1] + elif line.startswith("Provides: "): + provides = verregex.sub('', line.split(": ")[1]) + for provide in provides.split(", "): + prov.append(provide) + + # When there is a blank line save the package information + elif not line: + # IPK doesn't include the filename + if not filename: + filename = "%s_%s_%s.ipk" % (pkg, ver, arch) + if pkg: + output[pkg] = {"arch":arch, "ver":ver, + "filename":filename, "deps": dep, "pkgarch":pkgarch, "provs": prov} + pkg = "" + arch = "" + ver = "" + filename = "" + dep = [] + prov = [] + pkgarch = "" + + return output + +def failed_postinsts_abort(pkgs, log_path): + bb.fatal("""Postinstall scriptlets of %s have failed. If the intention is to defer them to first boot, +then please place them into pkg_postinst_ontarget_${PN} (). +Deferring to first boot via 'exit 1' is no longer supported. +Details of the failure are in %s.""" %(pkgs, log_path)) + +def generate_locale_archive(d, rootfs, target_arch, localedir): + # Pretty sure we don't need this for locale archive generation but + # keeping it to be safe... 
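opkg_query(), defined just above, turns blocks of control-style output ("Package:", "Version:", "Depends:", ..., separated by blank lines) into a dictionary keyed by package name, stripping version constraints from the dependency fields. A much-reduced standalone illustration of that kind of stanza parsing (only a few fields, no constraint stripping, sample data made up):

    def parse_stanzas(text):
        pkgs, cur = {}, {}
        for line in text.splitlines() + [""]:
            if not line.strip():                 # a blank line ends the current stanza
                if cur.get("Package"):
                    pkgs[cur["Package"]] = cur
                cur = {}
            elif ": " in line:
                field, value = line.split(": ", 1)
                cur[field] = value
        return pkgs

    sample = """Package: busybox
    Version: 1.31.1
    Architecture: cortexa57

    Package: dropbear
    Version: 2019.78
    Architecture: cortexa57
    """
    print(parse_stanzas(sample.replace("\n    ", "\n"))["busybox"]["Version"])   # -> 1.31.1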
+ locale_arch_options = { \ + "arc": ["--uint32-align=4", "--little-endian"], + "arceb": ["--uint32-align=4", "--big-endian"], + "arm": ["--uint32-align=4", "--little-endian"], + "armeb": ["--uint32-align=4", "--big-endian"], + "aarch64": ["--uint32-align=4", "--little-endian"], + "aarch64_be": ["--uint32-align=4", "--big-endian"], + "sh4": ["--uint32-align=4", "--big-endian"], + "powerpc": ["--uint32-align=4", "--big-endian"], + "powerpc64": ["--uint32-align=4", "--big-endian"], + "powerpc64le": ["--uint32-align=4", "--little-endian"], + "mips": ["--uint32-align=4", "--big-endian"], + "mipsisa32r6": ["--uint32-align=4", "--big-endian"], + "mips64": ["--uint32-align=4", "--big-endian"], + "mipsisa64r6": ["--uint32-align=4", "--big-endian"], + "mipsel": ["--uint32-align=4", "--little-endian"], + "mipsisa32r6el": ["--uint32-align=4", "--little-endian"], + "mips64el": ["--uint32-align=4", "--little-endian"], + "mipsisa64r6el": ["--uint32-align=4", "--little-endian"], + "riscv64": ["--uint32-align=4", "--little-endian"], + "riscv32": ["--uint32-align=4", "--little-endian"], + "i586": ["--uint32-align=4", "--little-endian"], + "i686": ["--uint32-align=4", "--little-endian"], + "x86_64": ["--uint32-align=4", "--little-endian"] + } + if target_arch in locale_arch_options: + arch_options = locale_arch_options[target_arch] + else: + bb.error("locale_arch_options not found for target_arch=" + target_arch) + bb.fatal("unknown arch:" + target_arch + " for locale_arch_options") + + # Need to set this so cross-localedef knows where the archive is + env = dict(os.environ) + env["LOCALEARCHIVE"] = oe.path.join(localedir, "locale-archive") + + for name in sorted(os.listdir(localedir)): + path = os.path.join(localedir, name) + if os.path.isdir(path): + cmd = ["cross-localedef", "--verbose"] + cmd += arch_options + cmd += ["--add-to-archive", path] + subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT) + +class Indexer(object, metaclass=ABCMeta): + def __init__(self, d, deploy_dir): + self.d = d + self.deploy_dir = deploy_dir + + @abstractmethod + def write_index(self): + pass + +class PkgsList(object, metaclass=ABCMeta): + def __init__(self, d, rootfs_dir): + self.d = d + self.rootfs_dir = rootfs_dir + + @abstractmethod + def list_pkgs(self): + pass + +class PackageManager(object, metaclass=ABCMeta): + """ + This is an abstract class. Do not instantiate this directly. + """ + + def __init__(self, d, target_rootfs): + self.d = d + self.target_rootfs = target_rootfs + self.deploy_dir = None + self.deploy_lock = None + self._initialize_intercepts() + + def _initialize_intercepts(self): + bb.note("Initializing intercept dir for %s" % self.target_rootfs) + # As there might be more than one instance of PackageManager operating at the same time + # we need to isolate the intercept_scripts directories from each other, + # hence the ugly hash digest in dir name. 
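The per-instance intercept directory mentioned in the comment above is derived by hashing the rootfs path, so concurrent PackageManager instances never share script directories; the actual assignment follows on the next line. A tiny sketch of the naming scheme, with made-up paths:

    import hashlib, os

    def intercepts_dir_for(workdir, target_rootfs):
        digest = hashlib.sha256(target_rootfs.encode()).hexdigest()
        return os.path.join(workdir, "intercept_scripts-%s" % digest)

    print(intercepts_dir_for("/tmp/work", "/tmp/work/rootfs"))
    print(intercepts_dir_for("/tmp/work", "/tmp/work/rootfs-sdk"))  # different rootfs -> different dir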
+ self.intercepts_dir = os.path.join(self.d.getVar('WORKDIR'), "intercept_scripts-%s" % + (hashlib.sha256(self.target_rootfs.encode()).hexdigest())) + + postinst_intercepts = (self.d.getVar("POSTINST_INTERCEPTS") or "").split() + if not postinst_intercepts: + postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_PATH") + if not postinst_intercepts_path: + postinst_intercepts_path = self.d.getVar("POSTINST_INTERCEPTS_DIR") or self.d.expand("${COREBASE}/scripts/postinst-intercepts") + postinst_intercepts = oe.path.which_wild('*', postinst_intercepts_path) + + bb.debug(1, 'Collected intercepts:\n%s' % ''.join(' %s\n' % i for i in postinst_intercepts)) + bb.utils.remove(self.intercepts_dir, True) + bb.utils.mkdirhier(self.intercepts_dir) + for intercept in postinst_intercepts: + bb.utils.copyfile(intercept, os.path.join(self.intercepts_dir, os.path.basename(intercept))) + + @abstractmethod + def _handle_intercept_failure(self, failed_script): + pass + + def _postpone_to_first_boot(self, postinst_intercept_hook): + with open(postinst_intercept_hook) as intercept: + registered_pkgs = None + for line in intercept.read().split("\n"): + m = re.match(r"^##PKGS:(.*)", line) + if m is not None: + registered_pkgs = m.group(1).strip() + break + + if registered_pkgs is not None: + bb.note("If an image is being built, the postinstalls for the following packages " + "will be postponed for first boot: %s" % + registered_pkgs) + + # call the backend dependent handler + self._handle_intercept_failure(registered_pkgs) + + + def run_intercepts(self, populate_sdk=None): + intercepts_dir = self.intercepts_dir + + bb.note("Running intercept scripts:") + os.environ['D'] = self.target_rootfs + os.environ['STAGING_DIR_NATIVE'] = self.d.getVar('STAGING_DIR_NATIVE') + for script in os.listdir(intercepts_dir): + script_full = os.path.join(intercepts_dir, script) + + if script == "postinst_intercept" or not os.access(script_full, os.X_OK): + continue + + # we do not want to run any multilib variant of this + if script.startswith("delay_to_first_boot"): + self._postpone_to_first_boot(script_full) + continue + + if populate_sdk == 'host' and self.d.getVar('SDK_OS') == 'mingw32': + bb.note("The postinstall intercept hook '%s' could not be executed due to missing wine support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + continue + + bb.note("> Executing %s intercept ..." % script) + + try: + output = subprocess.check_output(script_full, stderr=subprocess.STDOUT) + if output: bb.note(output.decode("utf-8")) + except subprocess.CalledProcessError as e: + bb.note("Exit code %d. 
Output:\n%s" % (e.returncode, e.output.decode("utf-8"))) + if populate_sdk == 'host': + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + elif populate_sdk == 'target': + if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): + bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + else: + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + else: + if "qemuwrapper: qemu usermode is not supported" in e.output.decode("utf-8"): + bb.note("The postinstall intercept hook '%s' could not be executed due to missing qemu usermode support, details in %s/log.do_%s" + % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + self._postpone_to_first_boot(script_full) + else: + bb.fatal("The postinstall intercept hook '%s' failed, details in %s/log.do_%s" % (script, self.d.getVar('T'), self.d.getVar('BB_CURRENTTASK'))) + + @abstractmethod + def update(self): + """ + Update the package manager package database. + """ + pass + + @abstractmethod + def install(self, pkgs, attempt_only=False): + """ + Install a list of packages. 'pkgs' is a list object. If 'attempt_only' is + True, installation failures are ignored. + """ + pass + + @abstractmethod + def remove(self, pkgs, with_dependencies=True): + """ + Remove a list of packages. 'pkgs' is a list object. If 'with_dependencies' + is False, then any dependencies are left in place. + """ + pass + + @abstractmethod + def write_index(self): + """ + This function creates the index files + """ + pass + + @abstractmethod + def remove_packaging_data(self): + pass + + @abstractmethod + def list_installed(self): + pass + + @abstractmethod + def extract(self, pkg): + """ + Returns the path to a tmpdir where resides the contents of a package. + Deleting the tmpdir is responsability of the caller. + """ + pass + + @abstractmethod + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + """ + Add remote package feeds into repository manager configuration. The parameters + for the feeds are set by feed_uris, feed_base_paths and feed_archs. + See http://www.yoctoproject.org/docs/current/ref-manual/ref-manual.html#var-PACKAGE_FEED_URIS + for their description. + """ + pass + + def install_glob(self, globs, sdk=False): + """ + Install all packages that match a glob. + """ + # TODO don't have sdk here but have a property on the superclass + # (and respect in install_complementary) + if sdk: + pkgdatadir = self.d.expand("${TMPDIR}/pkgdata/${SDK_SYS}") + else: + pkgdatadir = self.d.getVar("PKGDATA_DIR") + + try: + bb.note("Installing globbed packages...") + cmd = ["oe-pkgdata-util", "-p", pkgdatadir, "list-pkgs", globs] + pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") + self.install(pkgs.split(), attempt_only=True) + except subprocess.CalledProcessError as e: + # Return code 1 means no packages matched + if e.returncode != 1: + bb.fatal("Could not compute globbed packages list. Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + def install_complementary(self, globs=None): + """ + Install complementary packages based upon the list of currently installed + packages e.g. locales, *-dev, *-dbg, etc. 
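install_complementary(), whose docstring continues below, first expands IMAGE_LINGUAS into extra globs before handing everything to oe-pkgdata-util: every language and its base variant contributes a "*-locale-<lang>" pattern. A sketch of just that expansion step, with made-up values:

    def complementary_globs(base_globs, image_linguas):
        linguas = set()
        for translation in image_linguas.split():
            linguas.add(translation)
            linguas.add(translation.split("-")[0])   # "en-gb" also pulls in "en"
        globs = base_globs
        for lang in sorted(linguas):
            globs += " *-locale-%s" % lang
        return globs

    print(complementary_globs("*-dev *-dbg", "en-gb pt-br"))
    # -> "*-dev *-dbg *-locale-en *-locale-en-gb *-locale-pt *-locale-pt-br"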
This will only attempt to install + these packages, if they don't exist then no error will occur. Note: every + backend needs to call this function explicitly after the normal package + installation + """ + if globs is None: + globs = self.d.getVar('IMAGE_INSTALL_COMPLEMENTARY') + split_linguas = set() + + for translation in self.d.getVar('IMAGE_LINGUAS').split(): + split_linguas.add(translation) + split_linguas.add(translation.split('-')[0]) + + split_linguas = sorted(split_linguas) + + for lang in split_linguas: + globs += " *-locale-%s" % lang + for complementary_linguas in (self.d.getVar('IMAGE_LINGUAS_COMPLEMENTARY') or "").split(): + globs += (" " + complementary_linguas) % lang + + if globs is None: + return + + # we need to write the list of installed packages to a file because the + # oe-pkgdata-util reads it from a file + with tempfile.NamedTemporaryFile(mode="w+", prefix="installed-pkgs") as installed_pkgs: + pkgs = self.list_installed() + + provided_pkgs = set() + for pkg in pkgs.values(): + provided_pkgs |= set(pkg.get('provs', [])) + + output = oe.utils.format_pkg_list(pkgs, "arch") + installed_pkgs.write(output) + installed_pkgs.flush() + + cmd = ["oe-pkgdata-util", + "-p", self.d.getVar('PKGDATA_DIR'), "glob", installed_pkgs.name, + globs] + exclude = self.d.getVar('PACKAGE_EXCLUDE_COMPLEMENTARY') + if exclude: + cmd.extend(['--exclude=' + '|'.join(exclude.split())]) + try: + bb.note('Running %s' % cmd) + complementary_pkgs = subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode("utf-8") + complementary_pkgs = set(complementary_pkgs.split()) + skip_pkgs = sorted(complementary_pkgs & provided_pkgs) + install_pkgs = sorted(complementary_pkgs - provided_pkgs) + bb.note("Installing complementary packages ... %s (skipped already provided packages %s)" % ( + ' '.join(install_pkgs), + ' '.join(skip_pkgs))) + self.install(install_pkgs, attempt_only=True) + except subprocess.CalledProcessError as e: + bb.fatal("Could not compute complementary packages list. 
Command " + "'%s' returned %d:\n%s" % + (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + target_arch = self.d.getVar('TARGET_ARCH') + localedir = oe.path.join(self.target_rootfs, self.d.getVar("libdir"), "locale") + if os.path.exists(localedir) and os.listdir(localedir): + generate_locale_archive(self.d, self.target_rootfs, target_arch, localedir) + # And now delete the binary locales + self.remove(fnmatch.filter(self.list_installed(), "glibc-binary-localedata-*"), False) + + def deploy_dir_lock(self): + if self.deploy_dir is None: + raise RuntimeError("deploy_dir is not set!") + + lock_file_name = os.path.join(self.deploy_dir, "deploy.lock") + + self.deploy_lock = bb.utils.lockfile(lock_file_name) + + def deploy_dir_unlock(self): + if self.deploy_lock is None: + return + + bb.utils.unlockfile(self.deploy_lock) + + self.deploy_lock = None + + def construct_uris(self, uris, base_paths): + """ + Construct URIs based on the following pattern: uri/base_path where 'uri' + and 'base_path' correspond to each element of the corresponding array + argument leading to len(uris) x len(base_paths) elements on the returned + array + """ + def _append(arr1, arr2, sep='/'): + res = [] + narr1 = [a.rstrip(sep) for a in arr1] + narr2 = [a.rstrip(sep).lstrip(sep) for a in arr2] + for a1 in narr1: + if arr2: + for a2 in narr2: + res.append("%s%s%s" % (a1, sep, a2)) + else: + res.append(a1) + return res + return _append(uris, base_paths) + +def create_packages_dir(d, subrepo_dir, deploydir, taskname, filterbydependencies): + """ + Go through our do_package_write_X dependencies and hardlink the packages we depend + upon into the repo directory. This prevents us seeing other packages that may + have been built that we don't depend upon and also packages for architectures we don't + support. 
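construct_uris() above is a plain cross product of the feed URIs and base paths with separator trimming. A simplified restatement showing the effect on hypothetical feed values:

    def construct_uris(uris, base_paths):
        # uri/base_path for every combination; bare uris if no base paths are given
        uris = [u.rstrip("/") for u in uris]
        base_paths = [b.strip("/") for b in base_paths]
        return [u + "/" + b for u in uris for b in base_paths] if base_paths else uris

    print(construct_uris(["http://feed.example.com/"], ["rpm/", "rpm-dev"]))
    # -> ['http://feed.example.com/rpm', 'http://feed.example.com/rpm-dev']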
+ """ + import errno + + taskdepdata = d.getVar("BB_TASKDEPDATA", False) + mytaskname = d.getVar("BB_RUNTASK") + pn = d.getVar("PN") + seendirs = set() + multilibs = {} + + bb.utils.remove(subrepo_dir, recurse=True) + bb.utils.mkdirhier(subrepo_dir) + + # Detect bitbake -b usage + nodeps = d.getVar("BB_LIMITEDDEPS") or False + if nodeps or not filterbydependencies: + oe.path.symlink(deploydir, subrepo_dir, True) + return + + start = None + for dep in taskdepdata: + data = taskdepdata[dep] + if data[1] == mytaskname and data[0] == pn: + start = dep + break + if start is None: + bb.fatal("Couldn't find ourself in BB_TASKDEPDATA?") + pkgdeps = set() + start = [start] + seen = set(start) + # Support direct dependencies (do_rootfs -> do_package_write_X) + # or indirect dependencies within PN (do_populate_sdk_ext -> do_rootfs -> do_package_write_X) + while start: + next = [] + for dep2 in start: + for dep in taskdepdata[dep2][3]: + if taskdepdata[dep][0] != pn: + if "do_" + taskname in dep: + pkgdeps.add(dep) + elif dep not in seen: + next.append(dep) + seen.add(dep) + start = next + + for dep in pkgdeps: + c = taskdepdata[dep][0] + manifest, d2 = oe.sstatesig.find_sstate_manifest(c, taskdepdata[dep][2], taskname, d, multilibs) + if not manifest: + bb.fatal("No manifest generated from: %s in %s" % (c, taskdepdata[dep][2])) + if not os.path.exists(manifest): + continue + with open(manifest, "r") as f: + for l in f: + l = l.strip() + deploydir = os.path.normpath(deploydir) + if bb.data.inherits_class('packagefeed-stability', d): + dest = l.replace(deploydir + "-prediff", "") + else: + dest = l.replace(deploydir, "") + dest = subrepo_dir + dest + if l.endswith("/"): + if dest not in seendirs: + bb.utils.mkdirhier(dest) + seendirs.add(dest) + continue + # Try to hardlink the file, copy if that fails + destdir = os.path.dirname(dest) + if destdir not in seendirs: + bb.utils.mkdirhier(destdir) + seendirs.add(destdir) + try: + os.link(l, dest) + except OSError as err: + if err.errno == errno.EXDEV: + bb.utils.copyfile(l, dest) + else: + raise + + +def generate_index_files(d): + from oe.package_manager.rpm import RpmSubdirIndexer + from oe.package_manager.ipk import OpkgIndexer + from oe.package_manager.deb import DpkgIndexer + + classes = d.getVar('PACKAGE_CLASSES').replace("package_", "").split() + + indexer_map = { + "rpm": (RpmSubdirIndexer, d.getVar('DEPLOY_DIR_RPM')), + "ipk": (OpkgIndexer, d.getVar('DEPLOY_DIR_IPK')), + "deb": (DpkgIndexer, d.getVar('DEPLOY_DIR_DEB')) + } + + result = None + + for pkg_class in classes: + if not pkg_class in indexer_map: + continue + + if os.path.exists(indexer_map[pkg_class][1]): + result = indexer_map[pkg_class][0](d, indexer_map[pkg_class][1]).write_index() + + if result is not None: + bb.fatal(result) diff --git a/poky/meta/lib/oe/package_manager/deb/__init__.py b/poky/meta/lib/oe/package_manager/deb/__init__.py new file mode 100644 index 000000000..72155b178 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/deb/__init__.py @@ -0,0 +1,492 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import subprocess +from oe.package_manager import * + +class DpkgIndexer(Indexer): + def _create_configs(self): + bb.utils.mkdirhier(self.apt_conf_dir) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "lists", "partial")) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "apt.conf.d")) + bb.utils.mkdirhier(os.path.join(self.apt_conf_dir, "preferences.d")) + + with open(os.path.join(self.apt_conf_dir, "preferences"), + "w") as prefs_file: + pass + with 
open(os.path.join(self.apt_conf_dir, "sources.list"), + "w+") as sources_file: + pass + + with open(self.apt_conf_file, "w") as apt_conf: + with open(os.path.join(self.d.expand("${STAGING_ETCDIR_NATIVE}"), + "apt", "apt.conf.sample")) as apt_conf_sample: + for line in apt_conf_sample.read().split("\n"): + line = re.sub(r"#ROOTFS#", "/dev/null", line) + line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) + apt_conf.write(line + "\n") + + def write_index(self): + self.apt_conf_dir = os.path.join(self.d.expand("${APTCONF_TARGET}"), + "apt-ftparchive") + self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") + self._create_configs() + + os.environ['APT_CONFIG'] = self.apt_conf_file + + pkg_archs = self.d.getVar('PACKAGE_ARCHS') + if pkg_archs is not None: + arch_list = pkg_archs.split() + sdk_pkg_archs = self.d.getVar('SDK_PACKAGE_ARCHS') + if sdk_pkg_archs is not None: + for a in sdk_pkg_archs.split(): + if a not in pkg_archs: + arch_list.append(a) + + all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() + arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in arch_list) + + apt_ftparchive = bb.utils.which(os.getenv('PATH'), "apt-ftparchive") + gzip = bb.utils.which(os.getenv('PATH'), "gzip") + + index_cmds = [] + deb_dirs_found = False + for arch in arch_list: + arch_dir = os.path.join(self.deploy_dir, arch) + if not os.path.isdir(arch_dir): + continue + + cmd = "cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages;" % (arch_dir, apt_ftparchive) + + cmd += "%s -fcn Packages > Packages.gz;" % gzip + + with open(os.path.join(arch_dir, "Release"), "w+") as release: + release.write("Label: %s\n" % arch) + + cmd += "PSEUDO_UNLOAD=1 %s release . >> Release" % apt_ftparchive + + index_cmds.append(cmd) + + deb_dirs_found = True + + if not deb_dirs_found: + bb.note("There are no packages in %s" % self.deploy_dir) + return + + oe.utils.multiprocess_launch(create_index, index_cmds, self.d) + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + raise NotImplementedError('Package feed signing not implementd for dpkg') + +class DpkgPkgsList(PkgsList): + + def list_pkgs(self): + cmd = [bb.utils.which(os.getenv('PATH'), "dpkg-query"), + "--admindir=%s/var/lib/dpkg" % self.rootfs_dir, + "-W"] + + cmd.append("-f=Package: ${Package}\nArchitecture: ${PackageArch}\nVersion: ${Version}\nFile: ${Package}_${Version}_${Architecture}.deb\nDepends: ${Depends}\nRecommends: ${Recommends}\nProvides: ${Provides}\n\n") + + try: + cmd_output = subprocess.check_output(cmd, stderr=subprocess.STDOUT).strip().decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Cannot get the installed packages list. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + return opkg_query(cmd_output) + +class OpkgDpkgPM(PackageManager): + def __init__(self, d, target_rootfs): + """ + This is an abstract class. Do not instantiate this directly. + """ + super(OpkgDpkgPM, self).__init__(d, target_rootfs) + + def package_info(self, pkg, cmd): + """ + Returns a dictionary with the package info. + + This method extracts the common parts for Opkg and Dpkg + """ + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Unable to list available packages. 
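DpkgIndexer.write_index() above builds one shell pipeline per architecture directory: apt-ftparchive generates Packages, gzip compresses it, and a second apt-ftparchive run appends to the Release file; the pipelines are then run in parallel. A sketch of assembling that command string (tool paths and directory are illustrative, and nothing is executed here):

    def index_cmd(arch_dir, apt_ftparchive="apt-ftparchive", gzip="gzip"):
        # One shell command per per-arch deploy directory.
        return ("cd %s; PSEUDO_UNLOAD=1 %s packages . > Packages; "
                "%s -fcn Packages > Packages.gz; "
                "PSEUDO_UNLOAD=1 %s release . >> Release"
                % (arch_dir, apt_ftparchive, gzip, apt_ftparchive))

    print(index_cmd("/tmp/deploy/deb/cortexa57"))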
Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + return opkg_query(output) + + def extract(self, pkg, pkg_info): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. + + This method extracts the common parts for Opkg and Dpkg + """ + + ar_cmd = bb.utils.which(os.getenv("PATH"), "ar") + tar_cmd = bb.utils.which(os.getenv("PATH"), "tar") + pkg_path = pkg_info[pkg]["filepath"] + + if not os.path.isfile(pkg_path): + bb.fatal("Unable to extract package for '%s'." + "File %s doesn't exists" % (pkg, pkg_path)) + + tmp_dir = tempfile.mkdtemp() + current_dir = os.getcwd() + os.chdir(tmp_dir) + data_tar = 'data.tar.xz' + + try: + cmd = [ar_cmd, 'x', pkg_path] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + cmd = [tar_cmd, 'xf', data_tar] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + except OSError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename)) + + bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) + bb.utils.remove(os.path.join(tmp_dir, "debian-binary")) + bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz")) + os.chdir(current_dir) + + return tmp_dir + + def _handle_intercept_failure(self, registered_pkgs): + self.mark_packages("unpacked", registered_pkgs.split()) + +class DpkgPM(OpkgDpkgPM): + def __init__(self, d, target_rootfs, archs, base_archs, apt_conf_dir=None, deb_repo_workdir="oe-rootfs-repo", filterbydependencies=True): + super(DpkgPM, self).__init__(d, target_rootfs) + self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), deb_repo_workdir) + + create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_DEB"), "package_write_deb", filterbydependencies) + + if apt_conf_dir is None: + self.apt_conf_dir = self.d.expand("${APTCONF_TARGET}/apt") + else: + self.apt_conf_dir = apt_conf_dir + self.apt_conf_file = os.path.join(self.apt_conf_dir, "apt.conf") + self.apt_get_cmd = bb.utils.which(os.getenv('PATH'), "apt-get") + self.apt_cache_cmd = bb.utils.which(os.getenv('PATH'), "apt-cache") + + self.apt_args = d.getVar("APT_ARGS") + + self.all_arch_list = archs.split() + all_mlb_pkg_arch_list = (self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS') or "").split() + self.all_arch_list.extend(arch for arch in all_mlb_pkg_arch_list if arch not in self.all_arch_list) + + self._create_configs(archs, base_archs) + + self.indexer = DpkgIndexer(self.d, self.deploy_dir) + + def mark_packages(self, status_tag, packages=None): + """ + This function will change a package's status in /var/lib/dpkg/status file. 
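mark_packages() (docstring above, body below) rewrites the trailing state word of each matching Status: line in /var/lib/dpkg/status. The same substitution applied to a tiny fabricated status snippet, as a sketch:

    import re

    status = (
        "Package: busybox\n"
        "Version: 1.31.1\n"
        "Status: install ok installed\n"
        "\n"
        "Package: base-files\n"
        "Version: 3.0.14\n"
        "Status: install ok installed\n"
    )

    # Replace the final "installed"/"unpacked" token with a new status tag.
    marked = re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)",
                    r"Package: \1\n\2Status: \3%s" % "unpacked",
                    status)
    print(marked)   # both Status: lines now end in "unpacked"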
+ If 'packages' is None then the new_status will be applied to all + packages + """ + status_file = self.target_rootfs + "/var/lib/dpkg/status" + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + def run_pre_post_installs(self, package_name=None): + """ + Run the pre/post installs for package "package_name". If package_name is + None, then run all pre/post install scriptlets. + """ + info_dir = self.target_rootfs + "/var/lib/dpkg/info" + ControlScript = collections.namedtuple("ControlScript", ["suffix", "name", "argument"]) + control_scripts = [ + ControlScript(".preinst", "Preinstall", "install"), + ControlScript(".postinst", "Postinstall", "configure")] + status_file = self.target_rootfs + "/var/lib/dpkg/status" + installed_pkgs = [] + + with open(status_file, "r") as status: + for line in status.read().split('\n'): + m = re.match(r"^Package: (.*)", line) + if m is not None: + installed_pkgs.append(m.group(1)) + + if package_name is not None and not package_name in installed_pkgs: + return + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + for pkg_name in installed_pkgs: + for control_script in control_scripts: + p_full = os.path.join(info_dir, pkg_name + control_script.suffix) + if os.path.exists(p_full): + try: + bb.note("Executing %s for package: %s ..." % + (control_script.name.lower(), pkg_name)) + output = subprocess.check_output([p_full, control_script.argument], + stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.warn("%s for package %s failed with %d:\n%s" % + (control_script.name, pkg_name, e.returncode, + e.output.decode("utf-8"))) + failed_postinsts_abort([pkg_name], self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + + def update(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + self.deploy_dir_lock() + + cmd = "%s update" % self.apt_get_cmd + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update the package index files. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if attempt_only and len(pkgs) == 0: + return + + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s install --force-yes --allow-unauthenticated --no-remove %s" % \ + (self.apt_get_cmd, self.apt_args, ' '.join(pkgs)) + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. 
" + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output.decode("utf-8"))) + + # rename *.dpkg-new files/dirs + for root, dirs, files in os.walk(self.target_rootfs): + for dir in dirs: + new_dir = re.sub(r"\.dpkg-new", "", dir) + if dir != new_dir: + os.rename(os.path.join(root, dir), + os.path.join(root, new_dir)) + + for file in files: + new_file = re.sub(r"\.dpkg-new", "", file) + if file != new_file: + os.rename(os.path.join(root, file), + os.path.join(root, new_file)) + + + def remove(self, pkgs, with_dependencies=True): + if not pkgs: + return + + if with_dependencies: + os.environ['APT_CONFIG'] = self.apt_conf_file + cmd = "%s purge %s" % (self.apt_get_cmd, ' '.join(pkgs)) + else: + cmd = "%s --admindir=%s/var/lib/dpkg --instdir=%s" \ + " -P --force-depends %s" % \ + (bb.utils.which(os.getenv('PATH'), "dpkg"), + self.target_rootfs, self.target_rootfs, ' '.join(pkgs)) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + if feed_uris == "": + return + + sources_conf = os.path.join("%s/etc/apt/sources.list" + % self.target_rootfs) + arch_list = [] + + if feed_archs is None: + for arch in self.all_arch_list: + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + else: + arch_list = feed_archs.split() + + feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) + + with open(sources_conf, "w+") as sources_file: + for uri in feed_uris: + if arch_list: + for arch in arch_list: + bb.note('Adding dpkg channel at (%s)' % uri) + sources_file.write("deb %s/%s ./\n" % + (uri, arch)) + else: + bb.note('Adding dpkg channel at (%s)' % uri) + sources_file.write("deb %s ./\n" % uri) + + def _create_configs(self, archs, base_archs): + base_archs = re.sub(r"_", r"-", base_archs) + + if os.path.exists(self.apt_conf_dir): + bb.utils.remove(self.apt_conf_dir, True) + + bb.utils.mkdirhier(self.apt_conf_dir) + bb.utils.mkdirhier(self.apt_conf_dir + "/lists/partial/") + bb.utils.mkdirhier(self.apt_conf_dir + "/apt.conf.d/") + bb.utils.mkdirhier(self.apt_conf_dir + "/preferences.d/") + + arch_list = [] + for arch in self.all_arch_list: + if not os.path.exists(os.path.join(self.deploy_dir, arch)): + continue + arch_list.append(arch) + + with open(os.path.join(self.apt_conf_dir, "preferences"), "w+") as prefs_file: + priority = 801 + for arch in arch_list: + prefs_file.write( + "Package: *\n" + "Pin: release l=%s\n" + "Pin-Priority: %d\n\n" % (arch, priority)) + + priority += 5 + + pkg_exclude = self.d.getVar('PACKAGE_EXCLUDE') or "" + for pkg in pkg_exclude.split(): + prefs_file.write( + "Package: %s\n" + "Pin: release *\n" + "Pin-Priority: -1\n\n" % pkg) + + arch_list.reverse() + + with open(os.path.join(self.apt_conf_dir, "sources.list"), "w+") as sources_file: + for arch in arch_list: + sources_file.write("deb file:%s/ ./\n" % + os.path.join(self.deploy_dir, arch)) + + base_arch_list = base_archs.split() + multilib_variants = self.d.getVar("MULTILIB_VARIANTS"); + for variant in multilib_variants.split(): + localdata = bb.data.createCopy(self.d) + variant_tune = 
localdata.getVar("DEFAULTTUNE_virtclass-multilib-" + variant, False) + orig_arch = localdata.getVar("DPKG_ARCH") + localdata.setVar("DEFAULTTUNE", variant_tune) + variant_arch = localdata.getVar("DPKG_ARCH") + if variant_arch not in base_arch_list: + base_arch_list.append(variant_arch) + + with open(self.apt_conf_file, "w+") as apt_conf: + with open(self.d.expand("${STAGING_ETCDIR_NATIVE}/apt/apt.conf.sample")) as apt_conf_sample: + for line in apt_conf_sample.read().split("\n"): + match_arch = re.match(r" Architecture \".*\";$", line) + architectures = "" + if match_arch: + for base_arch in base_arch_list: + architectures += "\"%s\";" % base_arch + apt_conf.write(" Architectures {%s};\n" % architectures); + apt_conf.write(" Architecture \"%s\";\n" % base_archs) + else: + line = re.sub(r"#ROOTFS#", self.target_rootfs, line) + line = re.sub(r"#APTCONF#", self.apt_conf_dir, line) + apt_conf.write(line + "\n") + + target_dpkg_dir = "%s/var/lib/dpkg" % self.target_rootfs + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "info")) + + bb.utils.mkdirhier(os.path.join(target_dpkg_dir, "updates")) + + if not os.path.exists(os.path.join(target_dpkg_dir, "status")): + open(os.path.join(target_dpkg_dir, "status"), "w+").close() + if not os.path.exists(os.path.join(target_dpkg_dir, "available")): + open(os.path.join(target_dpkg_dir, "available"), "w+").close() + + def remove_packaging_data(self): + bb.utils.remove(self.target_rootfs + self.d.getVar('opkglibdir'), True) + bb.utils.remove(self.target_rootfs + "/var/lib/dpkg/", True) + + def fix_broken_dependencies(self): + os.environ['APT_CONFIG'] = self.apt_conf_file + + cmd = "%s %s --allow-unauthenticated -f install" % (self.apt_get_cmd, self.apt_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Cannot fix broken dependencies. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + def list_installed(self): + return DpkgPkgsList(self.d, self.target_rootfs).list_pkgs() + + def package_info(self, pkg): + """ + Returns a dictionary with the package info. + """ + cmd = "%s show %s" % (self.apt_cache_cmd, pkg) + pkg_info = super(DpkgPM, self).package_info(pkg, cmd) + + pkg_arch = pkg_info[pkg]["pkgarch"] + pkg_filename = pkg_info[pkg]["filename"] + pkg_info[pkg]["filepath"] = \ + os.path.join(self.deploy_dir, pkg_arch, pkg_filename) + + return pkg_info + + def extract(self, pkg): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. + """ + pkg_info = self.package_info(pkg) + if not pkg_info: + bb.fatal("Unable to get information for package '%s' while " + "trying to extract the package." 
% pkg) + + tmp_dir = super(DpkgPM, self).extract(pkg, pkg_info) + bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) + + return tmp_dir diff --git a/poky/meta/lib/oe/package_manager/deb/manifest.py b/poky/meta/lib/oe/package_manager/deb/manifest.py new file mode 100644 index 000000000..0b1203664 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/deb/manifest.py @@ -0,0 +1,26 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.manifest import Manifest + +class DpkgManifest(Manifest): + def create_initial(self): + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + pkg_list = self.d.getVar(var) + + if pkg_list is None: + continue + + for pkg in pkg_list.split(): + manifest.write("%s,%s\n" % + (self.var_maps[self.manifest_type][var], pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + pass diff --git a/poky/meta/lib/oe/package_manager/deb/rootfs.py b/poky/meta/lib/oe/package_manager/deb/rootfs.py new file mode 100644 index 000000000..819f67eda --- /dev/null +++ b/poky/meta/lib/oe/package_manager/deb/rootfs.py @@ -0,0 +1,210 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import shutil +from oe.rootfs import Rootfs +from oe.manifest import Manifest +from oe.utils import execute_pre_post_process +from oe.package_manager.deb.manifest import DpkgManifest +from oe.package_manager.deb import DpkgPM + +class DpkgOpkgRootfs(Rootfs): + def __init__(self, d, progress_reporter=None, logcatcher=None): + super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher) + + def _get_pkgs_postinsts(self, status_file): + def _get_pkg_depends_list(pkg_depends): + pkg_depends_list = [] + # filter version requirements like libc (>= 1.1) + for dep in pkg_depends.split(', '): + m_dep = re.match(r"^(.*) \(.*\)$", dep) + if m_dep: + dep = m_dep.group(1) + pkg_depends_list.append(dep) + + return pkg_depends_list + + pkgs = {} + pkg_name = "" + pkg_status_match = False + pkg_depends = "" + + with open(status_file) as status: + data = status.read() + status.close() + for line in data.split('\n'): + m_pkg = re.match(r"^Package: (.*)", line) + m_status = re.match(r"^Status:.*unpacked", line) + m_depends = re.match(r"^Depends: (.*)", line) + + #Only one of m_pkg, m_status or m_depends is not None at time + #If m_pkg is not None, we started a new package + if m_pkg is not None: + #Get Package name + pkg_name = m_pkg.group(1) + #Make sure we reset other variables + pkg_status_match = False + pkg_depends = "" + elif m_status is not None: + #New status matched + pkg_status_match = True + elif m_depends is not None: + #New depends macthed + pkg_depends = m_depends.group(1) + else: + pass + + #Now check if we can process package depends and postinst + if "" != pkg_name and pkg_status_match: + pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) + else: + #Not enough information + pass + + # remove package dependencies not in postinsts + pkg_names = list(pkgs.keys()) + for pkg_name in pkg_names: + deps = pkgs[pkg_name][:] + + for d in deps: + if d not in pkg_names: + pkgs[pkg_name].remove(d) + + return pkgs + + def _get_delayed_postinsts_common(self, status_file): + def _dep_resolve(graph, node, resolved, seen): + seen.append(node) + + for edge in graph[node]: + if edge not in resolved: + if edge in seen: + raise RuntimeError("Packages %s and %s have " \ + "a circular dependency in postinsts scripts." 
\ + % (node, edge)) + _dep_resolve(graph, edge, resolved, seen) + + resolved.append(node) + + pkg_list = [] + + pkgs = None + if not self.d.getVar('PACKAGE_INSTALL').strip(): + bb.note("Building empty image") + else: + pkgs = self._get_pkgs_postinsts(status_file) + if pkgs: + root = "__packagegroup_postinst__" + pkgs[root] = list(pkgs.keys()) + _dep_resolve(pkgs, root, pkg_list, []) + pkg_list.remove(root) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir): + if bb.utils.contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + return + num = 0 + for p in self._get_delayed_postinsts(): + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + +class DpkgRootfs(DpkgOpkgRootfs): + def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): + super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher) + self.log_check_regex = '^E:' + self.log_check_expected_regexes = \ + [ + "^E: Unmet dependencies." + ] + + bb.utils.remove(self.image_rootfs, True) + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) + self.manifest = DpkgManifest(d, manifest_dir) + self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'), + d.getVar('PACKAGE_ARCHS'), + d.getVar('DPKG_ARCH')) + + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS') + deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS') + + alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives") + bb.utils.mkdirhier(alt_dir) + + # update PM index files + self.pm.write_index() + + execute_pre_post_process(self.d, deb_pre_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + # Don't support incremental, so skip that + self.progress_reporter.next_stage() + + self.pm.update() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + self.pm.fix_broken_dependencies() + + if self.progress_reporter: + # Don't support attemptonly, so skip that + self.progress_reporter.next_stage() + self.progress_reporter.next_stage() + + self.pm.install_complementary() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self._setup_dbg_rootfs(['/var/lib/dpkg']) + + self.pm.fix_broken_dependencies() + + self.pm.mark_packages("installed") + + self.pm.run_pre_post_installs() + + execute_pre_post_process(self.d, deb_post_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + @staticmethod + def _depends_list(): + return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS'] + + def _get_delayed_postinsts(self): + status_file = self.image_rootfs + "/var/lib/dpkg/status" + return self._get_delayed_postinsts_common(status_file) + + def _save_postinsts(self): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info") + return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) + + def _log_check(self): + 
self._log_check_warn() + self._log_check_error() + + def _cleanup(self): + pass diff --git a/poky/meta/lib/oe/package_manager/deb/sdk.py b/poky/meta/lib/oe/package_manager/deb/sdk.py new file mode 100644 index 000000000..b25eb70b0 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/deb/sdk.py @@ -0,0 +1,96 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import glob +import shutil +from oe.utils import execute_pre_post_process +from oe.sdk import Sdk +from oe.manifest import Manifest +from oe.package_manager.deb import DpkgPM + +class DpkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(DpkgSdk, self).__init__(d, manifest_dir) + + self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt") + self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk") + + from oe.package_manager.deb.manifest import DpkgManifest + + self.target_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = DpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + deb_repo_workdir = "oe-sdk-repo" + if "sdk_ext" in d.getVar("BB_RUNTASK"): + deb_repo_workdir = "oe-sdk-ext-repo" + + self.target_pm = DpkgPM(d, self.sdk_target_sysroot, + self.d.getVar("PACKAGE_ARCHS"), + self.d.getVar("DPKG_ARCH"), + self.target_conf_dir, + deb_repo_workdir=deb_repo_workdir) + + self.host_pm = DpkgPM(d, self.sdk_host_sysroot, + self.d.getVar("SDK_PACKAGE_ARCHS"), + self.d.getVar("DEB_SDK_ARCH"), + self.host_conf_dir, + deb_repo_workdir=deb_repo_workdir) + + def _copy_apt_dir_to(self, dst_dir): + staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE") + + self.remove(dst_dir, True) + + shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.write_index() + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) + + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) + + self.target_pm.run_intercepts(populate_sdk='target') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) + + self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + self.install_locales(self.host_pm) + + self.host_pm.run_intercepts(populate_sdk='host') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) + + self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path, + "etc", "apt")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.host_pm.remove_packaging_data() + + native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + "var", "lib", "dpkg") + self.mkdirhier(native_dpkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")): + self.movefile(f, native_dpkg_state_dir) + self.remove(os.path.join(self.sdk_output, 
"var"), True) diff --git a/poky/meta/lib/oe/package_manager/ipk/__init__.py b/poky/meta/lib/oe/package_manager/ipk/__init__.py new file mode 100644 index 000000000..9603993a5 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/ipk/__init__.py @@ -0,0 +1,507 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import shutil +import subprocess +from oe.package_manager import * + +class OpkgIndexer(Indexer): + def write_index(self): + arch_vars = ["ALL_MULTILIB_PACKAGE_ARCHS", + "SDK_PACKAGE_ARCHS", + ] + + opkg_index_cmd = bb.utils.which(os.getenv('PATH'), "opkg-make-index") + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) + else: + signer = None + + if not os.path.exists(os.path.join(self.deploy_dir, "Packages")): + open(os.path.join(self.deploy_dir, "Packages"), "w").close() + + index_cmds = set() + index_sign_files = set() + for arch_var in arch_vars: + archs = self.d.getVar(arch_var) + if archs is None: + continue + + for arch in archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + pkgs_file = os.path.join(pkgs_dir, "Packages") + + if not os.path.isdir(pkgs_dir): + continue + + if not os.path.exists(pkgs_file): + open(pkgs_file, "w").close() + + index_cmds.add('%s --checksum md5 --checksum sha256 -r %s -p %s -m %s' % + (opkg_index_cmd, pkgs_file, pkgs_file, pkgs_dir)) + + index_sign_files.add(pkgs_file) + + if len(index_cmds) == 0: + bb.note("There are no packages in %s!" % self.deploy_dir) + return + + oe.utils.multiprocess_launch(create_index, index_cmds, self.d) + + if signer: + feed_sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') + is_ascii_sig = (feed_sig_type.upper() != "BIN") + for f in index_sign_files: + signer.detach_sign(f, + self.d.getVar('PACKAGE_FEED_GPG_NAME'), + self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), + armor=is_ascii_sig) + +class OpkgPkgsList(PkgsList): + def __init__(self, d, rootfs_dir, config_file): + super(OpkgPkgsList, self).__init__(d, rootfs_dir) + + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") + self.opkg_args = "-f %s -o %s " % (config_file, rootfs_dir) + self.opkg_args += self.d.getVar("OPKG_ARGS") + + def list_pkgs(self, format=None): + cmd = "%s %s status" % (self.opkg_cmd, self.opkg_args) + + # opkg returns success even when it printed some + # "Collected errors:" report to stderr. Mixing stderr into + # stdout then leads to random failures later on when + # parsing the output. To avoid this we need to collect both + # output streams separately and check for empty stderr. + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + cmd_output, cmd_stderr = p.communicate() + cmd_output = cmd_output.decode("utf-8") + cmd_stderr = cmd_stderr.decode("utf-8") + if p.returncode or cmd_stderr: + bb.fatal("Cannot get the installed packages list. Command '%s' " + "returned %d and stderr:\n%s" % (cmd, p.returncode, cmd_stderr)) + + return opkg_query(cmd_output) + + + +class OpkgDpkgPM(PackageManager): + def __init__(self, d, target_rootfs): + """ + This is an abstract class. Do not instantiate this directly. + """ + super(OpkgDpkgPM, self).__init__(d, target_rootfs) + + def package_info(self, pkg, cmd): + """ + Returns a dictionary with the package info. 
+ + This method extracts the common parts for Opkg and Dpkg + """ + + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Unable to list available packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + return opkg_query(output) + + def extract(self, pkg, pkg_info): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. + + This method extracts the common parts for Opkg and Dpkg + """ + + ar_cmd = bb.utils.which(os.getenv("PATH"), "ar") + tar_cmd = bb.utils.which(os.getenv("PATH"), "tar") + pkg_path = pkg_info[pkg]["filepath"] + + if not os.path.isfile(pkg_path): + bb.fatal("Unable to extract package for '%s'." + "File %s doesn't exists" % (pkg, pkg_path)) + + tmp_dir = tempfile.mkdtemp() + current_dir = os.getcwd() + os.chdir(tmp_dir) + data_tar = 'data.tar.xz' + + try: + cmd = [ar_cmd, 'x', pkg_path] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + cmd = [tar_cmd, 'xf', data_tar] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s" % (pkg_path, ' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + except OSError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s at %s" % (pkg_path, ' '.join(cmd), e.errno, e.strerror, e.filename)) + + bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) + bb.utils.remove(os.path.join(tmp_dir, "debian-binary")) + bb.utils.remove(os.path.join(tmp_dir, "control.tar.gz")) + os.chdir(current_dir) + + return tmp_dir + + def _handle_intercept_failure(self, registered_pkgs): + self.mark_packages("unpacked", registered_pkgs.split()) + +class OpkgPM(OpkgDpkgPM): + def __init__(self, d, target_rootfs, config_file, archs, task_name='target', ipk_repo_workdir="oe-rootfs-repo", filterbydependencies=True, prepare_index=True): + super(OpkgPM, self).__init__(d, target_rootfs) + + self.config_file = config_file + self.pkg_archs = archs + self.task_name = task_name + + self.deploy_dir = oe.path.join(self.d.getVar('WORKDIR'), ipk_repo_workdir) + self.deploy_lock_file = os.path.join(self.deploy_dir, "deploy.lock") + self.opkg_cmd = bb.utils.which(os.getenv('PATH'), "opkg") + self.opkg_args = "--volatile-cache -f %s -t %s -o %s " % (self.config_file, self.d.expand('${T}/ipktemp/') ,target_rootfs) + self.opkg_args += self.d.getVar("OPKG_ARGS") + + if prepare_index: + create_packages_dir(self.d, self.deploy_dir, d.getVar("DEPLOY_DIR_IPK"), "package_write_ipk", filterbydependencies) + + opkg_lib_dir = self.d.getVar('OPKGLIBDIR') + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + + self.opkg_dir = os.path.join(target_rootfs, opkg_lib_dir, "opkg") + + bb.utils.mkdirhier(self.opkg_dir) + + self.saved_opkg_dir = self.d.expand('${T}/saved/%s' % self.task_name) + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + self.from_feeds = (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") == "1" + if self.from_feeds: + self._create_custom_config() + else: + self._create_config() + + self.indexer = OpkgIndexer(self.d, self.deploy_dir) + + def mark_packages(self, status_tag, packages=None): + """ + This function will change a package's status 
in /var/lib/opkg/status file. + If 'packages' is None then the new_status will be applied to all + packages + """ + status_file = os.path.join(self.opkg_dir, "status") + + with open(status_file, "r") as sf: + with open(status_file + ".tmp", "w+") as tmp_sf: + if packages is None: + tmp_sf.write(re.sub(r"Package: (.*?)\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)", + r"Package: \1\n\2Status: \3%s" % status_tag, + sf.read())) + else: + if type(packages).__name__ != "list": + raise TypeError("'packages' should be a list object") + + status = sf.read() + for pkg in packages: + status = re.sub(r"Package: %s\n((?:[^\n]+\n)*?)Status: (.*)(?:unpacked|installed)" % pkg, + r"Package: %s\n\1Status: \2%s" % (pkg, status_tag), + status) + + tmp_sf.write(status) + + os.rename(status_file + ".tmp", status_file) + + def _create_custom_config(self): + bb.note("Building from feeds activated!") + + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + for line in (self.d.getVar('IPK_FEED_URIS') or "").split(): + feed_match = re.match(r"^[ \t]*(.*)##([^ \t]*)[ \t]*$", line) + + if feed_match is not None: + feed_name = feed_match.group(1) + feed_uri = feed_match.group(2) + + bb.note("Add %s feed with URL %s" % (feed_name, feed_uri)) + + config_file.write("src/gz %s %s\n" % (feed_name, feed_uri)) + + """ + Allow to use package deploy directory contents as quick devel-testing + feed. This creates individual feed configs for each arch subdir of those + specified as compatible for the current machine. + NOTE: Development-helper feature, NOT a full-fledged feed. + """ + if (self.d.getVar('FEED_DEPLOYDIR_BASE_URI') or "") != "": + for arch in self.pkg_archs.split(): + cfg_file_name = os.path.join(self.target_rootfs, + self.d.getVar("sysconfdir"), + "opkg", + "local-%s-feed.conf" % arch) + + with open(cfg_file_name, "w+") as cfg_file: + cfg_file.write("src/gz local-%s %s/%s" % + (arch, + self.d.getVar('FEED_DEPLOYDIR_BASE_URI'), + arch)) + + if self.d.getVar('OPKGLIBDIR') != '/var/lib': + # There is no command line option for this anymore, we need to add + # info_dir and status_file to config file, if OPKGLIBDIR doesn't have + # the default value of "/var/lib" as defined in opkg: + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" + cfg_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) + cfg_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) + cfg_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) + + + def _create_config(self): + with open(self.config_file, "w+") as config_file: + priority = 1 + for arch in self.pkg_archs.split(): + config_file.write("arch %s %d\n" % (arch, priority)) + priority += 5 + + config_file.write("src oe file:%s\n" % self.deploy_dir) + + for arch in self.pkg_archs.split(): + pkgs_dir = os.path.join(self.deploy_dir, arch) + if os.path.isdir(pkgs_dir): + config_file.write("src oe-%s file:%s\n" % + (arch, pkgs_dir)) + + if self.d.getVar('OPKGLIBDIR') != '/var/lib': + # There is no command line option for this anymore, we need to add + # info_dir and status_file to config file, if OPKGLIBDIR doesn't have + # the default value of 
"/var/lib" as defined in opkg: + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_LISTS_DIR VARDIR "/lib/opkg/lists" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_INFO_DIR VARDIR "/lib/opkg/info" + # libopkg/opkg_conf.h:#define OPKG_CONF_DEFAULT_STATUS_FILE VARDIR "/lib/opkg/status" + config_file.write("option info_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'info')) + config_file.write("option lists_dir %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'lists')) + config_file.write("option status_file %s\n" % os.path.join(self.d.getVar('OPKGLIBDIR'), 'opkg', 'status')) + + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + if feed_uris == "": + return + + rootfs_config = os.path.join('%s/etc/opkg/base-feeds.conf' + % self.target_rootfs) + + os.makedirs('%s/etc/opkg' % self.target_rootfs, exist_ok=True) + + feed_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) + archs = self.pkg_archs.split() if feed_archs is None else feed_archs.split() + + with open(rootfs_config, "w+") as config_file: + uri_iterator = 0 + for uri in feed_uris: + if archs: + for arch in archs: + if (feed_archs is None) and (not os.path.exists(oe.path.join(self.deploy_dir, arch))): + continue + bb.note('Adding opkg feed url-%s-%d (%s)' % + (arch, uri_iterator, uri)) + config_file.write("src/gz uri-%s-%d %s/%s\n" % + (arch, uri_iterator, uri, arch)) + else: + bb.note('Adding opkg feed url-%d (%s)' % + (uri_iterator, uri)) + config_file.write("src/gz uri-%d %s\n" % + (uri_iterator, uri)) + + uri_iterator += 1 + + def update(self): + self.deploy_dir_lock() + + cmd = "%s %s update" % (self.opkg_cmd, self.opkg_args) + + try: + subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + self.deploy_dir_unlock() + bb.fatal("Unable to update the package index files. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + self.deploy_dir_unlock() + + def install(self, pkgs, attempt_only=False): + if not pkgs: + return + + cmd = "%s %s" % (self.opkg_cmd, self.opkg_args) + for exclude in (self.d.getVar("PACKAGE_EXCLUDE") or "").split(): + cmd += " --add-exclude %s" % exclude + for bad_recommendation in (self.d.getVar("BAD_RECOMMENDATIONS") or "").split(): + cmd += " --add-ignore-recommends %s" % bad_recommendation + cmd += " install " + cmd += " ".join(pkgs) + + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + try: + bb.note("Installing the following packages: %s" % ' '.join(pkgs)) + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + failed_pkgs = [] + for line in output.split('\n'): + if line.endswith("configuration required on target."): + bb.warn(line) + failed_pkgs.append(line.split(".")[0]) + if failed_pkgs: + failed_postinsts_abort(failed_pkgs, self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + except subprocess.CalledProcessError as e: + (bb.fatal, bb.warn)[attempt_only]("Unable to install packages. 
" + "Command '%s' returned %d:\n%s" % + (cmd, e.returncode, e.output.decode("utf-8"))) + + def remove(self, pkgs, with_dependencies=True): + if not pkgs: + return + + if with_dependencies: + cmd = "%s %s --force-remove --force-removal-of-dependent-packages remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + else: + cmd = "%s %s --force-depends remove %s" % \ + (self.opkg_cmd, self.opkg_args, ' '.join(pkgs)) + + try: + bb.note(cmd) + output = subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to remove packages. Command '%s' " + "returned %d:\n%s" % (e.cmd, e.returncode, e.output.decode("utf-8"))) + + def write_index(self): + self.deploy_dir_lock() + + result = self.indexer.write_index() + + self.deploy_dir_unlock() + + if result is not None: + bb.fatal(result) + + def remove_packaging_data(self): + bb.utils.remove(self.opkg_dir, True) + # create the directory back, it's needed by PM lock + bb.utils.mkdirhier(self.opkg_dir) + + def remove_lists(self): + if not self.from_feeds: + bb.utils.remove(os.path.join(self.opkg_dir, "lists"), True) + + def list_installed(self): + return OpkgPkgsList(self.d, self.target_rootfs, self.config_file).list_pkgs() + + def dummy_install(self, pkgs): + """ + The following function dummy installs pkgs and returns the log of output. + """ + if len(pkgs) == 0: + return + + # Create an temp dir as opkg root for dummy installation + temp_rootfs = self.d.expand('${T}/opkg') + opkg_lib_dir = self.d.getVar('OPKGLIBDIR') + if opkg_lib_dir[0] == "/": + opkg_lib_dir = opkg_lib_dir[1:] + temp_opkg_dir = os.path.join(temp_rootfs, opkg_lib_dir, 'opkg') + bb.utils.mkdirhier(temp_opkg_dir) + + opkg_args = "-f %s -o %s " % (self.config_file, temp_rootfs) + opkg_args += self.d.getVar("OPKG_ARGS") + + cmd = "%s %s update" % (self.opkg_cmd, opkg_args) + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to update. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + # Dummy installation + cmd = "%s %s --noaction install %s " % (self.opkg_cmd, + opkg_args, + ' '.join(pkgs)) + try: + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.fatal("Unable to dummy install packages. Command '%s' " + "returned %d:\n%s" % (cmd, e.returncode, e.output.decode("utf-8"))) + + bb.utils.remove(temp_rootfs, True) + + return output + + def backup_packaging_data(self): + # Save the opkglib for increment ipk image generation + if os.path.exists(self.saved_opkg_dir): + bb.utils.remove(self.saved_opkg_dir, True) + shutil.copytree(self.opkg_dir, + self.saved_opkg_dir, + symlinks=True) + + def recover_packaging_data(self): + # Move the opkglib back + if os.path.exists(self.saved_opkg_dir): + if os.path.exists(self.opkg_dir): + bb.utils.remove(self.opkg_dir, True) + + bb.note('Recover packaging data') + shutil.copytree(self.saved_opkg_dir, + self.opkg_dir, + symlinks=True) + + def package_info(self, pkg): + """ + Returns a dictionary with the package info. 
+ """ + cmd = "%s %s info %s" % (self.opkg_cmd, self.opkg_args, pkg) + pkg_info = super(OpkgPM, self).package_info(pkg, cmd) + + pkg_arch = pkg_info[pkg]["arch"] + pkg_filename = pkg_info[pkg]["filename"] + pkg_info[pkg]["filepath"] = \ + os.path.join(self.deploy_dir, pkg_arch, pkg_filename) + + return pkg_info + + def extract(self, pkg): + """ + Returns the path to a tmpdir where resides the contents of a package. + + Deleting the tmpdir is responsability of the caller. + """ + pkg_info = self.package_info(pkg) + if not pkg_info: + bb.fatal("Unable to get information for package '%s' while " + "trying to extract the package." % pkg) + + tmp_dir = super(OpkgPM, self).extract(pkg, pkg_info) + bb.utils.remove(os.path.join(tmp_dir, "data.tar.xz")) + + return tmp_dir diff --git a/poky/meta/lib/oe/package_manager/ipk/manifest.py b/poky/meta/lib/oe/package_manager/ipk/manifest.py new file mode 100644 index 000000000..69676903a --- /dev/null +++ b/poky/meta/lib/oe/package_manager/ipk/manifest.py @@ -0,0 +1,73 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.manifest import Manifest + +class OpkgManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. + """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var)) + if split_pkgs is not None: + pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) + else: + pkg_list = self.d.getVar(var) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) + + for pkg_type in sorted(pkgs): + for pkg in sorted(pkgs[pkg_type].split()): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + if not os.path.exists(self.initial_manifest): + self.create_initial() + + initial_manifest = self.parse_initial_manifest() + pkgs_to_install = list() + for pkg_type in initial_manifest: + pkgs_to_install += initial_manifest[pkg_type] + if len(pkgs_to_install) == 0: + return + + output = pm.dummy_install(pkgs_to_install) + + with open(self.full_manifest, 'w+') as manifest: + pkg_re = re.compile('^Installing ([^ ]+) [^ ].*') + for line in set(output.split('\n')): + m = pkg_re.match(line) + if m: + manifest.write(m.group(1) + '\n') + + return diff --git a/poky/meta/lib/oe/package_manager/ipk/rootfs.py b/poky/meta/lib/oe/package_manager/ipk/rootfs.py new file mode 100644 index 000000000..63b4a59c4 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/ipk/rootfs.py @@ -0,0 +1,387 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import re +import filecmp +import shutil +from oe.rootfs import Rootfs +from oe.manifest import Manifest +from oe.utils import execute_pre_post_process +from oe.package_manager.ipk.manifest import OpkgManifest +from oe.package_manager.ipk import OpkgPM + +class DpkgOpkgRootfs(Rootfs): + def __init__(self, d, progress_reporter=None, logcatcher=None): + super(DpkgOpkgRootfs, self).__init__(d, 
progress_reporter, logcatcher) + + def _get_pkgs_postinsts(self, status_file): + def _get_pkg_depends_list(pkg_depends): + pkg_depends_list = [] + # filter version requirements like libc (>= 1.1) + for dep in pkg_depends.split(', '): + m_dep = re.match(r"^(.*) \(.*\)$", dep) + if m_dep: + dep = m_dep.group(1) + pkg_depends_list.append(dep) + + return pkg_depends_list + + pkgs = {} + pkg_name = "" + pkg_status_match = False + pkg_depends = "" + + with open(status_file) as status: + data = status.read() + status.close() + for line in data.split('\n'): + m_pkg = re.match(r"^Package: (.*)", line) + m_status = re.match(r"^Status:.*unpacked", line) + m_depends = re.match(r"^Depends: (.*)", line) + + #Only one of m_pkg, m_status or m_depends is not None at time + #If m_pkg is not None, we started a new package + if m_pkg is not None: + #Get Package name + pkg_name = m_pkg.group(1) + #Make sure we reset other variables + pkg_status_match = False + pkg_depends = "" + elif m_status is not None: + #New status matched + pkg_status_match = True + elif m_depends is not None: + #New depends macthed + pkg_depends = m_depends.group(1) + else: + pass + + #Now check if we can process package depends and postinst + if "" != pkg_name and pkg_status_match: + pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) + else: + #Not enough information + pass + + # remove package dependencies not in postinsts + pkg_names = list(pkgs.keys()) + for pkg_name in pkg_names: + deps = pkgs[pkg_name][:] + + for d in deps: + if d not in pkg_names: + pkgs[pkg_name].remove(d) + + return pkgs + + def _get_delayed_postinsts_common(self, status_file): + def _dep_resolve(graph, node, resolved, seen): + seen.append(node) + + for edge in graph[node]: + if edge not in resolved: + if edge in seen: + raise RuntimeError("Packages %s and %s have " \ + "a circular dependency in postinsts scripts." 
\ + % (node, edge)) + _dep_resolve(graph, edge, resolved, seen) + + resolved.append(node) + + pkg_list = [] + + pkgs = None + if not self.d.getVar('PACKAGE_INSTALL').strip(): + bb.note("Building empty image") + else: + pkgs = self._get_pkgs_postinsts(status_file) + if pkgs: + root = "__packagegroup_postinst__" + pkgs[root] = list(pkgs.keys()) + _dep_resolve(pkgs, root, pkg_list, []) + pkg_list.remove(root) + + if len(pkg_list) == 0: + return None + + return pkg_list + + def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir): + if bb.utils.contains("IMAGE_FEATURES", "package-management", + True, False, self.d): + return + num = 0 + for p in self._get_delayed_postinsts(): + bb.utils.mkdirhier(dst_postinst_dir) + + if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): + shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), + os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) + + num += 1 + +class OpkgRootfs(DpkgOpkgRootfs): + def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): + super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher) + self.log_check_regex = '(exit 1|Collected errors)' + + self.manifest = OpkgManifest(d, manifest_dir) + self.opkg_conf = self.d.getVar("IPKGCONF_TARGET") + self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS") + + self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or "" + if self._remove_old_rootfs(): + bb.utils.remove(self.image_rootfs, True) + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + else: + self.pm = OpkgPM(d, + self.image_rootfs, + self.opkg_conf, + self.pkg_archs) + self.pm.recover_packaging_data() + + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) + + def _prelink_file(self, root_dir, filename): + bb.note('prelink %s in %s' % (filename, root_dir)) + prelink_cfg = oe.path.join(root_dir, + self.d.expand('${sysconfdir}/prelink.conf')) + if not os.path.exists(prelink_cfg): + shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'), + prelink_cfg) + + cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink') + self._exec_shell_cmd([cmd_prelink, + '--root', + root_dir, + '-amR', + '-N', + '-c', + self.d.expand('${sysconfdir}/prelink.conf')]) + + ''' + Compare two files with the same key twice to see if they are equal. + If they are not equal, it means they are duplicated and come from + different packages. + 1st: Comapre them directly; + 2nd: While incremental image creation is enabled, one of the + files could be probaly prelinked in the previous image + creation and the file has been changed, so we need to + prelink the other one and compare them. + ''' + def _file_equal(self, key, f1, f2): + + # Both of them are not prelinked + if filecmp.cmp(f1, f2): + return True + + if bb.data.inherits_class('image-prelink', self.d): + if self.image_rootfs not in f1: + self._prelink_file(f1.replace(key, ''), f1) + + if self.image_rootfs not in f2: + self._prelink_file(f2.replace(key, ''), f2) + + # Both of them are prelinked + if filecmp.cmp(f1, f2): + return True + + # Not equal + return False + + """ + This function was reused from the old implementation. + See commit: "image.bbclass: Added variables for multilib support." by + Lianhao Lu. 
+ """ + def _multilib_sanity_test(self, dirs): + + allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP") + if allow_replace is None: + allow_replace = "" + + allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace)) + error_prompt = "Multilib check error:" + + files = {} + for dir in dirs: + for root, subfolders, subfiles in os.walk(dir): + for file in subfiles: + item = os.path.join(root, file) + key = str(os.path.join("/", os.path.relpath(item, dir))) + + valid = True + if key in files: + #check whether the file is allow to replace + if allow_rep.match(key): + valid = True + else: + if os.path.exists(files[key]) and \ + os.path.exists(item) and \ + not self._file_equal(key, files[key], item): + valid = False + bb.fatal("%s duplicate files %s %s is not the same\n" % + (error_prompt, item, files[key])) + + #pass the check, add to list + if valid: + files[key] = item + + def _multilib_test_install(self, pkgs): + ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS") + bb.utils.mkdirhier(ml_temp) + + dirs = [self.image_rootfs] + + for variant in self.d.getVar("MULTILIB_VARIANTS").split(): + ml_target_rootfs = os.path.join(ml_temp, variant) + + bb.utils.remove(ml_target_rootfs, True) + + ml_opkg_conf = os.path.join(ml_temp, + variant + "-" + os.path.basename(self.opkg_conf)) + + ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False) + + ml_pm.update() + ml_pm.install(pkgs) + + dirs.append(ml_target_rootfs) + + self._multilib_sanity_test(dirs) + + ''' + While ipk incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the old full manifest in previous existing + image and the new full manifest in the current image. + ''' + def _remove_extra_packages(self, pkgs_initial_install): + if self.inc_opkg_image_gen == "1": + # Parse full manifest in previous existing image creation session + old_full_manifest = self.manifest.parse_full_manifest() + + # Create full manifest for the current image session, the old one + # will be replaced by the new one. + self.manifest.create_full(self.pm) + + # Parse full manifest in current image creation session + new_full_manifest = self.manifest.parse_full_manifest() + + pkg_to_remove = list() + for pkg in old_full_manifest: + if pkg not in new_full_manifest: + pkg_to_remove.append(pkg) + + if pkg_to_remove != []: + bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + ''' + Compare with previous existing image creation, if some conditions + triggered, the previous old image should be removed. + The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS + and BAD_RECOMMENDATIONS' has been changed. 
+ ''' + def _remove_old_rootfs(self): + if self.inc_opkg_image_gen != "1": + return True + + vars_list_file = self.d.expand('${T}/vars_list') + + old_vars_list = "" + if os.path.exists(vars_list_file): + old_vars_list = open(vars_list_file, 'r+').read() + + new_vars_list = '%s:%s:%s\n' % \ + ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(), + (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(), + (self.d.getVar('PACKAGE_EXCLUDE') or '').strip()) + open(vars_list_file, 'w+').write(new_vars_list) + + if old_vars_list != new_vars_list: + return True + + return False + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS') + opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS') + + # update PM index files + self.pm.write_index() + + execute_pre_post_process(self.d, opkg_pre_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + # Steps are a bit different in order, skip next + self.progress_reporter.next_stage() + + self.pm.update() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + if self.inc_opkg_image_gen == "1": + self._remove_extra_packages(pkgs_to_install) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + # For multilib, we perform a sanity test before final install + # If sanity test fails, it will automatically do a bb.fatal() + # and the installation will stop + if pkg_type == Manifest.PKG_TYPE_MULTILIB: + self._multilib_test_install(pkgs_to_install[pkg_type]) + + self.pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install_complementary() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + opkg_lib_dir = self.d.getVar('OPKGLIBDIR') + opkg_dir = os.path.join(opkg_lib_dir, 'opkg') + self._setup_dbg_rootfs([opkg_dir]) + + execute_pre_post_process(self.d, opkg_post_process_cmds) + + if self.inc_opkg_image_gen == "1": + self.pm.backup_packaging_data() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + @staticmethod + def _depends_list(): + return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR'] + + def _get_delayed_postinsts(self): + status_file = os.path.join(self.image_rootfs, + self.d.getVar('OPKGLIBDIR').strip('/'), + "opkg", "status") + return self._get_delayed_postinsts_common(status_file) + + def _save_postinsts(self): + dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") + src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") + return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) + + def _log_check(self): + self._log_check_warn() + self._log_check_error() + + def _cleanup(self): + self.pm.remove_lists() diff --git a/poky/meta/lib/oe/package_manager/ipk/sdk.py b/poky/meta/lib/oe/package_manager/ipk/sdk.py new file mode 100644 index 000000000..47c0a92c1 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/ipk/sdk.py @@ -0,0 +1,96 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import glob +import shutil +from oe.utils import execute_pre_post_process +from oe.sdk import Sdk +from oe.manifest import Manifest +from oe.package_manager.ipk import OpkgPM 
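# A note on an idiom that recurs throughout these package-manager classes:
# boolean indexing such as (bb.fatal, bb.warn)[attempt_only](...) and
# [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]. Because bool is
# a subclass of int, False selects element 0 and True selects element 1, so
# the expression picks the "soft" behaviour only when the condition holds.
# A minimal stand-alone sketch of the same idiom (illustrative only; fatal()
# and warn() here are stand-ins, not the real bb API):
def fatal(msg): raise SystemExit("FATAL: " + msg)
def warn(msg): print("WARN: " + msg)

def report_install_failure(msg, attempt_only=False):
    # equivalent to: (warn if attempt_only else fatal)(msg)
    (fatal, warn)[attempt_only](msg)

report_install_failure("Unable to install packages.", attempt_only=True)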
+ +class OpkgSdk(Sdk): + def __init__(self, d, manifest_dir=None): + super(OpkgSdk, self).__init__(d, manifest_dir) + + self.target_conf = self.d.getVar("IPKGCONF_TARGET") + self.host_conf = self.d.getVar("IPKGCONF_SDK") + + from oe.package_manager.ipk.manifest import OpkgManifest + self.target_manifest = OpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = OpkgManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + ipk_repo_workdir = "oe-sdk-repo" + if "sdk_ext" in d.getVar("BB_RUNTASK"): + ipk_repo_workdir = "oe-sdk-ext-repo" + + self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf, + self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"), + ipk_repo_workdir=ipk_repo_workdir) + + self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf, + self.d.getVar("SDK_PACKAGE_ARCHS"), + ipk_repo_workdir=ipk_repo_workdir) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1": + pm.write_index() + + pm.update() + + for pkg_type in self.install_order: + if pkg_type in pkgs_to_install: + pm.install(pkgs_to_install[pkg_type], + [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) + + def _populate(self): + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) + + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) + + self.target_pm.run_intercepts(populate_sdk='target') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + self.install_locales(self.host_pm) + + self.host_pm.run_intercepts(populate_sdk='host') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.host_pm.remove_packaging_data() + + target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) + host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) + + self.mkdirhier(target_sysconfdir) + shutil.copy(self.target_conf, target_sysconfdir) + os.chmod(os.path.join(target_sysconfdir, + os.path.basename(self.target_conf)), 0o644) + + self.mkdirhier(host_sysconfdir) + shutil.copy(self.host_conf, host_sysconfdir) + os.chmod(os.path.join(host_sysconfdir, + os.path.basename(self.host_conf)), 0o644) + + native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk').strip('/'), + "lib", "opkg") + self.mkdirhier(native_opkg_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): + self.movefile(f, native_opkg_state_dir) + + self.remove(os.path.join(self.sdk_output, "var"), True) diff --git a/poky/meta/lib/oe/package_manager/rpm/__init__.py b/poky/meta/lib/oe/package_manager/rpm/__init__.py new file mode 100644 index 000000000..c91f61ae5 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/rpm/__init__.py @@ -0,0 +1,404 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import shutil +import subprocess +from oe.package_manager import * + +class RpmIndexer(Indexer): + def 
write_index(self): + self.do_write_index(self.deploy_dir) + + def do_write_index(self, deploy_dir): + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + signer = get_signer(self.d, self.d.getVar('PACKAGE_FEED_GPG_BACKEND')) + else: + signer = None + + createrepo_c = bb.utils.which(os.environ['PATH'], "createrepo_c") + result = create_index("%s --update -q %s" % (createrepo_c, deploy_dir)) + if result: + bb.fatal(result) + + # Sign repomd + if signer: + sig_type = self.d.getVar('PACKAGE_FEED_GPG_SIGNATURE_TYPE') + is_ascii_sig = (sig_type.upper() != "BIN") + signer.detach_sign(os.path.join(deploy_dir, 'repodata', 'repomd.xml'), + self.d.getVar('PACKAGE_FEED_GPG_NAME'), + self.d.getVar('PACKAGE_FEED_GPG_PASSPHRASE_FILE'), + armor=is_ascii_sig) + +class RpmSubdirIndexer(RpmIndexer): + def write_index(self): + bb.note("Generating package index for %s" %(self.deploy_dir)) + self.do_write_index(self.deploy_dir) + for entry in os.walk(self.deploy_dir): + if os.path.samefile(self.deploy_dir, entry[0]): + for dir in entry[1]: + if dir != 'repodata': + dir_path = oe.path.join(self.deploy_dir, dir) + bb.note("Generating package index for %s" %(dir_path)) + self.do_write_index(dir_path) + + +class RpmPkgsList(PkgsList): + def list_pkgs(self): + return RpmPM(self.d, self.rootfs_dir, self.d.getVar('TARGET_VENDOR'), needfeed=False).list_installed() + +class RpmPM(PackageManager): + def __init__(self, + d, + target_rootfs, + target_vendor, + task_name='target', + arch_var=None, + os_var=None, + rpm_repo_workdir="oe-rootfs-repo", + filterbydependencies=True, + needfeed=True): + super(RpmPM, self).__init__(d, target_rootfs) + self.target_vendor = target_vendor + self.task_name = task_name + if arch_var == None: + self.archs = self.d.getVar('ALL_MULTILIB_PACKAGE_ARCHS').replace("-","_") + else: + self.archs = self.d.getVar(arch_var).replace("-","_") + if task_name == "host": + self.primary_arch = self.d.getVar('SDK_ARCH') + else: + self.primary_arch = self.d.getVar('MACHINE_ARCH') + + if needfeed: + self.rpm_repo_dir = oe.path.join(self.d.getVar('WORKDIR'), rpm_repo_workdir) + create_packages_dir(self.d, oe.path.join(self.rpm_repo_dir, "rpm"), d.getVar("DEPLOY_DIR_RPM"), "package_write_rpm", filterbydependencies) + + self.saved_packaging_data = self.d.expand('${T}/saved_packaging_data/%s' % self.task_name) + if not os.path.exists(self.d.expand('${T}/saved_packaging_data')): + bb.utils.mkdirhier(self.d.expand('${T}/saved_packaging_data')) + self.packaging_data_dirs = ['etc/rpm', 'etc/rpmrc', 'etc/dnf', 'var/lib/rpm', 'var/lib/dnf', 'var/cache/dnf'] + self.solution_manifest = self.d.expand('${T}/saved/%s_solution' % + self.task_name) + if not os.path.exists(self.d.expand('${T}/saved')): + bb.utils.mkdirhier(self.d.expand('${T}/saved')) + + def _configure_dnf(self): + # libsolv handles 'noarch' internally, we don't need to specify it explicitly + archs = [i for i in reversed(self.archs.split()) if i not in ["any", "all", "noarch"]] + # This prevents accidental matching against libsolv's built-in policies + if len(archs) <= 1: + archs = archs + ["bogusarch"] + # This architecture needs to be upfront so that packages using it are properly prioritized + archs = ["sdk_provides_dummy_target"] + archs + confdir = "%s/%s" %(self.target_rootfs, "etc/dnf/vars/") + bb.utils.mkdirhier(confdir) + open(confdir + "arch", 'w').write(":".join(archs)) + distro_codename = self.d.getVar('DISTRO_CODENAME') + open(confdir + "releasever", 'w').write(distro_codename if distro_codename is not None else '') + + 
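# For illustration: with a hypothetical self.archs of "core2_64 x86_64 all"
# (an assumed value, not taken from this patch), the code above leaves the
# target rootfs with roughly:
#   etc/dnf/vars/arch        -> "sdk_provides_dummy_target:x86_64:core2_64"
#                               ("bogusarch" is appended only when one or no
#                                real architectures remain after filtering)
#   etc/dnf/vars/releasever  -> DISTRO_CODENAME, or an empty string if unset
# and the line below then creates an empty etc/dnf/dnf.conf inside the
# target rootfs.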
open(oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), 'w').write("") + + + def _configure_rpm(self): + # We need to configure rpm to use our primary package architecture as the installation architecture, + # and to make it compatible with other package architectures that we use. + # Otherwise it will refuse to proceed with packages installation. + platformconfdir = "%s/%s" %(self.target_rootfs, "etc/rpm/") + rpmrcconfdir = "%s/%s" %(self.target_rootfs, "etc/") + bb.utils.mkdirhier(platformconfdir) + open(platformconfdir + "platform", 'w').write("%s-pc-linux" % self.primary_arch) + with open(rpmrcconfdir + "rpmrc", 'w') as f: + f.write("arch_compat: %s: %s\n" % (self.primary_arch, self.archs if len(self.archs) > 0 else self.primary_arch)) + f.write("buildarch_compat: %s: noarch\n" % self.primary_arch) + + open(platformconfdir + "macros", 'w').write("%_transaction_color 7\n") + if self.d.getVar('RPM_PREFER_ELF_ARCH'): + open(platformconfdir + "macros", 'a').write("%%_prefer_color %s" % (self.d.getVar('RPM_PREFER_ELF_ARCH'))) + + if self.d.getVar('RPM_SIGN_PACKAGES') == '1': + signer = get_signer(self.d, self.d.getVar('RPM_GPG_BACKEND')) + pubkey_path = oe.path.join(self.d.getVar('B'), 'rpm-key') + signer.export_pubkey(pubkey_path, self.d.getVar('RPM_GPG_NAME')) + rpm_bin = bb.utils.which(os.getenv('PATH'), "rpmkeys") + cmd = [rpm_bin, '--root=%s' % self.target_rootfs, '--import', pubkey_path] + try: + subprocess.check_output(cmd, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + bb.fatal("Importing GPG key failed. Command '%s' " + "returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + + def create_configs(self): + self._configure_dnf() + self._configure_rpm() + + def write_index(self): + lockfilename = self.d.getVar('DEPLOY_DIR_RPM') + "/rpm.lock" + lf = bb.utils.lockfile(lockfilename, False) + RpmIndexer(self.d, self.rpm_repo_dir).write_index() + bb.utils.unlockfile(lf) + + def insert_feeds_uris(self, feed_uris, feed_base_paths, feed_archs): + from urllib.parse import urlparse + + if feed_uris == "": + return + + gpg_opts = '' + if self.d.getVar('PACKAGE_FEED_SIGN') == '1': + gpg_opts += 'repo_gpgcheck=1\n' + gpg_opts += 'gpgkey=file://%s/pki/packagefeed-gpg/PACKAGEFEED-GPG-KEY-%s-%s\n' % (self.d.getVar('sysconfdir'), self.d.getVar('DISTRO'), self.d.getVar('DISTRO_CODENAME')) + + if self.d.getVar('RPM_SIGN_PACKAGES') != '1': + gpg_opts += 'gpgcheck=0\n' + + bb.utils.mkdirhier(oe.path.join(self.target_rootfs, "etc", "yum.repos.d")) + remote_uris = self.construct_uris(feed_uris.split(), feed_base_paths.split()) + for uri in remote_uris: + repo_base = "oe-remote-repo" + "-".join(urlparse(uri).path.split("/")) + if feed_archs is not None: + for arch in feed_archs.split(): + repo_uri = uri + "/" + arch + repo_id = "oe-remote-repo" + "-".join(urlparse(repo_uri).path.split("/")) + repo_name = "OE Remote Repo:" + " ".join(urlparse(repo_uri).path.split("/")) + open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'a').write( + "[%s]\nname=%s\nbaseurl=%s\n%s\n" % (repo_id, repo_name, repo_uri, gpg_opts)) + else: + repo_name = "OE Remote Repo:" + " ".join(urlparse(uri).path.split("/")) + repo_uri = uri + open(oe.path.join(self.target_rootfs, "etc", "yum.repos.d", repo_base + ".repo"), 'w').write( + "[%s]\nname=%s\nbaseurl=%s\n%s" % (repo_base, repo_name, repo_uri, gpg_opts)) + + def _prepare_pkg_transaction(self): + os.environ['D'] = self.target_rootfs + os.environ['OFFLINE_ROOT'] = self.target_rootfs + 
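# A minimal sketch of the environment that _prepare_pkg_transaction() exports so
# that package scriptlets and intercept hooks run offline against the image
# rootfs rather than the build host; 'offline_pkg_env' is an illustrative name.
def offline_pkg_env(target_rootfs, intercepts_dir, native_root):
    return {
        'D': target_rootfs,
        'OFFLINE_ROOT': target_rootfs,
        'IPKG_OFFLINE_ROOT': target_rootfs,
        'OPKG_OFFLINE_ROOT': target_rootfs,
        'INTERCEPT_DIR': intercepts_dir,
        'NATIVE_ROOT': native_root,
    }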
os.environ['IPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['OPKG_OFFLINE_ROOT'] = self.target_rootfs + os.environ['INTERCEPT_DIR'] = self.intercepts_dir + os.environ['NATIVE_ROOT'] = self.d.getVar('STAGING_DIR_NATIVE') + + + def install(self, pkgs, attempt_only = False): + if len(pkgs) == 0: + return + self._prepare_pkg_transaction() + + bad_recommendations = self.d.getVar('BAD_RECOMMENDATIONS') + package_exclude = self.d.getVar('PACKAGE_EXCLUDE') + exclude_pkgs = (bad_recommendations.split() if bad_recommendations else []) + (package_exclude.split() if package_exclude else []) + + output = self._invoke_dnf((["--skip-broken"] if attempt_only else []) + + (["-x", ",".join(exclude_pkgs)] if len(exclude_pkgs) > 0 else []) + + (["--setopt=install_weak_deps=False"] if self.d.getVar('NO_RECOMMENDATIONS') == "1" else []) + + (["--nogpgcheck"] if self.d.getVar('RPM_SIGN_PACKAGES') != '1' else ["--setopt=gpgcheck=True"]) + + ["install"] + + pkgs) + + failed_scriptlets_pkgnames = collections.OrderedDict() + for line in output.splitlines(): + if line.startswith("Error in POSTIN scriptlet in rpm package"): + failed_scriptlets_pkgnames[line.split()[-1]] = True + + if len(failed_scriptlets_pkgnames) > 0: + failed_postinsts_abort(list(failed_scriptlets_pkgnames.keys()), self.d.expand("${T}/log.do_${BB_CURRENTTASK}")) + + def remove(self, pkgs, with_dependencies = True): + if not pkgs: + return + + self._prepare_pkg_transaction() + + if with_dependencies: + self._invoke_dnf(["remove"] + pkgs) + else: + cmd = bb.utils.which(os.getenv('PATH'), "rpm") + args = ["-e", "-v", "--nodeps", "--root=%s" %self.target_rootfs] + + try: + bb.note("Running %s" % ' '.join([cmd] + args + pkgs)) + output = subprocess.check_output([cmd] + args + pkgs, stderr=subprocess.STDOUT).decode("utf-8") + bb.note(output) + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke rpm. 
Command " + "'%s' returned %d:\n%s" % (' '.join([cmd] + args + pkgs), e.returncode, e.output.decode("utf-8"))) + + def upgrade(self): + self._prepare_pkg_transaction() + self._invoke_dnf(["upgrade"]) + + def autoremove(self): + self._prepare_pkg_transaction() + self._invoke_dnf(["autoremove"]) + + def remove_packaging_data(self): + self._invoke_dnf(["clean", "all"]) + for dir in self.packaging_data_dirs: + bb.utils.remove(oe.path.join(self.target_rootfs, dir), True) + + def backup_packaging_data(self): + # Save the packaging dirs for increment rpm image generation + if os.path.exists(self.saved_packaging_data): + bb.utils.remove(self.saved_packaging_data, True) + for i in self.packaging_data_dirs: + source_dir = oe.path.join(self.target_rootfs, i) + target_dir = oe.path.join(self.saved_packaging_data, i) + if os.path.isdir(source_dir): + shutil.copytree(source_dir, target_dir, symlinks=True) + elif os.path.isfile(source_dir): + shutil.copy2(source_dir, target_dir) + + def recovery_packaging_data(self): + # Move the rpmlib back + if os.path.exists(self.saved_packaging_data): + for i in self.packaging_data_dirs: + target_dir = oe.path.join(self.target_rootfs, i) + if os.path.exists(target_dir): + bb.utils.remove(target_dir, True) + source_dir = oe.path.join(self.saved_packaging_data, i) + if os.path.isdir(source_dir): + shutil.copytree(source_dir, target_dir, symlinks=True) + elif os.path.isfile(source_dir): + shutil.copy2(source_dir, target_dir) + + def list_installed(self): + output = self._invoke_dnf(["repoquery", "--installed", "--queryformat", "Package: %{name} %{arch} %{version} %{name}-%{version}-%{release}.%{arch}.rpm\nDependencies:\n%{requires}\nRecommendations:\n%{recommends}\nDependenciesEndHere:\n"], + print_output = False) + packages = {} + current_package = None + current_deps = None + current_state = "initial" + for line in output.splitlines(): + if line.startswith("Package:"): + package_info = line.split(" ")[1:] + current_package = package_info[0] + package_arch = package_info[1] + package_version = package_info[2] + package_rpm = package_info[3] + packages[current_package] = {"arch":package_arch, "ver":package_version, "filename":package_rpm} + current_deps = [] + elif line.startswith("Dependencies:"): + current_state = "dependencies" + elif line.startswith("Recommendations"): + current_state = "recommendations" + elif line.startswith("DependenciesEndHere:"): + current_state = "initial" + packages[current_package]["deps"] = current_deps + elif len(line) > 0: + if current_state == "dependencies": + current_deps.append(line) + elif current_state == "recommendations": + current_deps.append("%s [REC]" % line) + + return packages + + def update(self): + self._invoke_dnf(["makecache", "--refresh"]) + + def _invoke_dnf(self, dnf_args, fatal = True, print_output = True ): + os.environ['RPM_ETCCONFIGDIR'] = self.target_rootfs + + dnf_cmd = bb.utils.which(os.getenv('PATH'), "dnf") + standard_dnf_args = ["-v", "--rpmverbosity=info", "-y", + "-c", oe.path.join(self.target_rootfs, "etc/dnf/dnf.conf"), + "--setopt=reposdir=%s" %(oe.path.join(self.target_rootfs, "etc/yum.repos.d")), + "--installroot=%s" % (self.target_rootfs), + "--setopt=logdir=%s" % (self.d.getVar('T')) + ] + if hasattr(self, "rpm_repo_dir"): + standard_dnf_args.append("--repofrompath=oe-repo,%s" % (self.rpm_repo_dir)) + cmd = [dnf_cmd] + standard_dnf_args + dnf_args + bb.note('Running %s' % ' '.join(cmd)) + try: + output = subprocess.check_output(cmd,stderr=subprocess.STDOUT).decode("utf-8") + if print_output: + 
bb.debug(1, output) + return output + except subprocess.CalledProcessError as e: + if print_output: + (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " + "'%s' returned %d:\n%s" % (' '.join(cmd), e.returncode, e.output.decode("utf-8"))) + else: + (bb.note, bb.fatal)[fatal]("Could not invoke dnf. Command " + "'%s' returned %d:" % (' '.join(cmd), e.returncode)) + return e.output.decode("utf-8") + + def dump_install_solution(self, pkgs): + open(self.solution_manifest, 'w').write(" ".join(pkgs)) + return pkgs + + def load_old_install_solution(self): + if not os.path.exists(self.solution_manifest): + return [] + with open(self.solution_manifest, 'r') as fd: + return fd.read().split() + + def _script_num_prefix(self, path): + files = os.listdir(path) + numbers = set() + numbers.add(99) + for f in files: + numbers.add(int(f.split("-")[0])) + return max(numbers) + 1 + + def save_rpmpostinst(self, pkg): + bb.note("Saving postinstall script of %s" % (pkg)) + cmd = bb.utils.which(os.getenv('PATH'), "rpm") + args = ["-q", "--root=%s" % self.target_rootfs, "--queryformat", "%{postin}", pkg] + + try: + output = subprocess.check_output([cmd] + args,stderr=subprocess.STDOUT).decode("utf-8") + except subprocess.CalledProcessError as e: + bb.fatal("Could not invoke rpm. Command " + "'%s' returned %d:\n%s" % (' '.join([cmd] + args), e.returncode, e.output.decode("utf-8"))) + + # may need to prepend #!/bin/sh to output + + target_path = oe.path.join(self.target_rootfs, self.d.expand('${sysconfdir}/rpm-postinsts/')) + bb.utils.mkdirhier(target_path) + num = self._script_num_prefix(target_path) + saved_script_name = oe.path.join(target_path, "%d-%s" % (num, pkg)) + open(saved_script_name, 'w').write(output) + os.chmod(saved_script_name, 0o755) + + def _handle_intercept_failure(self, registered_pkgs): + rpm_postinsts_dir = self.target_rootfs + self.d.expand('${sysconfdir}/rpm-postinsts/') + bb.utils.mkdirhier(rpm_postinsts_dir) + + # Save the package postinstalls in /etc/rpm-postinsts + for pkg in registered_pkgs.split(): + self.save_rpmpostinst(pkg) + + def extract(self, pkg): + output = self._invoke_dnf(["repoquery", "--queryformat", "%{location}", pkg]) + pkg_name = output.splitlines()[-1] + if not pkg_name.endswith(".rpm"): + bb.fatal("dnf could not find package %s in repository: %s" %(pkg, output)) + pkg_path = oe.path.join(self.rpm_repo_dir, pkg_name) + + cpio_cmd = bb.utils.which(os.getenv("PATH"), "cpio") + rpm2cpio_cmd = bb.utils.which(os.getenv("PATH"), "rpm2cpio") + + if not os.path.isfile(pkg_path): + bb.fatal("Unable to extract package for '%s'." + "File %s doesn't exists" % (pkg, pkg_path)) + + tmp_dir = tempfile.mkdtemp() + current_dir = os.getcwd() + os.chdir(tmp_dir) + + try: + cmd = "%s %s | %s -idmv" % (rpm2cpio_cmd, pkg_path, cpio_cmd) + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + except subprocess.CalledProcessError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. Command '%s' " + "returned %d:\n%s" % (pkg_path, cmd, e.returncode, e.output.decode("utf-8"))) + except OSError as e: + bb.utils.remove(tmp_dir, recurse=True) + bb.fatal("Unable to extract %s package. 
Command '%s' " + "returned %d:\n%s at %s" % (pkg_path, cmd, e.errno, e.strerror, e.filename)) + + bb.note("Extracted %s to %s" % (pkg_path, tmp_dir)) + os.chdir(current_dir) + + return tmp_dir diff --git a/poky/meta/lib/oe/package_manager/rpm/manifest.py b/poky/meta/lib/oe/package_manager/rpm/manifest.py new file mode 100644 index 000000000..a75f6bdab --- /dev/null +++ b/poky/meta/lib/oe/package_manager/rpm/manifest.py @@ -0,0 +1,54 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.manifest import Manifest + +class RpmManifest(Manifest): + """ + Returns a dictionary object with mip and mlp packages. + """ + def _split_multilib(self, pkg_list): + pkgs = dict() + + for pkg in pkg_list.split(): + pkg_type = self.PKG_TYPE_MUST_INSTALL + + ml_variants = self.d.getVar('MULTILIB_VARIANTS').split() + + for ml_variant in ml_variants: + if pkg.startswith(ml_variant + '-'): + pkg_type = self.PKG_TYPE_MULTILIB + + if not pkg_type in pkgs: + pkgs[pkg_type] = pkg + else: + pkgs[pkg_type] += " " + pkg + + return pkgs + + def create_initial(self): + pkgs = dict() + + with open(self.initial_manifest, "w+") as manifest: + manifest.write(self.initial_manifest_file_header) + + for var in self.var_maps[self.manifest_type]: + if var in self.vars_to_split: + split_pkgs = self._split_multilib(self.d.getVar(var)) + if split_pkgs is not None: + pkgs = dict(list(pkgs.items()) + list(split_pkgs.items())) + else: + pkg_list = self.d.getVar(var) + if pkg_list is not None: + pkgs[self.var_maps[self.manifest_type][var]] = self.d.getVar(var) + + for pkg_type in pkgs: + for pkg in pkgs[pkg_type].split(): + manifest.write("%s,%s\n" % (pkg_type, pkg)) + + def create_final(self): + pass + + def create_full(self, pm): + pass diff --git a/poky/meta/lib/oe/package_manager/rpm/rootfs.py b/poky/meta/lib/oe/package_manager/rpm/rootfs.py new file mode 100644 index 000000000..2de5752b9 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/rpm/rootfs.py @@ -0,0 +1,148 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +from oe.rootfs import Rootfs +from oe.manifest import Manifest +from oe.utils import execute_pre_post_process +from oe.package_manager.rpm.manifest import RpmManifest +from oe.package_manager.rpm import RpmPM + +class RpmRootfs(Rootfs): + def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): + super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher) + self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\ + r'|exit 1|ERROR: |Error: |Error |ERROR '\ + r'|Failed |Failed: |Failed$|Failed\(\d+\):)' + + self.manifest = RpmManifest(d, manifest_dir) + + self.pm = RpmPM(d, + d.getVar('IMAGE_ROOTFS'), + self.d.getVar('TARGET_VENDOR') + ) + + self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN') + if self.inc_rpm_image_gen != "1": + bb.utils.remove(self.image_rootfs, True) + else: + self.pm.recovery_packaging_data() + bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) + + self.pm.create_configs() + + ''' + While rpm incremental image generation is enabled, it will remove the + unneeded pkgs by comparing the new install solution manifest and the + old installed manifest. 
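# In sketch form, the incremental path below reduces to diffing the previously
# installed solution against the new one; 'packages_to_remove' is an illustrative
# helper name, not part of RpmRootfs.
def packages_to_remove(old_solution, new_solution):
    # Anything installed by the previous image run but absent from the new
    # install solution is removed before the in-place upgrade.
    wanted = set(new_solution)
    return [pkg for pkg in old_solution if pkg not in wanted]

# Example: packages_to_remove(["busybox", "dropbear"], ["busybox"]) -> ['dropbear']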
+ ''' + def _create_incremental(self, pkgs_initial_install): + if self.inc_rpm_image_gen == "1": + + pkgs_to_install = list() + for pkg_type in pkgs_initial_install: + pkgs_to_install += pkgs_initial_install[pkg_type] + + installed_manifest = self.pm.load_old_install_solution() + solution_manifest = self.pm.dump_install_solution(pkgs_to_install) + + pkg_to_remove = list() + for pkg in installed_manifest: + if pkg not in solution_manifest: + pkg_to_remove.append(pkg) + + self.pm.update() + + bb.note('incremental update -- upgrade packages in place ') + self.pm.upgrade() + if pkg_to_remove != []: + bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) + self.pm.remove(pkg_to_remove) + + self.pm.autoremove() + + def _create(self): + pkgs_to_install = self.manifest.parse_initial_manifest() + rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS') + rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS') + + # update PM index files + self.pm.write_index() + + execute_pre_post_process(self.d, rpm_pre_process_cmds) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + if self.inc_rpm_image_gen == "1": + self._create_incremental(pkgs_to_install) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: + pkgs += pkgs_to_install[pkg_type] + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install(pkgs) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install(pkgs_attempt, True) + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self.pm.install_complementary() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf']) + + execute_pre_post_process(self.d, rpm_post_process_cmds) + + if self.inc_rpm_image_gen == "1": + self.pm.backup_packaging_data() + + if self.progress_reporter: + self.progress_reporter.next_stage() + + + @staticmethod + def _depends_list(): + return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS', + 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH'] + + def _get_delayed_postinsts(self): + postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") + if os.path.isdir(postinst_dir): + files = os.listdir(postinst_dir) + for f in files: + bb.note('Delayed package scriptlet: %s' % f) + return files + + return None + + def _save_postinsts(self): + # this is just a stub. 
For RPM, the failed postinstalls are + # already saved in /etc/rpm-postinsts + pass + + def _log_check(self): + self._log_check_warn() + self._log_check_error() + + def _cleanup(self): + if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d): + self.pm._invoke_dnf(["clean", "all"]) diff --git a/poky/meta/lib/oe/package_manager/rpm/sdk.py b/poky/meta/lib/oe/package_manager/rpm/sdk.py new file mode 100644 index 000000000..b14b155a8 --- /dev/null +++ b/poky/meta/lib/oe/package_manager/rpm/sdk.py @@ -0,0 +1,114 @@ +# +# SPDX-License-Identifier: GPL-2.0-only +# + +import glob +from oe.utils import execute_pre_post_process +from oe.sdk import Sdk +from oe.manifest import Manifest +from oe.package_manager.rpm import RpmPM + +class RpmSdk(Sdk): + def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"): + super(RpmSdk, self).__init__(d, manifest_dir) + + from oe.package_manager.rpm.manifest import RpmManifest + self.target_manifest = RpmManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_TARGET) + self.host_manifest = RpmManifest(d, self.manifest_dir, + Manifest.MANIFEST_TYPE_SDK_HOST) + + rpm_repo_workdir = "oe-sdk-repo" + if "sdk_ext" in d.getVar("BB_RUNTASK"): + rpm_repo_workdir = "oe-sdk-ext-repo" + + self.target_pm = RpmPM(d, + self.sdk_target_sysroot, + self.d.getVar('TARGET_VENDOR'), + 'target', + rpm_repo_workdir=rpm_repo_workdir + ) + + self.host_pm = RpmPM(d, + self.sdk_host_sysroot, + self.d.getVar('SDK_VENDOR'), + 'host', + "SDK_PACKAGE_ARCHS", + "SDK_OS", + rpm_repo_workdir=rpm_repo_workdir + ) + + def _populate_sysroot(self, pm, manifest): + pkgs_to_install = manifest.parse_initial_manifest() + + pm.create_configs() + pm.write_index() + pm.update() + + pkgs = [] + pkgs_attempt = [] + for pkg_type in pkgs_to_install: + if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: + pkgs_attempt += pkgs_to_install[pkg_type] + else: + pkgs += pkgs_to_install[pkg_type] + + pm.install(pkgs) + + pm.install(pkgs_attempt, True) + + def _populate(self): + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) + + bb.note("Installing TARGET packages") + self._populate_sysroot(self.target_pm, self.target_manifest) + + self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) + + self.target_pm.run_intercepts(populate_sdk='target') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.target_pm.remove_packaging_data() + + bb.note("Installing NATIVESDK packages") + self._populate_sysroot(self.host_pm, self.host_manifest) + self.install_locales(self.host_pm) + + self.host_pm.run_intercepts(populate_sdk='host') + + execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) + + if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): + self.host_pm.remove_packaging_data() + + # Move host RPM library data + native_rpm_state_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('localstatedir_nativesdk').strip('/'), + "lib", + "rpm" + ) + self.mkdirhier(native_rpm_state_dir) + for f in glob.glob(os.path.join(self.sdk_output, + "var", + "lib", + "rpm", + "*")): + self.movefile(f, native_rpm_state_dir) + + self.remove(os.path.join(self.sdk_output, "var"), True) + + # Move host sysconfig data + native_sysconf_dir = os.path.join(self.sdk_output, + self.sdk_native_path, + self.d.getVar('sysconfdir', + 
True).strip('/'), + ) + self.mkdirhier(native_sysconf_dir) + for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")): + self.movefile(f, native_sysconf_dir) + for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")): + self.movefile(f, native_sysconf_dir) + self.remove(os.path.join(self.sdk_output, "etc"), True) diff --git a/poky/meta/lib/oe/patch.py b/poky/meta/lib/oe/patch.py index 7ca2e28b1..40755fbb0 100644 --- a/poky/meta/lib/oe/patch.py +++ b/poky/meta/lib/oe/patch.py @@ -41,7 +41,7 @@ def runcmd(args, dir = None): (exitstatus, output) = subprocess.getstatusoutput(cmd) if exitstatus != 0: raise CmdError(cmd, exitstatus >> 8, output) - if " fuzz " in output: + if " fuzz " in output and "Hunk " in output: # Drop patch fuzz info with header and footer to log file so # insane.bbclass can handle to throw error/warning bb.note("--- Patch fuzz start ---\n%s\n--- Patch fuzz end ---" % format(output)) diff --git a/poky/meta/lib/oe/reproducible.py b/poky/meta/lib/oe/reproducible.py index f4f58dd95..421bb12f5 100644 --- a/poky/meta/lib/oe/reproducible.py +++ b/poky/meta/lib/oe/reproducible.py @@ -56,13 +56,13 @@ def get_source_date_epoch_from_git(d, sourcedir): # Check that the repository has a valid HEAD; it may not if subdir is used # in SRC_URI - p = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=gitpath) + p = subprocess.run(['git', '--git-dir', gitpath, 'rev-parse', 'HEAD'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) if p.returncode != 0: bb.debug(1, "%s does not have a valid HEAD: %s" % (gitpath, p.stdout.decode('utf-8'))) return None bb.debug(1, "git repository: %s" % gitpath) - p = subprocess.run(['git','log','-1','--pretty=%ct'], check=True, stdout=subprocess.PIPE, cwd=gitpath) + p = subprocess.run(['git', '--git-dir', gitpath, 'log', '-1', '--pretty=%ct'], check=True, stdout=subprocess.PIPE) return int(p.stdout.decode('utf-8')) def get_source_date_epoch_from_youngest_file(d, sourcedir): diff --git a/poky/meta/lib/oe/rootfs.py b/poky/meta/lib/oe/rootfs.py index 0e05f1f75..3813f68e8 100644 --- a/poky/meta/lib/oe/rootfs.py +++ b/poky/meta/lib/oe/rootfs.py @@ -6,12 +6,16 @@ from oe.utils import execute_pre_post_process from oe.package_manager import * from oe.manifest import * import oe.path -import filecmp import shutil import os import subprocess import re - +from oe.package_manager.rpm.manifest import RpmManifest +from oe.package_manager.ipk.manifest import OpkgManifest +from oe.package_manager.deb.manifest import DpkgManifest +from oe.package_manager.rpm import RpmPkgsList +from oe.package_manager.ipk import OpkgPkgsList +from oe.package_manager.deb import DpkgPkgsList class Rootfs(object, metaclass=ABCMeta): """ @@ -353,611 +357,10 @@ class Rootfs(object, metaclass=ABCMeta): self.image_rootfs, "-D", devtable]) -class RpmRootfs(Rootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(RpmRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = r'(unpacking of archive failed|Cannot find package'\ - r'|exit 1|ERROR: |Error: |Error |ERROR '\ - r'|Failed |Failed: |Failed$|Failed\(\d+\):)' - self.manifest = RpmManifest(d, manifest_dir) - - self.pm = RpmPM(d, - d.getVar('IMAGE_ROOTFS'), - self.d.getVar('TARGET_VENDOR') - ) - - self.inc_rpm_image_gen = self.d.getVar('INC_RPM_IMAGE_GEN') - if self.inc_rpm_image_gen != "1": - bb.utils.remove(self.image_rootfs, True) - else: - self.pm.recovery_packaging_data() - 
bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - - self.pm.create_configs() - - ''' - While rpm incremental image generation is enabled, it will remove the - unneeded pkgs by comparing the new install solution manifest and the - old installed manifest. - ''' - def _create_incremental(self, pkgs_initial_install): - if self.inc_rpm_image_gen == "1": - - pkgs_to_install = list() - for pkg_type in pkgs_initial_install: - pkgs_to_install += pkgs_initial_install[pkg_type] - - installed_manifest = self.pm.load_old_install_solution() - solution_manifest = self.pm.dump_install_solution(pkgs_to_install) - - pkg_to_remove = list() - for pkg in installed_manifest: - if pkg not in solution_manifest: - pkg_to_remove.append(pkg) - - self.pm.update() - - bb.note('incremental update -- upgrade packages in place ') - self.pm.upgrade() - if pkg_to_remove != []: - bb.note('incremental removed: %s' % ' '.join(pkg_to_remove)) - self.pm.remove(pkg_to_remove) - - self.pm.autoremove() - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - rpm_pre_process_cmds = self.d.getVar('RPM_PREPROCESS_COMMANDS') - rpm_post_process_cmds = self.d.getVar('RPM_POSTPROCESS_COMMANDS') - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, rpm_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - if self.inc_rpm_image_gen == "1": - self._create_incremental(pkgs_to_install) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.update() - - pkgs = [] - pkgs_attempt = [] - for pkg_type in pkgs_to_install: - if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: - pkgs_attempt += pkgs_to_install[pkg_type] - else: - pkgs += pkgs_to_install[pkg_type] - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install(pkgs) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install(pkgs_attempt, True) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self._setup_dbg_rootfs(['/etc', '/var/lib/rpm', '/var/cache/dnf', '/var/lib/dnf']) - - execute_pre_post_process(self.d, rpm_post_process_cmds) - - if self.inc_rpm_image_gen == "1": - self.pm.backup_packaging_data() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - - @staticmethod - def _depends_list(): - return ['DEPLOY_DIR_RPM', 'INC_RPM_IMAGE_GEN', 'RPM_PREPROCESS_COMMANDS', - 'RPM_POSTPROCESS_COMMANDS', 'RPM_PREFER_ELF_ARCH'] - - def _get_delayed_postinsts(self): - postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/rpm-postinsts") - if os.path.isdir(postinst_dir): - files = os.listdir(postinst_dir) - for f in files: - bb.note('Delayed package scriptlet: %s' % f) - return files - - return None - - def _save_postinsts(self): - # this is just a stub. 
For RPM, the failed postinstalls are - # already saved in /etc/rpm-postinsts - pass - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - if bb.utils.contains("IMAGE_FEATURES", "package-management", True, False, self.d): - self.pm._invoke_dnf(["clean", "all"]) - - -class DpkgOpkgRootfs(Rootfs): - def __init__(self, d, progress_reporter=None, logcatcher=None): - super(DpkgOpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - - def _get_pkgs_postinsts(self, status_file): - def _get_pkg_depends_list(pkg_depends): - pkg_depends_list = [] - # filter version requirements like libc (>= 1.1) - for dep in pkg_depends.split(', '): - m_dep = re.match(r"^(.*) \(.*\)$", dep) - if m_dep: - dep = m_dep.group(1) - pkg_depends_list.append(dep) - - return pkg_depends_list - - pkgs = {} - pkg_name = "" - pkg_status_match = False - pkg_depends = "" - - with open(status_file) as status: - data = status.read() - status.close() - for line in data.split('\n'): - m_pkg = re.match(r"^Package: (.*)", line) - m_status = re.match(r"^Status:.*unpacked", line) - m_depends = re.match(r"^Depends: (.*)", line) - - #Only one of m_pkg, m_status or m_depends is not None at time - #If m_pkg is not None, we started a new package - if m_pkg is not None: - #Get Package name - pkg_name = m_pkg.group(1) - #Make sure we reset other variables - pkg_status_match = False - pkg_depends = "" - elif m_status is not None: - #New status matched - pkg_status_match = True - elif m_depends is not None: - #New depends macthed - pkg_depends = m_depends.group(1) - else: - pass - - #Now check if we can process package depends and postinst - if "" != pkg_name and pkg_status_match: - pkgs[pkg_name] = _get_pkg_depends_list(pkg_depends) - else: - #Not enough information - pass - - # remove package dependencies not in postinsts - pkg_names = list(pkgs.keys()) - for pkg_name in pkg_names: - deps = pkgs[pkg_name][:] - - for d in deps: - if d not in pkg_names: - pkgs[pkg_name].remove(d) - - return pkgs - - def _get_delayed_postinsts_common(self, status_file): - def _dep_resolve(graph, node, resolved, seen): - seen.append(node) - - for edge in graph[node]: - if edge not in resolved: - if edge in seen: - raise RuntimeError("Packages %s and %s have " \ - "a circular dependency in postinsts scripts." \ - % (node, edge)) - _dep_resolve(graph, edge, resolved, seen) - - resolved.append(node) - - pkg_list = [] - - pkgs = None - if not self.d.getVar('PACKAGE_INSTALL').strip(): - bb.note("Building empty image") - else: - pkgs = self._get_pkgs_postinsts(status_file) - if pkgs: - root = "__packagegroup_postinst__" - pkgs[root] = list(pkgs.keys()) - _dep_resolve(pkgs, root, pkg_list, []) - pkg_list.remove(root) - - if len(pkg_list) == 0: - return None - - return pkg_list - - def _save_postinsts_common(self, dst_postinst_dir, src_postinst_dir): - if bb.utils.contains("IMAGE_FEATURES", "package-management", - True, False, self.d): - return - num = 0 - for p in self._get_delayed_postinsts(): - bb.utils.mkdirhier(dst_postinst_dir) - - if os.path.exists(os.path.join(src_postinst_dir, p + ".postinst")): - shutil.copy(os.path.join(src_postinst_dir, p + ".postinst"), - os.path.join(dst_postinst_dir, "%03d-%s" % (num, p))) - - num += 1 - -class DpkgRootfs(DpkgOpkgRootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(DpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = '^E:' - self.log_check_expected_regexes = \ - [ - "^E: Unmet dependencies." 
- ] - - bb.utils.remove(self.image_rootfs, True) - bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - self.manifest = DpkgManifest(d, manifest_dir) - self.pm = DpkgPM(d, d.getVar('IMAGE_ROOTFS'), - d.getVar('PACKAGE_ARCHS'), - d.getVar('DPKG_ARCH')) - - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - deb_pre_process_cmds = self.d.getVar('DEB_PREPROCESS_COMMANDS') - deb_post_process_cmds = self.d.getVar('DEB_POSTPROCESS_COMMANDS') - - alt_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/alternatives") - bb.utils.mkdirhier(alt_dir) - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, deb_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - # Don't support incremental, so skip that - self.progress_reporter.next_stage() - - self.pm.update() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - self.pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - self.pm.fix_broken_dependencies() - - if self.progress_reporter: - # Don't support attemptonly, so skip that - self.progress_reporter.next_stage() - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self._setup_dbg_rootfs(['/var/lib/dpkg']) - - self.pm.fix_broken_dependencies() - - self.pm.mark_packages("installed") - - self.pm.run_pre_post_installs() - - execute_pre_post_process(self.d, deb_post_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - @staticmethod - def _depends_list(): - return ['DEPLOY_DIR_DEB', 'DEB_SDK_ARCH', 'APTCONF_TARGET', 'APT_ARGS', 'DPKG_ARCH', 'DEB_PREPROCESS_COMMANDS', 'DEB_POSTPROCESS_COMMANDS'] - - def _get_delayed_postinsts(self): - status_file = self.image_rootfs + "/var/lib/dpkg/status" - return self._get_delayed_postinsts_common(status_file) - - def _save_postinsts(self): - dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/deb-postinsts") - src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}/var/lib/dpkg/info") - return self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - pass - - -class OpkgRootfs(DpkgOpkgRootfs): - def __init__(self, d, manifest_dir, progress_reporter=None, logcatcher=None): - super(OpkgRootfs, self).__init__(d, progress_reporter, logcatcher) - self.log_check_regex = '(exit 1|Collected errors)' - - self.manifest = OpkgManifest(d, manifest_dir) - self.opkg_conf = self.d.getVar("IPKGCONF_TARGET") - self.pkg_archs = self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS") - - self.inc_opkg_image_gen = self.d.getVar('INC_IPK_IMAGE_GEN') or "" - if self._remove_old_rootfs(): - bb.utils.remove(self.image_rootfs, True) - self.pm = OpkgPM(d, - self.image_rootfs, - self.opkg_conf, - self.pkg_archs) - else: - self.pm = OpkgPM(d, - self.image_rootfs, - self.opkg_conf, - self.pkg_archs) - self.pm.recover_packaging_data() - - bb.utils.remove(self.d.getVar('MULTILIB_TEMP_ROOTFS'), True) - - def _prelink_file(self, root_dir, filename): - bb.note('prelink %s in %s' % (filename, root_dir)) - prelink_cfg = oe.path.join(root_dir, - self.d.expand('${sysconfdir}/prelink.conf')) - if not os.path.exists(prelink_cfg): - shutil.copy(self.d.expand('${STAGING_DIR_NATIVE}${sysconfdir_native}/prelink.conf'), - prelink_cfg) - - 
cmd_prelink = self.d.expand('${STAGING_DIR_NATIVE}${sbindir_native}/prelink') - self._exec_shell_cmd([cmd_prelink, - '--root', - root_dir, - '-amR', - '-N', - '-c', - self.d.expand('${sysconfdir}/prelink.conf')]) - - ''' - Compare two files with the same key twice to see if they are equal. - If they are not equal, it means they are duplicated and come from - different packages. - 1st: Comapre them directly; - 2nd: While incremental image creation is enabled, one of the - files could be probaly prelinked in the previous image - creation and the file has been changed, so we need to - prelink the other one and compare them. - ''' - def _file_equal(self, key, f1, f2): - - # Both of them are not prelinked - if filecmp.cmp(f1, f2): - return True - - if bb.data.inherits_class('image-prelink', self.d): - if self.image_rootfs not in f1: - self._prelink_file(f1.replace(key, ''), f1) - - if self.image_rootfs not in f2: - self._prelink_file(f2.replace(key, ''), f2) - - # Both of them are prelinked - if filecmp.cmp(f1, f2): - return True - - # Not equal - return False - - """ - This function was reused from the old implementation. - See commit: "image.bbclass: Added variables for multilib support." by - Lianhao Lu. - """ - def _multilib_sanity_test(self, dirs): - - allow_replace = self.d.getVar("MULTILIBRE_ALLOW_REP") - if allow_replace is None: - allow_replace = "" - - allow_rep = re.compile(re.sub(r"\|$", r"", allow_replace)) - error_prompt = "Multilib check error:" - - files = {} - for dir in dirs: - for root, subfolders, subfiles in os.walk(dir): - for file in subfiles: - item = os.path.join(root, file) - key = str(os.path.join("/", os.path.relpath(item, dir))) - - valid = True - if key in files: - #check whether the file is allow to replace - if allow_rep.match(key): - valid = True - else: - if os.path.exists(files[key]) and \ - os.path.exists(item) and \ - not self._file_equal(key, files[key], item): - valid = False - bb.fatal("%s duplicate files %s %s is not the same\n" % - (error_prompt, item, files[key])) - - #pass the check, add to list - if valid: - files[key] = item - - def _multilib_test_install(self, pkgs): - ml_temp = self.d.getVar("MULTILIB_TEMP_ROOTFS") - bb.utils.mkdirhier(ml_temp) - - dirs = [self.image_rootfs] - - for variant in self.d.getVar("MULTILIB_VARIANTS").split(): - ml_target_rootfs = os.path.join(ml_temp, variant) - - bb.utils.remove(ml_target_rootfs, True) - - ml_opkg_conf = os.path.join(ml_temp, - variant + "-" + os.path.basename(self.opkg_conf)) - - ml_pm = OpkgPM(self.d, ml_target_rootfs, ml_opkg_conf, self.pkg_archs, prepare_index=False) - - ml_pm.update() - ml_pm.install(pkgs) - - dirs.append(ml_target_rootfs) - - self._multilib_sanity_test(dirs) - - ''' - While ipk incremental image generation is enabled, it will remove the - unneeded pkgs by comparing the old full manifest in previous existing - image and the new full manifest in the current image. - ''' - def _remove_extra_packages(self, pkgs_initial_install): - if self.inc_opkg_image_gen == "1": - # Parse full manifest in previous existing image creation session - old_full_manifest = self.manifest.parse_full_manifest() - - # Create full manifest for the current image session, the old one - # will be replaced by the new one. 
- self.manifest.create_full(self.pm) - - # Parse full manifest in current image creation session - new_full_manifest = self.manifest.parse_full_manifest() - - pkg_to_remove = list() - for pkg in old_full_manifest: - if pkg not in new_full_manifest: - pkg_to_remove.append(pkg) - - if pkg_to_remove != []: - bb.note('decremental removed: %s' % ' '.join(pkg_to_remove)) - self.pm.remove(pkg_to_remove) - - ''' - Compare with previous existing image creation, if some conditions - triggered, the previous old image should be removed. - The conditions include any of 'PACKAGE_EXCLUDE, NO_RECOMMENDATIONS - and BAD_RECOMMENDATIONS' has been changed. - ''' - def _remove_old_rootfs(self): - if self.inc_opkg_image_gen != "1": - return True - - vars_list_file = self.d.expand('${T}/vars_list') - - old_vars_list = "" - if os.path.exists(vars_list_file): - old_vars_list = open(vars_list_file, 'r+').read() - - new_vars_list = '%s:%s:%s\n' % \ - ((self.d.getVar('BAD_RECOMMENDATIONS') or '').strip(), - (self.d.getVar('NO_RECOMMENDATIONS') or '').strip(), - (self.d.getVar('PACKAGE_EXCLUDE') or '').strip()) - open(vars_list_file, 'w+').write(new_vars_list) - - if old_vars_list != new_vars_list: - return True - - return False - - def _create(self): - pkgs_to_install = self.manifest.parse_initial_manifest() - opkg_pre_process_cmds = self.d.getVar('OPKG_PREPROCESS_COMMANDS') - opkg_post_process_cmds = self.d.getVar('OPKG_POSTPROCESS_COMMANDS') - - # update PM index files - self.pm.write_index() - - execute_pre_post_process(self.d, opkg_pre_process_cmds) - - if self.progress_reporter: - self.progress_reporter.next_stage() - # Steps are a bit different in order, skip next - self.progress_reporter.next_stage() - - self.pm.update() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - if self.inc_opkg_image_gen == "1": - self._remove_extra_packages(pkgs_to_install) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - # For multilib, we perform a sanity test before final install - # If sanity test fails, it will automatically do a bb.fatal() - # and the installation will stop - if pkg_type == Manifest.PKG_TYPE_MULTILIB: - self._multilib_test_install(pkgs_to_install[pkg_type]) - - self.pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - if self.progress_reporter: - self.progress_reporter.next_stage() - - self.pm.install_complementary() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - opkg_lib_dir = self.d.getVar('OPKGLIBDIR') - opkg_dir = os.path.join(opkg_lib_dir, 'opkg') - self._setup_dbg_rootfs([opkg_dir]) - - execute_pre_post_process(self.d, opkg_post_process_cmds) - - if self.inc_opkg_image_gen == "1": - self.pm.backup_packaging_data() - - if self.progress_reporter: - self.progress_reporter.next_stage() - - @staticmethod - def _depends_list(): - return ['IPKGCONF_SDK', 'IPK_FEED_URIS', 'DEPLOY_DIR_IPK', 'IPKGCONF_TARGET', 'INC_IPK_IMAGE_GEN', 'OPKG_ARGS', 'OPKGLIBDIR', 'OPKG_PREPROCESS_COMMANDS', 'OPKG_POSTPROCESS_COMMANDS', 'OPKGLIBDIR'] - - def _get_delayed_postinsts(self): - status_file = os.path.join(self.image_rootfs, - self.d.getVar('OPKGLIBDIR').strip('/'), - "opkg", "status") - return self._get_delayed_postinsts_common(status_file) - - def _save_postinsts(self): - dst_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${sysconfdir}/ipk-postinsts") - src_postinst_dir = self.d.expand("${IMAGE_ROOTFS}${OPKGLIBDIR}/opkg/info") - return 
self._save_postinsts_common(dst_postinst_dir, src_postinst_dir) - - def _log_check(self): - self._log_check_warn() - self._log_check_error() - - def _cleanup(self): - self.pm.remove_lists() - def get_class_for_type(imgtype): + from oe.package_manager.rpm.rootfs import RpmRootfs + from oe.package_manager.ipk.rootfs import OpkgRootfs + from oe.package_manager.deb.rootfs import DpkgRootfs return {"rpm": RpmRootfs, "ipk": OpkgRootfs, "deb": DpkgRootfs}[imgtype] @@ -970,6 +373,9 @@ def variable_depends(d, manifest_dir=None): def create_rootfs(d, manifest_dir=None, progress_reporter=None, logcatcher=None): env_bkp = os.environ.copy() + from oe.package_manager.rpm.rootfs import RpmRootfs + from oe.package_manager.ipk.rootfs import OpkgRootfs + from oe.package_manager.deb.rootfs import DpkgRootfs img_type = d.getVar('IMAGE_PKGTYPE') if img_type == "rpm": RpmRootfs(d, manifest_dir, progress_reporter, logcatcher).create() diff --git a/poky/meta/lib/oe/sdk.py b/poky/meta/lib/oe/sdk.py index d02a27481..fdcadcb8d 100644 --- a/poky/meta/lib/oe/sdk.py +++ b/poky/meta/lib/oe/sdk.py @@ -7,8 +7,6 @@ from oe.utils import execute_pre_post_process from oe.manifest import * from oe.package_manager import * import os -import shutil -import glob import traceback class Sdk(object, metaclass=ABCMeta): @@ -110,283 +108,6 @@ class Sdk(object, metaclass=ABCMeta): pass -class RpmSdk(Sdk): - def __init__(self, d, manifest_dir=None, rpm_workdir="oe-sdk-repo"): - super(RpmSdk, self).__init__(d, manifest_dir) - - self.target_manifest = RpmManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = RpmManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - rpm_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - rpm_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = RpmPM(d, - self.sdk_target_sysroot, - self.d.getVar('TARGET_VENDOR'), - 'target', - rpm_repo_workdir=rpm_repo_workdir - ) - - self.host_pm = RpmPM(d, - self.sdk_host_sysroot, - self.d.getVar('SDK_VENDOR'), - 'host', - "SDK_PACKAGE_ARCHS", - "SDK_OS", - rpm_repo_workdir=rpm_repo_workdir - ) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - pm.create_configs() - pm.write_index() - pm.update() - - pkgs = [] - pkgs_attempt = [] - for pkg_type in pkgs_to_install: - if pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY: - pkgs_attempt += pkgs_to_install[pkg_type] - else: - pkgs += pkgs_to_install[pkg_type] - - pm.install(pkgs) - - pm.install(pkgs_attempt, True) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - # Move host 
RPM library data - native_rpm_state_dir = os.path.join(self.sdk_output, - self.sdk_native_path, - self.d.getVar('localstatedir_nativesdk').strip('/'), - "lib", - "rpm" - ) - self.mkdirhier(native_rpm_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, - "var", - "lib", - "rpm", - "*")): - self.movefile(f, native_rpm_state_dir) - - self.remove(os.path.join(self.sdk_output, "var"), True) - - # Move host sysconfig data - native_sysconf_dir = os.path.join(self.sdk_output, - self.sdk_native_path, - self.d.getVar('sysconfdir', - True).strip('/'), - ) - self.mkdirhier(native_sysconf_dir) - for f in glob.glob(os.path.join(self.sdk_output, "etc", "rpm*")): - self.movefile(f, native_sysconf_dir) - for f in glob.glob(os.path.join(self.sdk_output, "etc", "dnf", "*")): - self.movefile(f, native_sysconf_dir) - self.remove(os.path.join(self.sdk_output, "etc"), True) - - -class OpkgSdk(Sdk): - def __init__(self, d, manifest_dir=None): - super(OpkgSdk, self).__init__(d, manifest_dir) - - self.target_conf = self.d.getVar("IPKGCONF_TARGET") - self.host_conf = self.d.getVar("IPKGCONF_SDK") - - self.target_manifest = OpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = OpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - ipk_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - ipk_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = OpkgPM(d, self.sdk_target_sysroot, self.target_conf, - self.d.getVar("ALL_MULTILIB_PACKAGE_ARCHS"), - ipk_repo_workdir=ipk_repo_workdir) - - self.host_pm = OpkgPM(d, self.sdk_host_sysroot, self.host_conf, - self.d.getVar("SDK_PACKAGE_ARCHS"), - ipk_repo_workdir=ipk_repo_workdir) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - if (self.d.getVar('BUILD_IMAGES_FROM_FEEDS') or "") != "1": - pm.write_index() - - pm.update() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - target_sysconfdir = os.path.join(self.sdk_target_sysroot, self.sysconfdir) - host_sysconfdir = os.path.join(self.sdk_host_sysroot, self.sysconfdir) - - self.mkdirhier(target_sysconfdir) - shutil.copy(self.target_conf, target_sysconfdir) - os.chmod(os.path.join(target_sysconfdir, - os.path.basename(self.target_conf)), 0o644) - - self.mkdirhier(host_sysconfdir) - shutil.copy(self.host_conf, host_sysconfdir) - os.chmod(os.path.join(host_sysconfdir, - 
os.path.basename(self.host_conf)), 0o644) - - native_opkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, - self.d.getVar('localstatedir_nativesdk').strip('/'), - "lib", "opkg") - self.mkdirhier(native_opkg_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "opkg", "*")): - self.movefile(f, native_opkg_state_dir) - - self.remove(os.path.join(self.sdk_output, "var"), True) - - -class DpkgSdk(Sdk): - def __init__(self, d, manifest_dir=None): - super(DpkgSdk, self).__init__(d, manifest_dir) - - self.target_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt") - self.host_conf_dir = os.path.join(self.d.getVar("APTCONF_TARGET"), "apt-sdk") - - self.target_manifest = DpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_TARGET) - self.host_manifest = DpkgManifest(d, self.manifest_dir, - Manifest.MANIFEST_TYPE_SDK_HOST) - - deb_repo_workdir = "oe-sdk-repo" - if "sdk_ext" in d.getVar("BB_RUNTASK"): - deb_repo_workdir = "oe-sdk-ext-repo" - - self.target_pm = DpkgPM(d, self.sdk_target_sysroot, - self.d.getVar("PACKAGE_ARCHS"), - self.d.getVar("DPKG_ARCH"), - self.target_conf_dir, - deb_repo_workdir=deb_repo_workdir) - - self.host_pm = DpkgPM(d, self.sdk_host_sysroot, - self.d.getVar("SDK_PACKAGE_ARCHS"), - self.d.getVar("DEB_SDK_ARCH"), - self.host_conf_dir, - deb_repo_workdir=deb_repo_workdir) - - def _copy_apt_dir_to(self, dst_dir): - staging_etcdir_native = self.d.getVar("STAGING_ETCDIR_NATIVE") - - self.remove(dst_dir, True) - - shutil.copytree(os.path.join(staging_etcdir_native, "apt"), dst_dir) - - def _populate_sysroot(self, pm, manifest): - pkgs_to_install = manifest.parse_initial_manifest() - - pm.write_index() - pm.update() - - for pkg_type in self.install_order: - if pkg_type in pkgs_to_install: - pm.install(pkgs_to_install[pkg_type], - [False, True][pkg_type == Manifest.PKG_TYPE_ATTEMPT_ONLY]) - - def _populate(self): - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_PRE_TARGET_COMMAND")) - - bb.note("Installing TARGET packages") - self._populate_sysroot(self.target_pm, self.target_manifest) - - self.target_pm.install_complementary(self.d.getVar('SDKIMAGE_INSTALL_COMPLEMENTARY')) - - self.target_pm.run_intercepts(populate_sdk='target') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_TARGET_COMMAND")) - - self._copy_apt_dir_to(os.path.join(self.sdk_target_sysroot, "etc", "apt")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.target_pm.remove_packaging_data() - - bb.note("Installing NATIVESDK packages") - self._populate_sysroot(self.host_pm, self.host_manifest) - self.install_locales(self.host_pm) - - self.host_pm.run_intercepts(populate_sdk='host') - - execute_pre_post_process(self.d, self.d.getVar("POPULATE_SDK_POST_HOST_COMMAND")) - - self._copy_apt_dir_to(os.path.join(self.sdk_output, self.sdk_native_path, - "etc", "apt")) - - if not bb.utils.contains("SDKIMAGE_FEATURES", "package-management", True, False, self.d): - self.host_pm.remove_packaging_data() - - native_dpkg_state_dir = os.path.join(self.sdk_output, self.sdk_native_path, - "var", "lib", "dpkg") - self.mkdirhier(native_dpkg_state_dir) - for f in glob.glob(os.path.join(self.sdk_output, "var", "lib", "dpkg", "*")): - self.movefile(f, native_dpkg_state_dir) - self.remove(os.path.join(self.sdk_output, "var"), True) - - - def sdk_list_installed_packages(d, target, rootfs_dir=None): if rootfs_dir is None: sdk_output = d.getVar('SDK_OUTPUT') @@ -394,6 +115,9 @@ def 
sdk_list_installed_packages(d, target, rootfs_dir=None): rootfs_dir = [sdk_output, os.path.join(sdk_output, target_path)][target is True] + from oe.package_manager.rpm import RpmPkgsList + from oe.package_manager.ipk import OpkgPkgsList + from oe.package_manager.deb import DpkgPkgsList img_type = d.getVar('IMAGE_PKGTYPE') if img_type == "rpm": arch_var = ["SDK_PACKAGE_ARCHS", None][target is True] @@ -409,6 +133,9 @@ def populate_sdk(d, manifest_dir=None): env_bkp = os.environ.copy() img_type = d.getVar('IMAGE_PKGTYPE') + from oe.package_manager.rpm.sdk import RpmSdk + from oe.package_manager.ipk.sdk import OpkgSdk + from oe.package_manager.deb.sdk import DpkgSdk if img_type == "rpm": RpmSdk(d, manifest_dir).populate() elif img_type == "ipk": diff --git a/poky/meta/lib/oeqa/core/runner.py b/poky/meta/lib/oeqa/core/runner.py index 00b7d0bb1..d50690ab3 100644 --- a/poky/meta/lib/oeqa/core/runner.py +++ b/poky/meta/lib/oeqa/core/runner.py @@ -195,6 +195,20 @@ class OETestResult(_TestResult): report['log'] = log if duration: report['duration'] = duration + + alltags = [] + # pull tags from the case class + if hasattr(case, "__oeqa_testtags"): + alltags.extend(getattr(case, "__oeqa_testtags")) + # pull tags from the method itself + test_name = case._testMethodName + if hasattr(case, test_name): + method = getattr(case, test_name) + if hasattr(method, "__oeqa_testtags"): + alltags.extend(getattr(method, "__oeqa_testtags")) + if alltags: + report['oetags'] = alltags + if dump_streams and case.id() in self.logged_output: (stdout, stderr) = self.logged_output[case.id()] report['stdout'] = stdout diff --git a/poky/meta/lib/oeqa/core/target/ssh.py b/poky/meta/lib/oeqa/core/target/ssh.py index 090b40a81..aefb57680 100644 --- a/poky/meta/lib/oeqa/core/target/ssh.py +++ b/poky/meta/lib/oeqa/core/target/ssh.py @@ -107,13 +107,16 @@ class OESSHTarget(OETarget): scpCmd = self.scp + [localSrc, remotePath] return self._run(scpCmd, ignore_status=False) - def copyFrom(self, remoteSrc, localDst): + def copyFrom(self, remoteSrc, localDst, warn_on_failure=False): """ Copy file from target. """ remotePath = '%s@%s:%s' % (self.user, self.ip, remoteSrc) scpCmd = self.scp + [remotePath, localDst] - return self._run(scpCmd, ignore_status=False) + (status, output) = self._run(scpCmd, ignore_status=warn_on_failure) + if warn_on_failure and status: + self.logger.warning("Copy returned non-zero exit status %d:\n%s" % (status, output)) + return (status, output) def copyDirTo(self, localSrc, remoteDst): """ diff --git a/poky/meta/lib/oeqa/manual/bsp-hw.json b/poky/meta/lib/oeqa/manual/bsp-hw.json index a9bc7d450..75b89758c 100644 --- a/poky/meta/lib/oeqa/manual/bsp-hw.json +++ b/poky/meta/lib/oeqa/manual/bsp-hw.json @@ -123,28 +123,6 @@ "summary": "boot_from_runlevel_5" } }, - { - "test": { - "@alias": "bsps-hw.bsps-hw.shutdown_system", - "author": [ - { - "email": "alexandru.c.georgescu@intel.com", - "name": "alexandru.c.georgescu@intel.com" - } - ], - "execution": { - "1": { - "action": "boot system", - "expected_results": "" - }, - "2": { - "action": "launch terminal and run \"shutdown -h now\" or \"poweroff\"", - "expected_results": "System can be shutdown successfully . 
" - } - }, - "summary": "shutdown_system" - } - }, { "test": { "@alias": "bsps-hw.bsps-hw.switch_among_multi_applications_and_desktop", @@ -261,28 +239,6 @@ "summary": "connman_offline_mode_in_connman-gnome" } }, - { - "test": { - "@alias": "bsps-hw.bsps-hw.X_server_can_start_up_with_runlevel_5_boot", - "author": [ - { - "email": "alexandru.c.georgescu@intel.com", - "name": "alexandru.c.georgescu@intel.com" - } - ], - "execution": { - "1": { - "action": "boot up system with default runlevel \n\n", - "expected_results": "X server can start up well and desktop display has no problem . \n\n" - }, - "2": { - "action": "type runlevel at command prompt", - "expected_results": "Output:N 5" - } - }, - "summary": "X_server_can_start_up_with_runlevel_5_boot" - } - }, { "test": { "@alias": "bsps-hw.bsps-hw.standby", diff --git a/poky/meta/lib/oeqa/runtime/cases/buildcpio.py b/poky/meta/lib/oeqa/runtime/cases/buildcpio.py index d0f91668b..e29bf16cc 100644 --- a/poky/meta/lib/oeqa/runtime/cases/buildcpio.py +++ b/poky/meta/lib/oeqa/runtime/cases/buildcpio.py @@ -27,6 +27,7 @@ class BuildCpioTest(OERuntimeTestCase): @OEHasPackage(['autoconf']) def test_cpio(self): self.project.download_archive() - self.project.run_configure('--disable-maintainer-mode','') + self.project.run_configure('--disable-maintainer-mode', + 'sed -i -e "/char \*program_name/d" src/global.c;') self.project.run_make() self.project.run_install() diff --git a/poky/meta/lib/oeqa/runtime/cases/ltp.py b/poky/meta/lib/oeqa/runtime/cases/ltp.py index 6dc5ef22a..a66d5d13d 100644 --- a/poky/meta/lib/oeqa/runtime/cases/ltp.py +++ b/poky/meta/lib/oeqa/runtime/cases/ltp.py @@ -78,9 +78,10 @@ class LtpTest(LtpTestBase): # copy nice log from DUT dst = os.path.join(self.ltptest_log_dir, "%s" % ltp_group ) remote_src = "/opt/ltp/results/%s" % ltp_group - (status, output) = self.target.copyFrom(remote_src, dst) + (status, output) = self.target.copyFrom(remote_src, dst, True) msg = 'File could not be copied. 
Output: %s' % output - self.assertEqual(status, 0, msg=msg) + if status: + self.target.logger.warning(msg) parser = LtpParser() results, sections = parser.parse(dst) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildcpio.py b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py index 902e93f62..e56582654 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildcpio.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py @@ -28,6 +28,7 @@ class BuildCpioTest(OESDKTestCase): self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) + self._run("sed -i -e '/char.*program_name/d' {source}/src/global.c".format(**dirs)) self._run("cd {build} && {source}/configure --disable-maintainer-mode $CONFIGURE_FLAGS".format(**dirs)) self._run("cd {build} && make -j".format(**dirs)) self._run("cd {build} && make install DESTDIR={install}".format(**dirs)) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py index bbaa5c55c..1121ed20e 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py @@ -35,7 +35,7 @@ class GalculatorTest(OESDKTestCase): self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) - self._run("cd {source} && autoreconf -i -f -I $OECORE_TARGET_SYSROOT/usr/share/aclocal -I m4".format(**dirs)) + self._run("cd {source} && sed -i -e '/s_preferences.*prefs;/d' src/main.c && autoreconf -i -f -I $OECORE_TARGET_SYSROOT/usr/share/aclocal -I m4".format(**dirs)) self._run("cd {build} && {source}/configure $CONFIGURE_FLAGS".format(**dirs)) self._run("cd {build} && make -j".format(**dirs)) self._run("cd {build} && make install DESTDIR={install}".format(**dirs)) diff --git a/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py index dea519e6d..f7a253374 100644 --- a/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py +++ b/poky/meta/lib/oeqa/selftest/cases/imagefeatures.py @@ -263,6 +263,80 @@ PNBLACKLIST[busybox] = "Don't build this" bitbake("--graphviz core-image-sato") + def test_fit_image(self): + """ + Summary: Check if FIT image and Image Tree Source (its) are built + and the Image Tree Source has the correct fields. + Expected: 1. fitImage and fitImage-its can be built + 2. The type, load address, entrypoint address and + default values of kernel and ramdisk are as expected + in the Image Tree Source. Not all the fields are tested, + only the key fields that wont vary between different + architectures. 
+ Product: oe-core + Author: Usama Arif + """ + config = """ +# Enable creation of fitImage +KERNEL_IMAGETYPE = "Image" +KERNEL_IMAGETYPES += " fitImage " +KERNEL_CLASSES = " kernel-fitimage " + +# RAM disk variables including load address and entrypoint for kernel and RAM disk +IMAGE_FSTYPES += "cpio.gz" +INITRAMFS_IMAGE = "core-image-minimal" +UBOOT_RD_LOADADDRESS = "0x88000000" +UBOOT_RD_ENTRYPOINT = "0x88000000" +UBOOT_LOADADDRESS = "0x80080000" +UBOOT_ENTRYPOINT = "0x80080000" +""" + self.write_config(config) + + # fitImage is created as part of linux recipe + bitbake("virtual/kernel") + + image_type = "core-image-minimal" + deploy_dir_image = get_bb_var('DEPLOY_DIR_IMAGE') + machine = get_bb_var('MACHINE') + fitimage_its_path = os.path.join(deploy_dir_image, + "fitImage-its-%s-%s-%s" % (image_type, machine, machine)) + fitimage_path = os.path.join(deploy_dir_image, + "fitImage-%s-%s-%s" % (image_type, machine, machine)) + + self.assertTrue(os.path.exists(fitimage_its_path), + "%s image tree source doesn't exist" % (fitimage_its_path)) + self.assertTrue(os.path.exists(fitimage_path), + "%s FIT image doesn't exist" % (fitimage_path)) + + # Check that the type, load address, entrypoint address and default + # values for kernel and ramdisk in Image Tree Source are as expected. + # The order of fields in the below array is important. Not all the + # fields are tested, only the key fields that wont vary between + # different architectures. + its_field_check = ['type = "kernel";', + 'load = <0x80080000>;', + 'entry = <0x80080000>;', + 'type = "ramdisk";', + 'load = <0x88000000>;', + 'entry = <0x88000000>;', + 'default = "conf@1";', + 'kernel = "kernel@1";', + 'ramdisk = "ramdisk@1";' + ] + + with open(fitimage_its_path) as its_file: + field_index = 0 + for line in its_file: + if field_index == len(its_field_check): + break + if its_field_check[field_index] in line: + field_index +=1 + + if field_index != len(its_field_check): # if its equal, the test passed + self.assertTrue(field_index == len(its_field_check), + "Fields in Image Tree Source File %s did not match, error in finding %s" + % (fitimage_its_path, its_field_check[field_index])) + def test_image_gen_debugfs(self): """ Summary: Check debugfs generation diff --git a/poky/meta/lib/oeqa/selftest/cases/tinfoil.py b/poky/meta/lib/oeqa/selftest/cases/tinfoil.py index d1aa7b9af..206168ed0 100644 --- a/poky/meta/lib/oeqa/selftest/cases/tinfoil.py +++ b/poky/meta/lib/oeqa/selftest/cases/tinfoil.py @@ -100,8 +100,9 @@ class TinfoilTests(OESelftestTestCase): eventreceived = False commandcomplete = False start = time.time() - # Wait for 5s in total so we'd detect spurious heartbeat events for example - while time.time() - start < 5: + # Wait for 10s in total so we'd detect spurious heartbeat events for example + # The test is IO load sensitive too + while time.time() - start < 10: event = tinfoil.wait_event(1) if event: if isinstance(event, bb.command.CommandCompleted): diff --git a/poky/meta/lib/oeqa/utils/package_manager.py b/poky/meta/lib/oeqa/utils/package_manager.py index 2d358f717..362329929 100644 --- a/poky/meta/lib/oeqa/utils/package_manager.py +++ b/poky/meta/lib/oeqa/utils/package_manager.py @@ -12,7 +12,9 @@ def get_package_manager(d, root_path): """ Returns an OE package manager that can install packages in root_path. 
""" - from oe.package_manager import RpmPM, OpkgPM, DpkgPM + from oe.package_manager.rpm import RpmPM + from oe.package_manager.ipk import OpkgPM + from oe.package_manager.deb import DpkgPM pkg_class = d.getVar("IMAGE_PKGTYPE") if pkg_class == "rpm": diff --git a/poky/meta/recipes-bsp/grub/grub2.inc b/poky/meta/recipes-bsp/grub/grub2.inc index 2024e1378..628ca6492 100644 --- a/poky/meta/recipes-bsp/grub/grub2.inc +++ b/poky/meta/recipes-bsp/grub/grub2.inc @@ -11,6 +11,8 @@ SECTION = "bootloaders" LICENSE = "GPLv3" LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" +CVE_PRODUCT = "grub2" + SRC_URI = "${GNU_MIRROR}/grub/grub-${PV}.tar.gz \ file://0001-Disable-mfpmath-sse-as-well-when-SSE-is-disabled.patch \ file://autogen.sh-exclude-pc.patch \ diff --git a/poky/meta/recipes-bsp/libacpi/files/0001-libacpi-Fix-build-witth-fno-commom.patch b/poky/meta/recipes-bsp/libacpi/files/0001-libacpi-Fix-build-witth-fno-commom.patch new file mode 100644 index 000000000..32808fb92 --- /dev/null +++ b/poky/meta/recipes-bsp/libacpi/files/0001-libacpi-Fix-build-witth-fno-commom.patch @@ -0,0 +1,68 @@ +From 9839c169f513e65fc711646257b3e8588cce623c Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 5 Aug 2020 12:06:01 -0700 +Subject: [PATCH] libacpi: Fix build witth -fno-commom + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + libacpi.c | 16 ++++++++++++++++ + libacpi.h | 6 +++--- + 2 files changed, 19 insertions(+), 3 deletions(-) + +diff --git a/libacpi.c b/libacpi.c +index 4e34725..9344ea4 100644 +--- a/libacpi.c ++++ b/libacpi.c +@@ -14,6 +14,22 @@ + #include "libacpi.h" + #include "list.h" + ++/** ++ * Array for existing batteries, loop until ++ * globals->battery_count ++ */ ++battery_t batteries[MAX_ITEMS]; ++/** ++ * Array for existing thermal zones, loop until ++ * globals->thermal_count ++ */ ++thermal_t thermals[MAX_ITEMS]; ++/** ++ * Array for existing fans, loop until ++ * globals->fan_count ++ */ ++fan_t fans[MAX_ITEMS]; ++ + static int read_acpi_battinfo(const int num); + static int read_acpi_battalarm(const int num); + static int read_acpi_battstate(const int num); +diff --git a/libacpi.h b/libacpi.h +index 9334b79..5242d3d 100644 +--- a/libacpi.h ++++ b/libacpi.h +@@ -183,17 +183,17 @@ typedef struct { + * Array for existing batteries, loop until + * globals->battery_count + */ +-battery_t batteries[MAX_ITEMS]; ++extern battery_t batteries[MAX_ITEMS]; + /** + * Array for existing thermal zones, loop until + * globals->thermal_count + */ +-thermal_t thermals[MAX_ITEMS]; ++extern thermal_t thermals[MAX_ITEMS]; + /** + * Array for existing fans, loop until + * globals->fan_count + */ +-fan_t fans[MAX_ITEMS]; ++extern fan_t fans[MAX_ITEMS]; + /** + * Finds existing batteries and fills the + * corresponding batteries structures with the paths +-- +2.28.0 + diff --git a/poky/meta/recipes-bsp/libacpi/libacpi_0.2.bb b/poky/meta/recipes-bsp/libacpi/libacpi_0.2.bb index 5e7f7fa8e..fa9e3d489 100644 --- a/poky/meta/recipes-bsp/libacpi/libacpi_0.2.bb +++ b/poky/meta/recipes-bsp/libacpi/libacpi_0.2.bb @@ -11,7 +11,9 @@ SRC_URI = "http://www.ngolde.de/download/libacpi-${PV}.tar.gz \ file://makefile-fix.patch \ file://libacpi_fix_for_x32.patch \ file://use_correct_strip_in_cross_environment.patch \ - file://ldflags.patch " + file://ldflags.patch \ + file://0001-libacpi-Fix-build-witth-fno-commom.patch \ + " SRC_URI[md5sum] = "05b53dd7bead66dda35fec502b91066c" SRC_URI[sha256sum] = "13086e31d428b9c125954d48ac497b754bbbce2ef34ea29ecd903e82e25bad29" diff --git 
a/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb b/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb deleted file mode 100644 index 3820ba262..000000000 --- a/poky/meta/recipes-bsp/u-boot/libubootenv_0.2.bb +++ /dev/null @@ -1,27 +0,0 @@ -SUMMARY = "U-Boot libraries and tools to access environment" - -DESCRIPTION = "This package contains tools and libraries to read \ -and modify U-Boot environment. \ -It provides a hardware-independent replacement for fw_printenv/setenv utilities \ -provided by U-Boot" - -HOMEPAGE = "https://github.com/sbabic/libubootenv" -LICENSE = "LGPL-2.1" -LIC_FILES_CHKSUM = "file://Licenses/lgpl-2.1.txt;md5=4fbd65380cdd255951079008b364516c" -SECTION = "libs" - -PV = "0.2+git${SRCPV}" -SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https" -SRCREV = "86bd30a14e153a18f670b25708795253d8736f0f" - -S = "${WORKDIR}/git" - -inherit cmake lib_package - -EXTRA_OECMAKE = "-DCMAKE_BUILD_TYPE=Release" - -DEPENDS = "zlib" -PROVIDES += "u-boot-fw-utils" -RPROVIDES_${PN}-bin += "u-boot-fw-utils" - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb b/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb new file mode 100644 index 000000000..47e64f911 --- /dev/null +++ b/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb @@ -0,0 +1,30 @@ +SUMMARY = "U-Boot libraries and tools to access environment" + +DESCRIPTION = "This package contains tools and libraries to read \ +and modify U-Boot environment. \ +It provides a hardware-independent replacement for fw_printenv/setenv utilities \ +provided by U-Boot" + +HOMEPAGE = "https://github.com/sbabic/libubootenv" +LICENSE = "LGPL-2.1" +LIC_FILES_CHKSUM = "file://Licenses/lgpl-2.1.txt;md5=4fbd65380cdd255951079008b364516c" +SECTION = "libs" + +SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https" +SRCREV = "ad253cfdb07c8492f2ee46a52fbc607ad0b96414" + +S = "${WORKDIR}/git" + +inherit cmake lib_package + +EXTRA_OECMAKE = "-DCMAKE_BUILD_TYPE=Release" + +DEPENDS = "zlib" +PROVIDES += "u-boot-fw-utils" +RPROVIDES_${PN}-bin += "u-boot-fw-utils" + +PACKAGE_ARCH = "${MACHINE_ARCH}" + +RRECOMMENDS_${PN}-bin_append_class-target = " u-boot-default-env" + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb b/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb index b56a20482..cf4af8260 100644 --- a/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb +++ b/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb @@ -19,3 +19,5 @@ LDFLAGS_append = " -pthread" PACKAGECONFIG ?= "" PACKAGECONFIG[bind-httpstats] = "--with-libxml2,--without-libxml2,libxml2" + +CFLAGS += "-fcommon" diff --git a/poky/meta/recipes-connectivity/inetutils/inetutils/0001-ftpd-telnetd-Fix-multiple-definitions-of-errcatch-an.patch b/poky/meta/recipes-connectivity/inetutils/inetutils/0001-ftpd-telnetd-Fix-multiple-definitions-of-errcatch-an.patch new file mode 100644 index 000000000..49d319f59 --- /dev/null +++ b/poky/meta/recipes-connectivity/inetutils/inetutils/0001-ftpd-telnetd-Fix-multiple-definitions-of-errcatch-an.patch @@ -0,0 +1,58 @@ +From 7d39930468e272c740b0eed3c7e5b7fb3abf29e8 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 5 Aug 2020 10:36:22 -0700 +Subject: [PATCH] ftpd,telnetd: Fix multiple definitions of errcatch and not42 + +This helps fix build failures when -fno-common option is used + +Upstream-Status: Pending +Signed-off-by: Khem Raj + +Signed-off-by: Khem Raj +--- + ftpd/extern.h | 2 +- + ftpd/ftpcmd.c | 1 + + telnetd/utility.c | 2 +- + 3 files changed, 3 insertions(+), 2 deletions(-) + +diff 
--git a/ftpd/extern.h b/ftpd/extern.h +index ab33cf3..91dbbee 100644 +--- a/ftpd/extern.h ++++ b/ftpd/extern.h +@@ -90,7 +90,7 @@ extern void user (const char *); + extern char *sgetsave (const char *); + + /* Exported from ftpd.c. */ +-jmp_buf errcatch; ++extern jmp_buf errcatch; + extern struct sockaddr_storage data_dest; + extern socklen_t data_dest_len; + extern struct sockaddr_storage his_addr; +diff --git a/ftpd/ftpcmd.c b/ftpd/ftpcmd.c +index beb1f06..d272e9d 100644 +--- a/ftpd/ftpcmd.c ++++ b/ftpd/ftpcmd.c +@@ -106,6 +106,7 @@ + #endif + + off_t restart_point; ++jmp_buf errcatch; + + static char cbuf[512]; /* Command Buffer. */ + static char *fromname; +diff --git a/telnetd/utility.c b/telnetd/utility.c +index e7ffb8e..46bf91e 100644 +--- a/telnetd/utility.c ++++ b/telnetd/utility.c +@@ -63,7 +63,7 @@ static int ncc; + static char ptyibuf[BUFSIZ], *ptyip; + static int pcc; + +-int not42; ++extern int not42; + + static int + readstream (int p, char *ibuf, int bufsize) +-- +2.28.0 + diff --git a/poky/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb b/poky/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb index cc9410b94..adf6d4414 100644 --- a/poky/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb +++ b/poky/meta/recipes-connectivity/inetutils/inetutils_1.9.4.bb @@ -23,6 +23,7 @@ SRC_URI = "${GNU_MIRROR}/inetutils/inetutils-${PV}.tar.gz \ file://inetutils-only-check-pam_appl.h-when-pam-enabled.patch \ file://0001-rcp-fix-to-work-with-large-files.patch \ file://fix-buffer-fortify-tfpt.patch \ + file://0001-ftpd-telnetd-Fix-multiple-definitions-of-errcatch-an.patch \ " SRC_URI[md5sum] = "04852c26c47cc8c6b825f2b74f191f52" diff --git a/poky/meta/recipes-connectivity/iproute2/iproute2/0001-devlink.c-add-missing-include.patch b/poky/meta/recipes-connectivity/iproute2/iproute2/0001-devlink.c-add-missing-include.patch index f9580b7d4..fdd8bbfb3 100644 --- a/poky/meta/recipes-connectivity/iproute2/iproute2/0001-devlink.c-add-missing-include.patch +++ b/poky/meta/recipes-connectivity/iproute2/iproute2/0001-devlink.c-add-missing-include.patch @@ -1,23 +1,24 @@ -From 5df629d825df4ccc4283228bc0739da126326072 Mon Sep 17 00:00:00 2001 +From ce39396d4617874323f6039a5b476e44bf552908 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Sat, 6 Jun 2020 18:00:13 +0000 Subject: [PATCH] devlink.c: add missing include Upstream-Status: Pending Signed-off-by: Alexander Kanavin + --- devlink/devlink.c | 1 + 1 file changed, 1 insertion(+) diff --git a/devlink/devlink.c b/devlink/devlink.c -index 16602ab..c278e6d 100644 +index 0982fae..93dc01c 100644 --- a/devlink/devlink.c +++ b/devlink/devlink.c -@@ -26,6 +26,7 @@ - #include - #include +@@ -33,6 +33,7 @@ + #include + #include #include +#include - #include "SNAPSHOT.h" + #include "version.h" #include "list.h" diff --git a/poky/meta/recipes-connectivity/iproute2/iproute2/0001-libc-compat.h-add-musl-workaround.patch b/poky/meta/recipes-connectivity/iproute2/iproute2/0001-libc-compat.h-add-musl-workaround.patch index 50c4bfb0f..74e3de1ce 100644 --- a/poky/meta/recipes-connectivity/iproute2/iproute2/0001-libc-compat.h-add-musl-workaround.patch +++ b/poky/meta/recipes-connectivity/iproute2/iproute2/0001-libc-compat.h-add-musl-workaround.patch @@ -1,4 +1,4 @@ -From b7d96340c55afb7023ded0041107c63dbd886196 Mon Sep 17 00:00:00 2001 +From c25f8d1f7a6203dfeb10b39f80ffd314bb84a58d Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Thu, 22 Dec 2016 15:26:30 +0200 Subject: [PATCH] libc-compat.h: add musl workaround @@ -14,15 +14,16 @@ 
https://git.buildroot.net/buildroot/tree/package/iproute2/0001-Add-the-musl-work Signed-off-by: Baruch Siach Signed-off-by: Maxin B. John + --- include/uapi/linux/libc-compat.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h -index f38571d..30f0b67 100644 +index a159991..22198fa 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h -@@ -49,10 +49,12 @@ +@@ -50,10 +50,12 @@ #define _LIBC_COMPAT_H /* We have included glibc headers... */ @@ -36,6 +37,3 @@ index f38571d..30f0b67 100644 /* GLIBC headers included first so don't define anything * that would already be defined. */ --- -2.4.0 - diff --git a/poky/meta/recipes-connectivity/iproute2/iproute2_5.7.0.bb b/poky/meta/recipes-connectivity/iproute2/iproute2_5.7.0.bb deleted file mode 100644 index 276afeab1..000000000 --- a/poky/meta/recipes-connectivity/iproute2/iproute2_5.7.0.bb +++ /dev/null @@ -1,12 +0,0 @@ -require iproute2.inc - -SRC_URI = "${KERNELORG_MIRROR}/linux/utils/net/${BPN}/${BP}.tar.xz \ - file://0001-libc-compat.h-add-musl-workaround.patch \ - file://0001-devlink.c-add-missing-include.patch \ - " - -SRC_URI[sha256sum] = "725dc7ba94aae54c6f8d4223ca055d9fb4fe89d6994b1c03bfb4411c4dd10f21" - -# CFLAGS are computed in Makefile and reference CCOPTS -# -EXTRA_OEMAKE_append = " CCOPTS='${CFLAGS}'" diff --git a/poky/meta/recipes-connectivity/iproute2/iproute2_5.8.0.bb b/poky/meta/recipes-connectivity/iproute2/iproute2_5.8.0.bb new file mode 100644 index 000000000..3a590f912 --- /dev/null +++ b/poky/meta/recipes-connectivity/iproute2/iproute2_5.8.0.bb @@ -0,0 +1,12 @@ +require iproute2.inc + +SRC_URI = "${KERNELORG_MIRROR}/linux/utils/net/${BPN}/${BP}.tar.xz \ + file://0001-libc-compat.h-add-musl-workaround.patch \ + file://0001-devlink.c-add-missing-include.patch \ + " + +SRC_URI[sha256sum] = "cfcd1f890290f8c8afcc91d9444ad929b9252c16f9ab3f286c50dd3c59dc646e" + +# CFLAGS are computed in Makefile and reference CCOPTS +# +EXTRA_OEMAKE_append = " CCOPTS='${CFLAGS}'" diff --git a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/nfs-server.service b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/nfs-server.service index 6481377d8..5c845b7e8 100644 --- a/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/nfs-server.service +++ b/poky/meta/recipes-connectivity/nfs-utils/nfs-utils/nfs-server.service @@ -17,7 +17,6 @@ ExecStop=@SBINDIR@/rpc.nfsd 0 ExecStopPost=@SBINDIR@/exportfs -au ExecStopPost=@SBINDIR@/exportfs -f ExecReload=@SBINDIR@/exportfs -r -StandardError=syslog RemainAfterExit=yes [Install] diff --git a/poky/meta/recipes-connectivity/openssh/openssh/sshd@.service b/poky/meta/recipes-connectivity/openssh/openssh/sshd@.service index 422450c7a..9d9965e62 100644 --- a/poky/meta/recipes-connectivity/openssh/openssh/sshd@.service +++ b/poky/meta/recipes-connectivity/openssh/openssh/sshd@.service @@ -7,5 +7,4 @@ Environment="SSHD_OPTS=" EnvironmentFile=-/etc/default/ssh ExecStart=-@SBINDIR@/sshd -i $SSHD_OPTS StandardInput=socket -StandardError=syslog KillMode=process diff --git a/poky/meta/recipes-core/glib-2.0/glib-2.0/meson.cross.d/common-linux b/poky/meta/recipes-core/glib-2.0/glib-2.0/meson.cross.d/common-linux index 83596e0ef..adad7e62e 100644 --- a/poky/meta/recipes-core/glib-2.0/glib-2.0/meson.cross.d/common-linux +++ b/poky/meta/recipes-core/glib-2.0/glib-2.0/meson.cross.d/common-linux @@ -2,4 +2,4 @@ have_proc_self_cmdline = true [binaries] -env = "/usr/bin/env" +env = '/usr/bin/env' diff --git 
a/poky/meta/recipes-core/glibc/cross-localedef-native_2.31.bb b/poky/meta/recipes-core/glibc/cross-localedef-native_2.31.bb deleted file mode 100644 index 24de55d92..000000000 --- a/poky/meta/recipes-core/glibc/cross-localedef-native_2.31.bb +++ /dev/null @@ -1,52 +0,0 @@ -SUMMARY = "Cross locale generation tool for glibc" -HOMEPAGE = "http://www.gnu.org/software/libc/libc.html" -SECTION = "libs" -LICENSE = "LGPL-2.1" - -LIC_FILES_CHKSUM = "file://LICENSES;md5=1541fd8f5e8f1579512bf05f533371ba \ - file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://posix/rxspencer/COPYRIGHT;md5=dc5485bb394a13b2332ec1c785f5d83a \ - file://COPYING.LIB;md5=4fbd65380cdd255951079008b364516c" - -require glibc-version.inc - -# Tell autotools that we're working in the localedef directory -# -AUTOTOOLS_SCRIPT_PATH = "${S}/localedef" - -inherit native -inherit autotools - -FILESEXTRAPATHS =. "${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/glibc:" - -SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \ - git://github.com/kraj/localedef;branch=master;name=localedef;destsuffix=git/localedef \ - \ - file://0001-localedef-Add-hardlink-resolver-to-build.patch;patchdir=localedef \ - \ - file://0001-localedef-Add-hardlink-resolver-from-util-linux.patch \ - file://0002-localedef-fix-ups-hardlink-to-make-it-compile.patch \ - \ - file://0018-timezone-re-written-tzselect-as-posix-sh.patch \ - file://0019-Remove-bash-dependency-for-nscd-init-script.patch \ - file://0020-eglibc-Cross-building-and-testing-instructions.patch \ - file://0021-eglibc-Help-bootstrap-cross-toolchain.patch \ - file://0022-eglibc-Resolve-__fpscr_values-on-SH4.patch \ - file://0023-eglibc-Forward-port-cross-locale-generation-support.patch \ - file://0024-Define-DUMMY_LOCALE_T-if-not-defined.patch \ - file://0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch \ -" -# Makes for a rather long rev (22 characters), but... -# -SRCREV_FORMAT = "glibc_localedef" - -S = "${WORKDIR}/git" - -EXTRA_OECONF = "--with-glibc=${S}" -CFLAGS += "-fgnu89-inline -std=gnu99 -DIS_IN\(x\)='0'" - -do_install() { - install -d ${D}${bindir} - install -m 0755 ${B}/localedef ${D}${bindir}/cross-localedef - install -m 0755 ${B}/cross-localedef-hardlink ${D}${bindir}/cross-localedef-hardlink -} diff --git a/poky/meta/recipes-core/glibc/cross-localedef-native_2.32.bb b/poky/meta/recipes-core/glibc/cross-localedef-native_2.32.bb new file mode 100644 index 000000000..5a0abbb70 --- /dev/null +++ b/poky/meta/recipes-core/glibc/cross-localedef-native_2.32.bb @@ -0,0 +1,50 @@ +SUMMARY = "Cross locale generation tool for glibc" +HOMEPAGE = "http://www.gnu.org/software/libc/libc.html" +SECTION = "libs" +LICENSE = "LGPL-2.1" + +LIC_FILES_CHKSUM = "file://LICENSES;md5=1541fd8f5e8f1579512bf05f533371ba \ + file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://posix/rxspencer/COPYRIGHT;md5=dc5485bb394a13b2332ec1c785f5d83a \ + file://COPYING.LIB;md5=4fbd65380cdd255951079008b364516c" + +require glibc-version.inc + +# Tell autotools that we're working in the localedef directory +# +AUTOTOOLS_SCRIPT_PATH = "${S}/localedef" + +inherit native +inherit autotools + +FILESEXTRAPATHS =. 
"${FILE_DIRNAME}/${PN}:${FILE_DIRNAME}/glibc:" + +SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \ + git://github.com/kraj/localedef;branch=master;name=localedef;destsuffix=git/localedef \ + \ + file://0001-localedef-Add-hardlink-resolver-from-util-linux.patch \ + file://0002-localedef-fix-ups-hardlink-to-make-it-compile.patch \ + \ + file://0016-timezone-re-written-tzselect-as-posix-sh.patch \ + file://0017-Remove-bash-dependency-for-nscd-init-script.patch \ + file://0018-eglibc-Cross-building-and-testing-instructions.patch \ + file://0019-eglibc-Help-bootstrap-cross-toolchain.patch \ + file://0020-eglibc-Resolve-__fpscr_values-on-SH4.patch \ + file://0021-eglibc-Forward-port-cross-locale-generation-support.patch \ + file://0022-Define-DUMMY_LOCALE_T-if-not-defined.patch \ + file://0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch \ +" +# Makes for a rather long rev (22 characters), but... +# +SRCREV_FORMAT = "glibc_localedef" + +S = "${WORKDIR}/git" + +EXTRA_OECONF = "--with-glibc=${S}" +CFLAGS += "-fgnu89-inline -std=gnu99 -DIS_IN\(x\)='0'" + +do_install() { + install -d ${D}${bindir} + install -m 0755 ${B}/localedef ${D}${bindir}/cross-localedef + install -m 0755 ${B}/cross-localedef-hardlink ${D}${bindir}/cross-localedef-hardlink +} diff --git a/poky/meta/recipes-core/glibc/glibc-common.inc b/poky/meta/recipes-core/glibc/glibc-common.inc index 8d412cc85..027c70099 100644 --- a/poky/meta/recipes-core/glibc/glibc-common.inc +++ b/poky/meta/recipes-core/glibc/glibc-common.inc @@ -22,4 +22,4 @@ ARM_INSTRUCTION_SET_armv6 = "arm" # COMPATIBLE_HOST_libc-musl_class-target = "null" -PV = "2.31" +PV = "2.32" diff --git a/poky/meta/recipes-core/glibc/glibc-locale_2.31.bb b/poky/meta/recipes-core/glibc/glibc-locale_2.31.bb deleted file mode 100644 index f7702e035..000000000 --- a/poky/meta/recipes-core/glibc/glibc-locale_2.31.bb +++ /dev/null @@ -1 +0,0 @@ -require glibc-locale.inc diff --git a/poky/meta/recipes-core/glibc/glibc-locale_2.32.bb b/poky/meta/recipes-core/glibc/glibc-locale_2.32.bb new file mode 100644 index 000000000..f7702e035 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc-locale_2.32.bb @@ -0,0 +1 @@ +require glibc-locale.inc diff --git a/poky/meta/recipes-core/glibc/glibc-mtrace_2.31.bb b/poky/meta/recipes-core/glibc/glibc-mtrace_2.31.bb deleted file mode 100644 index 0b69bad46..000000000 --- a/poky/meta/recipes-core/glibc/glibc-mtrace_2.31.bb +++ /dev/null @@ -1 +0,0 @@ -require glibc-mtrace.inc diff --git a/poky/meta/recipes-core/glibc/glibc-mtrace_2.32.bb b/poky/meta/recipes-core/glibc/glibc-mtrace_2.32.bb new file mode 100644 index 000000000..0b69bad46 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc-mtrace_2.32.bb @@ -0,0 +1 @@ +require glibc-mtrace.inc diff --git a/poky/meta/recipes-core/glibc/glibc-scripts_2.31.bb b/poky/meta/recipes-core/glibc/glibc-scripts_2.31.bb deleted file mode 100644 index 5a89bd802..000000000 --- a/poky/meta/recipes-core/glibc/glibc-scripts_2.31.bb +++ /dev/null @@ -1 +0,0 @@ -require glibc-scripts.inc diff --git a/poky/meta/recipes-core/glibc/glibc-scripts_2.32.bb b/poky/meta/recipes-core/glibc/glibc-scripts_2.32.bb new file mode 100644 index 000000000..5a89bd802 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc-scripts_2.32.bb @@ -0,0 +1 @@ +require glibc-scripts.inc diff --git a/poky/meta/recipes-core/glibc/glibc-testsuite_2.31.bb b/poky/meta/recipes-core/glibc/glibc-testsuite_2.31.bb deleted file mode 100644 index d887aeff7..000000000 --- a/poky/meta/recipes-core/glibc/glibc-testsuite_2.31.bb +++ 
/dev/null @@ -1,63 +0,0 @@ -require glibc_${PV}.bb - -EXCLUDE_FROM_WORLD = "1" - -# handle PN differences -FILESEXTRAPATHS_prepend := "${THISDIR}/glibc:" - -# strip provides -PROVIDES = "" -# setup depends -INHIBIT_DEFAULT_DEPS = "" - -python () { - libc = d.getVar("PREFERRED_PROVIDER_virtual/libc") - libclocale = d.getVar("PREFERRED_PROVIDER_virtual/libc-locale") - if libc != "glibc" or libclocale != "glibc-locale": - raise bb.parse.SkipRecipe("glibc-testsuite requires that virtual/libc is glibc") -} - -DEPENDS += "glibc-locale libgcc gcc-runtime" - -# remove the initial depends -DEPENDS_remove = "libgcc-initial" - -inherit qemu - -SRC_URI += "file://check-test-wrapper" - -DEPENDS += "${@'qemu-native' if d.getVar('TOOLCHAIN_TEST_TARGET') == 'user' else ''}" - -TOOLCHAIN_TEST_TARGET ??= "user" -TOOLCHAIN_TEST_HOST ??= "localhost" -TOOLCHAIN_TEST_HOST_USER ??= "root" -TOOLCHAIN_TEST_HOST_PORT ??= "2222" - -do_check[dirs] += "${B}" -do_check[nostamp] = "1" -do_check () { - chmod 0755 ${WORKDIR}/check-test-wrapper - - # clean out previous test results - oe_runmake tests-clean - # makefiles don't clean entirely (and also sometimes fails due to too many args) - find ${B} -type f -name "*.out" -delete - find ${B} -type f -name "*.test-result" -delete - find ${B}/catgets -name "*.cat" -delete - find ${B}/conform -name "symlist-*" -delete - [ ! -e ${B}/timezone/testdata ] || rm -rf ${B}/timezone/testdata - - oe_runmake -i \ - QEMU_SYSROOT="${RECIPE_SYSROOT}" \ - QEMU_OPTIONS="${@qemu_target_binary(d)} ${QEMU_OPTIONS}" \ - SSH_HOST="${TOOLCHAIN_TEST_HOST}" \ - SSH_HOST_USER="${TOOLCHAIN_TEST_HOST_USER}" \ - SSH_HOST_PORT="${TOOLCHAIN_TEST_HOST_PORT}" \ - test-wrapper="${WORKDIR}/check-test-wrapper ${TOOLCHAIN_TEST_TARGET}" \ - check -} -addtask do_check after do_compile - -inherit nopackages -deltask do_stash_locale -deltask do_install diff --git a/poky/meta/recipes-core/glibc/glibc-testsuite_2.32.bb b/poky/meta/recipes-core/glibc/glibc-testsuite_2.32.bb new file mode 100644 index 000000000..d887aeff7 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc-testsuite_2.32.bb @@ -0,0 +1,63 @@ +require glibc_${PV}.bb + +EXCLUDE_FROM_WORLD = "1" + +# handle PN differences +FILESEXTRAPATHS_prepend := "${THISDIR}/glibc:" + +# strip provides +PROVIDES = "" +# setup depends +INHIBIT_DEFAULT_DEPS = "" + +python () { + libc = d.getVar("PREFERRED_PROVIDER_virtual/libc") + libclocale = d.getVar("PREFERRED_PROVIDER_virtual/libc-locale") + if libc != "glibc" or libclocale != "glibc-locale": + raise bb.parse.SkipRecipe("glibc-testsuite requires that virtual/libc is glibc") +} + +DEPENDS += "glibc-locale libgcc gcc-runtime" + +# remove the initial depends +DEPENDS_remove = "libgcc-initial" + +inherit qemu + +SRC_URI += "file://check-test-wrapper" + +DEPENDS += "${@'qemu-native' if d.getVar('TOOLCHAIN_TEST_TARGET') == 'user' else ''}" + +TOOLCHAIN_TEST_TARGET ??= "user" +TOOLCHAIN_TEST_HOST ??= "localhost" +TOOLCHAIN_TEST_HOST_USER ??= "root" +TOOLCHAIN_TEST_HOST_PORT ??= "2222" + +do_check[dirs] += "${B}" +do_check[nostamp] = "1" +do_check () { + chmod 0755 ${WORKDIR}/check-test-wrapper + + # clean out previous test results + oe_runmake tests-clean + # makefiles don't clean entirely (and also sometimes fails due to too many args) + find ${B} -type f -name "*.out" -delete + find ${B} -type f -name "*.test-result" -delete + find ${B}/catgets -name "*.cat" -delete + find ${B}/conform -name "symlist-*" -delete + [ ! 
-e ${B}/timezone/testdata ] || rm -rf ${B}/timezone/testdata + + oe_runmake -i \ + QEMU_SYSROOT="${RECIPE_SYSROOT}" \ + QEMU_OPTIONS="${@qemu_target_binary(d)} ${QEMU_OPTIONS}" \ + SSH_HOST="${TOOLCHAIN_TEST_HOST}" \ + SSH_HOST_USER="${TOOLCHAIN_TEST_HOST_USER}" \ + SSH_HOST_PORT="${TOOLCHAIN_TEST_HOST_PORT}" \ + test-wrapper="${WORKDIR}/check-test-wrapper ${TOOLCHAIN_TEST_TARGET}" \ + check +} +addtask do_check after do_compile + +inherit nopackages +deltask do_stash_locale +deltask do_install diff --git a/poky/meta/recipes-core/glibc/glibc-version.inc b/poky/meta/recipes-core/glibc/glibc-version.inc index c2d68979e..156605629 100644 --- a/poky/meta/recipes-core/glibc/glibc-version.inc +++ b/poky/meta/recipes-core/glibc/glibc-version.inc @@ -1,7 +1,7 @@ -SRCBRANCH ?= "release/2.31/master" -PV = "2.31+git${SRCPV}" -SRCREV_glibc ?= "109474122400ca7d60782b131dc867a5c1f2fe55" -SRCREV_localedef ?= "cd9f958c4c94a638fa7b2b4e21627364f1a1a655" +SRCBRANCH ?= "release/2.32/master" +PV = "2.32" +SRCREV_glibc ?= "3de512be7ea6053255afed6154db9ee31d4e557a" +SRCREV_localedef ?= "bd644c9e6f3e20c5504da1488448173c69c56c28" GLIBC_GIT_URI ?= "git://sourceware.org/git/glibc.git" diff --git a/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-from-util-linux.patch b/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-from-util-linux.patch index d0786be8b..e0ec1887d 100644 --- a/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-from-util-linux.patch +++ b/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-from-util-linux.patch @@ -1,7 +1,7 @@ -From 3dcf144e998aed17b3fb957a255b1e923ba07d71 Mon Sep 17 00:00:00 2001 +From 5db90855621a81d02f1434d5602cefea8c45de1c Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Sat, 7 Dec 2019 09:59:22 -0800 -Subject: [PATCH] localedef: Add hardlink resolver from util-linux +Subject: [PATCH 01/29] localedef: Add hardlink resolver from util-linux The hard link resolver that is built into localedef cannot be run in parallel. 
It will search sibling directories (which are be processed @@ -1128,3 +1128,6 @@ index 0000000000..0129a85e2e +} + +#endif +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-to-build.patch b/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-to-build.patch deleted file mode 100644 index d6652981a..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0001-localedef-Add-hardlink-resolver-to-build.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 56ae260ac24ade94e2e0e77c81751312372d253f Mon Sep 17 00:00:00 2001 -From: Jason Wessel -Date: Sat, 7 Dec 2019 09:56:23 -0800 -Subject: [PATCH] localedef: Add hardlink resolver to build - -The sourcecode for this is imported from util-linux and is kept with -glibc sources - -Upstream-Status: Pending -Signed-off-by: Jason Wessel -Signed-off-by: Khem Raj ---- - Makefile.in | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - -diff --git a/Makefile.in b/Makefile.in -index 77fbded..a87e5b2 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -40,6 +40,8 @@ WARNFLAGS = -Wall -Wno-format - FULLCC = $(CC) $(CPPFLAGS) $(CFLAGS) \ - $(DEFINES) $(INCLUDES) $(WARNFLAGS) - -+CROSS_LOCALEDEF_HARDLINK_OBJS = cross-localedef-hardlink.o -+ - LOCALEDEF_OBJS = charmap.o charmap-dir.o ld-address.o ld-collate.o \ - ld-ctype.o ld-identification.o ld-measurement.o \ - ld-messages.o ld-monetary.o ld-name.o ld-numeric.o \ -@@ -54,11 +56,14 @@ LOCALEDEF_OBJS = charmap.o charmap-dir.o ld-address.o ld-collate.o \ - asprintf.o getdelim.o localedef_extra.o \ - obstack_printf.o vasprintf.o - --all: localedef$(EXEEXT) -+all: localedef$(EXEEXT) cross-localedef-hardlink$(EXEEXT) - - localedef$(EXEEXT): $(LOCALEDEF_OBJS) - $(CC) -o $@ $(LOCALEDEF_OBJS) $(LIBS) - -+cross-localedef-hardlink$(EXEEXT): $(CROSS_LOCALEDEF_HARDLINK_OBJS) -+ $(CC) -o $@ $(CROSS_LOCALEDEF_HARDLINK_OBJS) $(LIBS) -+ - clean: - rm -f locale$(EXEEXT) $(LOCALEDEF_OBJS) - -@@ -77,6 +82,7 @@ clean: - %.o: $(srcdir)/%.c - $(FULLCC) -c -o $@ $< - -+cross-localedef-hardlink.o: glibc/locale/programs/cross-localedef-hardlink.c - charmap.o: glibc/locale/programs/charmap.c - charmap-dir.o: glibc/locale/programs/charmap-dir.c - ld-address.o: glibc/locale/programs/ld-address.c diff --git a/poky/meta/recipes-core/glibc/glibc/0002-localedef-fix-ups-hardlink-to-make-it-compile.patch b/poky/meta/recipes-core/glibc/glibc/0002-localedef-fix-ups-hardlink-to-make-it-compile.patch index 5222e37d1..05b76803b 100644 --- a/poky/meta/recipes-core/glibc/glibc/0002-localedef-fix-ups-hardlink-to-make-it-compile.patch +++ b/poky/meta/recipes-core/glibc/glibc/0002-localedef-fix-ups-hardlink-to-make-it-compile.patch @@ -1,7 +1,7 @@ -From b52dba15527380cc18635e3696e0ef87efee9a84 Mon Sep 17 00:00:00 2001 +From ab022ce3c1c01fd6c850f541a33efd0cacabe052 Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Sat, 7 Dec 2019 10:01:37 -0800 -Subject: [PATCH] localedef: fix-ups hardlink to make it compile +Subject: [PATCH 02/29] localedef: fix-ups hardlink to make it compile Upstream-Status: Pending Signed-off-by: Jason Wessel @@ -236,3 +236,6 @@ index 63615896b0..726e6dd948 100644 } continue; } +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch b/poky/meta/recipes-core/glibc/glibc/0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch index 4cdc8354b..743994f2d 100644 --- a/poky/meta/recipes-core/glibc/glibc/0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch +++ 
b/poky/meta/recipes-core/glibc/glibc/0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch @@ -1,7 +1,8 @@ -From 6987ad183770cb56680ccc4f6ea065a04f31ccb6 Mon Sep 17 00:00:00 2001 +From de4322ef6d4dc9fc3ee9b69af1c10edbc64a66a3 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Wed, 18 Mar 2015 01:48:24 +0000 -Subject: [PATCH] nativesdk-glibc: Look for host system ld.so.cache as well +Subject: [PATCH 03/29] nativesdk-glibc: Look for host system ld.so.cache as + well Upstream-Status: Inappropriate [embedded specific] @@ -30,10 +31,10 @@ Signed-off-by: Khem Raj 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/elf/dl-load.c b/elf/dl-load.c -index a6b80f9395..3226f2e531 100644 +index e39980fb19..565b039b23 100644 --- a/elf/dl-load.c +++ b/elf/dl-load.c -@@ -2073,6 +2073,14 @@ _dl_map_object (struct link_map *loader, const char *name, +@@ -2160,6 +2160,14 @@ _dl_map_object (struct link_map *loader, const char *name, } } @@ -48,7 +49,7 @@ index a6b80f9395..3226f2e531 100644 #ifdef USE_LDCONFIG if (fd == -1 && (__glibc_likely ((mode & __RTLD_SECURE) == 0) -@@ -2131,14 +2139,6 @@ _dl_map_object (struct link_map *loader, const char *name, +@@ -2218,14 +2226,6 @@ _dl_map_object (struct link_map *loader, const char *name, } #endif @@ -63,3 +64,6 @@ index a6b80f9395..3226f2e531 100644 /* Add another newline when we are tracing the library loading. */ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_LIBS)) _dl_debug_printf ("\n"); +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch b/poky/meta/recipes-core/glibc/glibc/0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch index 62e12897f..a3b5b7750 100644 --- a/poky/meta/recipes-core/glibc/glibc/0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch +++ b/poky/meta/recipes-core/glibc/glibc/0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch @@ -1,7 +1,8 @@ -From 7806340c2accc2c51e7e861b618c29fb5609a007 Mon Sep 17 00:00:00 2001 +From 258c44e4ecffd830cb89d0016d45b2bac765f559 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Wed, 18 Mar 2015 01:50:00 +0000 -Subject: [PATCH] nativesdk-glibc: Fix buffer overrun with a relocated SDK +Subject: [PATCH 04/29] nativesdk-glibc: Fix buffer overrun with a relocated + SDK When ld-linux-*.so.2 is relocated to a path that is longer than the original fixed location, the dynamic loader will crash in open_path @@ -21,10 +22,10 @@ Signed-off-by: Khem Raj 1 file changed, 12 insertions(+) diff --git a/elf/dl-load.c b/elf/dl-load.c -index 3226f2e531..7cb8a86fab 100644 +index 565b039b23..e1b3486549 100644 --- a/elf/dl-load.c +++ b/elf/dl-load.c -@@ -1773,7 +1773,19 @@ open_path (const char *name, size_t namelen, int mode, +@@ -1860,7 +1860,19 @@ open_path (const char *name, size_t namelen, int mode, given on the command line when rtld is run directly. 
*/ return -1; @@ -44,3 +45,6 @@ index 3226f2e531..7cb8a86fab 100644 do { struct r_search_path_elem *this_dir = *dirs; +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch b/poky/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch index 294c2b975..2073576aa 100644 --- a/poky/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch +++ b/poky/meta/recipes-core/glibc/glibc/0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch @@ -1,7 +1,8 @@ -From 1b97befbe693eb93a77b6098f6ae1394a53462f4 Mon Sep 17 00:00:00 2001 +From 19cd858f5f04a6ac584fbd89a2fbc51791263b85 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Wed, 18 Mar 2015 01:51:38 +0000 -Subject: [PATCH] nativesdk-glibc: Raise the size of arrays containing dl paths +Subject: [PATCH 05/29] nativesdk-glibc: Raise the size of arrays containing dl + paths This patch puts the dynamic loader path in the binaries, SYSTEM_DIRS strings and lengths as well as ld.so.cache path in the dynamic loader to specific @@ -25,7 +26,7 @@ Signed-off-by: Khem Raj 7 files changed, 14 insertions(+), 10 deletions(-) diff --git a/elf/dl-cache.c b/elf/dl-cache.c -index 3eedd9afcf..efdfe5cae7 100644 +index 93d185e788..e115b18756 100644 --- a/elf/dl-cache.c +++ b/elf/dl-cache.c @@ -133,6 +133,10 @@ do \ @@ -40,10 +41,10 @@ index 3eedd9afcf..efdfe5cae7 100644 _dl_cache_libcmp (const char *p1, const char *p2) { diff --git a/elf/dl-load.c b/elf/dl-load.c -index 7cb8a86fab..e32d4aa936 100644 +index e1b3486549..5226d0c4fa 100644 --- a/elf/dl-load.c +++ b/elf/dl-load.c -@@ -110,8 +110,8 @@ static size_t max_capstrlen attribute_relro; +@@ -111,8 +111,8 @@ static size_t max_capstrlen attribute_relro; gen-trusted-dirs.awk. */ #include "trusted-dirs.h" @@ -66,10 +67,10 @@ index 331cc1df48..885b2d9476 100644 +const char __invoke_dynamic_linker__[4096] __attribute__ ((section (".interp"))) = RUNTIME_LINKER; diff --git a/elf/ldconfig.c b/elf/ldconfig.c -index 681ed78496..8833ed0a6b 100644 +index 0c090dca15..6bb6e0fe72 100644 --- a/elf/ldconfig.c +++ b/elf/ldconfig.c -@@ -168,6 +168,9 @@ static struct argp argp = +@@ -171,6 +171,9 @@ static struct argp argp = options, parse_opt, NULL, doc, NULL, more_help, NULL }; @@ -80,18 +81,18 @@ index 681ed78496..8833ed0a6b 100644 a platform. */ static int diff --git a/elf/rtld.c b/elf/rtld.c -index 553cfbd1b7..39347c2c03 100644 +index 5b882163fa..db407b5d8b 100644 --- a/elf/rtld.c +++ b/elf/rtld.c -@@ -175,6 +175,7 @@ dso_name_valid_for_suid (const char *p) +@@ -217,6 +217,7 @@ dso_name_valid_for_suid (const char *p) } return *p != '\0'; } +extern const char LD_SO_CACHE[4096] __attribute__ ((section (".ldsocache"))); - /* LD_AUDIT variable contents. Must be processed before the - audit_list below. 
*/ -@@ -1222,13 +1223,13 @@ of this helper program; chances are you did not intend to run this program.\n\ + static void + audit_list_init (struct audit_list *list) +@@ -1286,13 +1287,13 @@ of this helper program; chances are you did not intend to run this program.\n\ --list list all dependencies and how they are resolved\n\ --verify verify that given object really is a dynamically linked\n\ object we can handle\n\ @@ -135,3 +136,6 @@ index 6b310e9e15..3877311df4 100644 #ifndef add_system_dir # define add_system_dir(dir) add_dir (dir) #endif +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch b/poky/meta/recipes-core/glibc/glibc/0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch index db8b6c0f1..372dca727 100644 --- a/poky/meta/recipes-core/glibc/glibc/0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch +++ b/poky/meta/recipes-core/glibc/glibc/0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch @@ -1,7 +1,7 @@ -From a752857cc342ee5136c9a593037b6ee6ff8af8ee Mon Sep 17 00:00:00 2001 +From bd0486cab67c3441210aed48caab67418610a765 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Thu, 31 Dec 2015 14:35:35 -0800 -Subject: [PATCH] nativesdk-glibc: Allow 64 bit atomics for x86 +Subject: [PATCH 06/29] nativesdk-glibc: Allow 64 bit atomics for x86 The fix consist of allowing 64bit atomic ops for x86. This should be safe for i586 and newer CPUs. @@ -37,3 +37,6 @@ index bb49648374..aa08d3c0a7 100644 # define SP_REG "esp" # define SEG_REG "gs" # define BR_CONSTRAINT "r" +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch b/poky/meta/recipes-core/glibc/glibc/0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch index 27cd17cdc..729ce1b42 100644 --- a/poky/meta/recipes-core/glibc/glibc/0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch +++ b/poky/meta/recipes-core/glibc/glibc/0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch @@ -1,7 +1,7 @@ -From 3df91d1d8b9c7a01b3ef8133c4f9b9764227d583 Mon Sep 17 00:00:00 2001 +From 9a8bf11ea375a2fe5eddb30bc10943e64d3b96a4 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Fri, 3 Aug 2018 09:55:12 -0700 -Subject: [PATCH] nativesdk-glibc: Make relocatable install for locales +Subject: [PATCH 07/29] nativesdk-glibc: Make relocatable install for locales The glibc locale path is hard-coded to the install prefix, but in SDKs we need to be able to relocate the binaries. Expand the strings to 4K and put them in a @@ -12,16 +12,17 @@ Upstream-Status: Inappropriate (OE-specific) Signed-off-by: Ross Burton Signed-off-by: Khem Raj --- - locale/findlocale.c | 4 ++-- - locale/loadarchive.c | 2 +- - locale/localeinfo.h | 2 +- - 3 files changed, 4 insertions(+), 4 deletions(-) + locale/findlocale.c | 4 ++-- + locale/loadarchive.c | 2 +- + locale/localeinfo.h | 2 +- + locale/programs/locale.c | 7 ++++--- + 4 files changed, 8 insertions(+), 7 deletions(-) -Index: git/locale/findlocale.c -=================================================================== ---- git.orig/locale/findlocale.c -+++ git/locale/findlocale.c -@@ -56,7 +56,7 @@ struct __locale_data *const _nl_C[] attr +diff --git a/locale/findlocale.c b/locale/findlocale.c +index 9cd3b71a6d..84272310e0 100644 +--- a/locale/findlocale.c ++++ b/locale/findlocale.c +@@ -56,7 +56,7 @@ struct __locale_data *const _nl_C[] attribute_hidden = which are somehow addressed. 
*/ struct loaded_l10nfile *_nl_locale_file_list[__LC_LAST]; @@ -30,7 +31,7 @@ Index: git/locale/findlocale.c /* Checks if the name is actually present, that is, not NULL and not empty. */ -@@ -166,7 +166,7 @@ _nl_find_locale (const char *locale_path +@@ -166,7 +166,7 @@ _nl_find_locale (const char *locale_path, size_t locale_path_len, /* Nothing in the archive. Set the default path to search below. */ locale_path = _nl_default_locale_path; @@ -39,10 +40,10 @@ Index: git/locale/findlocale.c } else /* We really have to load some data. First see whether the name is -Index: git/locale/loadarchive.c -=================================================================== ---- git.orig/locale/loadarchive.c -+++ git/locale/loadarchive.c +diff --git a/locale/loadarchive.c b/locale/loadarchive.c +index ba0fe45648..9737fd4cda 100644 +--- a/locale/loadarchive.c ++++ b/locale/loadarchive.c @@ -42,7 +42,7 @@ @@ -52,11 +53,11 @@ Index: git/locale/loadarchive.c /* Size of initial mapping window, optimal if large enough to cover the header plus the initial locale. */ -Index: git/locale/localeinfo.h -=================================================================== ---- git.orig/locale/localeinfo.h -+++ git/locale/localeinfo.h -@@ -331,7 +331,7 @@ _nl_lookup_word (locale_t l, int categor +diff --git a/locale/localeinfo.h b/locale/localeinfo.h +index 1bfe22aa7f..fdc283c69a 100644 +--- a/locale/localeinfo.h ++++ b/locale/localeinfo.h +@@ -331,7 +331,7 @@ _nl_lookup_word (locale_t l, int category, int item) } /* Default search path if no LOCPATH environment variable. */ @@ -65,11 +66,11 @@ Index: git/locale/localeinfo.h /* Load the locale data for CATEGORY from the file specified by *NAME. If *NAME is "", use environment variables as specified by POSIX, and -Index: git/locale/programs/locale.c -=================================================================== ---- git.orig/locale/programs/locale.c -+++ git/locale/programs/locale.c -@@ -632,6 +632,7 @@ nameentcmp (const void *a, const void *b +diff --git a/locale/programs/locale.c b/locale/programs/locale.c +index e2e309c2a1..61a92cdcd1 100644 +--- a/locale/programs/locale.c ++++ b/locale/programs/locale.c +@@ -632,6 +632,7 @@ nameentcmp (const void *a, const void *b) ((const struct nameent *) b)->name); } @@ -77,7 +78,7 @@ Index: git/locale/programs/locale.c static int write_archive_locales (void **all_datap, char *linebuf) -@@ -645,7 +646,7 @@ write_archive_locales (void **all_datap, +@@ -645,7 +646,7 @@ write_archive_locales (void **all_datap, char *linebuf) int fd, ret = 0; uint32_t cnt; @@ -86,7 +87,7 @@ Index: git/locale/programs/locale.c if (fd < 0) return 0; -@@ -700,8 +701,8 @@ write_archive_locales (void **all_datap, +@@ -700,8 +701,8 @@ write_archive_locales (void **all_datap, char *linebuf) if (cnt) putchar_unlocked ('\n'); @@ -97,3 +98,6 @@ Index: git/locale/programs/locale.c locrec = (struct locrecent *) (addr + names[cnt].locrec_offset); +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch b/poky/meta/recipes-core/glibc/glibc/0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch index 3dac32538..5e3b3e2d7 100644 --- a/poky/meta/recipes-core/glibc/glibc/0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch +++ b/poky/meta/recipes-core/glibc/glibc/0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch @@ -1,7 +1,7 @@ -From 61b6c9737897c5828ef4b40699ee0a74c570034a Mon Sep 17 00:00:00 2001 +From 59b0a78ae706a540dbd8905bc97c875220d6aeb2 Mon Sep 17 00:00:00 2001 From: Khem 
Raj Date: Wed, 18 Mar 2015 00:01:50 +0000 -Subject: [PATCH] fsl e500/e5500/e6500/603e fsqrt implementation +Subject: [PATCH 08/29] fsl e500/e5500/e6500/603e fsqrt implementation Upstream-Status: Pending Signed-off-by: Edmar Wienskoski @@ -1579,3 +1579,6 @@ index 0000000000..04ff8cc181 +++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/e6500/fpu/Implies @@ -0,0 +1 @@ +powerpc/powerpc64/e6500/fpu +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0009-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch b/poky/meta/recipes-core/glibc/glibc/0009-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch new file mode 100644 index 000000000..a94ed207f --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0009-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch @@ -0,0 +1,208 @@ +From 78f67f016ca9e3f7a37af86cf6e400cf17cf1d05 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:15:07 +0000 +Subject: [PATCH 09/29] ppc/sqrt: Fix undefined reference to `__sqrt_finite' + +on ppc fixes the errors like below +| ./.libs/libpulsecore-1.1.so: undefined reference to `__sqrt_finite' +| collect2: ld returned 1 exit status + +Upstream-Status: Pending + +ChangeLog + +2012-01-06 Khem Raj + + * sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c: Add __*_finite alias. + Remove cruft. + * sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c: Ditto. + * sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c: Ditto. + * sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c: Ditto. + +Signed-off-by: Khem Raj +--- + sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c | 7 +------ + sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c | 7 +------ + sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c | 1 + + sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c | 1 + + sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c | 1 + + sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c | 1 + + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 1 + + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 1 + + sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c | 7 +------ + sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c | 7 +------ + sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c | 1 + + sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c | 1 + + 12 files changed, 12 insertions(+), 24 deletions(-) + +diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c +index 71e516d1c8..1795fd6c3e 100644 +--- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c +@@ -39,14 +39,8 @@ static const float half = 0.5; + We find the actual square root and half of its reciprocal + simultaneously. */ + +-#ifdef __STDC__ + double + __ieee754_sqrt (double b) +-#else +-double +-__ieee754_sqrt (b) +- double b; +-#endif + { + if (__builtin_expect (b > 0, 1)) + { +@@ -132,3 +126,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c +index 26fa067abf..a917f313ab 100644 +--- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c +@@ -37,14 +37,8 @@ static const float threehalf = 1.5; + We find the reciprocal square root and use that to compute the actual + square root. 
*/ + +-#ifdef __STDC__ + float + __ieee754_sqrtf (float b) +-#else +-float +-__ieee754_sqrtf (b) +- float b; +-#endif + { + if (__builtin_expect (b > 0, 1)) + { +@@ -99,3 +93,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c +index 71e516d1c8..fc4a74990e 100644 +--- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c +@@ -132,3 +132,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c +index 26fa067abf..9d175122a8 100644 +--- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c +@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c +index 71e516d1c8..fc4a74990e 100644 +--- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c +@@ -132,3 +132,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c +index 26fa067abf..9d175122a8 100644 +--- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c +@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +index 71e516d1c8..fc4a74990e 100644 +--- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +@@ -132,3 +132,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +index 26fa067abf..9d175122a8 100644 +--- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c +index 71e516d1c8..1795fd6c3e 100644 +--- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c +@@ -39,14 +39,8 @@ static const float half = 0.5; + We find the actual square root and half of its reciprocal + simultaneously. */ + +-#ifdef __STDC__ + double + __ieee754_sqrt (double b) +-#else +-double +-__ieee754_sqrt (b) +- double b; +-#endif + { + if (__builtin_expect (b > 0, 1)) + { +@@ -132,3 +126,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c +index 26fa067abf..a917f313ab 100644 +--- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c +@@ -37,14 +37,8 @@ static const float threehalf = 1.5; + We find the reciprocal square root and use that to compute the actual + square root. 
*/ + +-#ifdef __STDC__ + float + __ieee754_sqrtf (float b) +-#else +-float +-__ieee754_sqrtf (b) +- float b; +-#endif + { + if (__builtin_expect (b > 0, 1)) + { +@@ -99,3 +93,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c +index 71e516d1c8..fc4a74990e 100644 +--- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c +@@ -132,3 +132,4 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c +index 26fa067abf..9d175122a8 100644 +--- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c +@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++strong_alias (__ieee754_sqrtf, __sqrtf_finite) +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0009-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch b/poky/meta/recipes-core/glibc/glibc/0009-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch deleted file mode 100644 index 456f91fb8..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0009-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 13beb1f428ec06778590bf526d6e641f73d5cf62 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:11:22 +0000 -Subject: [PATCH] readlib: Add OECORE_KNOWN_INTERPRETER_NAMES to known names - -This bolts in a hook for OE to pass its own version of interpreter -names into glibc especially for multilib case, where it differs from any -other distros - -Upstream-Status: Inappropriate [OE specific] - -Signed-off-by: Lianhao Lu -Signed-off-by: Khem Raj ---- - elf/readlib.c | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/elf/readlib.c b/elf/readlib.c -index 994a4426a1..baabf099b1 100644 ---- a/elf/readlib.c -+++ b/elf/readlib.c -@@ -51,6 +51,7 @@ static struct known_names interpreters[] = - #ifdef SYSDEP_KNOWN_INTERPRETER_NAMES - SYSDEP_KNOWN_INTERPRETER_NAMES - #endif -+ OECORE_KNOWN_INTERPRETER_NAMES - }; - - static struct known_names known_libs[] = diff --git a/poky/meta/recipes-core/glibc/glibc/0010-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch b/poky/meta/recipes-core/glibc/glibc/0010-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch new file mode 100644 index 000000000..743bea143 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0010-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch @@ -0,0 +1,387 @@ +From 670b5d70ab62b42ab02a8e18b8fcee2879b8c4a0 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:16:38 +0000 +Subject: [PATCH 10/29] __ieee754_sqrt{,f} are now inline functions and call + out __slow versions + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c | 12 ++++++++++-- + sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c | 8 +++++++- + sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c | 14 +++++++++++--- + sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c | 12 ++++++++++-- + sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c | 14 +++++++++++--- + sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c | 12 ++++++++++-- + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 8 ++++++++ + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 8 ++++++++ + sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c | 12 
++++++++++-- + sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c | 9 ++++++++- + sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c | 14 +++++++++++--- + sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c | 12 ++++++++++-- + 12 files changed, 114 insertions(+), 21 deletions(-) + +diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c +index 1795fd6c3e..daa83f3fe8 100644 +--- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c +@@ -40,7 +40,7 @@ static const float half = 0.5; + simultaneously. */ + + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + { + if (__builtin_expect (b > 0, 1)) + { +@@ -77,7 +77,7 @@ __ieee754_sqrt (double b) + + /* Handle small numbers by scaling. */ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +@@ -126,4 +126,12 @@ __ieee754_sqrt (double b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c +index a917f313ab..b812cf1705 100644 +--- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c +@@ -38,7 +38,7 @@ static const float threehalf = 1.5; + square root. */ + + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + { + if (__builtin_expect (b > 0, 1)) + { +@@ -93,4 +93,10 @@ __ieee754_sqrtf (float b) + } + return f_washf (b); + } ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c +index fc4a74990e..7038a70b47 100644 +--- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c +@@ -41,10 +41,10 @@ static const float half = 0.5; + + #ifdef __STDC__ + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + #else + double +-__ieee754_sqrt (b) ++__slow_ieee754_sqrt (b) + double b; + #endif + { +@@ -83,7 +83,7 @@ __ieee754_sqrt (b) + + /* Handle small numbers by scaling. 
*/ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +@@ -132,4 +132,12 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c +index 9d175122a8..10de1f0cc3 100644 +--- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c +@@ -39,10 +39,10 @@ static const float threehalf = 1.5; + + #ifdef __STDC__ + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + #else + float +-__ieee754_sqrtf (b) ++__slow_ieee754_sqrtf (b) + float b; + #endif + { +@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++ ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} ++ + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c +index fc4a74990e..7038a70b47 100644 +--- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c +@@ -41,10 +41,10 @@ static const float half = 0.5; + + #ifdef __STDC__ + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + #else + double +-__ieee754_sqrt (b) ++__slow_ieee754_sqrt (b) + double b; + #endif + { +@@ -83,7 +83,7 @@ __ieee754_sqrt (b) + + /* Handle small numbers by scaling. */ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +@@ -132,4 +132,12 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c +index 9d175122a8..10de1f0cc3 100644 +--- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c +@@ -39,10 +39,10 @@ static const float threehalf = 1.5; + + #ifdef __STDC__ + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + #else + float +-__ieee754_sqrtf (b) ++__slow_ieee754_sqrtf (b) + float b; + #endif + { +@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++ ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} ++ + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +index fc4a74990e..1c34244bd8 100644 +--- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +@@ -132,4 +132,12 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +index 9d175122a8..812653558f 100644 +--- 
a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++ ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} ++ + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c +index 1795fd6c3e..13a81973e3 100644 +--- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c +@@ -40,7 +40,7 @@ static const float half = 0.5; + simultaneously. */ + + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + { + if (__builtin_expect (b > 0, 1)) + { +@@ -77,7 +77,7 @@ __ieee754_sqrt (double b) + + /* Handle small numbers by scaling. */ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +@@ -126,4 +126,12 @@ __ieee754_sqrt (double b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c +index a917f313ab..fae2d81210 100644 +--- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c +@@ -38,7 +38,7 @@ static const float threehalf = 1.5; + square root. */ + + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + { + if (__builtin_expect (b > 0, 1)) + { +@@ -93,4 +93,11 @@ __ieee754_sqrtf (float b) + } + return f_washf (b); + } ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} ++ + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c +index fc4a74990e..7038a70b47 100644 +--- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c +@@ -41,10 +41,10 @@ static const float half = 0.5; + + #ifdef __STDC__ + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + #else + double +-__ieee754_sqrt (b) ++__slow_ieee754_sqrt (b) + double b; + #endif + { +@@ -83,7 +83,7 @@ __ieee754_sqrt (b) + + /* Handle small numbers by scaling. 
*/ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +@@ -132,4 +132,12 @@ __ieee754_sqrt (b) + } + return f_wash (b); + } ++ ++#undef __ieee754_sqrt ++double ++__ieee754_sqrt (double x) ++{ ++ return __slow_ieee754_sqrt (x); ++} ++ + strong_alias (__ieee754_sqrt, __sqrt_finite) +diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c +index 9d175122a8..10de1f0cc3 100644 +--- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c +@@ -39,10 +39,10 @@ static const float threehalf = 1.5; + + #ifdef __STDC__ + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + #else + float +-__ieee754_sqrtf (b) ++__slow_ieee754_sqrtf (b) + float b; + #endif + { +@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) + } + return f_washf (b); + } ++ ++#undef __ieee754_sqrtf ++float ++__ieee754_sqrtf (float x) ++{ ++ return __slow_ieee754_sqrtf (x); ++} ++ + strong_alias (__ieee754_sqrtf, __sqrtf_finite) +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0010-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch b/poky/meta/recipes-core/glibc/glibc/0010-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch deleted file mode 100644 index 01446abc4..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0010-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch +++ /dev/null @@ -1,205 +0,0 @@ -From 4483a83074a340a921e319b88d72166f18e0df0b Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:15:07 +0000 -Subject: [PATCH] ppc/sqrt: Fix undefined reference to `__sqrt_finite' - -on ppc fixes the errors like below -| ./.libs/libpulsecore-1.1.so: undefined reference to `__sqrt_finite' -| collect2: ld returned 1 exit status - -Upstream-Status: Pending - -ChangeLog - -2012-01-06 Khem Raj - - * sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c: Add __*_finite alias. - Remove cruft. - * sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c: Ditto. - * sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c: Ditto. - * sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c: Ditto. - -Signed-off-by: Khem Raj ---- - sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c | 7 +------ - sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c | 7 +------ - sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c | 1 + - sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c | 1 + - sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c | 1 + - sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c | 1 + - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 1 + - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 1 + - sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c | 7 +------ - sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c | 7 +------ - sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c | 1 + - sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c | 1 + - 12 files changed, 12 insertions(+), 24 deletions(-) - -diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -index 71e516d1c8..1795fd6c3e 100644 ---- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -@@ -39,14 +39,8 @@ static const float half = 0.5; - We find the actual square root and half of its reciprocal - simultaneously. 
*/ - --#ifdef __STDC__ - double - __ieee754_sqrt (double b) --#else --double --__ieee754_sqrt (b) -- double b; --#endif - { - if (__builtin_expect (b > 0, 1)) - { -@@ -132,3 +126,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -index 26fa067abf..a917f313ab 100644 ---- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -@@ -37,14 +37,8 @@ static const float threehalf = 1.5; - We find the reciprocal square root and use that to compute the actual - square root. */ - --#ifdef __STDC__ - float - __ieee754_sqrtf (float b) --#else --float --__ieee754_sqrtf (b) -- float b; --#endif - { - if (__builtin_expect (b > 0, 1)) - { -@@ -99,3 +93,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -index 71e516d1c8..fc4a74990e 100644 ---- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -@@ -132,3 +132,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -index 26fa067abf..9d175122a8 100644 ---- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -index 71e516d1c8..fc4a74990e 100644 ---- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -@@ -132,3 +132,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -index 26fa067abf..9d175122a8 100644 ---- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -index 71e516d1c8..fc4a74990e 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -@@ -132,3 +132,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -index 26fa067abf..9d175122a8 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -index 71e516d1c8..1795fd6c3e 100644 ---- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -@@ -39,14 +39,8 @@ static const float half = 0.5; - We find the actual square root and half of its reciprocal - simultaneously. 
*/ - --#ifdef __STDC__ - double - __ieee754_sqrt (double b) --#else --double --__ieee754_sqrt (b) -- double b; --#endif - { - if (__builtin_expect (b > 0, 1)) - { -@@ -132,3 +126,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -index 26fa067abf..a917f313ab 100644 ---- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -@@ -37,14 +37,8 @@ static const float threehalf = 1.5; - We find the reciprocal square root and use that to compute the actual - square root. */ - --#ifdef __STDC__ - float - __ieee754_sqrtf (float b) --#else --float --__ieee754_sqrtf (b) -- float b; --#endif - { - if (__builtin_expect (b > 0, 1)) - { -@@ -99,3 +93,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -index 71e516d1c8..fc4a74990e 100644 ---- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -@@ -132,3 +132,4 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -index 26fa067abf..9d175122a8 100644 ---- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -@@ -99,3 +99,4 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+strong_alias (__ieee754_sqrtf, __sqrtf_finite) diff --git a/poky/meta/recipes-core/glibc/glibc/0011-Quote-from-bug-1443-which-explains-what-the-patch-do.patch b/poky/meta/recipes-core/glibc/glibc/0011-Quote-from-bug-1443-which-explains-what-the-patch-do.patch new file mode 100644 index 000000000..a9280b7c3 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0011-Quote-from-bug-1443-which-explains-what-the-patch-do.patch @@ -0,0 +1,62 @@ +From de7a7c04a92dbc5d35cb37e47c471e12784cc95e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:20:09 +0000 +Subject: [PATCH 11/29] Quote from bug 1443 which explains what the patch does + : + + We build some random program and link it with -lust. When we run it, + it dies with a SIGSEGV before reaching main(). + + Libust.so depends on liburcu-bp.so from the usermode-rcu package. + Although libust.so is not prelinked, liburcu-bp.so IS prelinked; this + is critical. + + Libust.so uses a TLS / __thread variable that is defined in liburcu- + bp.so. There are special ARM-specific relocation types that allow two + shared libraries to share thread-specific data. This is critical too. + + One more critical issue: although liburcu-bp.so is prelinked, we can't + load it at its prelinked address, because we also link against + librt.so, and librt.so uses that address. + + The dynamic linker is forced to relink liburcu-bp.so at a different + address. In the course of relinking, it processes the special ARM + relocation record mentioned above. The prelinker has already filled + in the information, which is a short offset into a table of thread- + specific data that is allocated per-thread for each library that uses + TLS. Because the normal behavior of a relocation is to add the symbol + value to an addend stored at the address being relocated, we end up + adding the short offset to itself, doubling it. + + Now we have an awkward situation. 
The libust.so library doesn't know + about the addend, so its TLS data for this element is correct. The + liburcu-bp.so library has a different offset for the element. When we + go to initialize the element for the first time in liburcu-bp.so, we + write the address of the result at the doubled (broken) offset. + Later, when we refer to the address from libust.so, we check the value + at the correct offset, but it's NULL, so we eat hot SIGSEGV. + +Upstream-Status: Pending + +Signed-off-by: Andrei Dinu +Signed-off-by: Khem Raj +--- + sysdeps/arm/dl-machine.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h +index 90856779b1..a29bb86c56 100644 +--- a/sysdeps/arm/dl-machine.h ++++ b/sysdeps/arm/dl-machine.h +@@ -510,7 +510,7 @@ elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc, + + case R_ARM_TLS_DTPOFF32: + if (sym != NULL) +- *reloc_addr += sym->st_value; ++ *reloc_addr = sym->st_value; + break; + + case R_ARM_TLS_TPOFF32: +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0011-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch b/poky/meta/recipes-core/glibc/glibc/0011-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch deleted file mode 100644 index 451f37265..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0011-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch +++ /dev/null @@ -1,384 +0,0 @@ -From 347b2e31d010b04c42e78157a028aa1d58fe0f5e Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:16:38 +0000 -Subject: [PATCH] __ieee754_sqrt{,f} are now inline functions and call out - __slow versions - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c | 12 ++++++++++-- - sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c | 8 +++++++- - sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c | 14 +++++++++++--- - sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c | 12 ++++++++++-- - sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c | 14 +++++++++++--- - sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c | 12 ++++++++++-- - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 8 ++++++++ - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 8 ++++++++ - sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c | 12 ++++++++++-- - sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c | 9 ++++++++- - sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c | 14 +++++++++++--- - sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c | 12 ++++++++++-- - 12 files changed, 114 insertions(+), 21 deletions(-) - -diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -index 1795fd6c3e..daa83f3fe8 100644 ---- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrt.c -@@ -40,7 +40,7 @@ static const float half = 0.5; - simultaneously. */ - - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - { - if (__builtin_expect (b > 0, 1)) - { -@@ -77,7 +77,7 @@ __ieee754_sqrt (double b) - - /* Handle small numbers by scaling. 
*/ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -@@ -126,4 +126,12 @@ __ieee754_sqrt (double b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -index a917f313ab..b812cf1705 100644 ---- a/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/603e/fpu/e_sqrtf.c -@@ -38,7 +38,7 @@ static const float threehalf = 1.5; - square root. */ - - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - { - if (__builtin_expect (b > 0, 1)) - { -@@ -93,4 +93,10 @@ __ieee754_sqrtf (float b) - } - return f_washf (b); - } -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} - strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -index fc4a74990e..7038a70b47 100644 ---- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrt.c -@@ -41,10 +41,10 @@ static const float half = 0.5; - - #ifdef __STDC__ - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - #else - double --__ieee754_sqrt (b) -+__slow_ieee754_sqrt (b) - double b; - #endif - { -@@ -83,7 +83,7 @@ __ieee754_sqrt (b) - - /* Handle small numbers by scaling. */ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -@@ -132,4 +132,12 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -index 9d175122a8..10de1f0cc3 100644 ---- a/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e500mc/fpu/e_sqrtf.c -@@ -39,10 +39,10 @@ static const float threehalf = 1.5; - - #ifdef __STDC__ - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - #else - float --__ieee754_sqrtf (b) -+__slow_ieee754_sqrtf (b) - float b; - #endif - { -@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+ -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} -+ - strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -index fc4a74990e..7038a70b47 100644 ---- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrt.c -@@ -41,10 +41,10 @@ static const float half = 0.5; - - #ifdef __STDC__ - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - #else - double --__ieee754_sqrt (b) -+__slow_ieee754_sqrt (b) - double b; - #endif - { -@@ -83,7 +83,7 @@ __ieee754_sqrt (b) - - /* Handle small numbers by scaling. 
*/ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -@@ -132,4 +132,12 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -index 9d175122a8..10de1f0cc3 100644 ---- a/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e5500/fpu/e_sqrtf.c -@@ -39,10 +39,10 @@ static const float threehalf = 1.5; - - #ifdef __STDC__ - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - #else - float --__ieee754_sqrtf (b) -+__slow_ieee754_sqrtf (b) - float b; - #endif - { -@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+ -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} -+ - strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -index fc4a74990e..1c34244bd8 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -@@ -132,4 +132,12 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -index 9d175122a8..812653558f 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+ -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} -+ - strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -index 1795fd6c3e..13a81973e3 100644 ---- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrt.c -@@ -40,7 +40,7 @@ static const float half = 0.5; - simultaneously. */ - - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - { - if (__builtin_expect (b > 0, 1)) - { -@@ -77,7 +77,7 @@ __ieee754_sqrt (double b) - - /* Handle small numbers by scaling. */ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -@@ -126,4 +126,12 @@ __ieee754_sqrt (double b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -index a917f313ab..fae2d81210 100644 ---- a/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc64/e5500/fpu/e_sqrtf.c -@@ -38,7 +38,7 @@ static const float threehalf = 1.5; - square root. 
*/ - - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - { - if (__builtin_expect (b > 0, 1)) - { -@@ -93,4 +93,11 @@ __ieee754_sqrtf (float b) - } - return f_washf (b); - } -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} -+ - strong_alias (__ieee754_sqrtf, __sqrtf_finite) -diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -index fc4a74990e..7038a70b47 100644 ---- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrt.c -@@ -41,10 +41,10 @@ static const float half = 0.5; - - #ifdef __STDC__ - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - #else - double --__ieee754_sqrt (b) -+__slow_ieee754_sqrt (b) - double b; - #endif - { -@@ -83,7 +83,7 @@ __ieee754_sqrt (b) - - /* Handle small numbers by scaling. */ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -@@ -132,4 +132,12 @@ __ieee754_sqrt (b) - } - return f_wash (b); - } -+ -+#undef __ieee754_sqrt -+double -+__ieee754_sqrt (double x) -+{ -+ return __slow_ieee754_sqrt (x); -+} -+ - strong_alias (__ieee754_sqrt, __sqrt_finite) -diff --git a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -index 9d175122a8..10de1f0cc3 100644 ---- a/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc64/e6500/fpu/e_sqrtf.c -@@ -39,10 +39,10 @@ static const float threehalf = 1.5; - - #ifdef __STDC__ - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - #else - float --__ieee754_sqrtf (b) -+__slow_ieee754_sqrtf (b) - float b; - #endif - { -@@ -99,4 +99,12 @@ __ieee754_sqrtf (b) - } - return f_washf (b); - } -+ -+#undef __ieee754_sqrtf -+float -+__ieee754_sqrtf (float x) -+{ -+ return __slow_ieee754_sqrtf (x); -+} -+ - strong_alias (__ieee754_sqrtf, __sqrtf_finite) diff --git a/poky/meta/recipes-core/glibc/glibc/0012-Quote-from-bug-1443-which-explains-what-the-patch-do.patch b/poky/meta/recipes-core/glibc/glibc/0012-Quote-from-bug-1443-which-explains-what-the-patch-do.patch deleted file mode 100644 index a0b46c047..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0012-Quote-from-bug-1443-which-explains-what-the-patch-do.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 8ca8e5cd78cbd37a713e1181f8f6641b57352aa8 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:20:09 +0000 -Subject: [PATCH] Quote from bug 1443 which explains what the patch does : - - We build some random program and link it with -lust. When we run it, - it dies with a SIGSEGV before reaching main(). - - Libust.so depends on liburcu-bp.so from the usermode-rcu package. - Although libust.so is not prelinked, liburcu-bp.so IS prelinked; this - is critical. - - Libust.so uses a TLS / __thread variable that is defined in liburcu- - bp.so. There are special ARM-specific relocation types that allow two - shared libraries to share thread-specific data. This is critical too. - - One more critical issue: although liburcu-bp.so is prelinked, we can't - load it at its prelinked address, because we also link against - librt.so, and librt.so uses that address. - - The dynamic linker is forced to relink liburcu-bp.so at a different - address. In the course of relinking, it processes the special ARM - relocation record mentioned above. 
The prelinker has already filled - in the information, which is a short offset into a table of thread- - specific data that is allocated per-thread for each library that uses - TLS. Because the normal behavior of a relocation is to add the symbol - value to an addend stored at the address being relocated, we end up - adding the short offset to itself, doubling it. - - Now we have an awkward situation. The libust.so library doesn't know - about the addend, so its TLS data for this element is correct. The - liburcu-bp.so library has a different offset for the element. When we - go to initialize the element for the first time in liburcu-bp.so, we - write the address of the result at the doubled (broken) offset. - Later, when we refer to the address from libust.so, we check the value - at the correct offset, but it's NULL, so we eat hot SIGSEGV. - -Upstream-Status: Pending - -Signed-off-by: Andrei Dinu -Signed-off-by: Khem Raj ---- - sysdeps/arm/dl-machine.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h -index 90856779b1..a29bb86c56 100644 ---- a/sysdeps/arm/dl-machine.h -+++ b/sysdeps/arm/dl-machine.h -@@ -510,7 +510,7 @@ elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc, - - case R_ARM_TLS_DTPOFF32: - if (sym != NULL) -- *reloc_addr += sym->st_value; -+ *reloc_addr = sym->st_value; - break; - - case R_ARM_TLS_TPOFF32: diff --git a/poky/meta/recipes-core/glibc/glibc/0012-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch b/poky/meta/recipes-core/glibc/glibc/0012-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch new file mode 100644 index 000000000..ab07455ad --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0012-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch @@ -0,0 +1,36 @@ +From c8807899367e64d803585e7dd4b56a1085d5893b Mon Sep 17 00:00:00 2001 +From: Ting Liu +Date: Wed, 19 Dec 2012 04:39:57 -0600 +Subject: [PATCH 12/29] eglibc: run libm-err-tab.pl with specific dirs in ${S} + +libm-err-tab.pl will parse all the files named "libm-test-ulps" +in the given dir recursively. To avoid parsing the one in +${S}/.pc/ (it does exist after eglibc adds aarch64 support, +${S}/.pc/aarch64-0001-glibc-fsf-v1-eaf6f205.patch/ports/sysdeps/ +aarch64/libm-test-ulps), run libm-err-tab.pl with specific dirs +in ${S}. + +Upstream-Status: inappropriate [OE specific] + +Signed-off-by: Ting Liu +--- + manual/Makefile | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/manual/Makefile b/manual/Makefile +index c61e11dcd7..5d859e6f84 100644 +--- a/manual/Makefile ++++ b/manual/Makefile +@@ -103,7 +103,8 @@ $(objpfx)stamp-libm-err: $(..)math/gen-libm-test.py \ + $(wildcard $(foreach dir,$(sysdirs),\ + $(dir)/libm-test-ulps)) + pwd=`pwd`; \ +- $(PYTHON) $< -s $$pwd/.. 
-m $(objpfx)libm-err-tmp ++ $(PYTHON) $< -s $$pwd/../ports -m $(objpfx)libm-err-tmp ++ $(PYTHON) $< -s $$pwd/../sysdeps -m $(objpfx)libm-err-tmp + $(move-if-change) $(objpfx)libm-err-tmp $(objpfx)libm-err.texi + touch $@ + +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0013-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch b/poky/meta/recipes-core/glibc/glibc/0013-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch new file mode 100644 index 000000000..d875825ff --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0013-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch @@ -0,0 +1,61 @@ +From 86fea3409b89f9d8884053a519282f2f30d7ea87 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:24:46 +0000 +Subject: [PATCH 13/29] __ieee754_sqrt{,f} are now inline functions and call + out __slow versions + +Upstream-Status: Pending + +Signed-off-by: chunrong guo +Signed-off-by: Khem Raj +--- + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 6 +++--- + sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 4 ++-- + 2 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +index 1c34244bd8..7038a70b47 100644 +--- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c +@@ -41,10 +41,10 @@ static const float half = 0.5; + + #ifdef __STDC__ + double +-__ieee754_sqrt (double b) ++__slow_ieee754_sqrt (double b) + #else + double +-__ieee754_sqrt (b) ++__slow_ieee754_sqrt (b) + double b; + #endif + { +@@ -83,7 +83,7 @@ __ieee754_sqrt (b) + + /* Handle small numbers by scaling. */ + if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) +- return __ieee754_sqrt (b * two108) * twom54; ++ return __slow_ieee754_sqrt (b * two108) * twom54; + + #define FMADD(a_, c_, b_) \ + ({ double __r; \ +diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +index 812653558f..10de1f0cc3 100644 +--- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c ++++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c +@@ -39,10 +39,10 @@ static const float threehalf = 1.5; + + #ifdef __STDC__ + float +-__ieee754_sqrtf (float b) ++__slow_ieee754_sqrtf (float b) + #else + float +-__ieee754_sqrtf (b) ++__slow_ieee754_sqrtf (b) + float b; + #endif + { +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0013-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch b/poky/meta/recipes-core/glibc/glibc/0013-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch deleted file mode 100644 index 736de8cf0..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0013-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 21509735678990760d5ebf9d0c65efa4b52b838d Mon Sep 17 00:00:00 2001 -From: Ting Liu -Date: Wed, 19 Dec 2012 04:39:57 -0600 -Subject: [PATCH] eglibc: run libm-err-tab.pl with specific dirs in ${S} - -libm-err-tab.pl will parse all the files named "libm-test-ulps" -in the given dir recursively. To avoid parsing the one in -${S}/.pc/ (it does exist after eglibc adds aarch64 support, -${S}/.pc/aarch64-0001-glibc-fsf-v1-eaf6f205.patch/ports/sysdeps/ -aarch64/libm-test-ulps), run libm-err-tab.pl with specific dirs -in ${S}. 
- -Upstream-Status: inappropriate [OE specific] - -Signed-off-by: Ting Liu ---- - manual/Makefile | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/manual/Makefile b/manual/Makefile -index c61e11dcd7..5d859e6f84 100644 ---- a/manual/Makefile -+++ b/manual/Makefile -@@ -103,7 +103,8 @@ $(objpfx)stamp-libm-err: $(..)math/gen-libm-test.py \ - $(wildcard $(foreach dir,$(sysdirs),\ - $(dir)/libm-test-ulps)) - pwd=`pwd`; \ -- $(PYTHON) $< -s $$pwd/.. -m $(objpfx)libm-err-tmp -+ $(PYTHON) $< -s $$pwd/../ports -m $(objpfx)libm-err-tmp -+ $(PYTHON) $< -s $$pwd/../sysdeps -m $(objpfx)libm-err-tmp - $(move-if-change) $(objpfx)libm-err-tmp $(objpfx)libm-err.texi - touch $@ - diff --git a/poky/meta/recipes-core/glibc/glibc/0014-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch b/poky/meta/recipes-core/glibc/glibc/0014-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch deleted file mode 100644 index e73b640c1..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0014-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch +++ /dev/null @@ -1,58 +0,0 @@ -From e33deb119734ef443ef44c42a00a569f90e1e149 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:24:46 +0000 -Subject: [PATCH] __ieee754_sqrt{,f} are now inline functions and call out - __slow versions - -Upstream-Status: Pending - -Signed-off-by: chunrong guo -Signed-off-by: Khem Raj ---- - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c | 6 +++--- - sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c | 4 ++-- - 2 files changed, 5 insertions(+), 5 deletions(-) - -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -index 1c34244bd8..7038a70b47 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrt.c -@@ -41,10 +41,10 @@ static const float half = 0.5; - - #ifdef __STDC__ - double --__ieee754_sqrt (double b) -+__slow_ieee754_sqrt (double b) - #else - double --__ieee754_sqrt (b) -+__slow_ieee754_sqrt (b) - double b; - #endif - { -@@ -83,7 +83,7 @@ __ieee754_sqrt (b) - - /* Handle small numbers by scaling. 
*/ - if (__builtin_expect ((u.parts.msw & 0x7ff00000) <= 0x02000000, 0)) -- return __ieee754_sqrt (b * two108) * twom54; -+ return __slow_ieee754_sqrt (b * two108) * twom54; - - #define FMADD(a_, c_, b_) \ - ({ double __r; \ -diff --git a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -index 812653558f..10de1f0cc3 100644 ---- a/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -+++ b/sysdeps/powerpc/powerpc32/e6500/fpu/e_sqrtf.c -@@ -39,10 +39,10 @@ static const float threehalf = 1.5; - - #ifdef __STDC__ - float --__ieee754_sqrtf (float b) -+__slow_ieee754_sqrtf (float b) - #else - float --__ieee754_sqrtf (b) -+__slow_ieee754_sqrtf (b) - float b; - #endif - { diff --git a/poky/meta/recipes-core/glibc/glibc/0014-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch b/poky/meta/recipes-core/glibc/glibc/0014-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch new file mode 100644 index 000000000..954534bae --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0014-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch @@ -0,0 +1,42 @@ +From a3c4f67fb3cb02855073a9cdbcf2881fb53144f0 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:27:10 +0000 +Subject: [PATCH 14/29] sysdeps/gnu/configure.ac: handle correctly + $libc_cv_rootsbindir + +Upstream-Status:Pending + +Signed-off-by: Matthieu Crapet +Signed-off-by: Khem Raj +--- + sysdeps/gnu/configure | 2 +- + sysdeps/gnu/configure.ac | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/sysdeps/gnu/configure b/sysdeps/gnu/configure +index c15d1087e8..37cc983f2a 100644 +--- a/sysdeps/gnu/configure ++++ b/sysdeps/gnu/configure +@@ -32,6 +32,6 @@ case "$prefix" in + else + libc_cv_localstatedir=$localstatedir + fi +- libc_cv_rootsbindir=/sbin ++ test -n "$libc_cv_rootsbindir" || libc_cv_rootsbindir=/sbin + ;; + esac +diff --git a/sysdeps/gnu/configure.ac b/sysdeps/gnu/configure.ac +index 634fe4de2a..3db1697f4f 100644 +--- a/sysdeps/gnu/configure.ac ++++ b/sysdeps/gnu/configure.ac +@@ -21,6 +21,6 @@ case "$prefix" in + else + libc_cv_localstatedir=$localstatedir + fi +- libc_cv_rootsbindir=/sbin ++ test -n "$libc_cv_rootsbindir" || libc_cv_rootsbindir=/sbin + ;; + esac +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0015-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch b/poky/meta/recipes-core/glibc/glibc/0015-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch deleted file mode 100644 index 0cdd0567d..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0015-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch +++ /dev/null @@ -1,39 +0,0 @@ -From c50cae36e90c41849301a9a668adf31e81e43a07 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:27:10 +0000 -Subject: [PATCH] sysdeps/gnu/configure.ac: handle correctly - $libc_cv_rootsbindir - -Upstream-Status:Pending - -Signed-off-by: Matthieu Crapet -Signed-off-by: Khem Raj ---- - sysdeps/gnu/configure | 2 +- - sysdeps/gnu/configure.ac | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/sysdeps/gnu/configure b/sysdeps/gnu/configure -index c15d1087e8..37cc983f2a 100644 ---- a/sysdeps/gnu/configure -+++ b/sysdeps/gnu/configure -@@ -32,6 +32,6 @@ case "$prefix" in - else - libc_cv_localstatedir=$localstatedir - fi -- libc_cv_rootsbindir=/sbin -+ test -n "$libc_cv_rootsbindir" || libc_cv_rootsbindir=/sbin - ;; - esac -diff --git a/sysdeps/gnu/configure.ac b/sysdeps/gnu/configure.ac -index 634fe4de2a..3db1697f4f 100644 ---- 
a/sysdeps/gnu/configure.ac -+++ b/sysdeps/gnu/configure.ac -@@ -21,6 +21,6 @@ case "$prefix" in - else - libc_cv_localstatedir=$localstatedir - fi -- libc_cv_rootsbindir=/sbin -+ test -n "$libc_cv_rootsbindir" || libc_cv_rootsbindir=/sbin - ;; - esac diff --git a/poky/meta/recipes-core/glibc/glibc/0015-yes-within-the-path-sets-wrong-config-variables.patch b/poky/meta/recipes-core/glibc/glibc/0015-yes-within-the-path-sets-wrong-config-variables.patch new file mode 100644 index 000000000..04a9bf01c --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0015-yes-within-the-path-sets-wrong-config-variables.patch @@ -0,0 +1,263 @@ +From 17a602b89cbe53a5a92d0153ccb013a737f028cb Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:31:06 +0000 +Subject: [PATCH 15/29] 'yes' within the path sets wrong config variables + +It seems that the 'AC_EGREP_CPP(yes...' example is quite popular +but being such a short word to grep it is likely to produce +false-positive matches with the path it is configured into. + +The change is to use a more elaborated string to grep for. + +Upstream-Status: Submitted [libc-alpha@sourceware.org] + +Signed-off-by: Benjamin Esquivel +Signed-off-by: Khem Raj +--- + sysdeps/aarch64/configure | 4 ++-- + sysdeps/aarch64/configure.ac | 4 ++-- + sysdeps/arm/configure | 4 ++-- + sysdeps/arm/configure.ac | 4 ++-- + sysdeps/mips/configure | 4 ++-- + sysdeps/mips/configure.ac | 4 ++-- + sysdeps/nios2/configure | 4 ++-- + sysdeps/nios2/configure.ac | 4 ++-- + sysdeps/unix/sysv/linux/mips/configure | 4 ++-- + sysdeps/unix/sysv/linux/mips/configure.ac | 4 ++-- + sysdeps/unix/sysv/linux/powerpc/powerpc64/configure | 8 ++++---- + sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac | 8 ++++---- + 12 files changed, 28 insertions(+), 28 deletions(-) + +diff --git a/sysdeps/aarch64/configure b/sysdeps/aarch64/configure +index ac3cf6fd36..32add94df9 100644 +--- a/sysdeps/aarch64/configure ++++ b/sysdeps/aarch64/configure +@@ -148,12 +148,12 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #ifdef __AARCH64EB__ +- yes ++ is_aarch64_be + #endif + + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "is_aarch64_be" >/dev/null 2>&1; then : + libc_cv_aarch64_be=yes + else + libc_cv_aarch64_be=no +diff --git a/sysdeps/aarch64/configure.ac b/sysdeps/aarch64/configure.ac +index 8b042d6d05..3cdd262951 100644 +--- a/sysdeps/aarch64/configure.ac ++++ b/sysdeps/aarch64/configure.ac +@@ -10,8 +10,8 @@ GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. + # the dynamic linker via %ifdef. + AC_CACHE_CHECK([for big endian], + [libc_cv_aarch64_be], +- [AC_EGREP_CPP(yes,[#ifdef __AARCH64EB__ +- yes ++ [AC_EGREP_CPP(is_aarch64_be,[#ifdef __AARCH64EB__ ++ is_aarch64_be + #endif + ], libc_cv_aarch64_be=yes, libc_cv_aarch64_be=no)]) + if test $libc_cv_aarch64_be = yes; then +diff --git a/sysdeps/arm/configure b/sysdeps/arm/configure +index 431e843b2b..e152461138 100644 +--- a/sysdeps/arm/configure ++++ b/sysdeps/arm/configure +@@ -151,12 +151,12 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + #ifdef __ARM_PCS_VFP +- yes ++ use_arm_pcs_vfp + #endif + + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "use_arm_pcs_vfp" >/dev/null 2>&1; then : + libc_cv_arm_pcs_vfp=yes + else + libc_cv_arm_pcs_vfp=no +diff --git a/sysdeps/arm/configure.ac b/sysdeps/arm/configure.ac +index 90cdd69c75..05a262ba00 100644 +--- a/sysdeps/arm/configure.ac ++++ b/sysdeps/arm/configure.ac +@@ -15,8 +15,8 @@ AC_DEFINE(PI_STATIC_AND_HIDDEN) + # the dynamic linker via %ifdef. + AC_CACHE_CHECK([whether the compiler is using the ARM hard-float ABI], + [libc_cv_arm_pcs_vfp], +- [AC_EGREP_CPP(yes,[#ifdef __ARM_PCS_VFP +- yes ++ [AC_EGREP_CPP(use_arm_pcs_vfp,[#ifdef __ARM_PCS_VFP ++ use_arm_pcs_vfp + #endif + ], libc_cv_arm_pcs_vfp=yes, libc_cv_arm_pcs_vfp=no)]) + if test $libc_cv_arm_pcs_vfp = yes; then +diff --git a/sysdeps/mips/configure b/sysdeps/mips/configure +index 4e13248c03..f14af952d0 100644 +--- a/sysdeps/mips/configure ++++ b/sysdeps/mips/configure +@@ -143,11 +143,11 @@ else + /* end confdefs.h. */ + dnl + #ifdef __mips_nan2008 +-yes ++use_mips_nan2008 + #endif + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "use_mips_nan2008" >/dev/null 2>&1; then : + libc_cv_mips_nan2008=yes + else + libc_cv_mips_nan2008=no +diff --git a/sysdeps/mips/configure.ac b/sysdeps/mips/configure.ac +index bcbdaffd9f..ad3057f4cc 100644 +--- a/sysdeps/mips/configure.ac ++++ b/sysdeps/mips/configure.ac +@@ -6,9 +6,9 @@ dnl position independent way. + dnl AC_DEFINE(PI_STATIC_AND_HIDDEN) + + AC_CACHE_CHECK([whether the compiler is using the 2008 NaN encoding], +- libc_cv_mips_nan2008, [AC_EGREP_CPP(yes, [dnl ++ libc_cv_mips_nan2008, [AC_EGREP_CPP(use_mips_nan2008, [dnl + #ifdef __mips_nan2008 +-yes ++use_mips_nan2008 + #endif], libc_cv_mips_nan2008=yes, libc_cv_mips_nan2008=no)]) + if test x$libc_cv_mips_nan2008 = xyes; then + AC_DEFINE(HAVE_MIPS_NAN2008) +diff --git a/sysdeps/nios2/configure b/sysdeps/nios2/configure +index 14c8a3a014..dde3814ef2 100644 +--- a/sysdeps/nios2/configure ++++ b/sysdeps/nios2/configure +@@ -142,12 +142,12 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #ifdef __nios2_big_endian__ +- yes ++ is_nios2_be + #endif + + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "is_nios2_be" >/dev/null 2>&1; then : + libc_cv_nios2_be=yes + else + libc_cv_nios2_be=no +diff --git a/sysdeps/nios2/configure.ac b/sysdeps/nios2/configure.ac +index f05f43802b..dc8639902d 100644 +--- a/sysdeps/nios2/configure.ac ++++ b/sysdeps/nios2/configure.ac +@@ -4,8 +4,8 @@ GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. + # Nios II big endian is not yet supported. + AC_CACHE_CHECK([for big endian], + [libc_cv_nios2_be], +- [AC_EGREP_CPP(yes,[#ifdef __nios2_big_endian__ +- yes ++ [AC_EGREP_CPP(is_nios2_be,[#ifdef __nios2_big_endian__ ++ is_nios2_be + #endif + ], libc_cv_nios2_be=yes, libc_cv_nios2_be=no)]) + if test $libc_cv_nios2_be = yes; then +diff --git a/sysdeps/unix/sysv/linux/mips/configure b/sysdeps/unix/sysv/linux/mips/configure +index 25f98e0c7b..e95bfae359 100644 +--- a/sysdeps/unix/sysv/linux/mips/configure ++++ b/sysdeps/unix/sysv/linux/mips/configure +@@ -414,11 +414,11 @@ else + /* end confdefs.h. 
*/ + dnl + #ifdef __mips_nan2008 +-yes ++use_mips_nan2008 + #endif + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "use_mips_nan2008" >/dev/null 2>&1; then : + libc_cv_mips_nan2008=yes + else + libc_cv_mips_nan2008=no +diff --git a/sysdeps/unix/sysv/linux/mips/configure.ac b/sysdeps/unix/sysv/linux/mips/configure.ac +index 3db1b32b08..f8cd375ebc 100644 +--- a/sysdeps/unix/sysv/linux/mips/configure.ac ++++ b/sysdeps/unix/sysv/linux/mips/configure.ac +@@ -105,9 +105,9 @@ AC_COMPILE_IFELSE( + LIBC_CONFIG_VAR([mips-mode-switch],[${libc_mips_mode_switch}]) + + AC_CACHE_CHECK([whether the compiler is using the 2008 NaN encoding], +- libc_cv_mips_nan2008, [AC_EGREP_CPP(yes, [dnl ++ libc_cv_mips_nan2008, [AC_EGREP_CPP(use_mips_nan2008, [dnl + #ifdef __mips_nan2008 +-yes ++use_mips_nan2008 + #endif], libc_cv_mips_nan2008=yes, libc_cv_mips_nan2008=no)]) + + libc_mips_nan= +diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure +index ae7f254da4..874519000b 100644 +--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure ++++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure +@@ -155,12 +155,12 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #if _CALL_ELF == 2 +- yes ++ use_ppc_elfv2_abi + #endif + + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "use_ppc_elfv2_abi" >/dev/null 2>&1; then : + libc_cv_ppc64_elfv2_abi=yes + else + libc_cv_ppc64_elfv2_abi=no +@@ -188,12 +188,12 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + #ifdef _CALL_ELF +- yes ++ is_def_call_elf + #endif + + _ACEOF + if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | +- $EGREP "yes" >/dev/null 2>&1; then : ++ $EGREP "is_def_call_elf" >/dev/null 2>&1; then : + libc_cv_ppc64_def_call_elf=yes + else + libc_cv_ppc64_def_call_elf=no +diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac +index f9cba6e15d..b21f72f1e4 100644 +--- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac ++++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac +@@ -6,8 +6,8 @@ LIBC_SLIBDIR_RTLDDIR([lib64], [lib64]) + # Define default-abi according to compiler flags. 
+ AC_CACHE_CHECK([whether the compiler is using the PowerPC64 ELFv2 ABI], + [libc_cv_ppc64_elfv2_abi], +- [AC_EGREP_CPP(yes,[#if _CALL_ELF == 2 +- yes ++ [AC_EGREP_CPP(use_ppc_elfv2_abi,[#if _CALL_ELF == 2 ++ use_ppc_elfv2_abi + #endif + ], libc_cv_ppc64_elfv2_abi=yes, libc_cv_ppc64_elfv2_abi=no)]) + if test $libc_cv_ppc64_elfv2_abi = yes; then +@@ -19,8 +19,8 @@ else + # Compiler that do not support ELFv2 ABI does not define _CALL_ELF + AC_CACHE_CHECK([whether the compiler defines _CALL_ELF], + [libc_cv_ppc64_def_call_elf], +- [AC_EGREP_CPP(yes,[#ifdef _CALL_ELF +- yes ++ [AC_EGREP_CPP(is_def_call_elf,[#ifdef _CALL_ELF ++ is_def_call_elf + #endif + ], libc_cv_ppc64_def_call_elf=yes, libc_cv_ppc64_def_call_elf=no)]) + if test $libc_cv_ppc64_def_call_elf = no; then +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0016-Add-unused-attribute.patch b/poky/meta/recipes-core/glibc/glibc/0016-Add-unused-attribute.patch deleted file mode 100644 index 574e7c350..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0016-Add-unused-attribute.patch +++ /dev/null @@ -1,31 +0,0 @@ -From c323125744020a29f79e50dc4d024b55c482eafc Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:28:41 +0000 -Subject: [PATCH] Add unused attribute - -Helps in avoiding gcc warning when header is is included in -a source file which does not use both functions - - * iconv/gconv_charset.h (strip): - Add unused attribute. - -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - iconv/gconv_charset.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/iconv/gconv_charset.h b/iconv/gconv_charset.h -index 348acc089b..fa92465d89 100644 ---- a/iconv/gconv_charset.h -+++ b/iconv/gconv_charset.h -@@ -21,7 +21,7 @@ - #include - - --static void -+static void __attribute__ ((unused)) - strip (char *wp, const char *s) - { - int slash_count = 0; diff --git a/poky/meta/recipes-core/glibc/glibc/0016-timezone-re-written-tzselect-as-posix-sh.patch b/poky/meta/recipes-core/glibc/glibc/0016-timezone-re-written-tzselect-as-posix-sh.patch new file mode 100644 index 000000000..cd072018e --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0016-timezone-re-written-tzselect-as-posix-sh.patch @@ -0,0 +1,45 @@ +From 4762386b599f5c3287310a69ad3555e0129e0c51 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:33:03 +0000 +Subject: [PATCH 16/29] timezone: re-written tzselect as posix sh + +To avoid the bash dependency. + +Upstream-Status: Pending + +Signed-off-by: Hongxu Jia +Signed-off-by: Khem Raj +--- + timezone/Makefile | 2 +- + timezone/tzselect.ksh | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/timezone/Makefile b/timezone/Makefile +index 75f38df527..74e1ab7ff7 100644 +--- a/timezone/Makefile ++++ b/timezone/Makefile +@@ -122,7 +122,7 @@ $(testdata)/XT%: testdata/XT% + cp $< $@ + + $(objpfx)tzselect: tzselect.ksh $(common-objpfx)config.make +- sed -e 's|/bin/bash|$(BASH)|' \ ++ sed -e 's|/bin/bash|/bin/sh|' \ + -e 's|TZDIR=[^}]*|TZDIR=$(zonedir)|' \ + -e '/TZVERSION=/s|see_Makefile|"$(version)"|' \ + -e '/PKGVERSION=/s|=.*|="$(PKGVERSION)"|' \ +diff --git a/timezone/tzselect.ksh b/timezone/tzselect.ksh +index 18fce27e24..70745f9d36 100755 +--- a/timezone/tzselect.ksh ++++ b/timezone/tzselect.ksh +@@ -34,7 +34,7 @@ REPORT_BUGS_TO=tz@iana.org + + # Specify default values for environment variables if they are unset. + : ${AWK=awk} +-: ${TZDIR=`pwd`} ++: ${TZDIR=$(pwd)} + + # Output one argument as-is to standard output. 
+ # Safer than 'echo', which can mishandle '\' or leading '-'. +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0017-Remove-bash-dependency-for-nscd-init-script.patch b/poky/meta/recipes-core/glibc/glibc/0017-Remove-bash-dependency-for-nscd-init-script.patch new file mode 100644 index 000000000..57907fe66 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0017-Remove-bash-dependency-for-nscd-init-script.patch @@ -0,0 +1,75 @@ +From dac46c07736a799fc82be03aa546b2d24c19ad78 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Thu, 31 Dec 2015 14:33:02 -0800 +Subject: [PATCH 17/29] Remove bash dependency for nscd init script + +The nscd init script uses #! /bin/bash but only really uses one bashism +(translated strings), so remove them and switch the shell to #!/bin/sh. + +Upstream-Status: Pending + +Signed-off-by: Ross Burton +Signed-off-by: Khem Raj +--- + nscd/nscd.init | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/nscd/nscd.init b/nscd/nscd.init +index a882da7d8b..b02986ec15 100644 +--- a/nscd/nscd.init ++++ b/nscd/nscd.init +@@ -1,4 +1,4 @@ +-#!/bin/bash ++#!/bin/sh + # + # nscd: Starts the Name Switch Cache Daemon + # +@@ -49,7 +49,7 @@ prog=nscd + start () { + [ -d /var/run/nscd ] || mkdir /var/run/nscd + [ -d /var/db/nscd ] || mkdir /var/db/nscd +- echo -n $"Starting $prog: " ++ echo -n "Starting $prog: " + daemon /usr/sbin/nscd + RETVAL=$? + echo +@@ -58,7 +58,7 @@ start () { + } + + stop () { +- echo -n $"Stopping $prog: " ++ echo -n "Stopping $prog: " + /usr/sbin/nscd -K + RETVAL=$? + if [ $RETVAL -eq 0 ]; then +@@ -67,9 +67,9 @@ stop () { + # a non-privileged user + rm -f /var/run/nscd/nscd.pid + rm -f /var/run/nscd/socket +- success $"$prog shutdown" ++ success "$prog shutdown" + else +- failure $"$prog shutdown" ++ failure "$prog shutdown" + fi + echo + return $RETVAL +@@ -103,13 +103,13 @@ case "$1" in + RETVAL=$? + ;; + force-reload | reload) +- echo -n $"Reloading $prog: " ++ echo -n "Reloading $prog: " + killproc /usr/sbin/nscd -HUP + RETVAL=$? + echo + ;; + *) +- echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}" ++ echo "Usage: $0 {start|stop|status|restart|reload|condrestart}" + RETVAL=1 + ;; + esac +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0017-yes-within-the-path-sets-wrong-config-variables.patch b/poky/meta/recipes-core/glibc/glibc/0017-yes-within-the-path-sets-wrong-config-variables.patch deleted file mode 100644 index 49089af41..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0017-yes-within-the-path-sets-wrong-config-variables.patch +++ /dev/null @@ -1,260 +0,0 @@ -From c421cd7e885497a99179b982dc4a27e8405f8857 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:31:06 +0000 -Subject: [PATCH] 'yes' within the path sets wrong config variables - -It seems that the 'AC_EGREP_CPP(yes...' example is quite popular -but being such a short word to grep it is likely to produce -false-positive matches with the path it is configured into. - -The change is to use a more elaborated string to grep for. 
- -Upstream-Status: Submitted [libc-alpha@sourceware.org] - -Signed-off-by: Benjamin Esquivel -Signed-off-by: Khem Raj ---- - sysdeps/aarch64/configure | 4 ++-- - sysdeps/aarch64/configure.ac | 4 ++-- - sysdeps/arm/configure | 4 ++-- - sysdeps/arm/configure.ac | 4 ++-- - sysdeps/mips/configure | 4 ++-- - sysdeps/mips/configure.ac | 4 ++-- - sysdeps/nios2/configure | 4 ++-- - sysdeps/nios2/configure.ac | 4 ++-- - sysdeps/unix/sysv/linux/mips/configure | 4 ++-- - sysdeps/unix/sysv/linux/mips/configure.ac | 4 ++-- - sysdeps/unix/sysv/linux/powerpc/powerpc64/configure | 8 ++++---- - sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac | 8 ++++---- - 12 files changed, 28 insertions(+), 28 deletions(-) - -diff --git a/sysdeps/aarch64/configure b/sysdeps/aarch64/configure -index 5bd355a691..3bc5537bc0 100644 ---- a/sysdeps/aarch64/configure -+++ b/sysdeps/aarch64/configure -@@ -148,12 +148,12 @@ else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - #ifdef __AARCH64EB__ -- yes -+ is_aarch64_be - #endif - - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "is_aarch64_be" >/dev/null 2>&1; then : - libc_cv_aarch64_be=yes - else - libc_cv_aarch64_be=no -diff --git a/sysdeps/aarch64/configure.ac b/sysdeps/aarch64/configure.ac -index 7851dd4dac..6e9238171f 100644 ---- a/sysdeps/aarch64/configure.ac -+++ b/sysdeps/aarch64/configure.ac -@@ -10,8 +10,8 @@ GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. - # the dynamic linker via %ifdef. - AC_CACHE_CHECK([for big endian], - [libc_cv_aarch64_be], -- [AC_EGREP_CPP(yes,[#ifdef __AARCH64EB__ -- yes -+ [AC_EGREP_CPP(is_aarch64_be,[#ifdef __AARCH64EB__ -+ is_aarch64_be - #endif - ], libc_cv_aarch64_be=yes, libc_cv_aarch64_be=no)]) - if test $libc_cv_aarch64_be = yes; then -diff --git a/sysdeps/arm/configure b/sysdeps/arm/configure -index 431e843b2b..e152461138 100644 ---- a/sysdeps/arm/configure -+++ b/sysdeps/arm/configure -@@ -151,12 +151,12 @@ else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - #ifdef __ARM_PCS_VFP -- yes -+ use_arm_pcs_vfp - #endif - - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "use_arm_pcs_vfp" >/dev/null 2>&1; then : - libc_cv_arm_pcs_vfp=yes - else - libc_cv_arm_pcs_vfp=no -diff --git a/sysdeps/arm/configure.ac b/sysdeps/arm/configure.ac -index 90cdd69c75..05a262ba00 100644 ---- a/sysdeps/arm/configure.ac -+++ b/sysdeps/arm/configure.ac -@@ -15,8 +15,8 @@ AC_DEFINE(PI_STATIC_AND_HIDDEN) - # the dynamic linker via %ifdef. - AC_CACHE_CHECK([whether the compiler is using the ARM hard-float ABI], - [libc_cv_arm_pcs_vfp], -- [AC_EGREP_CPP(yes,[#ifdef __ARM_PCS_VFP -- yes -+ [AC_EGREP_CPP(use_arm_pcs_vfp,[#ifdef __ARM_PCS_VFP -+ use_arm_pcs_vfp - #endif - ], libc_cv_arm_pcs_vfp=yes, libc_cv_arm_pcs_vfp=no)]) - if test $libc_cv_arm_pcs_vfp = yes; then -diff --git a/sysdeps/mips/configure b/sysdeps/mips/configure -index 4e13248c03..f14af952d0 100644 ---- a/sysdeps/mips/configure -+++ b/sysdeps/mips/configure -@@ -143,11 +143,11 @@ else - /* end confdefs.h. 
*/ - dnl - #ifdef __mips_nan2008 --yes -+use_mips_nan2008 - #endif - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "use_mips_nan2008" >/dev/null 2>&1; then : - libc_cv_mips_nan2008=yes - else - libc_cv_mips_nan2008=no -diff --git a/sysdeps/mips/configure.ac b/sysdeps/mips/configure.ac -index bcbdaffd9f..ad3057f4cc 100644 ---- a/sysdeps/mips/configure.ac -+++ b/sysdeps/mips/configure.ac -@@ -6,9 +6,9 @@ dnl position independent way. - dnl AC_DEFINE(PI_STATIC_AND_HIDDEN) - - AC_CACHE_CHECK([whether the compiler is using the 2008 NaN encoding], -- libc_cv_mips_nan2008, [AC_EGREP_CPP(yes, [dnl -+ libc_cv_mips_nan2008, [AC_EGREP_CPP(use_mips_nan2008, [dnl - #ifdef __mips_nan2008 --yes -+use_mips_nan2008 - #endif], libc_cv_mips_nan2008=yes, libc_cv_mips_nan2008=no)]) - if test x$libc_cv_mips_nan2008 = xyes; then - AC_DEFINE(HAVE_MIPS_NAN2008) -diff --git a/sysdeps/nios2/configure b/sysdeps/nios2/configure -index 14c8a3a014..dde3814ef2 100644 ---- a/sysdeps/nios2/configure -+++ b/sysdeps/nios2/configure -@@ -142,12 +142,12 @@ else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - #ifdef __nios2_big_endian__ -- yes -+ is_nios2_be - #endif - - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "is_nios2_be" >/dev/null 2>&1; then : - libc_cv_nios2_be=yes - else - libc_cv_nios2_be=no -diff --git a/sysdeps/nios2/configure.ac b/sysdeps/nios2/configure.ac -index f05f43802b..dc8639902d 100644 ---- a/sysdeps/nios2/configure.ac -+++ b/sysdeps/nios2/configure.ac -@@ -4,8 +4,8 @@ GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. - # Nios II big endian is not yet supported. - AC_CACHE_CHECK([for big endian], - [libc_cv_nios2_be], -- [AC_EGREP_CPP(yes,[#ifdef __nios2_big_endian__ -- yes -+ [AC_EGREP_CPP(is_nios2_be,[#ifdef __nios2_big_endian__ -+ is_nios2_be - #endif - ], libc_cv_nios2_be=yes, libc_cv_nios2_be=no)]) - if test $libc_cv_nios2_be = yes; then -diff --git a/sysdeps/unix/sysv/linux/mips/configure b/sysdeps/unix/sysv/linux/mips/configure -index 25f98e0c7b..e95bfae359 100644 ---- a/sysdeps/unix/sysv/linux/mips/configure -+++ b/sysdeps/unix/sysv/linux/mips/configure -@@ -414,11 +414,11 @@ else - /* end confdefs.h. 
*/ - dnl - #ifdef __mips_nan2008 --yes -+use_mips_nan2008 - #endif - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "use_mips_nan2008" >/dev/null 2>&1; then : - libc_cv_mips_nan2008=yes - else - libc_cv_mips_nan2008=no -diff --git a/sysdeps/unix/sysv/linux/mips/configure.ac b/sysdeps/unix/sysv/linux/mips/configure.ac -index 3db1b32b08..f8cd375ebc 100644 ---- a/sysdeps/unix/sysv/linux/mips/configure.ac -+++ b/sysdeps/unix/sysv/linux/mips/configure.ac -@@ -105,9 +105,9 @@ AC_COMPILE_IFELSE( - LIBC_CONFIG_VAR([mips-mode-switch],[${libc_mips_mode_switch}]) - - AC_CACHE_CHECK([whether the compiler is using the 2008 NaN encoding], -- libc_cv_mips_nan2008, [AC_EGREP_CPP(yes, [dnl -+ libc_cv_mips_nan2008, [AC_EGREP_CPP(use_mips_nan2008, [dnl - #ifdef __mips_nan2008 --yes -+use_mips_nan2008 - #endif], libc_cv_mips_nan2008=yes, libc_cv_mips_nan2008=no)]) - - libc_mips_nan= -diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure -index ae7f254da4..874519000b 100644 ---- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure -+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure -@@ -155,12 +155,12 @@ else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - #if _CALL_ELF == 2 -- yes -+ use_ppc_elfv2_abi - #endif - - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "use_ppc_elfv2_abi" >/dev/null 2>&1; then : - libc_cv_ppc64_elfv2_abi=yes - else - libc_cv_ppc64_elfv2_abi=no -@@ -188,12 +188,12 @@ else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - #ifdef _CALL_ELF -- yes -+ is_def_call_elf - #endif - - _ACEOF - if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | -- $EGREP "yes" >/dev/null 2>&1; then : -+ $EGREP "is_def_call_elf" >/dev/null 2>&1; then : - libc_cv_ppc64_def_call_elf=yes - else - libc_cv_ppc64_def_call_elf=no -diff --git a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac -index f9cba6e15d..b21f72f1e4 100644 ---- a/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac -+++ b/sysdeps/unix/sysv/linux/powerpc/powerpc64/configure.ac -@@ -6,8 +6,8 @@ LIBC_SLIBDIR_RTLDDIR([lib64], [lib64]) - # Define default-abi according to compiler flags. 
- AC_CACHE_CHECK([whether the compiler is using the PowerPC64 ELFv2 ABI], - [libc_cv_ppc64_elfv2_abi], -- [AC_EGREP_CPP(yes,[#if _CALL_ELF == 2 -- yes -+ [AC_EGREP_CPP(use_ppc_elfv2_abi,[#if _CALL_ELF == 2 -+ use_ppc_elfv2_abi - #endif - ], libc_cv_ppc64_elfv2_abi=yes, libc_cv_ppc64_elfv2_abi=no)]) - if test $libc_cv_ppc64_elfv2_abi = yes; then -@@ -19,8 +19,8 @@ else - # Compiler that do not support ELFv2 ABI does not define _CALL_ELF - AC_CACHE_CHECK([whether the compiler defines _CALL_ELF], - [libc_cv_ppc64_def_call_elf], -- [AC_EGREP_CPP(yes,[#ifdef _CALL_ELF -- yes -+ [AC_EGREP_CPP(is_def_call_elf,[#ifdef _CALL_ELF -+ is_def_call_elf - #endif - ], libc_cv_ppc64_def_call_elf=yes, libc_cv_ppc64_def_call_elf=no)]) - if test $libc_cv_ppc64_def_call_elf = no; then diff --git a/poky/meta/recipes-core/glibc/glibc/0018-eglibc-Cross-building-and-testing-instructions.patch b/poky/meta/recipes-core/glibc/glibc/0018-eglibc-Cross-building-and-testing-instructions.patch new file mode 100644 index 000000000..58d41ffe0 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0018-eglibc-Cross-building-and-testing-instructions.patch @@ -0,0 +1,619 @@ +From 0bd1dedf77194151397c53b12e0355c2edb8bccc Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:42:58 +0000 +Subject: [PATCH 18/29] eglibc: Cross building and testing instructions + +Ported from eglibc +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + GLIBC.cross-building | 383 +++++++++++++++++++++++++++++++++++++++++++ + GLIBC.cross-testing | 205 +++++++++++++++++++++++ + 2 files changed, 588 insertions(+) + create mode 100644 GLIBC.cross-building + create mode 100644 GLIBC.cross-testing + +diff --git a/GLIBC.cross-building b/GLIBC.cross-building +new file mode 100644 +index 0000000000..e6e0da1aaf +--- /dev/null ++++ b/GLIBC.cross-building +@@ -0,0 +1,383 @@ ++ -*- mode: text -*- ++ ++ Cross-Compiling GLIBC ++ Jim Blandy ++ ++ ++Introduction ++ ++Most GNU tools have a simple build procedure: you run their ++'configure' script, and then you run 'make'. Unfortunately, the ++process of cross-compiling the GNU C library is quite a bit more ++involved: ++ ++1) Build a cross-compiler, with certain facilities disabled. ++ ++2) Configure the C library using the compiler you built in step 1). ++ Build a few of the C run-time object files, but not the rest of the ++ library. Install the library's header files and the run-time ++ object files, and create a dummy libc.so. ++ ++3) Build a second cross-compiler, using the header files and object ++ files you installed in step 2. ++ ++4) Configure, build, and install a fresh C library, using the compiler ++ built in step 3. ++ ++5) Build a third cross-compiler, based on the C library built in step 4. ++ ++The reason for this complexity is that, although GCC and the GNU C ++library are distributed separately, they are not actually independent ++of each other: GCC requires the C library's headers and some object ++files to compile its own libraries, while the C library depends on ++GCC's libraries. GLIBC includes features and bug fixes to the stock ++GNU C library that simplify this process, but the fundamental ++interdependency stands. ++ ++In this document, we explain how to cross-compile an GLIBC/GCC pair ++from source. Our intended audience is developers who are already ++familiar with the GNU toolchain and comfortable working with ++cross-development tools. 
While we do present a worked example to ++accompany the explanation, for clarity's sake we do not cover many of ++the options available to cross-toolchain users. ++ ++ ++Preparation ++ ++GLIBC requires recent versions of the GNU binutils, GCC, and the ++Linux kernel. The web page ++documents the current requirements, and lists patches needed for ++certain target architectures. As of this writing, these build ++instructions have been tested with binutils 2.22.51, GCC 4.6.2, ++and Linux 3.1. ++ ++First, let's set some variables, to simplify later commands. We'll ++build GLIBC and GCC for an ARM target, known to the Linux kernel ++as 'arm', and we'll do the build on an Intel x86_64 Linux box: ++ ++ $ build=x86_64-pc-linux-gnu ++ $ host=$build ++ $ target=arm-none-linux-gnueabi ++ $ linux_arch=arm ++ ++We're using the aforementioned versions of Binutils, GCC, and Linux: ++ ++ $ binutilsv=binutils-2.22.51 ++ $ gccv=gcc-4.6.2 ++ $ linuxv=linux-3.1 ++ ++We're carrying out the entire process under '~/cross-build', which ++contains unpacked source trees for binutils, gcc, and linux kernel, ++along with GLIBC svn trunk (which can be checked-out with ++'svn co http://www.eglibc.org/svn/trunk eglibc'): ++ ++ $ top=$HOME/cross-build/$target ++ $ src=$HOME/cross-build/src ++ $ ls $src ++ binutils-2.22.51 glibc gcc-4.6.2 linux-3.1 ++ ++We're going to place our build directories in a subdirectory 'obj', ++we'll install the cross-development toolchain in 'tools', and we'll ++place our sysroot (containing files to be installed on the target ++system) in 'sysroot': ++ ++ $ obj=$top/obj ++ $ tools=$top/tools ++ $ sysroot=$top/sysroot ++ ++ ++Binutils ++ ++Configuring and building binutils for the target is straightforward: ++ ++ $ mkdir -p $obj/binutils ++ $ cd $obj/binutils ++ $ $src/$binutilsv/configure \ ++ > --target=$target \ ++ > --prefix=$tools \ ++ > --with-sysroot=$sysroot ++ $ make ++ $ make install ++ ++ ++The First GCC ++ ++For our work, we need a cross-compiler targeting an ARM Linux ++system. However, that configuration includes the shared library ++'libgcc_s.so', which is compiled against the GLIBC headers (which we ++haven't installed yet) and linked against 'libc.so' (which we haven't ++built yet). ++ ++Fortunately, there are configuration options for GCC which tell it not ++to build 'libgcc_s.so'. The '--without-headers' option is supposed to ++take care of this, but its implementation is incomplete, so you must ++also configure with the '--with-newlib' option. While '--with-newlib' ++appears to mean "Use the Newlib C library", its effect is to tell the ++GCC build machinery, "Don't assume there is a C library available." ++ ++We also need to disable some of the libraries that would normally be ++built along with GCC, and specify that only the compiler for the C ++language is needed. ++ ++So, we create a build directory, configure, make, and install. ++ ++ $ mkdir -p $obj/gcc1 ++ $ cd $obj/gcc1 ++ $ $src/$gccv/configure \ ++ > --target=$target \ ++ > --prefix=$tools \ ++ > --without-headers --with-newlib \ ++ > --disable-shared --disable-threads --disable-libssp \ ++ > --disable-libgomp --disable-libmudflap --disable-libquadmath \ ++ > --disable-decimal-float --disable-libffi \ ++ > --enable-languages=c ++ $ PATH=$tools/bin:$PATH make ++ $ PATH=$tools/bin:$PATH make install ++ ++ ++Linux Kernel Headers ++ ++To configure GLIBC, we also need Linux kernel headers in place. ++Fortunately, the Linux makefiles have a target that installs them for ++us. 
Since the process does modify the source tree a bit, we make a ++copy first: ++ ++ $ cp -r $src/$linuxv $obj/linux ++ $ cd $obj/linux ++ ++Now we're ready to install the headers into the sysroot: ++ ++ $ PATH=$tools/bin:$PATH \ ++ > make headers_install \ ++ > ARCH=$linux_arch CROSS_COMPILE=$target- \ ++ > INSTALL_HDR_PATH=$sysroot/usr ++ ++ ++GLIBC Headers and Preliminary Objects ++ ++Using the cross-compiler we've just built, we can now configure GLIBC ++well enough to install the headers and build the object files that the ++full cross-compiler will need: ++ ++ $ mkdir -p $obj/glibc-headers ++ $ cd $obj/glibc-headers ++ $ BUILD_CC=gcc \ ++ > CC=$tools/bin/$target-gcc \ ++ > CXX=$tools/bin/$target-g++ \ ++ > AR=$tools/bin/$target-ar \ ++ > RANLIB=$tools/bin/$target-ranlib \ ++ > $src/glibc/libc/configure \ ++ > --prefix=/usr \ ++ > --with-headers=$sysroot/usr/include \ ++ > --build=$build \ ++ > --host=$target \ ++ > --disable-profile --without-gd --without-cvs \ ++ > --enable-add-ons=nptl,libidn,../ports ++ ++The option '--prefix=/usr' may look strange, but you should never ++configure GLIBC with a prefix other than '/usr': in various places, ++GLIBC's build system checks whether the prefix is '/usr', and does ++special handling only if that is the case. Unless you use this ++prefix, you will get a sysroot that does not use the standard Linux ++directory layouts and cannot be used as a basis for the root ++filesystem on your target system compatibly with normal GLIBC ++installations. ++ ++The '--with-headers' option tells GLIBC where the Linux headers have ++been installed. ++ ++The '--enable-add-ons=nptl,libidn,../ports' option tells GLIBC to look ++for the listed glibc add-ons. Most notably the ports add-on (located ++just above the libc sources in the GLIBC svn tree) is required to ++support ARM targets. ++ ++We can now use the 'install-headers' makefile target to install the ++headers: ++ ++ $ make install-headers install_root=$sysroot \ ++ > install-bootstrap-headers=yes ++ ++The 'install_root' variable indicates where the files should actually ++be installed; its value is treated as the parent of the '--prefix' ++directory we passed to the configure script, so the headers will go in ++'$sysroot/usr/include'. The 'install-bootstrap-headers' variable ++requests special handling for certain tricky header files. ++ ++Next, there are a few object files needed to link shared libraries, ++which we build and install by hand: ++ ++ $ mkdir -p $sysroot/usr/lib ++ $ make csu/subdir_lib ++ $ cp csu/crt1.o csu/crti.o csu/crtn.o $sysroot/usr/lib ++ ++Finally, 'libgcc_s.so' requires a 'libc.so' to link against. However, ++since we will never actually execute its code, it doesn't matter what ++it contains. So, treating '/dev/null' as a C source file, we produce ++a dummy 'libc.so' in one step: ++ ++ $ $tools/bin/$target-gcc -nostdlib -nostartfiles -shared -x c /dev/null \ ++ > -o $sysroot/usr/lib/libc.so ++ ++ ++The Second GCC ++ ++With the GLIBC headers and selected object files installed, we can ++now build a GCC that is capable of compiling GLIBC. 
We configure, ++build, and install the second GCC, again building only the C compiler, ++and avoiding libraries we won't use: ++ ++ $ mkdir -p $obj/gcc2 ++ $ cd $obj/gcc2 ++ $ $src/$gccv/configure \ ++ > --target=$target \ ++ > --prefix=$tools \ ++ > --with-sysroot=$sysroot \ ++ > --disable-libssp --disable-libgomp --disable-libmudflap \ ++ > --disable-libffi --disable-libquadmath \ ++ > --enable-languages=c ++ $ PATH=$tools/bin:$PATH make ++ $ PATH=$tools/bin:$PATH make install ++ ++ ++GLIBC, Complete ++ ++With the second compiler built and installed, we're now ready for the ++full GLIBC build: ++ ++ $ mkdir -p $obj/glibc ++ $ cd $obj/glibc ++ $ BUILD_CC=gcc \ ++ > CC=$tools/bin/$target-gcc \ ++ > CXX=$tools/bin/$target-g++ \ ++ > AR=$tools/bin/$target-ar \ ++ > RANLIB=$tools/bin/$target-ranlib \ ++ > $src/glibc/libc/configure \ ++ > --prefix=/usr \ ++ > --with-headers=$sysroot/usr/include \ ++ > --with-kconfig=$obj/linux/scripts/kconfig \ ++ > --build=$build \ ++ > --host=$target \ ++ > --disable-profile --without-gd --without-cvs \ ++ > --enable-add-ons=nptl,libidn,../ports ++ ++Note the additional '--with-kconfig' option. This tells GLIBC where to ++find the host config tools used by the kernel 'make config' and 'make ++menuconfig'. These tools can be re-used by GLIBC for its own 'make ++*config' support, which will create 'option-groups.config' for you. ++But first make sure those tools have been built by running some ++dummy 'make *config' calls in the kernel directory: ++ ++ $ cd $obj/linux ++ $ PATH=$tools/bin:$PATH make config \ ++ > ARCH=$linux_arch CROSS_COMPILE=$target- \ ++ $ PATH=$tools/bin:$PATH make menuconfig \ ++ > ARCH=$linux_arch CROSS_COMPILE=$target- \ ++ ++Now we can configure and build the full GLIBC: ++ ++ $ cd $obj/glibc ++ $ PATH=$tools/bin:$PATH make defconfig ++ $ PATH=$tools/bin:$PATH make menuconfig ++ $ PATH=$tools/bin:$PATH make ++ $ PATH=$tools/bin:$PATH make install install_root=$sysroot ++ ++At this point, we have a complete GLIBC installation in '$sysroot', ++with header files, library files, and most of the C runtime startup ++files in place. ++ ++ ++The Third GCC ++ ++Finally, we recompile GCC against this full installation, enabling ++whatever languages and libraries we would like to use: ++ ++ $ mkdir -p $obj/gcc3 ++ $ cd $obj/gcc3 ++ $ $src/$gccv/configure \ ++ > --target=$target \ ++ > --prefix=$tools \ ++ > --with-sysroot=$sysroot \ ++ > --enable-__cxa_atexit \ ++ > --disable-libssp --disable-libgomp --disable-libmudflap \ ++ > --enable-languages=c,c++ ++ $ PATH=$tools/bin:$PATH make ++ $ PATH=$tools/bin:$PATH make install ++ ++The '--enable-__cxa_atexit' option tells GCC what sort of C++ ++destructor support to expect from the C library; it's required with ++GLIBC. ++ ++And since GCC's installation process isn't designed to help construct ++sysroot trees, we must manually copy certain libraries into place in ++the sysroot. ++ ++ $ cp -d $tools/$target/lib/libgcc_s.so* $sysroot/lib ++ $ cp -d $tools/$target/lib/libstdc++.so* $sysroot/usr/lib ++ ++ ++Trying Things Out ++ ++At this point, '$tools' contains a cross toolchain ready to use ++the GLIBC installation in '$sysroot': ++ ++ $ cat > hello.c < #include ++ > int ++ > main (int argc, char **argv) ++ > { ++ > puts ("Hello, world!"); ++ > return 0; ++ > } ++ > EOF ++ $ $tools/bin/$target-gcc -Wall hello.c -o hello ++ $ cat > c++-hello.cc < #include ++ > int ++ > main (int argc, char **argv) ++ > { ++ > std::cout << "Hello, C++ world!" 
<< std::endl; ++ > return 0; ++ > } ++ > EOF ++ $ $tools/bin/$target-g++ -Wall c++-hello.cc -o c++-hello ++ ++ ++We can use 'readelf' to verify that these are indeed executables for ++our target, using our dynamic linker: ++ ++ $ $tools/bin/$target-readelf -hl hello ++ ELF Header: ++ ... ++ Type: EXEC (Executable file) ++ Machine: ARM ++ ++ ... ++ Program Headers: ++ Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align ++ PHDR 0x000034 0x10000034 0x10000034 0x00100 0x00100 R E 0x4 ++ INTERP 0x000134 0x00008134 0x00008134 0x00013 0x00013 R 0x1 ++ [Requesting program interpreter: /lib/ld-linux.so.3] ++ LOAD 0x000000 0x00008000 0x00008000 0x0042c 0x0042c R E 0x8000 ++ ... ++ ++Looking at the dynamic section of the installed 'libgcc_s.so', we see ++that the 'NEEDED' entry for the C library does include the '.6' ++suffix, indicating that was linked against our fully build GLIBC, and ++not our dummy 'libc.so': ++ ++ $ $tools/bin/$target-readelf -d $sysroot/lib/libgcc_s.so.1 ++ Dynamic section at offset 0x1083c contains 24 entries: ++ Tag Type Name/Value ++ 0x00000001 (NEEDED) Shared library: [libc.so.6] ++ 0x0000000e (SONAME) Library soname: [libgcc_s.so.1] ++ ... ++ ++ ++And on the target machine, we can run our programs: ++ ++ $ $sysroot/lib/ld.so.1 --library-path $sysroot/lib:$sysroot/usr/lib \ ++ > ./hello ++ Hello, world! ++ $ $sysroot/lib/ld.so.1 --library-path $sysroot/lib:$sysroot/usr/lib \ ++ > ./c++-hello ++ Hello, C++ world! +diff --git a/GLIBC.cross-testing b/GLIBC.cross-testing +new file mode 100644 +index 0000000000..b67b468466 +--- /dev/null ++++ b/GLIBC.cross-testing +@@ -0,0 +1,205 @@ ++ -*- mode: text -*- ++ ++ Cross-Testing With GLIBC ++ Jim Blandy ++ ++ ++Introduction ++ ++Developers writing software for embedded systems often use a desktop ++or other similarly capable computer for development, but need to run ++tests on the embedded system, or perhaps on a simulator. When ++configured for cross-compilation, the stock GNU C library simply ++disables running tests altogether: the command 'make tests' builds ++test programs, but does not run them. GLIBC, however, provides ++facilities for compiling tests and generating data files on the build ++system, but running the test programs themselves on a remote system or ++simulator. ++ ++ ++Test environment requirements ++ ++The test environment must meet certain conditions for GLIBC's ++cross-testing facilities to work: ++ ++- Shared filesystems. The 'build' system, on which you configure and ++ compile GLIBC, and the 'host' system, on which you intend to run ++ GLIBC, must share a filesystem containing the GLIBC build and ++ source trees. Files must appear at the same paths on both systems. ++ ++- Remote-shell like invocation. There must be a way to run a program ++ on the host system from the build system, passing it properly quoted ++ command-line arguments, setting environment variables, and ++ inheriting the caller's standard input and output. ++ ++ ++Usage ++ ++To use GLIBC's cross-testing support, provide values for the ++following Make variables when you invoke 'make': ++ ++- cross-test-wrapper ++ ++ This should be the name of the cross-testing wrapper command, along ++ with any arguments. ++ ++- cross-localedef ++ ++ This should be the name of a cross-capable localedef program, like ++ that included in the GLIBC 'localedef' module, along with any ++ arguments needed. ++ ++These are each explained in detail below. 
++ ++ ++The Cross-Testing Wrapper ++ ++To run test programs reliably, the stock GNU C library takes care to ++ensure that test programs use the newly compiled dynamic linker and ++shared libraries, and never the host system's installed libraries. To ++accomplish this, it runs the tests by explicitly invoking the dynamic ++linker from the build tree, passing it a list of build tree ++directories to search for shared libraries, followed by the name of ++the executable to run and its arguments. ++ ++For example, where one might normally run a test program like this: ++ ++ $ ./tst-foo arg1 arg2 ++ ++the GNU C library might run that program like this: ++ ++ $ $objdir/elf/ld-linux.so.3 --library-path $objdir \ ++ ./tst-foo arg1 arg2 ++ ++(where $objdir is the path to the top of the build tree, and the ++trailing backslash indicates a continuation of the command). In other ++words, each test program invocation is 'wrapped up' inside an explicit ++invocation of the dynamic linker, which must itself execute the test ++program, having loaded shared libraries from the appropriate ++directories. ++ ++To support cross-testing, GLIBC allows the developer to optionally ++set the 'cross-test-wrapper' Make variable to another wrapper command, ++to which it passes the entire dynamic linker invocation shown above as ++arguments. For example, if the developer supplies a wrapper of ++'my-wrapper hostname', then GLIBC would run the test above as ++follows: ++ ++ $ my-wrapper hostname \ ++ $objdir/elf/ld-linux.so.3 --library-path $objdir \ ++ ./tst-foo arg1 arg2 ++ ++The 'my-wrapper' command is responsible for executing the command ++given on the host system. ++ ++Since tests are run in varying directories, the wrapper should either ++be in your command search path, or 'cross-test-wrapper' should give an ++absolute path for the wrapper. ++ ++The wrapper must meet several requirements: ++ ++- It must preserve the current directory. As explained above, the ++ build directory tree must be visible on both the build and host ++ systems, at the same path. The test wrapper must ensure that the ++ current directory it inherits is also inherited by the dynamic ++ linker (and thus the test program itself). ++ ++- It must preserve environment variables' values. Many GLIBC tests ++ set environment variables for test runs; in native testing, it ++ invokes programs like this: ++ ++ $ GCONV_PATH=$objdir/iconvdata \ ++ $objdir/elf/ld-linux.so.3 --library-path $objdir \ ++ ./tst-foo arg1 arg2 ++ ++ With the cross-testing wrapper, that invocation becomes: ++ ++ $ GCONV_PATH=$objdir/iconvdata \ ++ my-wrapper hostname \ ++ $objdir/elf/ld-linux.so.3 --library-path $objdir \ ++ ./tst-foo arg1 arg2 ++ ++ Here, 'my-wrapper' must ensure that the value it sees for ++ 'GCONV_PATH' will be seen by the dynamic linker, and thus 'tst-foo' ++ itself. (The wrapper supplied with GLIBC simply preserves the ++ values of *all* enviroment variables, with a fixed set of ++ exceptions.) ++ ++ If your wrapper is a shell script, take care to correctly propagate ++ environment variables whose values contain spaces and shell ++ metacharacters. ++ ++- It must pass the command's arguments, unmodified. The arguments ++ seen by the test program should be exactly those seen by the wrapper ++ (after whatever arguments are given to the wrapper itself). The ++ GLIBC test framework performs all needed shell word splitting and ++ expansion (wildcard expansion, parameter substitution, and so on) ++ before invoking the wrapper; further expansion may break the tests. 
++ ++ ++The 'cross-test-ssh.sh' script ++ ++If you want to use 'ssh' (or something sufficiently similar) to run ++test programs on your host system, GLIBC includes a shell script, ++'scripts/cross-test-ssh.sh', which you can use as your wrapper ++command. This script takes care of setting the test command's current ++directory, propagating environment variable values, and carrying ++command-line arguments, all across an 'ssh' connection. You may even ++supply an alternative to 'ssh' on the command line, if needed. ++ ++For more details, pass 'cross-test-ssh.sh' the '--help' option. ++ ++ ++The Cross-Compiling Locale Definition Command ++ ++Some GLIBC tests rely on locales generated especially for the test ++process. In a native configuration, these tests simply run the ++'localedef' command built by the normal GLIBC build process, ++'locale/localedef', to process and install their locales. However, in ++a cross-compiling configuration, this 'localedef' is built for the ++host system, not the build system, and since it requires quite a bit ++of memory to run (we have seen it fail on systems with 64MiB of ++memory), it may not be practical to run it on the host system. ++ ++If set, GLIBC uses the 'cross-localedef' Make variable as the command ++to run on the build system to process and install locales. The ++localedef program built from the GLIBC 'localedef' module is ++suitable. ++ ++The value of 'cross-localedef' may also include command-line arguments ++to be passed to the program; if you are using GLIBC's 'localedef', ++you may include endianness and 'uint32_t' alignment arguments here. ++ ++ ++Example ++ ++In developing GLIBC's cross-testing facility, we invoked 'make' with ++the following script: ++ ++ #!/bin/sh ++ ++ srcdir=... ++ test_hostname=... ++ localedefdir=... ++ cross_gxx=...-g++ ++ ++ wrapper="$srcdir/scripts/cross-test-ssh.sh $test_hostname" ++ localedef="$localedefdir/localedef --little-endian --uint32-align=4" ++ ++ make cross-test-wrapper="$wrapper" \ ++ cross-localedef="$localedef" \ ++ CXX="$cross_gxx" \ ++ "$@" ++ ++ ++Other Cross-Testing Concerns ++ ++Here are notes on some other issues which you may encounter in running ++the GLIBC tests in a cross-compiling environment: ++ ++- Some tests require a C++ cross-compiler; you should set the 'CXX' ++ Make variable to the name of an appropriate cross-compiler. ++ ++- Some tests require access to libstdc++.so.6 and libgcc_s.so.1; we ++ simply place copies of these libraries in the top GLIBC build ++ directory. +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0018-timezone-re-written-tzselect-as-posix-sh.patch b/poky/meta/recipes-core/glibc/glibc/0018-timezone-re-written-tzselect-as-posix-sh.patch deleted file mode 100644 index 20b0ee98e..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0018-timezone-re-written-tzselect-as-posix-sh.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 2023d88c355b8af6458c8e39ce38b75c1ca4ea2a Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:33:03 +0000 -Subject: [PATCH] timezone: re-written tzselect as posix sh - -To avoid the bash dependency. 
- -Upstream-Status: Pending - -Signed-off-by: Hongxu Jia -Signed-off-by: Khem Raj ---- - timezone/Makefile | 2 +- - timezone/tzselect.ksh | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/timezone/Makefile b/timezone/Makefile -index 75f38df527..74e1ab7ff7 100644 ---- a/timezone/Makefile -+++ b/timezone/Makefile -@@ -122,7 +122,7 @@ $(testdata)/XT%: testdata/XT% - cp $< $@ - - $(objpfx)tzselect: tzselect.ksh $(common-objpfx)config.make -- sed -e 's|/bin/bash|$(BASH)|' \ -+ sed -e 's|/bin/bash|/bin/sh|' \ - -e 's|TZDIR=[^}]*|TZDIR=$(zonedir)|' \ - -e '/TZVERSION=/s|see_Makefile|"$(version)"|' \ - -e '/PKGVERSION=/s|=.*|="$(PKGVERSION)"|' \ -diff --git a/timezone/tzselect.ksh b/timezone/tzselect.ksh -index 18fce27e24..70745f9d36 100755 ---- a/timezone/tzselect.ksh -+++ b/timezone/tzselect.ksh -@@ -34,7 +34,7 @@ REPORT_BUGS_TO=tz@iana.org - - # Specify default values for environment variables if they are unset. - : ${AWK=awk} --: ${TZDIR=`pwd`} -+: ${TZDIR=$(pwd)} - - # Output one argument as-is to standard output. - # Safer than 'echo', which can mishandle '\' or leading '-'. diff --git a/poky/meta/recipes-core/glibc/glibc/0019-Remove-bash-dependency-for-nscd-init-script.patch b/poky/meta/recipes-core/glibc/glibc/0019-Remove-bash-dependency-for-nscd-init-script.patch deleted file mode 100644 index 1c15a5130..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0019-Remove-bash-dependency-for-nscd-init-script.patch +++ /dev/null @@ -1,72 +0,0 @@ -From 06da20d9f89907e5f2777537244e6589ca3c9703 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Thu, 31 Dec 2015 14:33:02 -0800 -Subject: [PATCH] Remove bash dependency for nscd init script - -The nscd init script uses #! /bin/bash but only really uses one bashism -(translated strings), so remove them and switch the shell to #!/bin/sh. - -Upstream-Status: Pending - -Signed-off-by: Ross Burton -Signed-off-by: Khem Raj ---- - nscd/nscd.init | 14 +++++++------- - 1 file changed, 7 insertions(+), 7 deletions(-) - -diff --git a/nscd/nscd.init b/nscd/nscd.init -index a882da7d8b..b02986ec15 100644 ---- a/nscd/nscd.init -+++ b/nscd/nscd.init -@@ -1,4 +1,4 @@ --#!/bin/bash -+#!/bin/sh - # - # nscd: Starts the Name Switch Cache Daemon - # -@@ -49,7 +49,7 @@ prog=nscd - start () { - [ -d /var/run/nscd ] || mkdir /var/run/nscd - [ -d /var/db/nscd ] || mkdir /var/db/nscd -- echo -n $"Starting $prog: " -+ echo -n "Starting $prog: " - daemon /usr/sbin/nscd - RETVAL=$? - echo -@@ -58,7 +58,7 @@ start () { - } - - stop () { -- echo -n $"Stopping $prog: " -+ echo -n "Stopping $prog: " - /usr/sbin/nscd -K - RETVAL=$? - if [ $RETVAL -eq 0 ]; then -@@ -67,9 +67,9 @@ stop () { - # a non-privileged user - rm -f /var/run/nscd/nscd.pid - rm -f /var/run/nscd/socket -- success $"$prog shutdown" -+ success "$prog shutdown" - else -- failure $"$prog shutdown" -+ failure "$prog shutdown" - fi - echo - return $RETVAL -@@ -103,13 +103,13 @@ case "$1" in - RETVAL=$? - ;; - force-reload | reload) -- echo -n $"Reloading $prog: " -+ echo -n "Reloading $prog: " - killproc /usr/sbin/nscd -HUP - RETVAL=$? 
- echo - ;; - *) -- echo $"Usage: $0 {start|stop|status|restart|reload|condrestart}" -+ echo "Usage: $0 {start|stop|status|restart|reload|condrestart}" - RETVAL=1 - ;; - esac diff --git a/poky/meta/recipes-core/glibc/glibc/0019-eglibc-Help-bootstrap-cross-toolchain.patch b/poky/meta/recipes-core/glibc/glibc/0019-eglibc-Help-bootstrap-cross-toolchain.patch new file mode 100644 index 000000000..f633079f3 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0019-eglibc-Help-bootstrap-cross-toolchain.patch @@ -0,0 +1,100 @@ +From 5591b7653411da26fa2939352e50ea4121b327e6 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:49:28 +0000 +Subject: [PATCH 19/29] eglibc: Help bootstrap cross toolchain + +Taken from EGLIBC, r1484 + r1525 + + 2007-02-20 Jim Blandy + + * Makefile (install-headers): Preserve old behavior: depend on + $(inst_includedir)/gnu/stubs.h only if install-bootstrap-headers + is set; otherwise, place gnu/stubs.h on the 'install-others' list. + + 2007-02-16 Jim Blandy + + * Makefile: Amend make install-headers to install everything + necessary for building a cross-compiler. Install gnu/stubs.h as + part of 'install-headers', not 'install-others'. + If install-bootstrap-headers is 'yes', install a dummy copy of + gnu/stubs.h, instead of computing the real thing. + * include/stubs-bootstrap.h: New file. + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + Makefile | 22 +++++++++++++++++++++- + include/stubs-bootstrap.h | 12 ++++++++++++ + 2 files changed, 33 insertions(+), 1 deletion(-) + create mode 100644 include/stubs-bootstrap.h + +diff --git a/Makefile b/Makefile +index 6dcfe40c25..bc37c32e5a 100644 +--- a/Makefile ++++ b/Makefile +@@ -79,9 +79,18 @@ subdir-dirs = include + vpath %.h $(subdir-dirs) + + # What to install. +-install-others = $(inst_includedir)/gnu/stubs.h + install-bin-script = + ++# If we're bootstrapping, install a dummy gnu/stubs.h along with the ++# other headers, so 'make install-headers' produces a useable include ++# tree. Otherwise, install gnu/stubs.h later, after the rest of the ++# build is done. ++ifeq ($(install-bootstrap-headers),yes) ++install-headers: $(inst_includedir)/gnu/stubs.h ++else ++install-others = $(inst_includedir)/gnu/stubs.h ++endif ++ + ifeq (yes,$(build-shared)) + headers += gnu/lib-names.h + endif +@@ -407,6 +416,16 @@ others: $(common-objpfx)testrun.sh $(common-objpfx)debugglibc.sh + + subdir-stubs := $(foreach dir,$(subdirs),$(common-objpfx)$(dir)/stubs) + ++# gnu/stubs.h depends (via the subdir 'stubs' targets) on all the .o ++# files in EGLIBC. For bootstrapping a GCC/EGLIBC pair, an empty ++# gnu/stubs.h is good enough. ++ifeq ($(install-bootstrap-headers),yes) ++$(inst_includedir)/gnu/stubs.h: include/stubs-bootstrap.h $(+force) ++ $(make-target-directory) ++ $(INSTALL_DATA) $< $@ ++ ++installed-stubs = ++else + ifndef abi-variants + installed-stubs = $(inst_includedir)/gnu/stubs.h + else +@@ -433,6 +452,7 @@ $(inst_includedir)/gnu/stubs.h: $(+force) + + install-others-nosubdir: $(installed-stubs) + endif ++endif + + + # Since stubs.h is never needed when building the library, we simplify the +diff --git a/include/stubs-bootstrap.h b/include/stubs-bootstrap.h +new file mode 100644 +index 0000000000..1d2b669aff +--- /dev/null ++++ b/include/stubs-bootstrap.h +@@ -0,0 +1,12 @@ ++/* Placeholder stubs.h file for bootstrapping. ++ ++ When bootstrapping a GCC/EGLIBC pair, GCC requires that the EGLIBC ++ headers be installed, but we can't fully build EGLIBC without that ++ GCC. 
So we run the command: ++ ++ make install-headers install-bootstrap-headers=yes ++ ++ to install the headers GCC needs, but avoid building certain ++ difficult headers. The header depends, via the ++ EGLIBC subdir 'stubs' make targets, on every .o file in EGLIBC, but ++ an empty stubs.h like this will do fine for GCC. */ +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Cross-building-and-testing-instructions.patch b/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Cross-building-and-testing-instructions.patch deleted file mode 100644 index eda556537..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Cross-building-and-testing-instructions.patch +++ /dev/null @@ -1,616 +0,0 @@ -From 5641452a24f76c5dafa3749a542fcac93f77390f Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:42:58 +0000 -Subject: [PATCH] eglibc: Cross building and testing instructions - -Ported from eglibc -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - GLIBC.cross-building | 383 +++++++++++++++++++++++++++++++++++++++++++ - GLIBC.cross-testing | 205 +++++++++++++++++++++++ - 2 files changed, 588 insertions(+) - create mode 100644 GLIBC.cross-building - create mode 100644 GLIBC.cross-testing - -diff --git a/GLIBC.cross-building b/GLIBC.cross-building -new file mode 100644 -index 0000000000..e6e0da1aaf ---- /dev/null -+++ b/GLIBC.cross-building -@@ -0,0 +1,383 @@ -+ -*- mode: text -*- -+ -+ Cross-Compiling GLIBC -+ Jim Blandy -+ -+ -+Introduction -+ -+Most GNU tools have a simple build procedure: you run their -+'configure' script, and then you run 'make'. Unfortunately, the -+process of cross-compiling the GNU C library is quite a bit more -+involved: -+ -+1) Build a cross-compiler, with certain facilities disabled. -+ -+2) Configure the C library using the compiler you built in step 1). -+ Build a few of the C run-time object files, but not the rest of the -+ library. Install the library's header files and the run-time -+ object files, and create a dummy libc.so. -+ -+3) Build a second cross-compiler, using the header files and object -+ files you installed in step 2. -+ -+4) Configure, build, and install a fresh C library, using the compiler -+ built in step 3. -+ -+5) Build a third cross-compiler, based on the C library built in step 4. -+ -+The reason for this complexity is that, although GCC and the GNU C -+library are distributed separately, they are not actually independent -+of each other: GCC requires the C library's headers and some object -+files to compile its own libraries, while the C library depends on -+GCC's libraries. GLIBC includes features and bug fixes to the stock -+GNU C library that simplify this process, but the fundamental -+interdependency stands. -+ -+In this document, we explain how to cross-compile an GLIBC/GCC pair -+from source. Our intended audience is developers who are already -+familiar with the GNU toolchain and comfortable working with -+cross-development tools. While we do present a worked example to -+accompany the explanation, for clarity's sake we do not cover many of -+the options available to cross-toolchain users. -+ -+ -+Preparation -+ -+GLIBC requires recent versions of the GNU binutils, GCC, and the -+Linux kernel. The web page -+documents the current requirements, and lists patches needed for -+certain target architectures. As of this writing, these build -+instructions have been tested with binutils 2.22.51, GCC 4.6.2, -+and Linux 3.1. -+ -+First, let's set some variables, to simplify later commands. 
We'll -+build GLIBC and GCC for an ARM target, known to the Linux kernel -+as 'arm', and we'll do the build on an Intel x86_64 Linux box: -+ -+ $ build=x86_64-pc-linux-gnu -+ $ host=$build -+ $ target=arm-none-linux-gnueabi -+ $ linux_arch=arm -+ -+We're using the aforementioned versions of Binutils, GCC, and Linux: -+ -+ $ binutilsv=binutils-2.22.51 -+ $ gccv=gcc-4.6.2 -+ $ linuxv=linux-3.1 -+ -+We're carrying out the entire process under '~/cross-build', which -+contains unpacked source trees for binutils, gcc, and linux kernel, -+along with GLIBC svn trunk (which can be checked-out with -+'svn co http://www.eglibc.org/svn/trunk eglibc'): -+ -+ $ top=$HOME/cross-build/$target -+ $ src=$HOME/cross-build/src -+ $ ls $src -+ binutils-2.22.51 glibc gcc-4.6.2 linux-3.1 -+ -+We're going to place our build directories in a subdirectory 'obj', -+we'll install the cross-development toolchain in 'tools', and we'll -+place our sysroot (containing files to be installed on the target -+system) in 'sysroot': -+ -+ $ obj=$top/obj -+ $ tools=$top/tools -+ $ sysroot=$top/sysroot -+ -+ -+Binutils -+ -+Configuring and building binutils for the target is straightforward: -+ -+ $ mkdir -p $obj/binutils -+ $ cd $obj/binutils -+ $ $src/$binutilsv/configure \ -+ > --target=$target \ -+ > --prefix=$tools \ -+ > --with-sysroot=$sysroot -+ $ make -+ $ make install -+ -+ -+The First GCC -+ -+For our work, we need a cross-compiler targeting an ARM Linux -+system. However, that configuration includes the shared library -+'libgcc_s.so', which is compiled against the GLIBC headers (which we -+haven't installed yet) and linked against 'libc.so' (which we haven't -+built yet). -+ -+Fortunately, there are configuration options for GCC which tell it not -+to build 'libgcc_s.so'. The '--without-headers' option is supposed to -+take care of this, but its implementation is incomplete, so you must -+also configure with the '--with-newlib' option. While '--with-newlib' -+appears to mean "Use the Newlib C library", its effect is to tell the -+GCC build machinery, "Don't assume there is a C library available." -+ -+We also need to disable some of the libraries that would normally be -+built along with GCC, and specify that only the compiler for the C -+language is needed. -+ -+So, we create a build directory, configure, make, and install. -+ -+ $ mkdir -p $obj/gcc1 -+ $ cd $obj/gcc1 -+ $ $src/$gccv/configure \ -+ > --target=$target \ -+ > --prefix=$tools \ -+ > --without-headers --with-newlib \ -+ > --disable-shared --disable-threads --disable-libssp \ -+ > --disable-libgomp --disable-libmudflap --disable-libquadmath \ -+ > --disable-decimal-float --disable-libffi \ -+ > --enable-languages=c -+ $ PATH=$tools/bin:$PATH make -+ $ PATH=$tools/bin:$PATH make install -+ -+ -+Linux Kernel Headers -+ -+To configure GLIBC, we also need Linux kernel headers in place. -+Fortunately, the Linux makefiles have a target that installs them for -+us. 
Since the process does modify the source tree a bit, we make a -+copy first: -+ -+ $ cp -r $src/$linuxv $obj/linux -+ $ cd $obj/linux -+ -+Now we're ready to install the headers into the sysroot: -+ -+ $ PATH=$tools/bin:$PATH \ -+ > make headers_install \ -+ > ARCH=$linux_arch CROSS_COMPILE=$target- \ -+ > INSTALL_HDR_PATH=$sysroot/usr -+ -+ -+GLIBC Headers and Preliminary Objects -+ -+Using the cross-compiler we've just built, we can now configure GLIBC -+well enough to install the headers and build the object files that the -+full cross-compiler will need: -+ -+ $ mkdir -p $obj/glibc-headers -+ $ cd $obj/glibc-headers -+ $ BUILD_CC=gcc \ -+ > CC=$tools/bin/$target-gcc \ -+ > CXX=$tools/bin/$target-g++ \ -+ > AR=$tools/bin/$target-ar \ -+ > RANLIB=$tools/bin/$target-ranlib \ -+ > $src/glibc/libc/configure \ -+ > --prefix=/usr \ -+ > --with-headers=$sysroot/usr/include \ -+ > --build=$build \ -+ > --host=$target \ -+ > --disable-profile --without-gd --without-cvs \ -+ > --enable-add-ons=nptl,libidn,../ports -+ -+The option '--prefix=/usr' may look strange, but you should never -+configure GLIBC with a prefix other than '/usr': in various places, -+GLIBC's build system checks whether the prefix is '/usr', and does -+special handling only if that is the case. Unless you use this -+prefix, you will get a sysroot that does not use the standard Linux -+directory layouts and cannot be used as a basis for the root -+filesystem on your target system compatibly with normal GLIBC -+installations. -+ -+The '--with-headers' option tells GLIBC where the Linux headers have -+been installed. -+ -+The '--enable-add-ons=nptl,libidn,../ports' option tells GLIBC to look -+for the listed glibc add-ons. Most notably the ports add-on (located -+just above the libc sources in the GLIBC svn tree) is required to -+support ARM targets. -+ -+We can now use the 'install-headers' makefile target to install the -+headers: -+ -+ $ make install-headers install_root=$sysroot \ -+ > install-bootstrap-headers=yes -+ -+The 'install_root' variable indicates where the files should actually -+be installed; its value is treated as the parent of the '--prefix' -+directory we passed to the configure script, so the headers will go in -+'$sysroot/usr/include'. The 'install-bootstrap-headers' variable -+requests special handling for certain tricky header files. -+ -+Next, there are a few object files needed to link shared libraries, -+which we build and install by hand: -+ -+ $ mkdir -p $sysroot/usr/lib -+ $ make csu/subdir_lib -+ $ cp csu/crt1.o csu/crti.o csu/crtn.o $sysroot/usr/lib -+ -+Finally, 'libgcc_s.so' requires a 'libc.so' to link against. However, -+since we will never actually execute its code, it doesn't matter what -+it contains. So, treating '/dev/null' as a C source file, we produce -+a dummy 'libc.so' in one step: -+ -+ $ $tools/bin/$target-gcc -nostdlib -nostartfiles -shared -x c /dev/null \ -+ > -o $sysroot/usr/lib/libc.so -+ -+ -+The Second GCC -+ -+With the GLIBC headers and selected object files installed, we can -+now build a GCC that is capable of compiling GLIBC. 
We configure, -+build, and install the second GCC, again building only the C compiler, -+and avoiding libraries we won't use: -+ -+ $ mkdir -p $obj/gcc2 -+ $ cd $obj/gcc2 -+ $ $src/$gccv/configure \ -+ > --target=$target \ -+ > --prefix=$tools \ -+ > --with-sysroot=$sysroot \ -+ > --disable-libssp --disable-libgomp --disable-libmudflap \ -+ > --disable-libffi --disable-libquadmath \ -+ > --enable-languages=c -+ $ PATH=$tools/bin:$PATH make -+ $ PATH=$tools/bin:$PATH make install -+ -+ -+GLIBC, Complete -+ -+With the second compiler built and installed, we're now ready for the -+full GLIBC build: -+ -+ $ mkdir -p $obj/glibc -+ $ cd $obj/glibc -+ $ BUILD_CC=gcc \ -+ > CC=$tools/bin/$target-gcc \ -+ > CXX=$tools/bin/$target-g++ \ -+ > AR=$tools/bin/$target-ar \ -+ > RANLIB=$tools/bin/$target-ranlib \ -+ > $src/glibc/libc/configure \ -+ > --prefix=/usr \ -+ > --with-headers=$sysroot/usr/include \ -+ > --with-kconfig=$obj/linux/scripts/kconfig \ -+ > --build=$build \ -+ > --host=$target \ -+ > --disable-profile --without-gd --without-cvs \ -+ > --enable-add-ons=nptl,libidn,../ports -+ -+Note the additional '--with-kconfig' option. This tells GLIBC where to -+find the host config tools used by the kernel 'make config' and 'make -+menuconfig'. These tools can be re-used by GLIBC for its own 'make -+*config' support, which will create 'option-groups.config' for you. -+But first make sure those tools have been built by running some -+dummy 'make *config' calls in the kernel directory: -+ -+ $ cd $obj/linux -+ $ PATH=$tools/bin:$PATH make config \ -+ > ARCH=$linux_arch CROSS_COMPILE=$target- \ -+ $ PATH=$tools/bin:$PATH make menuconfig \ -+ > ARCH=$linux_arch CROSS_COMPILE=$target- \ -+ -+Now we can configure and build the full GLIBC: -+ -+ $ cd $obj/glibc -+ $ PATH=$tools/bin:$PATH make defconfig -+ $ PATH=$tools/bin:$PATH make menuconfig -+ $ PATH=$tools/bin:$PATH make -+ $ PATH=$tools/bin:$PATH make install install_root=$sysroot -+ -+At this point, we have a complete GLIBC installation in '$sysroot', -+with header files, library files, and most of the C runtime startup -+files in place. -+ -+ -+The Third GCC -+ -+Finally, we recompile GCC against this full installation, enabling -+whatever languages and libraries we would like to use: -+ -+ $ mkdir -p $obj/gcc3 -+ $ cd $obj/gcc3 -+ $ $src/$gccv/configure \ -+ > --target=$target \ -+ > --prefix=$tools \ -+ > --with-sysroot=$sysroot \ -+ > --enable-__cxa_atexit \ -+ > --disable-libssp --disable-libgomp --disable-libmudflap \ -+ > --enable-languages=c,c++ -+ $ PATH=$tools/bin:$PATH make -+ $ PATH=$tools/bin:$PATH make install -+ -+The '--enable-__cxa_atexit' option tells GCC what sort of C++ -+destructor support to expect from the C library; it's required with -+GLIBC. -+ -+And since GCC's installation process isn't designed to help construct -+sysroot trees, we must manually copy certain libraries into place in -+the sysroot. -+ -+ $ cp -d $tools/$target/lib/libgcc_s.so* $sysroot/lib -+ $ cp -d $tools/$target/lib/libstdc++.so* $sysroot/usr/lib -+ -+ -+Trying Things Out -+ -+At this point, '$tools' contains a cross toolchain ready to use -+the GLIBC installation in '$sysroot': -+ -+ $ cat > hello.c < #include -+ > int -+ > main (int argc, char **argv) -+ > { -+ > puts ("Hello, world!"); -+ > return 0; -+ > } -+ > EOF -+ $ $tools/bin/$target-gcc -Wall hello.c -o hello -+ $ cat > c++-hello.cc < #include -+ > int -+ > main (int argc, char **argv) -+ > { -+ > std::cout << "Hello, C++ world!" 
<< std::endl; -+ > return 0; -+ > } -+ > EOF -+ $ $tools/bin/$target-g++ -Wall c++-hello.cc -o c++-hello -+ -+ -+We can use 'readelf' to verify that these are indeed executables for -+our target, using our dynamic linker: -+ -+ $ $tools/bin/$target-readelf -hl hello -+ ELF Header: -+ ... -+ Type: EXEC (Executable file) -+ Machine: ARM -+ -+ ... -+ Program Headers: -+ Type Offset VirtAddr PhysAddr FileSiz MemSiz Flg Align -+ PHDR 0x000034 0x10000034 0x10000034 0x00100 0x00100 R E 0x4 -+ INTERP 0x000134 0x00008134 0x00008134 0x00013 0x00013 R 0x1 -+ [Requesting program interpreter: /lib/ld-linux.so.3] -+ LOAD 0x000000 0x00008000 0x00008000 0x0042c 0x0042c R E 0x8000 -+ ... -+ -+Looking at the dynamic section of the installed 'libgcc_s.so', we see -+that the 'NEEDED' entry for the C library does include the '.6' -+suffix, indicating that was linked against our fully build GLIBC, and -+not our dummy 'libc.so': -+ -+ $ $tools/bin/$target-readelf -d $sysroot/lib/libgcc_s.so.1 -+ Dynamic section at offset 0x1083c contains 24 entries: -+ Tag Type Name/Value -+ 0x00000001 (NEEDED) Shared library: [libc.so.6] -+ 0x0000000e (SONAME) Library soname: [libgcc_s.so.1] -+ ... -+ -+ -+And on the target machine, we can run our programs: -+ -+ $ $sysroot/lib/ld.so.1 --library-path $sysroot/lib:$sysroot/usr/lib \ -+ > ./hello -+ Hello, world! -+ $ $sysroot/lib/ld.so.1 --library-path $sysroot/lib:$sysroot/usr/lib \ -+ > ./c++-hello -+ Hello, C++ world! -diff --git a/GLIBC.cross-testing b/GLIBC.cross-testing -new file mode 100644 -index 0000000000..b67b468466 ---- /dev/null -+++ b/GLIBC.cross-testing -@@ -0,0 +1,205 @@ -+ -*- mode: text -*- -+ -+ Cross-Testing With GLIBC -+ Jim Blandy -+ -+ -+Introduction -+ -+Developers writing software for embedded systems often use a desktop -+or other similarly capable computer for development, but need to run -+tests on the embedded system, or perhaps on a simulator. When -+configured for cross-compilation, the stock GNU C library simply -+disables running tests altogether: the command 'make tests' builds -+test programs, but does not run them. GLIBC, however, provides -+facilities for compiling tests and generating data files on the build -+system, but running the test programs themselves on a remote system or -+simulator. -+ -+ -+Test environment requirements -+ -+The test environment must meet certain conditions for GLIBC's -+cross-testing facilities to work: -+ -+- Shared filesystems. The 'build' system, on which you configure and -+ compile GLIBC, and the 'host' system, on which you intend to run -+ GLIBC, must share a filesystem containing the GLIBC build and -+ source trees. Files must appear at the same paths on both systems. -+ -+- Remote-shell like invocation. There must be a way to run a program -+ on the host system from the build system, passing it properly quoted -+ command-line arguments, setting environment variables, and -+ inheriting the caller's standard input and output. -+ -+ -+Usage -+ -+To use GLIBC's cross-testing support, provide values for the -+following Make variables when you invoke 'make': -+ -+- cross-test-wrapper -+ -+ This should be the name of the cross-testing wrapper command, along -+ with any arguments. -+ -+- cross-localedef -+ -+ This should be the name of a cross-capable localedef program, like -+ that included in the GLIBC 'localedef' module, along with any -+ arguments needed. -+ -+These are each explained in detail below. 
-+ -+ -+The Cross-Testing Wrapper -+ -+To run test programs reliably, the stock GNU C library takes care to -+ensure that test programs use the newly compiled dynamic linker and -+shared libraries, and never the host system's installed libraries. To -+accomplish this, it runs the tests by explicitly invoking the dynamic -+linker from the build tree, passing it a list of build tree -+directories to search for shared libraries, followed by the name of -+the executable to run and its arguments. -+ -+For example, where one might normally run a test program like this: -+ -+ $ ./tst-foo arg1 arg2 -+ -+the GNU C library might run that program like this: -+ -+ $ $objdir/elf/ld-linux.so.3 --library-path $objdir \ -+ ./tst-foo arg1 arg2 -+ -+(where $objdir is the path to the top of the build tree, and the -+trailing backslash indicates a continuation of the command). In other -+words, each test program invocation is 'wrapped up' inside an explicit -+invocation of the dynamic linker, which must itself execute the test -+program, having loaded shared libraries from the appropriate -+directories. -+ -+To support cross-testing, GLIBC allows the developer to optionally -+set the 'cross-test-wrapper' Make variable to another wrapper command, -+to which it passes the entire dynamic linker invocation shown above as -+arguments. For example, if the developer supplies a wrapper of -+'my-wrapper hostname', then GLIBC would run the test above as -+follows: -+ -+ $ my-wrapper hostname \ -+ $objdir/elf/ld-linux.so.3 --library-path $objdir \ -+ ./tst-foo arg1 arg2 -+ -+The 'my-wrapper' command is responsible for executing the command -+given on the host system. -+ -+Since tests are run in varying directories, the wrapper should either -+be in your command search path, or 'cross-test-wrapper' should give an -+absolute path for the wrapper. -+ -+The wrapper must meet several requirements: -+ -+- It must preserve the current directory. As explained above, the -+ build directory tree must be visible on both the build and host -+ systems, at the same path. The test wrapper must ensure that the -+ current directory it inherits is also inherited by the dynamic -+ linker (and thus the test program itself). -+ -+- It must preserve environment variables' values. Many GLIBC tests -+ set environment variables for test runs; in native testing, it -+ invokes programs like this: -+ -+ $ GCONV_PATH=$objdir/iconvdata \ -+ $objdir/elf/ld-linux.so.3 --library-path $objdir \ -+ ./tst-foo arg1 arg2 -+ -+ With the cross-testing wrapper, that invocation becomes: -+ -+ $ GCONV_PATH=$objdir/iconvdata \ -+ my-wrapper hostname \ -+ $objdir/elf/ld-linux.so.3 --library-path $objdir \ -+ ./tst-foo arg1 arg2 -+ -+ Here, 'my-wrapper' must ensure that the value it sees for -+ 'GCONV_PATH' will be seen by the dynamic linker, and thus 'tst-foo' -+ itself. (The wrapper supplied with GLIBC simply preserves the -+ values of *all* enviroment variables, with a fixed set of -+ exceptions.) -+ -+ If your wrapper is a shell script, take care to correctly propagate -+ environment variables whose values contain spaces and shell -+ metacharacters. -+ -+- It must pass the command's arguments, unmodified. The arguments -+ seen by the test program should be exactly those seen by the wrapper -+ (after whatever arguments are given to the wrapper itself). The -+ GLIBC test framework performs all needed shell word splitting and -+ expansion (wildcard expansion, parameter substitution, and so on) -+ before invoking the wrapper; further expansion may break the tests. 
-+ -+ -+The 'cross-test-ssh.sh' script -+ -+If you want to use 'ssh' (or something sufficiently similar) to run -+test programs on your host system, GLIBC includes a shell script, -+'scripts/cross-test-ssh.sh', which you can use as your wrapper -+command. This script takes care of setting the test command's current -+directory, propagating environment variable values, and carrying -+command-line arguments, all across an 'ssh' connection. You may even -+supply an alternative to 'ssh' on the command line, if needed. -+ -+For more details, pass 'cross-test-ssh.sh' the '--help' option. -+ -+ -+The Cross-Compiling Locale Definition Command -+ -+Some GLIBC tests rely on locales generated especially for the test -+process. In a native configuration, these tests simply run the -+'localedef' command built by the normal GLIBC build process, -+'locale/localedef', to process and install their locales. However, in -+a cross-compiling configuration, this 'localedef' is built for the -+host system, not the build system, and since it requires quite a bit -+of memory to run (we have seen it fail on systems with 64MiB of -+memory), it may not be practical to run it on the host system. -+ -+If set, GLIBC uses the 'cross-localedef' Make variable as the command -+to run on the build system to process and install locales. The -+localedef program built from the GLIBC 'localedef' module is -+suitable. -+ -+The value of 'cross-localedef' may also include command-line arguments -+to be passed to the program; if you are using GLIBC's 'localedef', -+you may include endianness and 'uint32_t' alignment arguments here. -+ -+ -+Example -+ -+In developing GLIBC's cross-testing facility, we invoked 'make' with -+the following script: -+ -+ #!/bin/sh -+ -+ srcdir=... -+ test_hostname=... -+ localedefdir=... -+ cross_gxx=...-g++ -+ -+ wrapper="$srcdir/scripts/cross-test-ssh.sh $test_hostname" -+ localedef="$localedefdir/localedef --little-endian --uint32-align=4" -+ -+ make cross-test-wrapper="$wrapper" \ -+ cross-localedef="$localedef" \ -+ CXX="$cross_gxx" \ -+ "$@" -+ -+ -+Other Cross-Testing Concerns -+ -+Here are notes on some other issues which you may encounter in running -+the GLIBC tests in a cross-compiling environment: -+ -+- Some tests require a C++ cross-compiler; you should set the 'CXX' -+ Make variable to the name of an appropriate cross-compiler. -+ -+- Some tests require access to libstdc++.so.6 and libgcc_s.so.1; we -+ simply place copies of these libraries in the top GLIBC build -+ directory. diff --git a/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Resolve-__fpscr_values-on-SH4.patch b/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Resolve-__fpscr_values-on-SH4.patch new file mode 100644 index 000000000..bb21c5472 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0020-eglibc-Resolve-__fpscr_values-on-SH4.patch @@ -0,0 +1,56 @@ +From d3451c186f96c6b2434a4ac9304c01730bf22061 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:55:53 +0000 +Subject: [PATCH 20/29] eglibc: Resolve __fpscr_values on SH4 + +2010-09-29 Nobuhiro Iwamatsu + Andrew Stubbs + + Resolve SH's __fpscr_values to symbol in libc.so. + + * sysdeps/sh/sh4/fpu/fpu_control.h: Add C++ __set_fpscr prototype. + * sysdeps/unix/sysv/linux/sh/Versions (GLIBC_2.2): Add __fpscr_values. + * sysdeps/unix/sysv/linux/sh/sysdep.S (___fpscr_values): New constant. 
+ +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + sysdeps/unix/sysv/linux/sh/Versions | 1 + + sysdeps/unix/sysv/linux/sh/sysdep.S | 11 +++++++++++ + 2 files changed, 12 insertions(+) + +diff --git a/sysdeps/unix/sysv/linux/sh/Versions b/sysdeps/unix/sysv/linux/sh/Versions +index e0938c4165..ca1d7da339 100644 +--- a/sysdeps/unix/sysv/linux/sh/Versions ++++ b/sysdeps/unix/sysv/linux/sh/Versions +@@ -2,6 +2,7 @@ libc { + GLIBC_2.2 { + # functions used in other libraries + __xstat64; __fxstat64; __lxstat64; ++ __fpscr_values; + + # a* + alphasort64; +diff --git a/sysdeps/unix/sysv/linux/sh/sysdep.S b/sysdeps/unix/sysv/linux/sh/sysdep.S +index 85ff3f900e..7743b8d57a 100644 +--- a/sysdeps/unix/sysv/linux/sh/sysdep.S ++++ b/sysdeps/unix/sysv/linux/sh/sysdep.S +@@ -30,3 +30,14 @@ ENTRY (__syscall_error) + + #define __syscall_error __syscall_error_1 + #include ++ ++ .data ++ .align 3 ++ .globl ___fpscr_values ++ .type ___fpscr_values, @object ++ .size ___fpscr_values, 8 ++___fpscr_values: ++ .long 0 ++ .long 0x80000 ++weak_alias (___fpscr_values, __fpscr_values) ++ +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Forward-port-cross-locale-generation-support.patch b/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Forward-port-cross-locale-generation-support.patch new file mode 100644 index 000000000..7fe5db0c2 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Forward-port-cross-locale-generation-support.patch @@ -0,0 +1,563 @@ +From e4b8abdc2d884d721fd89d67b689546f2f780924 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 01:33:49 +0000 +Subject: [PATCH 21/29] eglibc: Forward port cross locale generation support + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + locale/Makefile | 3 +- + locale/catnames.c | 46 +++++++++++++++++++++++++++ + locale/localeinfo.h | 2 +- + locale/programs/charmap-dir.c | 6 ++++ + locale/programs/ld-collate.c | 17 +++++----- + locale/programs/ld-ctype.c | 27 ++++++++-------- + locale/programs/ld-time.c | 31 ++++++++++++------ + locale/programs/linereader.c | 2 +- + locale/programs/localedef.c | 8 +++++ + locale/programs/locfile.c | 5 ++- + locale/programs/locfile.h | 59 +++++++++++++++++++++++++++++++++-- + locale/setlocale.c | 29 ----------------- + 12 files changed, 167 insertions(+), 68 deletions(-) + create mode 100644 locale/catnames.c + +diff --git a/locale/Makefile b/locale/Makefile +index 49c0c78c7d..ebfcf4f4da 100644 +--- a/locale/Makefile ++++ b/locale/Makefile +@@ -26,7 +26,8 @@ headers = langinfo.h locale.h bits/locale.h \ + bits/types/locale_t.h bits/types/__locale_t.h + routines = setlocale findlocale loadlocale loadarchive \ + localeconv nl_langinfo nl_langinfo_l mb_cur_max \ +- newlocale duplocale freelocale uselocale ++ newlocale duplocale freelocale uselocale \ ++ catnames + tests = tst-C-locale tst-locname tst-duplocale + tests-container = tst-localedef-path-norm + categories = ctype messages monetary numeric time paper name \ +diff --git a/locale/catnames.c b/locale/catnames.c +new file mode 100644 +index 0000000000..538f3f5edb +--- /dev/null ++++ b/locale/catnames.c +@@ -0,0 +1,46 @@ ++/* Copyright (C) 2006 Free Software Foundation, Inc. ++ This file is part of the GNU C Library. ++ ++ The GNU C Library is free software; you can redistribute it and/or ++ modify it under the terms of the GNU Lesser General Public ++ License as published by the Free Software Foundation; either ++ version 2.1 of the License, or (at your option) any later version. 
++ ++ The GNU C Library is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ Lesser General Public License for more details. ++ ++ You should have received a copy of the GNU Lesser General Public ++ License along with the GNU C Library; if not, write to the Free ++ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA ++ 02111-1307 USA. */ ++ ++#include "localeinfo.h" ++ ++/* Define an array of category names (also the environment variable names). */ ++const struct catnamestr_t _nl_category_names attribute_hidden = ++ { ++#define DEFINE_CATEGORY(category, category_name, items, a) \ ++ category_name, ++#include "categories.def" ++#undef DEFINE_CATEGORY ++ }; ++ ++const uint8_t _nl_category_name_idxs[__LC_LAST] attribute_hidden = ++ { ++#define DEFINE_CATEGORY(category, category_name, items, a) \ ++ [category] = offsetof (struct catnamestr_t, CATNAMEMF (__LINE__)), ++#include "categories.def" ++#undef DEFINE_CATEGORY ++ }; ++ ++/* An array of their lengths, for convenience. */ ++const uint8_t _nl_category_name_sizes[] attribute_hidden = ++ { ++#define DEFINE_CATEGORY(category, category_name, items, a) \ ++ [category] = sizeof (category_name) - 1, ++#include "categories.def" ++#undef DEFINE_CATEGORY ++ [LC_ALL] = sizeof ("LC_ALL") - 1 ++ }; +diff --git a/locale/localeinfo.h b/locale/localeinfo.h +index fdc283c69a..4eeed35f90 100644 +--- a/locale/localeinfo.h ++++ b/locale/localeinfo.h +@@ -230,7 +230,7 @@ __libc_tsd_define (extern, locale_t, LOCALE) + unused. We can manage this playing some tricks with weak references. + But with thread-local locale settings, it becomes quite ungainly unless + we can use __thread variables. So only in that case do we attempt this. */ +-#ifndef SHARED ++#if !defined SHARED && !defined IN_GLIBC_LOCALEDEF + # include + # define NL_CURRENT_INDIRECT 1 + #endif +diff --git a/locale/programs/charmap-dir.c b/locale/programs/charmap-dir.c +index 1a526a240d..0fb2daf936 100644 +--- a/locale/programs/charmap-dir.c ++++ b/locale/programs/charmap-dir.c +@@ -18,7 +18,9 @@ + #include + #include + #include ++#ifndef NO_UNCOMPRESS + #include ++#endif + #include + #include + #include +@@ -154,6 +156,7 @@ charmap_closedir (CHARMAP_DIR *cdir) + return closedir (dir); + } + ++#ifndef NO_UNCOMPRESS + /* Creates a subprocess decompressing the given pathname, and returns + a stream reading its output (the decompressed data). */ + static +@@ -202,6 +205,7 @@ fopen_uncompressed (const char *pathname, const char *compressor) + } + return NULL; + } ++#endif + + /* Opens a charmap for reading, given its name (not an alias name). 
*/ + FILE * +@@ -224,6 +228,7 @@ charmap_open (const char *directory, const char *name) + if (stream != NULL) + return stream; + ++#ifndef NO_UNCOMPRESS + memcpy (p, ".gz", 4); + stream = fopen_uncompressed (pathname, "gzip"); + if (stream != NULL) +@@ -233,6 +238,7 @@ charmap_open (const char *directory, const char *name) + stream = fopen_uncompressed (pathname, "bzip2"); + if (stream != NULL) + return stream; ++#endif + + return NULL; + } +diff --git a/locale/programs/ld-collate.c b/locale/programs/ld-collate.c +index feb1a11258..5a8e522470 100644 +--- a/locale/programs/ld-collate.c ++++ b/locale/programs/ld-collate.c +@@ -349,7 +349,7 @@ new_element (struct locale_collate_t *collate, const char *mbs, size_t mbslen, + } + if (wcs != NULL) + { +- size_t nwcs = wcslen ((wchar_t *) wcs); ++ size_t nwcs = wcslen_uint32 (wcs); + uint32_t zero = 0; + /* Handle as a single character. */ + if (nwcs == 0) +@@ -1772,8 +1772,7 @@ symbol `%s' has the same encoding as"), (*eptr)->name); + + if ((*eptr)->nwcs == runp->nwcs) + { +- int c = wmemcmp ((wchar_t *) (*eptr)->wcs, +- (wchar_t *) runp->wcs, runp->nwcs); ++ int c = wmemcmp_uint32 ((*eptr)->wcs, runp->wcs, runp->nwcs); + + if (c == 0) + { +@@ -2000,9 +1999,9 @@ add_to_tablewc (uint32_t ch, struct element_t *runp) + one consecutive entry. */ + if (runp->wcnext != NULL + && runp->nwcs == runp->wcnext->nwcs +- && wmemcmp ((wchar_t *) runp->wcs, +- (wchar_t *)runp->wcnext->wcs, +- runp->nwcs - 1) == 0 ++ && wmemcmp_uint32 (runp->wcs, ++ runp->wcnext->wcs, ++ runp->nwcs - 1) == 0 + && (runp->wcs[runp->nwcs - 1] + == runp->wcnext->wcs[runp->nwcs - 1] + 1)) + { +@@ -2026,9 +2025,9 @@ add_to_tablewc (uint32_t ch, struct element_t *runp) + runp = runp->wcnext; + while (runp->wcnext != NULL + && runp->nwcs == runp->wcnext->nwcs +- && wmemcmp ((wchar_t *) runp->wcs, +- (wchar_t *)runp->wcnext->wcs, +- runp->nwcs - 1) == 0 ++ && wmemcmp_uint32 (runp->wcs, ++ runp->wcnext->wcs, ++ runp->nwcs - 1) == 0 + && (runp->wcs[runp->nwcs - 1] + == runp->wcnext->wcs[runp->nwcs - 1] + 1)); + +diff --git a/locale/programs/ld-ctype.c b/locale/programs/ld-ctype.c +index 3328093d0e..d58fb0f4b7 100644 +--- a/locale/programs/ld-ctype.c ++++ b/locale/programs/ld-ctype.c +@@ -915,7 +915,7 @@ ctype_output (struct localedef_t *locale, const struct charmap_t *charmap, + allocate_arrays (ctype, charmap, ctype->repertoire); + + default_missing_len = (ctype->default_missing +- ? wcslen ((wchar_t *) ctype->default_missing) ++ ? wcslen_uint32 (ctype->default_missing) + : 0); + + init_locale_data (&file, nelems); +@@ -1927,7 +1927,7 @@ read_translit_entry (struct linereader *ldfile, struct locale_ctype_t *ctype, + ignore = 1; + else + /* This value is usable. 
*/ +- obstack_grow (ob, to_wstr, wcslen ((wchar_t *) to_wstr) * 4); ++ obstack_grow (ob, to_wstr, wcslen_uint32 (to_wstr) * 4); + + first = 0; + } +@@ -2461,8 +2461,8 @@ with character code range values one must use the absolute ellipsis `...'")); + } + + handle_tok_digit: +- class_bit = _ISwdigit; +- class256_bit = _ISdigit; ++ class_bit = BITw (tok_digit); ++ class256_bit = BIT (tok_digit); + handle_digits = 1; + goto read_charclass; + +@@ -3904,8 +3904,7 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, + + while (idx < number) + { +- int res = wcscmp ((const wchar_t *) sorted[idx]->from, +- (const wchar_t *) runp->from); ++ int res = wcscmp_uint32 (sorted[idx]->from, runp->from); + if (res == 0) + { + replace = 1; +@@ -3942,11 +3941,11 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, + for (size_t cnt = 0; cnt < number; ++cnt) + { + struct translit_to_t *srunp; +- from_len += wcslen ((const wchar_t *) sorted[cnt]->from) + 1; ++ from_len += wcslen_uint32 (sorted[cnt]->from) + 1; + srunp = sorted[cnt]->to; + while (srunp != NULL) + { +- to_len += wcslen ((const wchar_t *) srunp->str) + 1; ++ to_len += wcslen_uint32 (srunp->str) + 1; + srunp = srunp->next; + } + /* Plus one for the extra NUL character marking the end of +@@ -3970,18 +3969,18 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, + ctype->translit_from_idx[cnt] = from_len; + ctype->translit_to_idx[cnt] = to_len; + +- len = wcslen ((const wchar_t *) sorted[cnt]->from) + 1; +- wmemcpy ((wchar_t *) &ctype->translit_from_tbl[from_len], +- (const wchar_t *) sorted[cnt]->from, len); ++ len = wcslen_uint32 (sorted[cnt]->from) + 1; ++ wmemcpy_uint32 (&ctype->translit_from_tbl[from_len], ++ sorted[cnt]->from, len); + from_len += len; + + ctype->translit_to_idx[cnt] = to_len; + srunp = sorted[cnt]->to; + while (srunp != NULL) + { +- len = wcslen ((const wchar_t *) srunp->str) + 1; +- wmemcpy ((wchar_t *) &ctype->translit_to_tbl[to_len], +- (const wchar_t *) srunp->str, len); ++ len = wcslen_uint32 (srunp->str) + 1; ++ wmemcpy_uint32 (&ctype->translit_to_tbl[to_len], ++ srunp->str, len); + to_len += len; + srunp = srunp->next; + } +diff --git a/locale/programs/ld-time.c b/locale/programs/ld-time.c +index f7db873adb..fec3773c9d 100644 +--- a/locale/programs/ld-time.c ++++ b/locale/programs/ld-time.c +@@ -220,8 +220,10 @@ No definition for %s category found"), "LC_TIME"); + } + else + { ++ static const uint32_t wt_fmt_ampm[] ++ = { '%','I',':','%','M',':','%','S',' ','%','p',0 }; + time->t_fmt_ampm = "%I:%M:%S %p"; +- time->wt_fmt_ampm = (const uint32_t *) L"%I:%M:%S %p"; ++ time->wt_fmt_ampm = wt_fmt_ampm; + } + } + +@@ -231,7 +233,7 @@ No definition for %s category found"), "LC_TIME"); + const int days_per_month[12] = { 31, 29, 31, 30, 31, 30, + 31, 31, 30, 31 ,30, 31 }; + size_t idx; +- wchar_t *wstr; ++ uint32_t *wstr; + + time->era_entries = + (struct era_data *) xmalloc (time->num_era +@@ -457,18 +459,18 @@ No definition for %s category found"), "LC_TIME"); + } + + /* Now generate the wide character name and format. */ +- wstr = wcschr ((wchar_t *) time->wera[idx], L':');/* end direction */ +- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end offset */ +- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end start */ +- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end end */ ++ wstr = wcschr_uint32 (time->wera[idx], L':'); /* end direction */ ++ wstr = wstr ? wcschr_uint32 (wstr + 1, L':') : NULL; /* end offset */ ++ wstr = wstr ? 
wcschr_uint32 (wstr + 1, L':') : NULL; /* end start */ ++ wstr = wstr ? wcschr_uint32 (wstr + 1, L':') : NULL; /* end end */ + if (wstr != NULL) + { +- time->era_entries[idx].wname = (uint32_t *) wstr + 1; +- wstr = wcschr (wstr + 1, L':'); /* end name */ ++ time->era_entries[idx].wname = wstr + 1; ++ wstr = wcschr_uint32 (wstr + 1, L':'); /* end name */ + if (wstr != NULL) + { + *wstr = L'\0'; +- time->era_entries[idx].wformat = (uint32_t *) wstr + 1; ++ time->era_entries[idx].wformat = wstr + 1; + } + else + time->era_entries[idx].wname = +@@ -527,7 +529,16 @@ No definition for %s category found"), "LC_TIME"); + if (time->date_fmt == NULL) + time->date_fmt = "%a %b %e %H:%M:%S %Z %Y"; + if (time->wdate_fmt == NULL) +- time->wdate_fmt = (const uint32_t *) L"%a %b %e %H:%M:%S %Z %Y"; ++ { ++ static const uint32_t wdate_fmt[] = ++ { '%','a',' ', ++ '%','b',' ', ++ '%','e',' ', ++ '%','H',':','%','M',':','%','S',' ', ++ '%','Z',' ', ++ '%','Y',0 }; ++ time->wdate_fmt = wdate_fmt; ++ } + } + + +diff --git a/locale/programs/linereader.c b/locale/programs/linereader.c +index 7ebd933801..5753750756 100644 +--- a/locale/programs/linereader.c ++++ b/locale/programs/linereader.c +@@ -595,7 +595,7 @@ get_string (struct linereader *lr, const struct charmap_t *charmap, + { + int return_widestr = lr->return_widestr; + char *buf; +- wchar_t *buf2 = NULL; ++ uint32_t *buf2 = NULL; + size_t bufact; + size_t bufmax = 56; + +diff --git a/locale/programs/localedef.c b/locale/programs/localedef.c +index b048bd05b9..fed15c42ec 100644 +--- a/locale/programs/localedef.c ++++ b/locale/programs/localedef.c +@@ -109,6 +109,7 @@ void (*argp_program_version_hook) (FILE *, struct argp_state *) = print_version; + #define OPT_NO_WARN 402 + #define OPT_WARN 403 + #define OPT_NO_HARD_LINKS 404 ++#define OPT_UINT32_ALIGN 405 + + /* Definitions of arguments for argp functions. */ + static const struct argp_option options[] = +@@ -153,6 +154,8 @@ static const struct argp_option options[] = + N_("Generate little-endian output") }, + { "big-endian", OPT_BIG_ENDIAN, NULL, 0, + N_("Generate big-endian output") }, ++ { "uint32-align", OPT_UINT32_ALIGN, "ALIGNMENT", 0, ++ N_("Set the target's uint32_t alignment in bytes (default 4)") }, + { NULL, 0, NULL, 0, NULL } + }; + +@@ -243,12 +246,14 @@ main (int argc, char *argv[]) + ctype locale. (P1003.2 4.35.5.2) */ + setlocale (LC_CTYPE, "POSIX"); + ++#ifndef NO_SYSCONF + /* Look whether the system really allows locale definitions. POSIX + defines error code 3 for this situation so I think it must be + a fatal error (see P1003.2 4.35.8). */ + if (sysconf (_SC_2_LOCALEDEF) < 0) + record_error (3, 0, _("\ + FATAL: system does not define `_POSIX2_LOCALEDEF'")); ++#endif + + /* Process charmap file. */ + charmap = charmap_read (charmap_file, verbose, 1, be_quiet, 1); +@@ -400,6 +405,9 @@ parse_opt (int key, char *arg, struct argp_state *state) + /* Do not hard link to other locales. */ + hard_links = false; + break; ++ case OPT_UINT32_ALIGN: ++ uint32_align_mask = strtol (arg, NULL, 0) - 1; ++ break; + case 'c': + force_output = 1; + break; +diff --git a/locale/programs/locfile.c b/locale/programs/locfile.c +index eb2f4634da..d387147323 100644 +--- a/locale/programs/locfile.c ++++ b/locale/programs/locfile.c +@@ -544,6 +544,9 @@ compare_files (const char *filename1, const char *filename2, size_t size, + machine running localedef. */ + bool swap_endianness_p; + ++/* The target's value of __align__(uint32_t) - 1. 
*/ ++unsigned int uint32_align_mask = 3; ++ + /* When called outside a start_locale_structure/end_locale_structure + or start_locale_prelude/end_locale_prelude block, record that the + next byte in FILE's obstack will be the first byte of a new element. +@@ -621,7 +624,7 @@ add_locale_string (struct locale_file *file, const char *string) + void + add_locale_wstring (struct locale_file *file, const uint32_t *string) + { +- add_locale_uint32_array (file, string, wcslen ((const wchar_t *) string) + 1); ++ add_locale_uint32_array (file, string, wcslen_uint32 (string) + 1); + } + + /* Record that FILE's next element is the 32-bit integer VALUE. */ +diff --git a/locale/programs/locfile.h b/locale/programs/locfile.h +index 7ea10038b4..0888c09762 100644 +--- a/locale/programs/locfile.h ++++ b/locale/programs/locfile.h +@@ -71,6 +71,8 @@ extern void write_all_categories (struct localedef_t *definitions, + + extern bool swap_endianness_p; + ++extern unsigned int uint32_align_mask; ++ + /* Change the output to be big-endian if BIG_ENDIAN is true and + little-endian otherwise. */ + static inline void +@@ -89,7 +91,8 @@ maybe_swap_uint32 (uint32_t value) + } + + /* Likewise, but munge an array of N uint32_ts starting at ARRAY. */ +-static inline void ++static void ++__attribute__ ((unused)) + maybe_swap_uint32_array (uint32_t *array, size_t n) + { + if (swap_endianness_p) +@@ -99,7 +102,8 @@ maybe_swap_uint32_array (uint32_t *array, size_t n) + + /* Like maybe_swap_uint32_array, but the array of N elements is at + the end of OBSTACK's current object. */ +-static inline void ++static void ++__attribute__ ((unused)) + maybe_swap_uint32_obstack (struct obstack *obstack, size_t n) + { + maybe_swap_uint32_array ((uint32_t *) obstack_next_free (obstack) - n, n); +@@ -276,4 +280,55 @@ extern void identification_output (struct localedef_t *locale, + const struct charmap_t *charmap, + const char *output_path); + ++static size_t wcslen_uint32 (const uint32_t *str) __attribute__ ((unused)); ++static uint32_t * wmemcpy_uint32 (uint32_t *s1, const uint32_t *s2, size_t n) __attribute__ ((unused)); ++static uint32_t * wcschr_uint32 (const uint32_t *s, uint32_t ch) __attribute__ ((unused)); ++static int wcscmp_uint32 (const uint32_t *s1, const uint32_t *s2) __attribute__ ((unused)); ++static int wmemcmp_uint32 (const uint32_t *s1, const uint32_t *s2, size_t n) __attribute__ ((unused)); ++ ++static size_t ++wcslen_uint32 (const uint32_t *str) ++{ ++ size_t len = 0; ++ while (str[len] != 0) ++ len++; ++ return len; ++} ++ ++static int ++wmemcmp_uint32 (const uint32_t *s1, const uint32_t *s2, size_t n) ++{ ++ while (n-- != 0) ++ { ++ int diff = *s1++ - *s2++; ++ if (diff != 0) ++ return diff; ++ } ++ return 0; ++} ++ ++static int ++wcscmp_uint32 (const uint32_t *s1, const uint32_t *s2) ++{ ++ while (*s1 != 0 && *s1 == *s2) ++ s1++, s2++; ++ return *s1 - *s2; ++} ++ ++static uint32_t * ++wmemcpy_uint32 (uint32_t *s1, const uint32_t *s2, size_t n) ++{ ++ return memcpy (s1, s2, n * sizeof (uint32_t)); ++} ++ ++static uint32_t * ++wcschr_uint32 (const uint32_t *s, uint32_t ch) ++{ ++ do ++ if (*s == ch) ++ return (uint32_t *) s; ++ while (*s++ != 0); ++ return 0; ++} ++ + #endif /* locfile.h */ +diff --git a/locale/setlocale.c b/locale/setlocale.c +index 030f1727bd..096d8ed895 100644 +--- a/locale/setlocale.c ++++ b/locale/setlocale.c +@@ -63,35 +63,6 @@ static char *const _nl_current_used[] = + + #endif + +- +-/* Define an array of category names (also the environment variable names). 
*/ +-const struct catnamestr_t _nl_category_names attribute_hidden = +- { +-#define DEFINE_CATEGORY(category, category_name, items, a) \ +- category_name, +-#include "categories.def" +-#undef DEFINE_CATEGORY +- }; +- +-const uint8_t _nl_category_name_idxs[__LC_LAST] attribute_hidden = +- { +-#define DEFINE_CATEGORY(category, category_name, items, a) \ +- [category] = offsetof (struct catnamestr_t, CATNAMEMF (__LINE__)), +-#include "categories.def" +-#undef DEFINE_CATEGORY +- }; +- +-/* An array of their lengths, for convenience. */ +-const uint8_t _nl_category_name_sizes[] attribute_hidden = +- { +-#define DEFINE_CATEGORY(category, category_name, items, a) \ +- [category] = sizeof (category_name) - 1, +-#include "categories.def" +-#undef DEFINE_CATEGORY +- [LC_ALL] = sizeof ("LC_ALL") - 1 +- }; +- +- + #ifdef NL_CURRENT_INDIRECT + # define WEAK_POSTLOAD(postload) weak_extern (postload) + #else +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Help-bootstrap-cross-toolchain.patch b/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Help-bootstrap-cross-toolchain.patch deleted file mode 100644 index 6374ea408..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0021-eglibc-Help-bootstrap-cross-toolchain.patch +++ /dev/null @@ -1,97 +0,0 @@ -From 6a32d5bf40deee5d12d24c06f3ea9b5479c16802 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:49:28 +0000 -Subject: [PATCH] eglibc: Help bootstrap cross toolchain - -Taken from EGLIBC, r1484 + r1525 - - 2007-02-20 Jim Blandy - - * Makefile (install-headers): Preserve old behavior: depend on - $(inst_includedir)/gnu/stubs.h only if install-bootstrap-headers - is set; otherwise, place gnu/stubs.h on the 'install-others' list. - - 2007-02-16 Jim Blandy - - * Makefile: Amend make install-headers to install everything - necessary for building a cross-compiler. Install gnu/stubs.h as - part of 'install-headers', not 'install-others'. - If install-bootstrap-headers is 'yes', install a dummy copy of - gnu/stubs.h, instead of computing the real thing. - * include/stubs-bootstrap.h: New file. - -Upstream-Status: Pending -Signed-off-by: Khem Raj ---- - Makefile | 22 +++++++++++++++++++++- - include/stubs-bootstrap.h | 12 ++++++++++++ - 2 files changed, 33 insertions(+), 1 deletion(-) - create mode 100644 include/stubs-bootstrap.h - -diff --git a/Makefile b/Makefile -index 8f0a93aceb..8eba23a868 100644 ---- a/Makefile -+++ b/Makefile -@@ -79,9 +79,18 @@ subdir-dirs = include - vpath %.h $(subdir-dirs) - - # What to install. --install-others = $(inst_includedir)/gnu/stubs.h - install-bin-script = - -+# If we're bootstrapping, install a dummy gnu/stubs.h along with the -+# other headers, so 'make install-headers' produces a useable include -+# tree. Otherwise, install gnu/stubs.h later, after the rest of the -+# build is done. -+ifeq ($(install-bootstrap-headers),yes) -+install-headers: $(inst_includedir)/gnu/stubs.h -+else -+install-others = $(inst_includedir)/gnu/stubs.h -+endif -+ - ifeq (yes,$(build-shared)) - headers += gnu/lib-names.h - endif -@@ -407,6 +416,16 @@ others: $(common-objpfx)testrun.sh $(common-objpfx)debugglibc.sh - - subdir-stubs := $(foreach dir,$(subdirs),$(common-objpfx)$(dir)/stubs) - -+# gnu/stubs.h depends (via the subdir 'stubs' targets) on all the .o -+# files in EGLIBC. For bootstrapping a GCC/EGLIBC pair, an empty -+# gnu/stubs.h is good enough. 
-+ifeq ($(install-bootstrap-headers),yes) -+$(inst_includedir)/gnu/stubs.h: include/stubs-bootstrap.h $(+force) -+ $(make-target-directory) -+ $(INSTALL_DATA) $< $@ -+ -+installed-stubs = -+else - ifndef abi-variants - installed-stubs = $(inst_includedir)/gnu/stubs.h - else -@@ -433,6 +452,7 @@ $(inst_includedir)/gnu/stubs.h: $(+force) - - install-others-nosubdir: $(installed-stubs) - endif -+endif - - - # Since stubs.h is never needed when building the library, we simplify the -diff --git a/include/stubs-bootstrap.h b/include/stubs-bootstrap.h -new file mode 100644 -index 0000000000..1d2b669aff ---- /dev/null -+++ b/include/stubs-bootstrap.h -@@ -0,0 +1,12 @@ -+/* Placeholder stubs.h file for bootstrapping. -+ -+ When bootstrapping a GCC/EGLIBC pair, GCC requires that the EGLIBC -+ headers be installed, but we can't fully build EGLIBC without that -+ GCC. So we run the command: -+ -+ make install-headers install-bootstrap-headers=yes -+ -+ to install the headers GCC needs, but avoid building certain -+ difficult headers. The header depends, via the -+ EGLIBC subdir 'stubs' make targets, on every .o file in EGLIBC, but -+ an empty stubs.h like this will do fine for GCC. */ diff --git a/poky/meta/recipes-core/glibc/glibc/0022-Define-DUMMY_LOCALE_T-if-not-defined.patch b/poky/meta/recipes-core/glibc/glibc/0022-Define-DUMMY_LOCALE_T-if-not-defined.patch new file mode 100644 index 000000000..0e9318308 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0022-Define-DUMMY_LOCALE_T-if-not-defined.patch @@ -0,0 +1,32 @@ +From ad5bc87134871b99e082e2449b8c1ce2f1375ef9 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 20 Apr 2016 21:11:00 -0700 +Subject: [PATCH 22/29] Define DUMMY_LOCALE_T if not defined + +This is a hack to fix building the locale bits on an older +CentOs 5.X machine + +Upstream-Status: Inappropriate [other] + +Signed-off-by: Khem Raj +--- + locale/programs/config.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/locale/programs/config.h b/locale/programs/config.h +index d76d9f7e8b..92b56d7df3 100644 +--- a/locale/programs/config.h ++++ b/locale/programs/config.h +@@ -19,6 +19,9 @@ + #ifndef _LD_CONFIG_H + #define _LD_CONFIG_H 1 + ++#ifndef DUMMY_LOCALE_T ++#define DUMMY_LOCALE_T ++#endif + /* Use the internal textdomain used for libc messages. */ + #define PACKAGE _libc_intl_domainname + #ifndef VERSION +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0022-eglibc-Resolve-__fpscr_values-on-SH4.patch b/poky/meta/recipes-core/glibc/glibc/0022-eglibc-Resolve-__fpscr_values-on-SH4.patch deleted file mode 100644 index 2a503c811..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0022-eglibc-Resolve-__fpscr_values-on-SH4.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 25c21857a3fc0eb26831616ba88a696dd31ecba1 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 00:55:53 +0000 -Subject: [PATCH] eglibc: Resolve __fpscr_values on SH4 - -2010-09-29 Nobuhiro Iwamatsu - Andrew Stubbs - - Resolve SH's __fpscr_values to symbol in libc.so. - - * sysdeps/sh/sh4/fpu/fpu_control.h: Add C++ __set_fpscr prototype. - * sysdeps/unix/sysv/linux/sh/Versions (GLIBC_2.2): Add __fpscr_values. - * sysdeps/unix/sysv/linux/sh/sysdep.S (___fpscr_values): New constant. 
- -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - sysdeps/unix/sysv/linux/sh/Versions | 1 + - sysdeps/unix/sysv/linux/sh/sysdep.S | 11 +++++++++++ - 2 files changed, 12 insertions(+) - -diff --git a/sysdeps/unix/sysv/linux/sh/Versions b/sysdeps/unix/sysv/linux/sh/Versions -index e0938c4165..ca1d7da339 100644 ---- a/sysdeps/unix/sysv/linux/sh/Versions -+++ b/sysdeps/unix/sysv/linux/sh/Versions -@@ -2,6 +2,7 @@ libc { - GLIBC_2.2 { - # functions used in other libraries - __xstat64; __fxstat64; __lxstat64; -+ __fpscr_values; - - # a* - alphasort64; -diff --git a/sysdeps/unix/sysv/linux/sh/sysdep.S b/sysdeps/unix/sysv/linux/sh/sysdep.S -index 85ff3f900e..7743b8d57a 100644 ---- a/sysdeps/unix/sysv/linux/sh/sysdep.S -+++ b/sysdeps/unix/sysv/linux/sh/sysdep.S -@@ -30,3 +30,14 @@ ENTRY (__syscall_error) - - #define __syscall_error __syscall_error_1 - #include -+ -+ .data -+ .align 3 -+ .globl ___fpscr_values -+ .type ___fpscr_values, @object -+ .size ___fpscr_values, 8 -+___fpscr_values: -+ .long 0 -+ .long 0x80000 -+weak_alias (___fpscr_values, __fpscr_values) -+ diff --git a/poky/meta/recipes-core/glibc/glibc/0023-eglibc-Forward-port-cross-locale-generation-support.patch b/poky/meta/recipes-core/glibc/glibc/0023-eglibc-Forward-port-cross-locale-generation-support.patch deleted file mode 100644 index 1a90f22db..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0023-eglibc-Forward-port-cross-locale-generation-support.patch +++ /dev/null @@ -1,560 +0,0 @@ -From 30008327aadf0c775e644bb387d7c25952ed05b5 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 18 Mar 2015 01:33:49 +0000 -Subject: [PATCH] eglibc: Forward port cross locale generation support - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - locale/Makefile | 3 +- - locale/catnames.c | 46 +++++++++++++++++++++++++++ - locale/localeinfo.h | 2 +- - locale/programs/charmap-dir.c | 6 ++++ - locale/programs/ld-collate.c | 17 +++++----- - locale/programs/ld-ctype.c | 27 ++++++++-------- - locale/programs/ld-time.c | 31 ++++++++++++------ - locale/programs/linereader.c | 2 +- - locale/programs/localedef.c | 8 +++++ - locale/programs/locfile.c | 5 ++- - locale/programs/locfile.h | 59 +++++++++++++++++++++++++++++++++-- - locale/setlocale.c | 29 ----------------- - 12 files changed, 167 insertions(+), 68 deletions(-) - create mode 100644 locale/catnames.c - -diff --git a/locale/Makefile b/locale/Makefile -index c9694e236e..ba7105fad6 100644 ---- a/locale/Makefile -+++ b/locale/Makefile -@@ -26,7 +26,8 @@ headers = langinfo.h locale.h bits/locale.h \ - bits/types/locale_t.h bits/types/__locale_t.h - routines = setlocale findlocale loadlocale loadarchive \ - localeconv nl_langinfo nl_langinfo_l mb_cur_max \ -- newlocale duplocale freelocale uselocale -+ newlocale duplocale freelocale uselocale \ -+ catnames - tests = tst-C-locale tst-locname tst-duplocale - categories = ctype messages monetary numeric time paper name \ - address telephone measurement identification collate -diff --git a/locale/catnames.c b/locale/catnames.c -new file mode 100644 -index 0000000000..538f3f5edb ---- /dev/null -+++ b/locale/catnames.c -@@ -0,0 +1,46 @@ -+/* Copyright (C) 2006 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. 
-+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, write to the Free -+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA -+ 02111-1307 USA. */ -+ -+#include "localeinfo.h" -+ -+/* Define an array of category names (also the environment variable names). */ -+const struct catnamestr_t _nl_category_names attribute_hidden = -+ { -+#define DEFINE_CATEGORY(category, category_name, items, a) \ -+ category_name, -+#include "categories.def" -+#undef DEFINE_CATEGORY -+ }; -+ -+const uint8_t _nl_category_name_idxs[__LC_LAST] attribute_hidden = -+ { -+#define DEFINE_CATEGORY(category, category_name, items, a) \ -+ [category] = offsetof (struct catnamestr_t, CATNAMEMF (__LINE__)), -+#include "categories.def" -+#undef DEFINE_CATEGORY -+ }; -+ -+/* An array of their lengths, for convenience. */ -+const uint8_t _nl_category_name_sizes[] attribute_hidden = -+ { -+#define DEFINE_CATEGORY(category, category_name, items, a) \ -+ [category] = sizeof (category_name) - 1, -+#include "categories.def" -+#undef DEFINE_CATEGORY -+ [LC_ALL] = sizeof ("LC_ALL") - 1 -+ }; -diff --git a/locale/localeinfo.h b/locale/localeinfo.h -index fdc283c69a..4eeed35f90 100644 ---- a/locale/localeinfo.h -+++ b/locale/localeinfo.h -@@ -230,7 +230,7 @@ __libc_tsd_define (extern, locale_t, LOCALE) - unused. We can manage this playing some tricks with weak references. - But with thread-local locale settings, it becomes quite ungainly unless - we can use __thread variables. So only in that case do we attempt this. */ --#ifndef SHARED -+#if !defined SHARED && !defined IN_GLIBC_LOCALEDEF - # include - # define NL_CURRENT_INDIRECT 1 - #endif -diff --git a/locale/programs/charmap-dir.c b/locale/programs/charmap-dir.c -index 1a526a240d..0fb2daf936 100644 ---- a/locale/programs/charmap-dir.c -+++ b/locale/programs/charmap-dir.c -@@ -18,7 +18,9 @@ - #include - #include - #include -+#ifndef NO_UNCOMPRESS - #include -+#endif - #include - #include - #include -@@ -154,6 +156,7 @@ charmap_closedir (CHARMAP_DIR *cdir) - return closedir (dir); - } - -+#ifndef NO_UNCOMPRESS - /* Creates a subprocess decompressing the given pathname, and returns - a stream reading its output (the decompressed data). */ - static -@@ -202,6 +205,7 @@ fopen_uncompressed (const char *pathname, const char *compressor) - } - return NULL; - } -+#endif - - /* Opens a charmap for reading, given its name (not an alias name). 
*/ - FILE * -@@ -224,6 +228,7 @@ charmap_open (const char *directory, const char *name) - if (stream != NULL) - return stream; - -+#ifndef NO_UNCOMPRESS - memcpy (p, ".gz", 4); - stream = fopen_uncompressed (pathname, "gzip"); - if (stream != NULL) -@@ -233,6 +238,7 @@ charmap_open (const char *directory, const char *name) - stream = fopen_uncompressed (pathname, "bzip2"); - if (stream != NULL) - return stream; -+#endif - - return NULL; - } -diff --git a/locale/programs/ld-collate.c b/locale/programs/ld-collate.c -index feb1a11258..5a8e522470 100644 ---- a/locale/programs/ld-collate.c -+++ b/locale/programs/ld-collate.c -@@ -349,7 +349,7 @@ new_element (struct locale_collate_t *collate, const char *mbs, size_t mbslen, - } - if (wcs != NULL) - { -- size_t nwcs = wcslen ((wchar_t *) wcs); -+ size_t nwcs = wcslen_uint32 (wcs); - uint32_t zero = 0; - /* Handle as a single character. */ - if (nwcs == 0) -@@ -1772,8 +1772,7 @@ symbol `%s' has the same encoding as"), (*eptr)->name); - - if ((*eptr)->nwcs == runp->nwcs) - { -- int c = wmemcmp ((wchar_t *) (*eptr)->wcs, -- (wchar_t *) runp->wcs, runp->nwcs); -+ int c = wmemcmp_uint32 ((*eptr)->wcs, runp->wcs, runp->nwcs); - - if (c == 0) - { -@@ -2000,9 +1999,9 @@ add_to_tablewc (uint32_t ch, struct element_t *runp) - one consecutive entry. */ - if (runp->wcnext != NULL - && runp->nwcs == runp->wcnext->nwcs -- && wmemcmp ((wchar_t *) runp->wcs, -- (wchar_t *)runp->wcnext->wcs, -- runp->nwcs - 1) == 0 -+ && wmemcmp_uint32 (runp->wcs, -+ runp->wcnext->wcs, -+ runp->nwcs - 1) == 0 - && (runp->wcs[runp->nwcs - 1] - == runp->wcnext->wcs[runp->nwcs - 1] + 1)) - { -@@ -2026,9 +2025,9 @@ add_to_tablewc (uint32_t ch, struct element_t *runp) - runp = runp->wcnext; - while (runp->wcnext != NULL - && runp->nwcs == runp->wcnext->nwcs -- && wmemcmp ((wchar_t *) runp->wcs, -- (wchar_t *)runp->wcnext->wcs, -- runp->nwcs - 1) == 0 -+ && wmemcmp_uint32 (runp->wcs, -+ runp->wcnext->wcs, -+ runp->nwcs - 1) == 0 - && (runp->wcs[runp->nwcs - 1] - == runp->wcnext->wcs[runp->nwcs - 1] + 1)); - -diff --git a/locale/programs/ld-ctype.c b/locale/programs/ld-ctype.c -index 3328093d0e..d58fb0f4b7 100644 ---- a/locale/programs/ld-ctype.c -+++ b/locale/programs/ld-ctype.c -@@ -915,7 +915,7 @@ ctype_output (struct localedef_t *locale, const struct charmap_t *charmap, - allocate_arrays (ctype, charmap, ctype->repertoire); - - default_missing_len = (ctype->default_missing -- ? wcslen ((wchar_t *) ctype->default_missing) -+ ? wcslen_uint32 (ctype->default_missing) - : 0); - - init_locale_data (&file, nelems); -@@ -1927,7 +1927,7 @@ read_translit_entry (struct linereader *ldfile, struct locale_ctype_t *ctype, - ignore = 1; - else - /* This value is usable. 
*/ -- obstack_grow (ob, to_wstr, wcslen ((wchar_t *) to_wstr) * 4); -+ obstack_grow (ob, to_wstr, wcslen_uint32 (to_wstr) * 4); - - first = 0; - } -@@ -2461,8 +2461,8 @@ with character code range values one must use the absolute ellipsis `...'")); - } - - handle_tok_digit: -- class_bit = _ISwdigit; -- class256_bit = _ISdigit; -+ class_bit = BITw (tok_digit); -+ class256_bit = BIT (tok_digit); - handle_digits = 1; - goto read_charclass; - -@@ -3904,8 +3904,7 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, - - while (idx < number) - { -- int res = wcscmp ((const wchar_t *) sorted[idx]->from, -- (const wchar_t *) runp->from); -+ int res = wcscmp_uint32 (sorted[idx]->from, runp->from); - if (res == 0) - { - replace = 1; -@@ -3942,11 +3941,11 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, - for (size_t cnt = 0; cnt < number; ++cnt) - { - struct translit_to_t *srunp; -- from_len += wcslen ((const wchar_t *) sorted[cnt]->from) + 1; -+ from_len += wcslen_uint32 (sorted[cnt]->from) + 1; - srunp = sorted[cnt]->to; - while (srunp != NULL) - { -- to_len += wcslen ((const wchar_t *) srunp->str) + 1; -+ to_len += wcslen_uint32 (srunp->str) + 1; - srunp = srunp->next; - } - /* Plus one for the extra NUL character marking the end of -@@ -3970,18 +3969,18 @@ allocate_arrays (struct locale_ctype_t *ctype, const struct charmap_t *charmap, - ctype->translit_from_idx[cnt] = from_len; - ctype->translit_to_idx[cnt] = to_len; - -- len = wcslen ((const wchar_t *) sorted[cnt]->from) + 1; -- wmemcpy ((wchar_t *) &ctype->translit_from_tbl[from_len], -- (const wchar_t *) sorted[cnt]->from, len); -+ len = wcslen_uint32 (sorted[cnt]->from) + 1; -+ wmemcpy_uint32 (&ctype->translit_from_tbl[from_len], -+ sorted[cnt]->from, len); - from_len += len; - - ctype->translit_to_idx[cnt] = to_len; - srunp = sorted[cnt]->to; - while (srunp != NULL) - { -- len = wcslen ((const wchar_t *) srunp->str) + 1; -- wmemcpy ((wchar_t *) &ctype->translit_to_tbl[to_len], -- (const wchar_t *) srunp->str, len); -+ len = wcslen_uint32 (srunp->str) + 1; -+ wmemcpy_uint32 (&ctype->translit_to_tbl[to_len], -+ srunp->str, len); - to_len += len; - srunp = srunp->next; - } -diff --git a/locale/programs/ld-time.c b/locale/programs/ld-time.c -index f7db873adb..fec3773c9d 100644 ---- a/locale/programs/ld-time.c -+++ b/locale/programs/ld-time.c -@@ -220,8 +220,10 @@ No definition for %s category found"), "LC_TIME"); - } - else - { -+ static const uint32_t wt_fmt_ampm[] -+ = { '%','I',':','%','M',':','%','S',' ','%','p',0 }; - time->t_fmt_ampm = "%I:%M:%S %p"; -- time->wt_fmt_ampm = (const uint32_t *) L"%I:%M:%S %p"; -+ time->wt_fmt_ampm = wt_fmt_ampm; - } - } - -@@ -231,7 +233,7 @@ No definition for %s category found"), "LC_TIME"); - const int days_per_month[12] = { 31, 29, 31, 30, 31, 30, - 31, 31, 30, 31 ,30, 31 }; - size_t idx; -- wchar_t *wstr; -+ uint32_t *wstr; - - time->era_entries = - (struct era_data *) xmalloc (time->num_era -@@ -457,18 +459,18 @@ No definition for %s category found"), "LC_TIME"); - } - - /* Now generate the wide character name and format. */ -- wstr = wcschr ((wchar_t *) time->wera[idx], L':');/* end direction */ -- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end offset */ -- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end start */ -- wstr = wstr ? wcschr (wstr + 1, L':') : NULL; /* end end */ -+ wstr = wcschr_uint32 (time->wera[idx], L':'); /* end direction */ -+ wstr = wstr ? wcschr_uint32 (wstr + 1, L':') : NULL; /* end offset */ -+ wstr = wstr ? 
wcschr_uint32 (wstr + 1, L':') : NULL; /* end start */ -+ wstr = wstr ? wcschr_uint32 (wstr + 1, L':') : NULL; /* end end */ - if (wstr != NULL) - { -- time->era_entries[idx].wname = (uint32_t *) wstr + 1; -- wstr = wcschr (wstr + 1, L':'); /* end name */ -+ time->era_entries[idx].wname = wstr + 1; -+ wstr = wcschr_uint32 (wstr + 1, L':'); /* end name */ - if (wstr != NULL) - { - *wstr = L'\0'; -- time->era_entries[idx].wformat = (uint32_t *) wstr + 1; -+ time->era_entries[idx].wformat = wstr + 1; - } - else - time->era_entries[idx].wname = -@@ -527,7 +529,16 @@ No definition for %s category found"), "LC_TIME"); - if (time->date_fmt == NULL) - time->date_fmt = "%a %b %e %H:%M:%S %Z %Y"; - if (time->wdate_fmt == NULL) -- time->wdate_fmt = (const uint32_t *) L"%a %b %e %H:%M:%S %Z %Y"; -+ { -+ static const uint32_t wdate_fmt[] = -+ { '%','a',' ', -+ '%','b',' ', -+ '%','e',' ', -+ '%','H',':','%','M',':','%','S',' ', -+ '%','Z',' ', -+ '%','Y',0 }; -+ time->wdate_fmt = wdate_fmt; -+ } - } - - -diff --git a/locale/programs/linereader.c b/locale/programs/linereader.c -index 7ebd933801..5753750756 100644 ---- a/locale/programs/linereader.c -+++ b/locale/programs/linereader.c -@@ -595,7 +595,7 @@ get_string (struct linereader *lr, const struct charmap_t *charmap, - { - int return_widestr = lr->return_widestr; - char *buf; -- wchar_t *buf2 = NULL; -+ uint32_t *buf2 = NULL; - size_t bufact; - size_t bufmax = 56; - -diff --git a/locale/programs/localedef.c b/locale/programs/localedef.c -index dbbb0145c0..097a8b6193 100644 ---- a/locale/programs/localedef.c -+++ b/locale/programs/localedef.c -@@ -109,6 +109,7 @@ void (*argp_program_version_hook) (FILE *, struct argp_state *) = print_version; - #define OPT_NO_WARN 402 - #define OPT_WARN 403 - #define OPT_NO_HARD_LINKS 404 -+#define OPT_UINT32_ALIGN 405 - - /* Definitions of arguments for argp functions. */ - static const struct argp_option options[] = -@@ -153,6 +154,8 @@ static const struct argp_option options[] = - N_("Generate little-endian output") }, - { "big-endian", OPT_BIG_ENDIAN, NULL, 0, - N_("Generate big-endian output") }, -+ { "uint32-align", OPT_UINT32_ALIGN, "ALIGNMENT", 0, -+ N_("Set the target's uint32_t alignment in bytes (default 4)") }, - { NULL, 0, NULL, 0, NULL } - }; - -@@ -242,12 +245,14 @@ main (int argc, char *argv[]) - ctype locale. (P1003.2 4.35.5.2) */ - setlocale (LC_CTYPE, "POSIX"); - -+#ifndef NO_SYSCONF - /* Look whether the system really allows locale definitions. POSIX - defines error code 3 for this situation so I think it must be - a fatal error (see P1003.2 4.35.8). */ - if (sysconf (_SC_2_LOCALEDEF) < 0) - record_error (3, 0, _("\ - FATAL: system does not define `_POSIX2_LOCALEDEF'")); -+#endif - - /* Process charmap file. */ - charmap = charmap_read (charmap_file, verbose, 1, be_quiet, 1); -@@ -399,6 +404,9 @@ parse_opt (int key, char *arg, struct argp_state *state) - /* Do not hard link to other locales. */ - hard_links = false; - break; -+ case OPT_UINT32_ALIGN: -+ uint32_align_mask = strtol (arg, NULL, 0) - 1; -+ break; - case 'c': - force_output = 1; - break; -diff --git a/locale/programs/locfile.c b/locale/programs/locfile.c -index eb2f4634da..d387147323 100644 ---- a/locale/programs/locfile.c -+++ b/locale/programs/locfile.c -@@ -544,6 +544,9 @@ compare_files (const char *filename1, const char *filename2, size_t size, - machine running localedef. */ - bool swap_endianness_p; - -+/* The target's value of __align__(uint32_t) - 1. 
*/ -+unsigned int uint32_align_mask = 3; -+ - /* When called outside a start_locale_structure/end_locale_structure - or start_locale_prelude/end_locale_prelude block, record that the - next byte in FILE's obstack will be the first byte of a new element. -@@ -621,7 +624,7 @@ add_locale_string (struct locale_file *file, const char *string) - void - add_locale_wstring (struct locale_file *file, const uint32_t *string) - { -- add_locale_uint32_array (file, string, wcslen ((const wchar_t *) string) + 1); -+ add_locale_uint32_array (file, string, wcslen_uint32 (string) + 1); - } - - /* Record that FILE's next element is the 32-bit integer VALUE. */ -diff --git a/locale/programs/locfile.h b/locale/programs/locfile.h -index 7ea10038b4..0888c09762 100644 ---- a/locale/programs/locfile.h -+++ b/locale/programs/locfile.h -@@ -71,6 +71,8 @@ extern void write_all_categories (struct localedef_t *definitions, - - extern bool swap_endianness_p; - -+extern unsigned int uint32_align_mask; -+ - /* Change the output to be big-endian if BIG_ENDIAN is true and - little-endian otherwise. */ - static inline void -@@ -89,7 +91,8 @@ maybe_swap_uint32 (uint32_t value) - } - - /* Likewise, but munge an array of N uint32_ts starting at ARRAY. */ --static inline void -+static void -+__attribute__ ((unused)) - maybe_swap_uint32_array (uint32_t *array, size_t n) - { - if (swap_endianness_p) -@@ -99,7 +102,8 @@ maybe_swap_uint32_array (uint32_t *array, size_t n) - - /* Like maybe_swap_uint32_array, but the array of N elements is at - the end of OBSTACK's current object. */ --static inline void -+static void -+__attribute__ ((unused)) - maybe_swap_uint32_obstack (struct obstack *obstack, size_t n) - { - maybe_swap_uint32_array ((uint32_t *) obstack_next_free (obstack) - n, n); -@@ -276,4 +280,55 @@ extern void identification_output (struct localedef_t *locale, - const struct charmap_t *charmap, - const char *output_path); - -+static size_t wcslen_uint32 (const uint32_t *str) __attribute__ ((unused)); -+static uint32_t * wmemcpy_uint32 (uint32_t *s1, const uint32_t *s2, size_t n) __attribute__ ((unused)); -+static uint32_t * wcschr_uint32 (const uint32_t *s, uint32_t ch) __attribute__ ((unused)); -+static int wcscmp_uint32 (const uint32_t *s1, const uint32_t *s2) __attribute__ ((unused)); -+static int wmemcmp_uint32 (const uint32_t *s1, const uint32_t *s2, size_t n) __attribute__ ((unused)); -+ -+static size_t -+wcslen_uint32 (const uint32_t *str) -+{ -+ size_t len = 0; -+ while (str[len] != 0) -+ len++; -+ return len; -+} -+ -+static int -+wmemcmp_uint32 (const uint32_t *s1, const uint32_t *s2, size_t n) -+{ -+ while (n-- != 0) -+ { -+ int diff = *s1++ - *s2++; -+ if (diff != 0) -+ return diff; -+ } -+ return 0; -+} -+ -+static int -+wcscmp_uint32 (const uint32_t *s1, const uint32_t *s2) -+{ -+ while (*s1 != 0 && *s1 == *s2) -+ s1++, s2++; -+ return *s1 - *s2; -+} -+ -+static uint32_t * -+wmemcpy_uint32 (uint32_t *s1, const uint32_t *s2, size_t n) -+{ -+ return memcpy (s1, s2, n * sizeof (uint32_t)); -+} -+ -+static uint32_t * -+wcschr_uint32 (const uint32_t *s, uint32_t ch) -+{ -+ do -+ if (*s == ch) -+ return (uint32_t *) s; -+ while (*s++ != 0); -+ return 0; -+} -+ - #endif /* locfile.h */ -diff --git a/locale/setlocale.c b/locale/setlocale.c -index 030f1727bd..096d8ed895 100644 ---- a/locale/setlocale.c -+++ b/locale/setlocale.c -@@ -63,35 +63,6 @@ static char *const _nl_current_used[] = - - #endif - -- --/* Define an array of category names (also the environment variable names). 
*/ --const struct catnamestr_t _nl_category_names attribute_hidden = -- { --#define DEFINE_CATEGORY(category, category_name, items, a) \ -- category_name, --#include "categories.def" --#undef DEFINE_CATEGORY -- }; -- --const uint8_t _nl_category_name_idxs[__LC_LAST] attribute_hidden = -- { --#define DEFINE_CATEGORY(category, category_name, items, a) \ -- [category] = offsetof (struct catnamestr_t, CATNAMEMF (__LINE__)), --#include "categories.def" --#undef DEFINE_CATEGORY -- }; -- --/* An array of their lengths, for convenience. */ --const uint8_t _nl_category_name_sizes[] attribute_hidden = -- { --#define DEFINE_CATEGORY(category, category_name, items, a) \ -- [category] = sizeof (category_name) - 1, --#include "categories.def" --#undef DEFINE_CATEGORY -- [LC_ALL] = sizeof ("LC_ALL") - 1 -- }; -- -- - #ifdef NL_CURRENT_INDIRECT - # define WEAK_POSTLOAD(postload) weak_extern (postload) - #else diff --git a/poky/meta/recipes-core/glibc/glibc/0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch b/poky/meta/recipes-core/glibc/glibc/0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch new file mode 100644 index 000000000..7cbf6811a --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch @@ -0,0 +1,84 @@ +From c5fd8a3d336b8288f631fb6b6dd85f9d1076f25b Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 3 Aug 2018 09:42:06 -0700 +Subject: [PATCH 23/29] localedef --add-to-archive uses a hard-coded locale + path + +it doesn't exist in normal use, and there's no way to pass an +alternative filename. + +Add a fallback of $LOCALEARCHIVE from the environment, and allow +creation of new locale archives that are not the system archive. + +Upstream-Status: Inappropriate (OE-specific) + +Signed-off-by: Ross Burton +Signed-off-by: Khem Raj +--- + locale/programs/locarchive.c | 35 +++++++++++++++++++++++++---------- + 1 file changed, 25 insertions(+), 10 deletions(-) + +diff --git a/locale/programs/locarchive.c b/locale/programs/locarchive.c +index dccaf04e3b..ae0b7fe155 100644 +--- a/locale/programs/locarchive.c ++++ b/locale/programs/locarchive.c +@@ -340,12 +340,24 @@ enlarge_archive (struct locarhandle *ah, const struct locarhead *head) + struct namehashent *oldnamehashtab; + struct locarhandle new_ah; + size_t prefix_len = output_prefix ? strlen (output_prefix) : 0; +- char archivefname[prefix_len + sizeof (ARCHIVE_NAME)]; +- char fname[prefix_len + sizeof (ARCHIVE_NAME) + sizeof (".XXXXXX") - 1]; ++ char *archivefname; ++ char *fname; ++ char *envarchive = getenv("LOCALEARCHIVE"); + +- if (output_prefix) +- memcpy (archivefname, output_prefix, prefix_len); +- strcpy (archivefname + prefix_len, ARCHIVE_NAME); ++ if (envarchive != NULL) ++ { ++ archivefname = xmalloc(strlen(envarchive) + 1); ++ fname = xmalloc(strlen(envarchive) + sizeof (".XXXXXX")); ++ strcpy (archivefname, envarchive); ++ } ++ else ++ { ++ archivefname = xmalloc(prefix_len + sizeof (ARCHIVE_NAME)); ++ fname = xmalloc(prefix_len + sizeof (ARCHIVE_NAME) + sizeof (".XXXXXX") - 1); ++ if (output_prefix) ++ memcpy (archivefname, output_prefix, prefix_len); ++ strcpy (archivefname + prefix_len, ARCHIVE_NAME); ++ } + strcpy (stpcpy (fname, archivefname), ".XXXXXX"); + + /* Not all of the old file has to be mapped. Change this now this +@@ -569,10 +581,13 @@ open_archive (struct locarhandle *ah, bool readonly) + /* If ah has a non-NULL fname open that otherwise open the default. 
*/ + if (archivefname == NULL) + { +- archivefname = default_fname; +- if (output_prefix) +- memcpy (default_fname, output_prefix, prefix_len); +- strcpy (default_fname + prefix_len, ARCHIVE_NAME); ++ archivefname = getenv("LOCALEARCHIVE"); ++ if (archivefname == NULL) { ++ archivefname = default_fname; ++ if (output_prefix) ++ memcpy (default_fname, output_prefix, prefix_len); ++ strcpy (default_fname + prefix_len, ARCHIVE_NAME); ++ } + } + + while (1) +@@ -585,7 +600,7 @@ open_archive (struct locarhandle *ah, bool readonly) + the default locale archive we ignore the failure and + list an empty archive, otherwise we print an error + and exit. */ +- if (errno == ENOENT && archivefname == default_fname) ++ if (errno == ENOENT) + { + if (readonly) + { +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0024-Define-DUMMY_LOCALE_T-if-not-defined.patch b/poky/meta/recipes-core/glibc/glibc/0024-Define-DUMMY_LOCALE_T-if-not-defined.patch deleted file mode 100644 index 15e460eb0..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0024-Define-DUMMY_LOCALE_T-if-not-defined.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 067f71a381ce6626ef1179be3dd90c4ed2aa52fb Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 20 Apr 2016 21:11:00 -0700 -Subject: [PATCH] Define DUMMY_LOCALE_T if not defined - -This is a hack to fix building the locale bits on an older -CentOs 5.X machine - -Upstream-Status: Inappropriate [other] - -Signed-off-by: Khem Raj ---- - locale/programs/config.h | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/locale/programs/config.h b/locale/programs/config.h -index d76d9f7e8b..92b56d7df3 100644 ---- a/locale/programs/config.h -+++ b/locale/programs/config.h -@@ -19,6 +19,9 @@ - #ifndef _LD_CONFIG_H - #define _LD_CONFIG_H 1 - -+#ifndef DUMMY_LOCALE_T -+#define DUMMY_LOCALE_T -+#endif - /* Use the internal textdomain used for libc messages. */ - #define PACKAGE _libc_intl_domainname - #ifndef VERSION diff --git a/poky/meta/recipes-core/glibc/glibc/0024-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch b/poky/meta/recipes-core/glibc/glibc/0024-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch new file mode 100644 index 000000000..02e73594a --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0024-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch @@ -0,0 +1,56 @@ +From dffa52364f8c54c455b2459ebe83f05cb6ffc9fc Mon Sep 17 00:00:00 2001 +From: Mark Hatle +Date: Thu, 18 Aug 2016 14:07:58 -0500 +Subject: [PATCH 24/29] elf/dl-deps.c: Make _dl_build_local_scope breadth first + +According to the ELF specification: + +When resolving symbolic references, the dynamic linker examines the symbol +tables with a breadth-first search. + +This function was using a depth first search. By doing so the conflict +resolution reported to the prelinker (when LD_TRACE_PRELINKING=1 is set) +was incorrect. This caused problems when their were various circular +dependencies between libraries. The problem usually manifested itself by +the wrong IFUNC being executed. 
+ +[BZ# 20488] + +Upstream-Status: Submitted [libc-alpha] + +Signed-off-by: Mark Hatle +--- + elf/dl-deps.c | 14 ++++++++++---- + 1 file changed, 10 insertions(+), 4 deletions(-) + +diff --git a/elf/dl-deps.c b/elf/dl-deps.c +index b5a43232a7..8aa8f37fa3 100644 +--- a/elf/dl-deps.c ++++ b/elf/dl-deps.c +@@ -73,13 +73,19 @@ _dl_build_local_scope (struct link_map **list, struct link_map *map) + { + struct link_map **p = list; + struct link_map **q; ++ struct link_map **r; + + *p++ = map; + map->l_reserved = 1; +- if (map->l_initfini) +- for (q = map->l_initfini + 1; *q; ++q) +- if (! (*q)->l_reserved) +- p += _dl_build_local_scope (p, *q); ++ ++ for (r = list; r < p; ++r) ++ if ((*r)->l_initfini) ++ for (q = (*r)->l_initfini + 1; *q; ++q) ++ if (! (*q)->l_reserved) ++ { ++ *p++ = *q; ++ (*q)->l_reserved = 1; ++ } + return p - list; + } + +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0025-intl-Emit-no-lines-in-bison-generated-files.patch b/poky/meta/recipes-core/glibc/glibc/0025-intl-Emit-no-lines-in-bison-generated-files.patch new file mode 100644 index 000000000..700fb2887 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0025-intl-Emit-no-lines-in-bison-generated-files.patch @@ -0,0 +1,34 @@ +From bc3380877bd2a1f9368a913fa6a2ca1ee88fd95f Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 3 Aug 2018 09:44:00 -0700 +Subject: [PATCH 25/29] intl: Emit no lines in bison generated files + +Improve reproducibility: +Do not put any #line preprocessor commands in bison generated files. +These lines contain absolute paths containing file locations on +the host build machine. + +Upstream-Status: Pending + +Signed-off-by: Juro Bystricky +Signed-off-by: Khem Raj +--- + intl/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/intl/Makefile b/intl/Makefile +index ce3072420f..58457840d9 100644 +--- a/intl/Makefile ++++ b/intl/Makefile +@@ -155,7 +155,7 @@ $(objpfx)tst-gettext6.out: $(objpfx)tst-gettext.out + + CPPFLAGS += -D'LOCALEDIR="$(localedir)"' \ + -D'LOCALE_ALIAS_PATH="$(localedir)"' +-BISONFLAGS = --yacc --name-prefix=__gettext --output ++BISONFLAGS = --yacc --no-lines --name-prefix=__gettext --output + + $(inst_localedir)/locale.alias: locale.alias $(+force) + $(do-install) +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch b/poky/meta/recipes-core/glibc/glibc/0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch deleted file mode 100644 index 543f65d6e..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 8f4b0bac85d14b184e08848b02de3f30775f05b1 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 3 Aug 2018 09:42:06 -0700 -Subject: [PATCH] localedef --add-to-archive uses a hard-coded locale path - -it doesn't exist in normal use, and there's no way to pass an -alternative filename. - -Add a fallback of $LOCALEARCHIVE from the environment, and allow -creation of new locale archives that are not the system archive. 
- -Upstream-Status: Inappropriate (OE-specific) - -Signed-off-by: Ross Burton -Signed-off-by: Khem Raj ---- - locale/programs/locarchive.c | 35 +++++++++++++++++++++++++---------- - 1 file changed, 25 insertions(+), 10 deletions(-) - -diff --git a/locale/programs/locarchive.c b/locale/programs/locarchive.c -index dccaf04e3b..ae0b7fe155 100644 ---- a/locale/programs/locarchive.c -+++ b/locale/programs/locarchive.c -@@ -340,12 +340,24 @@ enlarge_archive (struct locarhandle *ah, const struct locarhead *head) - struct namehashent *oldnamehashtab; - struct locarhandle new_ah; - size_t prefix_len = output_prefix ? strlen (output_prefix) : 0; -- char archivefname[prefix_len + sizeof (ARCHIVE_NAME)]; -- char fname[prefix_len + sizeof (ARCHIVE_NAME) + sizeof (".XXXXXX") - 1]; -+ char *archivefname; -+ char *fname; -+ char *envarchive = getenv("LOCALEARCHIVE"); - -- if (output_prefix) -- memcpy (archivefname, output_prefix, prefix_len); -- strcpy (archivefname + prefix_len, ARCHIVE_NAME); -+ if (envarchive != NULL) -+ { -+ archivefname = xmalloc(strlen(envarchive) + 1); -+ fname = xmalloc(strlen(envarchive) + sizeof (".XXXXXX")); -+ strcpy (archivefname, envarchive); -+ } -+ else -+ { -+ archivefname = xmalloc(prefix_len + sizeof (ARCHIVE_NAME)); -+ fname = xmalloc(prefix_len + sizeof (ARCHIVE_NAME) + sizeof (".XXXXXX") - 1); -+ if (output_prefix) -+ memcpy (archivefname, output_prefix, prefix_len); -+ strcpy (archivefname + prefix_len, ARCHIVE_NAME); -+ } - strcpy (stpcpy (fname, archivefname), ".XXXXXX"); - - /* Not all of the old file has to be mapped. Change this now this -@@ -569,10 +581,13 @@ open_archive (struct locarhandle *ah, bool readonly) - /* If ah has a non-NULL fname open that otherwise open the default. */ - if (archivefname == NULL) - { -- archivefname = default_fname; -- if (output_prefix) -- memcpy (default_fname, output_prefix, prefix_len); -- strcpy (default_fname + prefix_len, ARCHIVE_NAME); -+ archivefname = getenv("LOCALEARCHIVE"); -+ if (archivefname == NULL) { -+ archivefname = default_fname; -+ if (output_prefix) -+ memcpy (default_fname, output_prefix, prefix_len); -+ strcpy (default_fname + prefix_len, ARCHIVE_NAME); -+ } - } - - while (1) -@@ -585,7 +600,7 @@ open_archive (struct locarhandle *ah, bool readonly) - the default locale archive we ignore the failure and - list an empty archive, otherwise we print an error - and exit. */ -- if (errno == ENOENT && archivefname == default_fname) -+ if (errno == ENOENT) - { - if (readonly) - { diff --git a/poky/meta/recipes-core/glibc/glibc/0026-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch b/poky/meta/recipes-core/glibc/glibc/0026-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch deleted file mode 100644 index 5f6ee40f2..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0026-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 1dad746ce93928a57b2fe618c74722f710751826 Mon Sep 17 00:00:00 2001 -From: Mark Hatle -Date: Thu, 18 Aug 2016 14:07:58 -0500 -Subject: [PATCH] elf/dl-deps.c: Make _dl_build_local_scope breadth first - -According to the ELF specification: - -When resolving symbolic references, the dynamic linker examines the symbol -tables with a breadth-first search. - -This function was using a depth first search. By doing so the conflict -resolution reported to the prelinker (when LD_TRACE_PRELINKING=1 is set) -was incorrect. This caused problems when their were various circular -dependencies between libraries. 
The problem usually manifested itself by -the wrong IFUNC being executed. - -[BZ# 20488] - -Upstream-Status: Submitted [libc-alpha] - -Signed-off-by: Mark Hatle ---- - elf/dl-deps.c | 14 ++++++++++---- - 1 file changed, 10 insertions(+), 4 deletions(-) - -diff --git a/elf/dl-deps.c b/elf/dl-deps.c -index 5103a8a111..54cd80aaff 100644 ---- a/elf/dl-deps.c -+++ b/elf/dl-deps.c -@@ -73,13 +73,19 @@ _dl_build_local_scope (struct link_map **list, struct link_map *map) - { - struct link_map **p = list; - struct link_map **q; -+ struct link_map **r; - - *p++ = map; - map->l_reserved = 1; -- if (map->l_initfini) -- for (q = map->l_initfini + 1; *q; ++q) -- if (! (*q)->l_reserved) -- p += _dl_build_local_scope (p, *q); -+ -+ for (r = list; r < p; ++r) -+ if ((*r)->l_initfini) -+ for (q = (*r)->l_initfini + 1; *q; ++q) -+ if (! (*q)->l_reserved) -+ { -+ *p++ = *q; -+ (*q)->l_reserved = 1; -+ } - return p - list; - } - diff --git a/poky/meta/recipes-core/glibc/glibc/0026-inject-file-assembly-directives.patch b/poky/meta/recipes-core/glibc/glibc/0026-inject-file-assembly-directives.patch new file mode 100644 index 000000000..89eef82e1 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0026-inject-file-assembly-directives.patch @@ -0,0 +1,240 @@ +From cf043d56638883157dbfaa8c6a8e2f63a6f66280 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Thu, 29 Nov 2018 17:29:35 -0800 +Subject: [PATCH 26/29] inject file assembly directives + +Currently, non-IA builds are not reproducibile since build paths are +being injected into the debug symbols. These are coming from the use of +.S assembler files during the glibc build. No STT_FILE section is added +during the assembly but when linking, ld decides to add one to aid +debugging and ensure references between the different object files its +linking remain clear. + +We can avoid this by injecting a file header into the assembler files +ahead of time, choosing a filename which does not contain build system +paths. + +This is a bit of a workaround/hack but does significantly reduce the +build system references in target binaries for the non-IA architectures +which use .S files. + +RP +2018/10/3 + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + csu/abi-note.c | 2 ++ + sysdeps/aarch64/crti.S | 2 ++ + sysdeps/aarch64/crtn.S | 1 + + sysdeps/aarch64/dl-tlsdesc.S | 2 ++ + sysdeps/aarch64/dl-trampoline.S | 2 ++ + sysdeps/aarch64/start.S | 2 ++ + sysdeps/arm/crti.S | 2 ++ + sysdeps/arm/crtn.S | 2 ++ + sysdeps/arm/dl-tlsdesc.S | 2 ++ + sysdeps/arm/dl-trampoline.S | 2 ++ + sysdeps/arm/start.S | 2 ++ + sysdeps/mips/start.S | 2 ++ + sysdeps/powerpc/powerpc32/dl-start.S | 2 ++ + sysdeps/powerpc/powerpc32/start.S | 2 ++ + sysdeps/powerpc/powerpc64/start.S | 2 ++ + sysdeps/unix/sysv/linux/aarch64/__read_tp.S | 2 ++ + sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S | 2 ++ + sysdeps/unix/sysv/linux/arm/libc-do-syscall.S | 2 ++ + sysdeps/unix/sysv/linux/powerpc/dl-brk.S | 2 ++ + 19 files changed, 37 insertions(+) + +--- a/sysdeps/aarch64/crti.S ++++ b/sysdeps/aarch64/crti.S +@@ -50,6 +50,8 @@ + # define PREINIT_FUNCTION_WEAK 1 + #endif + ++ .file "crti.S" ++ + #if PREINIT_FUNCTION_WEAK + weak_extern (PREINIT_FUNCTION) + #else +--- a/sysdeps/aarch64/crtn.S ++++ b/sysdeps/aarch64/crtn.S +@@ -38,6 +38,7 @@ + corresponding to the prologues in crti.S. 
*/ + + #include ++ .file "crtn.S" + + .section .init,"ax",%progbits + ldp x29, x30, [sp], 16 +--- a/sysdeps/aarch64/dl-tlsdesc.S ++++ b/sysdeps/aarch64/dl-tlsdesc.S +@@ -22,6 +22,8 @@ + #include + #include "tlsdesc.h" + ++ .file "dl-tlsdesc.S" ++ + #define NSAVEDQREGPAIRS 16 + #define SAVE_Q_REGISTERS \ + stp q0, q1, [sp, #-32*NSAVEDQREGPAIRS]!; \ +--- a/sysdeps/aarch64/dl-trampoline.S ++++ b/sysdeps/aarch64/dl-trampoline.S +@@ -21,6 +21,8 @@ + + #include "dl-link.h" + ++ .file "dl-trampoline.S" ++ + #define ip0 x16 + #define ip0l PTR_REG (16) + #define ip1 x17 +--- a/sysdeps/aarch64/start.S ++++ b/sysdeps/aarch64/start.S +@@ -18,6 +18,8 @@ + + #include + ++ .file "start.S" ++ + /* This is the canonical entry point, usually the first thing in the text + segment. + +--- a/sysdeps/arm/crti.S ++++ b/sysdeps/arm/crti.S +@@ -57,6 +57,8 @@ + .hidden PREINIT_FUNCTION + #endif + ++ .file "crti.S" ++ + #if PREINIT_FUNCTION_WEAK + .p2align 2 + .type call_weak_fn, %function +--- a/sysdeps/arm/crtn.S ++++ b/sysdeps/arm/crtn.S +@@ -37,6 +37,8 @@ + #define NO_THUMB + #include + ++ .file "crtn.S" ++ + /* crtn.S puts function epilogues in the .init and .fini sections + corresponding to the prologues in crti.S. */ + +--- a/sysdeps/arm/dl-tlsdesc.S ++++ b/sysdeps/arm/dl-tlsdesc.S +@@ -21,6 +21,8 @@ + #include + #include "tlsdesc.h" + ++ .file "dl-tlsdesc.S" ++ + .text + @ emit debug information with cfi + @ use arm-specific pseudos for unwinding itself +--- a/sysdeps/arm/dl-trampoline.S ++++ b/sysdeps/arm/dl-trampoline.S +@@ -21,6 +21,8 @@ + #include + #include + ++ .file "dl-trampoline.S" ++ + .text + .globl _dl_runtime_resolve + .type _dl_runtime_resolve, #function +--- a/sysdeps/arm/start.S ++++ b/sysdeps/arm/start.S +@@ -57,6 +57,8 @@ + NULL + */ + ++ .file "start.S" ++ + /* Tag_ABI_align8_preserved: This code preserves 8-byte + alignment in any callee. */ + .eabi_attribute 25, 1 +--- a/sysdeps/mips/start.S ++++ b/sysdeps/mips/start.S +@@ -38,6 +38,8 @@ + #include + #include + ++ .file "start.S" ++ + #ifndef ENTRY_POINT + #error ENTRY_POINT needs to be defined for start.S on MIPS/ELF. + #endif +--- a/sysdeps/powerpc/powerpc32/dl-start.S ++++ b/sysdeps/powerpc/powerpc32/dl-start.S +@@ -18,6 +18,8 @@ + + #include + ++ .file "dl-start.S" ++ + /* Initial entry point code for the dynamic linker. + The C function `_dl_start' is the real entry point; + its return value is the user program's entry point. */ +--- a/sysdeps/powerpc/powerpc32/start.S ++++ b/sysdeps/powerpc/powerpc32/start.S +@@ -35,6 +35,8 @@ + + #include + ++ .file "start.S" ++ + /* We do not want .eh_frame info for crt1.o since crt1.o is linked + before crtbegin.o, the file defining __EH_FRAME_BEGIN__. */ + #undef cfi_startproc +--- a/sysdeps/powerpc/powerpc64/start.S ++++ b/sysdeps/powerpc/powerpc64/start.S +@@ -35,6 +35,8 @@ + + #include + ++ .file "start.S" ++ + /* We do not want .eh_frame info for crt1.o since crt1.o is linked + before crtbegin.o, the file defining __EH_FRAME_BEGIN__. */ + #undef cfi_startproc +--- a/sysdeps/unix/sysv/linux/aarch64/__read_tp.S ++++ b/sysdeps/unix/sysv/linux/aarch64/__read_tp.S +@@ -18,6 +18,8 @@ + + #include + ++ .file "__read_tp.S" ++ + .hidden __read_tp + ENTRY (__read_tp) + mrs x0, tpidr_el0 +--- a/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S ++++ b/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S +@@ -39,6 +39,8 @@ + a normal function call) in a high page of memory; tail call to the + helper. 
*/ + ++ .file "aeabi_read_tp.S" ++ + .hidden __aeabi_read_tp + ENTRY (__aeabi_read_tp) + #ifdef ARCH_HAS_HARD_TP +--- a/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S ++++ b/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S +@@ -27,6 +27,8 @@ + ARM unwind tables for register to register moves, the actual opcodes + are not defined. */ + ++ .file "libc-do-syscall.S" ++ + #if defined(__thumb__) + .thumb + .syntax unified +--- a/sysdeps/unix/sysv/linux/powerpc/dl-brk.S ++++ b/sysdeps/unix/sysv/linux/powerpc/dl-brk.S +@@ -1 +1,3 @@ ++ .file "dl-brk.S" ++ + #include diff --git a/poky/meta/recipes-core/glibc/glibc/0027-intl-Emit-no-lines-in-bison-generated-files.patch b/poky/meta/recipes-core/glibc/glibc/0027-intl-Emit-no-lines-in-bison-generated-files.patch deleted file mode 100644 index 2c317d58a..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0027-intl-Emit-no-lines-in-bison-generated-files.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 3c8d9eae83ab4f1677afc9b379f97114e8503363 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 3 Aug 2018 09:44:00 -0700 -Subject: [PATCH] intl: Emit no lines in bison generated files - -Improve reproducibility: -Do not put any #line preprocessor commands in bison generated files. -These lines contain absolute paths containing file locations on -the host build machine. - -Upstream-Status: Pending - -Signed-off-by: Juro Bystricky -Signed-off-by: Khem Raj ---- - intl/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/intl/Makefile b/intl/Makefile -index ce3072420f..58457840d9 100644 ---- a/intl/Makefile -+++ b/intl/Makefile -@@ -155,7 +155,7 @@ $(objpfx)tst-gettext6.out: $(objpfx)tst-gettext.out - - CPPFLAGS += -D'LOCALEDIR="$(localedir)"' \ - -D'LOCALE_ALIAS_PATH="$(localedir)"' --BISONFLAGS = --yacc --name-prefix=__gettext --output -+BISONFLAGS = --yacc --no-lines --name-prefix=__gettext --output - - $(inst_localedir)/locale.alias: locale.alias $(+force) - $(do-install) diff --git a/poky/meta/recipes-core/glibc/glibc/0027-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch b/poky/meta/recipes-core/glibc/glibc/0027-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch new file mode 100644 index 000000000..d2c88d235 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0027-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch @@ -0,0 +1,56 @@ +From 10aef8f0671d814aaf910ababc0225cf6f0a46e8 Mon Sep 17 00:00:00 2001 +From: Martin Jansa +Date: Mon, 17 Dec 2018 21:36:18 +0000 +Subject: [PATCH 27/29] locale: prevent maybe-uninitialized errors with -Os [BZ + #19444] + +Fixes following error when building for aarch64 with -Os: +| In file included from strcoll_l.c:43: +| strcoll_l.c: In function '__strcoll_l': +| ../locale/weight.h:31:26: error: 'seq2.back_us' may be used uninitialized in this function [-Werror=maybe-uninitialized] +| int_fast32_t i = table[*(*cpp)++]; +| ^~~~~~~~~ +| strcoll_l.c:304:18: note: 'seq2.back_us' was declared here +| coll_seq seq1, seq2; +| ^~~~ +| In file included from strcoll_l.c:43: +| ../locale/weight.h:31:26: error: 'seq1.back_us' may be used uninitialized in this function [-Werror=maybe-uninitialized] +| int_fast32_t i = table[*(*cpp)++]; +| ^~~~~~~~~ +| strcoll_l.c:304:12: note: 'seq1.back_us' was declared here +| coll_seq seq1, seq2; +| ^~~~ + + Partial fix for [BZ #19444] + * locale/weight.h: Fix build with -Os. 
+ +Upstream-Status: Submitted [https://patchwork.ozlabs.org/patch/1014766] + +Signed-off-by: Martin Jansa +Signed-off-by: Khem Raj +--- + locale/weight.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +diff --git a/locale/weight.h b/locale/weight.h +index e071253f85..2889c395f1 100644 +--- a/locale/weight.h ++++ b/locale/weight.h +@@ -28,7 +28,14 @@ findidx (const int32_t *table, + const unsigned char *extra, + const unsigned char **cpp, size_t len) + { ++ /* With GCC 8 when compiling with -Os the compiler warns that ++ seq1.back_us and seq2.back_us might be used uninitialized. ++ This uninitialized use is impossible for the same reason ++ as described in comments in locale/weightwc.h. */ ++ DIAG_PUSH_NEEDS_COMMENT; ++ DIAG_IGNORE_Os_NEEDS_COMMENT (8, "-Wmaybe-uninitialized"); + int_fast32_t i = table[*(*cpp)++]; ++ DIAG_POP_NEEDS_COMMENT; + const unsigned char *cp; + const unsigned char *usrc; + +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0028-inject-file-assembly-directives.patch b/poky/meta/recipes-core/glibc/glibc/0028-inject-file-assembly-directives.patch deleted file mode 100644 index e1a030d10..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0028-inject-file-assembly-directives.patch +++ /dev/null @@ -1,301 +0,0 @@ -From 420454f7098b5445730caa855c37b8143bfccc1b Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Thu, 29 Nov 2018 17:29:35 -0800 -Subject: [PATCH] inject file assembly directives - -Currently, non-IA builds are not reproducibile since build paths are -being injected into the debug symbols. These are coming from the use of -.S assembler files during the glibc build. No STT_FILE section is added -during the assembly but when linking, ld decides to add one to aid -debugging and ensure references between the different object files its -linking remain clear. - -We can avoid this by injecting a file header into the assembler files -ahead of time, choosing a filename which does not contain build system -paths. - -This is a bit of a workaround/hack but does significantly reduce the -build system references in target binaries for the non-IA architectures -which use .S files. - -RP -2018/10/3 - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - csu/abi-note.S | 2 ++ - sysdeps/aarch64/crti.S | 2 ++ - sysdeps/aarch64/crtn.S | 2 ++ - sysdeps/aarch64/dl-tlsdesc.S | 2 ++ - sysdeps/aarch64/dl-trampoline.S | 2 ++ - sysdeps/aarch64/start.S | 2 ++ - sysdeps/arm/abi-note.S | 2 ++ - sysdeps/arm/crti.S | 2 ++ - sysdeps/arm/crtn.S | 2 ++ - sysdeps/arm/dl-tlsdesc.S | 2 ++ - sysdeps/arm/dl-trampoline.S | 2 ++ - sysdeps/arm/start.S | 2 ++ - sysdeps/mips/start.S | 2 ++ - sysdeps/powerpc/powerpc32/dl-start.S | 2 ++ - sysdeps/powerpc/powerpc32/start.S | 2 ++ - sysdeps/powerpc/powerpc64/start.S | 2 ++ - sysdeps/unix/sysv/linux/aarch64/__read_tp.S | 2 ++ - sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S | 2 ++ - sysdeps/unix/sysv/linux/arm/libc-do-syscall.S | 2 ++ - sysdeps/unix/sysv/linux/powerpc/dl-brk.S | 2 ++ - 20 files changed, 40 insertions(+) - -diff --git a/csu/abi-note.S b/csu/abi-note.S -index 2b4b5f8824..964843016c 100644 ---- a/csu/abi-note.S -+++ b/csu/abi-note.S -@@ -56,6 +56,8 @@ offset length contents - #include - #include /* OS-specific ABI tag value */ - -+ .file "abi-note.S" -+ - /* The linker (GNU ld 2.8 and later) recognizes an allocated section whose - name begins with `.note' and creates a PT_NOTE program header entry - pointing at it. 
*/ -diff --git a/sysdeps/aarch64/crti.S b/sysdeps/aarch64/crti.S -index 1728eac37a..76a8ab590b 100644 ---- a/sysdeps/aarch64/crti.S -+++ b/sysdeps/aarch64/crti.S -@@ -50,6 +50,8 @@ - # define PREINIT_FUNCTION_WEAK 1 - #endif - -+ .file "crti.S" -+ - #if PREINIT_FUNCTION_WEAK - weak_extern (PREINIT_FUNCTION) - #else -diff --git a/sysdeps/aarch64/crtn.S b/sysdeps/aarch64/crtn.S -index c3e97cc449..fc2e5c2df8 100644 ---- a/sysdeps/aarch64/crtn.S -+++ b/sysdeps/aarch64/crtn.S -@@ -37,6 +37,8 @@ - /* crtn.S puts function epilogues in the .init and .fini sections - corresponding to the prologues in crti.S. */ - -+ .file "crtn.S" -+ - .section .init,"ax",%progbits - ldp x29, x30, [sp], 16 - RET -diff --git a/sysdeps/aarch64/dl-tlsdesc.S b/sysdeps/aarch64/dl-tlsdesc.S -index 557ad1d505..194a8531da 100644 ---- a/sysdeps/aarch64/dl-tlsdesc.S -+++ b/sysdeps/aarch64/dl-tlsdesc.S -@@ -22,6 +22,8 @@ - #include - #include "tlsdesc.h" - -+ .file "dl-tlsdesc.S" -+ - #define NSAVEDQREGPAIRS 16 - #define SAVE_Q_REGISTERS \ - stp q0, q1, [sp, #-32*NSAVEDQREGPAIRS]!; \ -diff --git a/sysdeps/aarch64/dl-trampoline.S b/sysdeps/aarch64/dl-trampoline.S -index 94e965c096..ba18742f3e 100644 ---- a/sysdeps/aarch64/dl-trampoline.S -+++ b/sysdeps/aarch64/dl-trampoline.S -@@ -21,6 +21,8 @@ - - #include "dl-link.h" - -+ .file "dl-trampoline.S" -+ - #define ip0 x16 - #define ip0l PTR_REG (16) - #define ip1 x17 -diff --git a/sysdeps/aarch64/start.S b/sysdeps/aarch64/start.S -index d96cf57e2d..07a523978a 100644 ---- a/sysdeps/aarch64/start.S -+++ b/sysdeps/aarch64/start.S -@@ -18,6 +18,8 @@ - - #include - -+ .file "start.S" -+ - /* This is the canonical entry point, usually the first thing in the text - segment. - -diff --git a/sysdeps/arm/abi-note.S b/sysdeps/arm/abi-note.S -index 07bd4c4619..7213b16f27 100644 ---- a/sysdeps/arm/abi-note.S -+++ b/sysdeps/arm/abi-note.S -@@ -1,3 +1,5 @@ -+ .file "abi-note.S" -+ - /* Tag_ABI_align8_preserved: This code preserves 8-byte - alignment in any callee. */ - .eabi_attribute 25, 1 -diff --git a/sysdeps/arm/crti.S b/sysdeps/arm/crti.S -index 8169783267..f56e0c85a6 100644 ---- a/sysdeps/arm/crti.S -+++ b/sysdeps/arm/crti.S -@@ -57,6 +57,8 @@ - .hidden PREINIT_FUNCTION - #endif - -+ .file "crti.S" -+ - #if PREINIT_FUNCTION_WEAK - .p2align 2 - .type call_weak_fn, %function -diff --git a/sysdeps/arm/crtn.S b/sysdeps/arm/crtn.S -index d60f9f05de..1e10ec439f 100644 ---- a/sysdeps/arm/crtn.S -+++ b/sysdeps/arm/crtn.S -@@ -37,6 +37,8 @@ - #define NO_THUMB - #include - -+ .file "crtn.S" -+ - /* crtn.S puts function epilogues in the .init and .fini sections - corresponding to the prologues in crti.S. 
*/ - -diff --git a/sysdeps/arm/dl-tlsdesc.S b/sysdeps/arm/dl-tlsdesc.S -index 8415e47607..29c2cb8128 100644 ---- a/sysdeps/arm/dl-tlsdesc.S -+++ b/sysdeps/arm/dl-tlsdesc.S -@@ -21,6 +21,8 @@ - #include - #include "tlsdesc.h" - -+ .file "dl-tlsdesc.S" -+ - .text - @ emit debug information with cfi - @ use arm-specific pseudos for unwinding itself -diff --git a/sysdeps/arm/dl-trampoline.S b/sysdeps/arm/dl-trampoline.S -index a20baf555e..2dd3bef04e 100644 ---- a/sysdeps/arm/dl-trampoline.S -+++ b/sysdeps/arm/dl-trampoline.S -@@ -21,6 +21,8 @@ - #include - #include - -+ .file "dl-trampoline.S" -+ - .text - .globl _dl_runtime_resolve - .type _dl_runtime_resolve, #function -diff --git a/sysdeps/arm/start.S b/sysdeps/arm/start.S -index 2ff56179d2..c118046ec6 100644 ---- a/sysdeps/arm/start.S -+++ b/sysdeps/arm/start.S -@@ -57,6 +57,8 @@ - NULL - */ - -+ .file "start.S" -+ - /* Tag_ABI_align8_preserved: This code preserves 8-byte - alignment in any callee. */ - .eabi_attribute 25, 1 -diff --git a/sysdeps/mips/start.S b/sysdeps/mips/start.S -index fabc8080df..83c6b91d7d 100644 ---- a/sysdeps/mips/start.S -+++ b/sysdeps/mips/start.S -@@ -38,6 +38,8 @@ - #include - #include - -+ .file "start.S" -+ - #ifndef ENTRY_POINT - #error ENTRY_POINT needs to be defined for start.S on MIPS/ELF. - #endif -diff --git a/sysdeps/powerpc/powerpc32/dl-start.S b/sysdeps/powerpc/powerpc32/dl-start.S -index c15242d3bc..6ca27715a4 100644 ---- a/sysdeps/powerpc/powerpc32/dl-start.S -+++ b/sysdeps/powerpc/powerpc32/dl-start.S -@@ -18,6 +18,8 @@ - - #include - -+ .file "dl-start.S" -+ - /* Initial entry point code for the dynamic linker. - The C function `_dl_start' is the real entry point; - its return value is the user program's entry point. */ -diff --git a/sysdeps/powerpc/powerpc32/start.S b/sysdeps/powerpc/powerpc32/start.S -index b7b9a133a2..8df714f81a 100644 ---- a/sysdeps/powerpc/powerpc32/start.S -+++ b/sysdeps/powerpc/powerpc32/start.S -@@ -35,6 +35,8 @@ - - #include - -+ .file "start.S" -+ - /* We do not want .eh_frame info for crt1.o since crt1.o is linked - before crtbegin.o, the file defining __EH_FRAME_BEGIN__. */ - #undef cfi_startproc -diff --git a/sysdeps/powerpc/powerpc64/start.S b/sysdeps/powerpc/powerpc64/start.S -index 94bf771e83..cffb99500a 100644 ---- a/sysdeps/powerpc/powerpc64/start.S -+++ b/sysdeps/powerpc/powerpc64/start.S -@@ -35,6 +35,8 @@ - - #include - -+ .file "start.S" -+ - /* We do not want .eh_frame info for crt1.o since crt1.o is linked - before crtbegin.o, the file defining __EH_FRAME_BEGIN__. */ - #undef cfi_startproc -diff --git a/sysdeps/unix/sysv/linux/aarch64/__read_tp.S b/sysdeps/unix/sysv/linux/aarch64/__read_tp.S -index 12e1131fe7..767e0d043b 100644 ---- a/sysdeps/unix/sysv/linux/aarch64/__read_tp.S -+++ b/sysdeps/unix/sysv/linux/aarch64/__read_tp.S -@@ -18,6 +18,8 @@ - - #include - -+ .file "__read_tp.S" -+ - .hidden __read_tp - ENTRY (__read_tp) - mrs x0, tpidr_el0 -diff --git a/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S b/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S -index 3b0d611039..4a7e476c37 100644 ---- a/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S -+++ b/sysdeps/unix/sysv/linux/arm/aeabi_read_tp.S -@@ -39,6 +39,8 @@ - a normal function call) in a high page of memory; tail call to the - helper. 
*/ - -+ .file "aeabi_read_tp.S" -+ - .hidden __aeabi_read_tp - ENTRY (__aeabi_read_tp) - #ifdef ARCH_HAS_HARD_TP -diff --git a/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S b/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S -index fb5e0c7d97..bd07bdb38b 100644 ---- a/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S -+++ b/sysdeps/unix/sysv/linux/arm/libc-do-syscall.S -@@ -27,6 +27,8 @@ - ARM unwind tables for register to register moves, the actual opcodes - are not defined. */ - -+ .file "libc-do-syscall.S" -+ - #if defined(__thumb__) - .thumb - .syntax unified -diff --git a/sysdeps/unix/sysv/linux/powerpc/dl-brk.S b/sysdeps/unix/sysv/linux/powerpc/dl-brk.S -index eeb96544e3..da182b28f8 100644 ---- a/sysdeps/unix/sysv/linux/powerpc/dl-brk.S -+++ b/sysdeps/unix/sysv/linux/powerpc/dl-brk.S -@@ -1 +1,3 @@ -+ .file "dl-brk.S" -+ - #include diff --git a/poky/meta/recipes-core/glibc/glibc/0028-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch b/poky/meta/recipes-core/glibc/glibc/0028-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch new file mode 100644 index 000000000..25c046f5c --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0028-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch @@ -0,0 +1,33 @@ +From 70731329feb7ba20364aa37aed83d920de97f028 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 18 Mar 2015 00:11:22 +0000 +Subject: [PATCH 28/29] readlib: Add OECORE_KNOWN_INTERPRETER_NAMES to known + names + +This bolts in a hook for OE to pass its own version of interpreter +names into glibc especially for multilib case, where it differs from any +other distros + +Upstream-Status: Inappropriate [OE specific] + +Signed-off-by: Lianhao Lu +Signed-off-by: Khem Raj +--- + elf/readlib.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/elf/readlib.c b/elf/readlib.c +index 994a4426a1..baabf099b1 100644 +--- a/elf/readlib.c ++++ b/elf/readlib.c +@@ -51,6 +51,7 @@ static struct known_names interpreters[] = + #ifdef SYSDEP_KNOWN_INTERPRETER_NAMES + SYSDEP_KNOWN_INTERPRETER_NAMES + #endif ++ OECORE_KNOWN_INTERPRETER_NAMES + }; + + static struct known_names known_libs[] = +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch b/poky/meta/recipes-core/glibc/glibc/0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch deleted file mode 100644 index c8c359f2b..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch +++ /dev/null @@ -1,53 +0,0 @@ -From 66963ad83bdd3b075006ddca9dfe357aed181d6a Mon Sep 17 00:00:00 2001 -From: Martin Jansa -Date: Mon, 17 Dec 2018 21:36:18 +0000 -Subject: [PATCH] locale: prevent maybe-uninitialized errors with -Os [BZ - #19444] - -Fixes following error when building for aarch64 with -Os: -| In file included from strcoll_l.c:43: -| strcoll_l.c: In function '__strcoll_l': -| ../locale/weight.h:31:26: error: 'seq2.back_us' may be used uninitialized in this function [-Werror=maybe-uninitialized] -| int_fast32_t i = table[*(*cpp)++]; -| ^~~~~~~~~ -| strcoll_l.c:304:18: note: 'seq2.back_us' was declared here -| coll_seq seq1, seq2; -| ^~~~ -| In file included from strcoll_l.c:43: -| ../locale/weight.h:31:26: error: 'seq1.back_us' may be used uninitialized in this function [-Werror=maybe-uninitialized] -| int_fast32_t i = table[*(*cpp)++]; -| ^~~~~~~~~ -| strcoll_l.c:304:12: note: 'seq1.back_us' was declared here -| coll_seq seq1, seq2; -| ^~~~ - - Partial fix for [BZ #19444] - * locale/weight.h: Fix build with -Os. 
- -Upstream-Status: Submitted [https://patchwork.ozlabs.org/patch/1014766] - -Signed-off-by: Martin Jansa -Signed-off-by: Khem Raj ---- - locale/weight.h | 7 +++++++ - 1 file changed, 7 insertions(+) - -diff --git a/locale/weight.h b/locale/weight.h -index e071253f85..2889c395f1 100644 ---- a/locale/weight.h -+++ b/locale/weight.h -@@ -28,7 +28,14 @@ findidx (const int32_t *table, - const unsigned char *extra, - const unsigned char **cpp, size_t len) - { -+ /* With GCC 8 when compiling with -Os the compiler warns that -+ seq1.back_us and seq2.back_us might be used uninitialized. -+ This uninitialized use is impossible for the same reason -+ as described in comments in locale/weightwc.h. */ -+ DIAG_PUSH_NEEDS_COMMENT; -+ DIAG_IGNORE_Os_NEEDS_COMMENT (8, "-Wmaybe-uninitialized"); - int_fast32_t i = table[*(*cpp)++]; -+ DIAG_POP_NEEDS_COMMENT; - const unsigned char *cp; - const unsigned char *usrc; - diff --git a/poky/meta/recipes-core/glibc/glibc/0029-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch b/poky/meta/recipes-core/glibc/glibc/0029-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch new file mode 100644 index 000000000..0ebf8a618 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0029-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch @@ -0,0 +1,70 @@ +From bf1c4b2f01c5f86bb770fd1711a69ea727e0f56e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 15 May 2020 17:05:45 -0700 +Subject: [PATCH 29/29] wordsize.h: Unify the header between arm and aarch64 + +This helps OE multilibs to not sythesize this header which causes all +kind of recursions and other issues since wordsize is fundamental header +and ends up including itself in many case e.g. clang tidy, bpf etc. + +Upstream-Status: Inappropriate [ OE-Specific ] + +Signed-off-by: Khem Raj +--- + sysdeps/aarch64/bits/wordsize.h | 8 ++++++-- + sysdeps/{aarch64 => arm}/bits/wordsize.h | 8 ++++++-- + 2 files changed, 12 insertions(+), 4 deletions(-) + copy sysdeps/{aarch64 => arm}/bits/wordsize.h (85%) + +diff --git a/sysdeps/aarch64/bits/wordsize.h b/sysdeps/aarch64/bits/wordsize.h +index ee01841773..34fcdef1f1 100644 +--- a/sysdeps/aarch64/bits/wordsize.h ++++ b/sysdeps/aarch64/bits/wordsize.h +@@ -17,12 +17,16 @@ + License along with the GNU C Library; if not, see + . */ + +-#ifdef __LP64__ ++#if defined (__aarch64__) && defined (__LP64__) + # define __WORDSIZE 64 +-#else ++#elif defined (__aarch64__) + # define __WORDSIZE 32 + # define __WORDSIZE32_SIZE_ULONG 1 + # define __WORDSIZE32_PTRDIFF_LONG 1 ++#else ++# define __WORDSIZE 32 ++# define __WORDSIZE32_SIZE_ULONG 0 ++# define __WORDSIZE32_PTRDIFF_LONG 0 + #endif + + #define __WORDSIZE_TIME64_COMPAT32 0 +diff --git a/sysdeps/aarch64/bits/wordsize.h b/sysdeps/arm/bits/wordsize.h +similarity index 85% +copy from sysdeps/aarch64/bits/wordsize.h +copy to sysdeps/arm/bits/wordsize.h +index ee01841773..34fcdef1f1 100644 +--- a/sysdeps/aarch64/bits/wordsize.h ++++ b/sysdeps/arm/bits/wordsize.h +@@ -17,12 +17,16 @@ + License along with the GNU C Library; if not, see + . 
*/ + +-#ifdef __LP64__ ++#if defined (__aarch64__) && defined (__LP64__) + # define __WORDSIZE 64 +-#else ++#elif defined (__aarch64__) + # define __WORDSIZE 32 + # define __WORDSIZE32_SIZE_ULONG 1 + # define __WORDSIZE32_PTRDIFF_LONG 1 ++#else ++# define __WORDSIZE 32 ++# define __WORDSIZE32_SIZE_ULONG 0 ++# define __WORDSIZE32_PTRDIFF_LONG 0 + #endif + + #define __WORDSIZE_TIME64_COMPAT32 0 +-- +2.27.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0030-powerpc-Do-not-ask-compiler-for-finding-arch.patch b/poky/meta/recipes-core/glibc/glibc/0030-powerpc-Do-not-ask-compiler-for-finding-arch.patch new file mode 100644 index 000000000..a76455236 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc/0030-powerpc-Do-not-ask-compiler-for-finding-arch.patch @@ -0,0 +1,51 @@ +From 8b1d521290218df8ceeaf2e6ee44a54b19d8d328 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 7 Aug 2020 14:31:16 -0700 +Subject: [PATCH 30/30] powerpc: Do not ask compiler for finding arch + +This does not work well in cross compiling environments like OE +and moreover it uses its own -mcpu/-march options via cflags + +Upstream-Status: Inappropriate [ OE-Specific] + +Signed-off-by: Khem Raj +--- + sysdeps/powerpc/preconfigure | 5 +---- + sysdeps/powerpc/preconfigure.ac | 5 +---- + 2 files changed, 2 insertions(+), 8 deletions(-) + +diff --git a/sysdeps/powerpc/preconfigure b/sysdeps/powerpc/preconfigure +index dfe8e20399..bbff040f0f 100644 +--- a/sysdeps/powerpc/preconfigure ++++ b/sysdeps/powerpc/preconfigure +@@ -29,10 +29,7 @@ esac + # directive which shows up, and try using it. + case "${machine}:${submachine}" in + *powerpc*:) +- archcpu=`echo "int foo () { return 0; }" \ +- | $CC $CFLAGS $CPPFLAGS -S -frecord-gcc-switches -xc -o - - \ +- | grep -E "mcpu=|.machine" -m 1 \ +- | sed -e "s/.*machine //" -e "s/.*mcpu=\(.*\)\"/\1/"` ++ archcpu='' + # Note if you add patterns here you must ensure that an appropriate + # directory exists in sysdeps/powerpc. Likewise, if we find a + # cpu, don't let the generic configure append extra compiler options. +diff --git a/sysdeps/powerpc/preconfigure.ac b/sysdeps/powerpc/preconfigure.ac +index 6c63bd8257..3e925f1d48 100644 +--- a/sysdeps/powerpc/preconfigure.ac ++++ b/sysdeps/powerpc/preconfigure.ac +@@ -29,10 +29,7 @@ esac + # directive which shows up, and try using it. + case "${machine}:${submachine}" in + *powerpc*:) +- archcpu=`echo "int foo () { return 0; }" \ +- | $CC $CFLAGS $CPPFLAGS -S -frecord-gcc-switches -xc -o - - \ +- | grep -E "mcpu=|[.]machine" -m 1 \ +- | sed -e "s/.*machine //" -e "s/.*mcpu=\(.*\)\"/\1/"` ++ archcpu='' + # Note if you add patterns here you must ensure that an appropriate + # directory exists in sysdeps/powerpc. Likewise, if we find a + # cpu, don't let the generic configure append extra compiler options. 
+-- +2.28.0 + diff --git a/poky/meta/recipes-core/glibc/glibc/0030-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch b/poky/meta/recipes-core/glibc/glibc/0030-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch deleted file mode 100644 index cbef2f283..000000000 --- a/poky/meta/recipes-core/glibc/glibc/0030-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch +++ /dev/null @@ -1,67 +0,0 @@ -From 9cb0a756b017f5961b70ac781d3eaec6c82513cb Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 15 May 2020 17:05:45 -0700 -Subject: [PATCH] wordsize.h: Unify the header between arm and aarch64 - -This helps OE multilibs to not sythesize this header which causes all -kind of recursions and other issues since wordsize is fundamental header -and ends up including itself in many case e.g. clang tidy, bpf etc. - -Upstream-Status: Inappropriate [ OE-Specific ] - -Signed-off-by: Khem Raj ---- - sysdeps/aarch64/bits/wordsize.h | 8 ++++++-- - sysdeps/{aarch64 => arm}/bits/wordsize.h | 8 ++++++-- - 2 files changed, 12 insertions(+), 4 deletions(-) - copy sysdeps/{aarch64 => arm}/bits/wordsize.h (85%) - -diff --git a/sysdeps/aarch64/bits/wordsize.h b/sysdeps/aarch64/bits/wordsize.h -index ee01841773..34fcdef1f1 100644 ---- a/sysdeps/aarch64/bits/wordsize.h -+++ b/sysdeps/aarch64/bits/wordsize.h -@@ -17,12 +17,16 @@ - License along with the GNU C Library; if not, see - . */ - --#ifdef __LP64__ -+#if defined (__aarch64__) && defined (__LP64__) - # define __WORDSIZE 64 --#else -+#elif defined (__aarch64__) - # define __WORDSIZE 32 - # define __WORDSIZE32_SIZE_ULONG 1 - # define __WORDSIZE32_PTRDIFF_LONG 1 -+#else -+# define __WORDSIZE 32 -+# define __WORDSIZE32_SIZE_ULONG 0 -+# define __WORDSIZE32_PTRDIFF_LONG 0 - #endif - - #define __WORDSIZE_TIME64_COMPAT32 0 -diff --git a/sysdeps/aarch64/bits/wordsize.h b/sysdeps/arm/bits/wordsize.h -similarity index 85% -copy from sysdeps/aarch64/bits/wordsize.h -copy to sysdeps/arm/bits/wordsize.h -index ee01841773..34fcdef1f1 100644 ---- a/sysdeps/aarch64/bits/wordsize.h -+++ b/sysdeps/arm/bits/wordsize.h -@@ -17,12 +17,16 @@ - License along with the GNU C Library; if not, see - . 
*/ - --#ifdef __LP64__ -+#if defined (__aarch64__) && defined (__LP64__) - # define __WORDSIZE 64 --#else -+#elif defined (__aarch64__) - # define __WORDSIZE 32 - # define __WORDSIZE32_SIZE_ULONG 1 - # define __WORDSIZE32_PTRDIFF_LONG 1 -+#else -+# define __WORDSIZE 32 -+# define __WORDSIZE32_SIZE_ULONG 0 -+# define __WORDSIZE32_PTRDIFF_LONG 0 - #endif - - #define __WORDSIZE_TIME64_COMPAT32 0 diff --git a/poky/meta/recipes-core/glibc/glibc_2.31.bb b/poky/meta/recipes-core/glibc/glibc_2.31.bb deleted file mode 100644 index 9b2cf1bde..000000000 --- a/poky/meta/recipes-core/glibc/glibc_2.31.bb +++ /dev/null @@ -1,113 +0,0 @@ -require glibc.inc -require glibc-version.inc - -CVE_CHECK_WHITELIST += "CVE-2020-10029" - -DEPENDS += "gperf-native bison-native make-native" - -NATIVESDKFIXES ?= "" -NATIVESDKFIXES_class-nativesdk = "\ - file://0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch \ - file://0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch \ - file://0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch \ - file://0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch \ - file://0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch \ -" - -SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \ - file://etc/ld.so.conf \ - file://generate-supported.mk \ - file://makedbs.sh \ - \ - ${NATIVESDKFIXES} \ - file://0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch \ - file://0009-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch \ - file://0010-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch \ - file://0011-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch \ - file://0012-Quote-from-bug-1443-which-explains-what-the-patch-do.patch \ - file://0013-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch \ - file://0014-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch \ - file://0015-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch \ - file://0016-Add-unused-attribute.patch \ - file://0017-yes-within-the-path-sets-wrong-config-variables.patch \ - file://0018-timezone-re-written-tzselect-as-posix-sh.patch \ - file://0019-Remove-bash-dependency-for-nscd-init-script.patch \ - file://0020-eglibc-Cross-building-and-testing-instructions.patch \ - file://0021-eglibc-Help-bootstrap-cross-toolchain.patch \ - file://0022-eglibc-Resolve-__fpscr_values-on-SH4.patch \ - file://0023-eglibc-Forward-port-cross-locale-generation-support.patch \ - file://0024-Define-DUMMY_LOCALE_T-if-not-defined.patch \ - file://0025-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch \ - file://0026-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch \ - file://0027-intl-Emit-no-lines-in-bison-generated-files.patch \ - file://0028-inject-file-assembly-directives.patch \ - file://0029-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch \ - file://0030-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch \ - " -S = "${WORKDIR}/git" -B = "${WORKDIR}/build-${TARGET_SYS}" - -PACKAGES_DYNAMIC = "" - -# the -isystem in bitbake.conf screws up glibc do_stage -BUILD_CPPFLAGS = "-I${STAGING_INCDIR_NATIVE}" -TARGET_CPPFLAGS = "-I${STAGING_DIR_TARGET}${includedir}" - -GLIBC_BROKEN_LOCALES = "" - -GLIBCPIE ??= "" - -EXTRA_OECONF = "--enable-kernel=${OLDEST_KERNEL} \ - --disable-profile \ - --disable-debug --without-gd \ - --enable-clocale=gnu \ - --with-headers=${STAGING_INCDIR} \ - --without-selinux \ - --enable-tunables \ - --enable-bind-now \ - --enable-stack-protector=strong \ - 
--enable-stackguard-randomization \ - --disable-crypt \ - --with-default-link \ - --enable-nscd \ - ${@bb.utils.contains_any('SELECTED_OPTIMIZATION', '-O0 -Og', '--disable-werror', '', d)} \ - ${GLIBCPIE} \ - ${GLIBC_EXTRA_OECONF}" - -EXTRA_OECONF += "${@get_libc_fpu_setting(bb, d)}" - -do_patch_append() { - bb.build.exec_func('do_fix_readlib_c', d) -} - -do_fix_readlib_c () { - sed -i -e 's#OECORE_KNOWN_INTERPRETER_NAMES#${EGLIBC_KNOWN_INTERPRETER_NAMES}#' ${S}/elf/readlib.c -} - -do_configure () { -# override this function to avoid the autoconf/automake/aclocal/autoheader -# calls for now -# don't pass CPPFLAGS into configure, since it upsets the kernel-headers -# version check and doesn't really help with anything - (cd ${S} && gnu-configize) || die "failure in running gnu-configize" - find ${S} -name "configure" | xargs touch - CPPFLAGS="" oe_runconf -} - -LDFLAGS += "-fuse-ld=bfd" -do_compile () { - base_do_compile - echo "Adjust ldd script" - if [ -n "${RTLDLIST}" ] - then - prevrtld=`cat ${B}/elf/ldd | grep "^RTLDLIST=" | sed 's#^RTLDLIST="\?\([^"]*\)"\?$#\1#'` - # remove duplicate entries - newrtld=`echo $(printf '%s\n' ${prevrtld} ${RTLDLIST} | LC_ALL=C sort -u)` - echo "ldd \"${prevrtld} ${RTLDLIST}\" -> \"${newrtld}\"" - sed -i ${B}/elf/ldd -e "s#^RTLDLIST=.*\$#RTLDLIST=\"${newrtld}\"#" - fi -} - -require glibc-package.inc - -BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-core/glibc/glibc_2.32.bb b/poky/meta/recipes-core/glibc/glibc_2.32.bb new file mode 100644 index 000000000..7049e6162 --- /dev/null +++ b/poky/meta/recipes-core/glibc/glibc_2.32.bb @@ -0,0 +1,113 @@ +require glibc.inc +require glibc-version.inc + +CVE_CHECK_WHITELIST += "CVE-2020-10029" + +DEPENDS += "gperf-native bison-native make-native" + +NATIVESDKFIXES ?= "" +NATIVESDKFIXES_class-nativesdk = "\ + file://0003-nativesdk-glibc-Look-for-host-system-ld.so.cache-as-.patch \ + file://0004-nativesdk-glibc-Fix-buffer-overrun-with-a-relocated-.patch \ + file://0005-nativesdk-glibc-Raise-the-size-of-arrays-containing-.patch \ + file://0006-nativesdk-glibc-Allow-64-bit-atomics-for-x86.patch \ + file://0007-nativesdk-glibc-Make-relocatable-install-for-locales.patch \ +" + +SRC_URI = "${GLIBC_GIT_URI};branch=${SRCBRANCH};name=glibc \ + file://etc/ld.so.conf \ + file://generate-supported.mk \ + file://makedbs.sh \ + \ + ${NATIVESDKFIXES} \ + file://0008-fsl-e500-e5500-e6500-603e-fsqrt-implementation.patch \ + file://0009-ppc-sqrt-Fix-undefined-reference-to-__sqrt_finite.patch \ + file://0010-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch \ + file://0011-Quote-from-bug-1443-which-explains-what-the-patch-do.patch \ + file://0012-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch \ + file://0013-__ieee754_sqrt-f-are-now-inline-functions-and-call-o.patch \ + file://0014-sysdeps-gnu-configure.ac-handle-correctly-libc_cv_ro.patch \ + file://0015-yes-within-the-path-sets-wrong-config-variables.patch \ + file://0016-timezone-re-written-tzselect-as-posix-sh.patch \ + file://0017-Remove-bash-dependency-for-nscd-init-script.patch \ + file://0018-eglibc-Cross-building-and-testing-instructions.patch \ + file://0019-eglibc-Help-bootstrap-cross-toolchain.patch \ + file://0020-eglibc-Resolve-__fpscr_values-on-SH4.patch \ + file://0021-eglibc-Forward-port-cross-locale-generation-support.patch \ + file://0022-Define-DUMMY_LOCALE_T-if-not-defined.patch \ + file://0023-localedef-add-to-archive-uses-a-hard-coded-locale-pa.patch \ + file://0024-elf-dl-deps.c-Make-_dl_build_local_scope-breadth-fir.patch \ + 
file://0025-intl-Emit-no-lines-in-bison-generated-files.patch \ + file://0026-inject-file-assembly-directives.patch \ + file://0027-locale-prevent-maybe-uninitialized-errors-with-Os-BZ.patch \ + file://0028-readlib-Add-OECORE_KNOWN_INTERPRETER_NAMES-to-known-.patch \ + file://0029-wordsize.h-Unify-the-header-between-arm-and-aarch64.patch \ + file://0030-powerpc-Do-not-ask-compiler-for-finding-arch.patch \ + " +S = "${WORKDIR}/git" +B = "${WORKDIR}/build-${TARGET_SYS}" + +PACKAGES_DYNAMIC = "" + +# the -isystem in bitbake.conf screws up glibc do_stage +BUILD_CPPFLAGS = "-I${STAGING_INCDIR_NATIVE}" +TARGET_CPPFLAGS = "-I${STAGING_DIR_TARGET}${includedir}" + +GLIBC_BROKEN_LOCALES = "" + +GLIBCPIE ??= "" + +EXTRA_OECONF = "--enable-kernel=${OLDEST_KERNEL} \ + --disable-profile \ + --disable-debug --without-gd \ + --enable-clocale=gnu \ + --with-headers=${STAGING_INCDIR} \ + --without-selinux \ + --enable-tunables \ + --enable-bind-now \ + --enable-stack-protector=strong \ + --enable-stackguard-randomization \ + --disable-crypt \ + --with-default-link \ + --enable-nscd \ + ${@bb.utils.contains_any('SELECTED_OPTIMIZATION', '-O0 -Og', '--disable-werror', '', d)} \ + ${GLIBCPIE} \ + ${GLIBC_EXTRA_OECONF}" + +EXTRA_OECONF += "${@get_libc_fpu_setting(bb, d)}" + +do_patch_append() { + bb.build.exec_func('do_fix_readlib_c', d) +} + +do_fix_readlib_c () { + sed -i -e 's#OECORE_KNOWN_INTERPRETER_NAMES#${EGLIBC_KNOWN_INTERPRETER_NAMES}#' ${S}/elf/readlib.c +} + +do_configure () { +# override this function to avoid the autoconf/automake/aclocal/autoheader +# calls for now +# don't pass CPPFLAGS into configure, since it upsets the kernel-headers +# version check and doesn't really help with anything + (cd ${S} && gnu-configize) || die "failure in running gnu-configize" + find ${S} -name "configure" | xargs touch + CPPFLAGS="" oe_runconf +} + +LDFLAGS += "-fuse-ld=bfd" +do_compile () { + base_do_compile + echo "Adjust ldd script" + if [ -n "${RTLDLIST}" ] + then + prevrtld=`cat ${B}/elf/ldd | grep "^RTLDLIST=" | sed 's#^RTLDLIST="\?\([^"]*\)"\?$#\1#'` + # remove duplicate entries + newrtld=`echo $(printf '%s\n' ${prevrtld} ${RTLDLIST} | LC_ALL=C sort -u)` + echo "ldd \"${prevrtld} ${RTLDLIST}\" -> \"${newrtld}\"" + sed -i ${B}/elf/ldd -e "s#^RTLDLIST=.*\$#RTLDLIST=\"${newrtld}\"#" + fi +} + +require glibc-package.inc + +BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-core/initscripts/initscripts-1.0/populate-volatile.sh b/poky/meta/recipes-core/initscripts/initscripts-1.0/populate-volatile.sh index 1c525b71b..f21f48dd3 100755 --- a/poky/meta/recipes-core/initscripts/initscripts-1.0/populate-volatile.sh +++ b/poky/meta/recipes-core/initscripts/initscripts-1.0/populate-volatile.sh @@ -9,10 +9,10 @@ ### END INIT INFO # Get ROOT_DIR -DIRNAME=`dirname $0` -ROOT_DIR=`echo $DIRNAME | sed -ne 's:/etc/.*::p'` +DIRNAME="$(dirname "$0")" +ROOT_DIR="$(echo "$DIRNAME" | sed -ne 's:/etc/.*::p')" -[ -e ${ROOT_DIR}/etc/default/rcS ] && . ${ROOT_DIR}/etc/default/rcS +[ -e "${ROOT_DIR}/etc/default/rcS" ] && . "${ROOT_DIR}/etc/default/rcS" # When running populate-volatile.sh at rootfs time, disable cache. [ -n "$ROOT_DIR" ] && VOLATILE_ENABLE_CACHE=no # If rootfs is read-only, disable cache. 
@@ -26,15 +26,15 @@ COREDEF="00_core" create_file() { EXEC="" - [ -z "$2" ] && { + if [ -z "$2" ]; then EXEC=" touch \"$1\"; " - } || { + else EXEC=" cp \"$2\" \"$1\"; " - } + fi EXEC=" ${EXEC} chown ${TUSER}:${TGROUP} $1 || echo \"Failed to set owner -${TUSER}- for -$1-.\" >/dev/tty0 2>&1; @@ -42,19 +42,19 @@ create_file() { test "$VOLATILE_ENABLE_CACHE" = yes && echo "$EXEC" >> /etc/volatile.cache.build - [ -e "$1" ] && { + if [ -e "$1" ]; then [ "${VERBOSE}" != "no" ] && echo "Target already exists. Skipping." - } || { + else if [ -z "$ROOT_DIR" ]; then - eval $EXEC + eval "$EXEC" else # Creating some files at rootfs time may fail and should fail, # but these failures should not be logged to make sure the do_rootfs # process doesn't fail. This does no harm, as this script will # run on target to set up the correct files and directories. - eval $EXEC > /dev/null 2>&1 + eval "$EXEC" > /dev/null 2>&1 fi - } + fi } mk_dir() { @@ -64,17 +64,17 @@ mk_dir() { chmod ${TMODE} $1 || echo \"Failed to set mode -${TMODE}- for -$1-.\" >/dev/tty0 2>&1 " test "$VOLATILE_ENABLE_CACHE" = yes && echo "$EXEC" >> /etc/volatile.cache.build - [ -e "$1" ] && { + if [ -e "$1" ]; then [ "${VERBOSE}" != "no" ] && echo "Target already exists. Skipping." - } || { + else if [ -z "$ROOT_DIR" ]; then - eval $EXEC + eval "$EXEC" else # For the same reason with create_file(), failures should # not be logged. - eval $EXEC > /dev/null 2>&1 + eval "$EXEC" > /dev/null 2>&1 fi - } + fi } link_file() { @@ -96,11 +96,11 @@ link_file() { test "$VOLATILE_ENABLE_CACHE" = yes && echo " $EXEC" >> /etc/volatile.cache.build if [ -z "$ROOT_DIR" ]; then - eval $EXEC + eval "$EXEC" else # For the same reason with create_file(), failures should # not be logged. - eval $EXEC > /dev/null 2>&1 + eval "$EXEC" > /dev/null 2>&1 fi } @@ -117,11 +117,11 @@ check_requirements() { TMP_DEFINED="${TMPROOT}/tmpdefined.$$" TMP_COMBINED="${TMPROOT}/tmpcombined.$$" - sed 's@\(^:\)*:.*@\1@' ${ROOT_DIR}/etc/passwd | sort | uniq > "${TMP_DEFINED}" - cat ${CFGFILE} | grep -v "^#" | cut -s -d " " -f 2 > "${TMP_INTERMED}" + sed 's@\(^:\)*:.*@\1@' "${ROOT_DIR}/etc/passwd" | sort | uniq > "${TMP_DEFINED}" + grep -v "^#" "${CFGFILE}" | cut -s -d " " -f 2 > "${TMP_INTERMED}" cat "${TMP_DEFINED}" "${TMP_INTERMED}" | sort | uniq > "${TMP_COMBINED}" - NR_DEFINED_USERS="`cat "${TMP_DEFINED}" | wc -l`" - NR_COMBINED_USERS="`cat "${TMP_COMBINED}" | wc -l`" + NR_DEFINED_USERS="$(wc -l < "${TMP_DEFINED}")" + NR_COMBINED_USERS="$(wc -l < "${TMP_COMBINED}")" [ "${NR_DEFINED_USERS}" -ne "${NR_COMBINED_USERS}" ] && { echo "Undefined users:" @@ -131,12 +131,12 @@ check_requirements() { } - sed 's@\(^:\)*:.*@\1@' ${ROOT_DIR}/etc/group | sort | uniq > "${TMP_DEFINED}" - cat ${CFGFILE} | grep -v "^#" | cut -s -d " " -f 3 > "${TMP_INTERMED}" + sed 's@\(^:\)*:.*@\1@' "${ROOT_DIR}/etc/group" | sort | uniq > "${TMP_DEFINED}" + grep -v "^#" "${CFGFILE}" | cut -s -d " " -f 3 > "${TMP_INTERMED}" cat "${TMP_DEFINED}" "${TMP_INTERMED}" | sort | uniq > "${TMP_COMBINED}" - NR_DEFINED_GROUPS="`cat "${TMP_DEFINED}" | wc -l`" - NR_COMBINED_GROUPS="`cat "${TMP_COMBINED}" | wc -l`" + NR_DEFINED_GROUPS="$(wc -l < "${TMP_DEFINED}")" + NR_COMBINED_GROUPS="$(wc -l < "${TMP_COMBINED}")" [ "${NR_DEFINED_GROUPS}" -ne "${NR_COMBINED_GROUPS}" ] && { echo "Undefined groups:" @@ -157,13 +157,13 @@ apply_cfgfile() { [ "${VERBOSE}" != "no" ] && echo "Applying ${CFGFILE}" - [ "${SKIP_REQUIREMENTS}" == "yes" ] || check_requirements "${CFGFILE}" || { + [ "${SKIP_REQUIREMENTS}" = "yes" ] || check_requirements 
"${CFGFILE}" || { echo "Skipping ${CFGFILE}" return 1 } - cat ${CFGFILE} | sed 's/#.*//' | \ - while read TTYPE TUSER TGROUP TMODE TNAME TLTARGET; do + sed 's/#.*//' "${CFGFILE}" | \ + while read -r TTYPE TUSER TGROUP TMODE TNAME TLTARGET; do test -z "${TLTARGET}" && continue TNAME=${ROOT_DIR}${TNAME} [ "${VERBOSE}" != "no" ] && echo "Checking for -${TNAME}-." @@ -187,14 +187,14 @@ apply_cfgfile() { [ -L "${TNAME}" ] && { [ "${VERBOSE}" != "no" ] && echo "Found link." - NEWNAME=`ls -l "${TNAME}" | sed -e 's/^.*-> \(.*\)$/\1/'` - echo ${NEWNAME} | grep -v "^/" >/dev/null && { - TNAME="`echo ${TNAME} | sed -e 's@\(.*\)/.*@\1@'`/${NEWNAME}" + NEWNAME=$(ls -l "${TNAME}" | sed -e 's/^.*-> \(.*\)$/\1/') + if echo "${NEWNAME}" | grep -v "^/" >/dev/null; then + TNAME="$(echo "${TNAME}" | sed -e 's@\(.*\)/.*@\1@')/${NEWNAME}" [ "${VERBOSE}" != "no" ] && echo "Converted relative linktarget to absolute path -${TNAME}-." - } || { + else TNAME="${NEWNAME}" [ "${VERBOSE}" != "no" ] && echo "Using absolute link target -${TNAME}-." - } + fi } case "${TTYPE}" in @@ -217,7 +217,7 @@ apply_cfgfile() { clearcache=0 exec 9&- -if test -e ${ROOT_DIR}/etc/volatile.cache -a "$VOLATILE_ENABLE_CACHE" = "yes" -a "x$1" != "xupdate" -a "x$clearcache" = "x0" +if test -e "${ROOT_DIR}/etc/volatile.cache" -a "$VOLATILE_ENABLE_CACHE" = "yes" -a "x$1" != "xupdate" -a "x$clearcache" = "x0" then - sh ${ROOT_DIR}/etc/volatile.cache + sh "${ROOT_DIR}/etc/volatile.cache" else - rm -f ${ROOT_DIR}/etc/volatile.cache ${ROOT_DIR}/etc/volatile.cache.build + rm -f "${ROOT_DIR}/etc/volatile.cache" "${ROOT_DIR}/etc/volatile.cache.build" # Apply the core file with out checking requirements. ${TMPROOT} is # needed by check_requirements but is setup by this file, so it must be @@ -246,7 +246,7 @@ else TMP_FILE="${TMPROOT}/tmp_volatile.$$" rm -f "$TMP_FILE" - CFGFILES="`ls -1 "${CFGDIR}" | grep -v "^${COREDEF}\$" | sort`" + CFGFILES="$(ls -1 "${CFGDIR}" | grep -v "^${COREDEF}\$" | sort)" for file in ${CFGFILES}; do cat "${CFGDIR}/${file}" >> "$TMP_FILE" done @@ -264,7 +264,7 @@ else fi rm "$TMP_FILE" - [ -e ${ROOT_DIR}/etc/volatile.cache.build ] && sync && mv ${ROOT_DIR}/etc/volatile.cache.build ${ROOT_DIR}/etc/volatile.cache + [ -e "${ROOT_DIR}/etc/volatile.cache.build" ] && sync && mv "${ROOT_DIR}/etc/volatile.cache.build" "${ROOT_DIR}/etc/volatile.cache" fi if [ -z "${ROOT_DIR}" ] && [ -f /etc/ld.so.cache ] && [ ! -f /var/run/ld.so.cache ] diff --git a/poky/meta/recipes-core/kbd/kbd/0001-Use-DATADIR-and-append-i386-to-fix-libkbdfile-test08.patch b/poky/meta/recipes-core/kbd/kbd/0001-Use-DATADIR-and-append-i386-to-fix-libkbdfile-test08.patch deleted file mode 100644 index dc5236063..000000000 --- a/poky/meta/recipes-core/kbd/kbd/0001-Use-DATADIR-and-append-i386-to-fix-libkbdfile-test08.patch +++ /dev/null @@ -1,45 +0,0 @@ -From 4c12f76f4177cfd560cf708a16774ebfadbd41a5 Mon Sep 17 00:00:00 2001 -From: "Mingde (Matthew) Zeng" -Date: Wed, 22 Jan 2020 11:02:17 -0500 -Subject: [PATCH] Use DATADIR and append i386 to fix libkbdfile-test08 ptest - failure - -Replace ABS_DATADIR with DATADIR and append i386 to dirpath. - -Upstream-Status: Inappropriate [OE specific] - -This OE specific patch applies to kbd v2.2.0 for now, the upstream -made drastic changes since v2.2.0, in fact they got rid of ABS_DATADIR -in commit 5b6df5c along with a series of other commits which may or -may not fix this issue. We will find out in future releases. 
- -Signed-off-by: Matthew Zeng ---- - tests/libkbdfile-test08.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/tests/libkbdfile-test08.c b/tests/libkbdfile-test08.c -index bf41707..5e287f1 100644 ---- a/tests/libkbdfile-test08.c -+++ b/tests/libkbdfile-test08.c -@@ -14,14 +14,14 @@ main(int __attribute__((unused)) argc, char **argv) - if (!fp) - kbd_error(EXIT_FAILURE, 0, "unable to create kbdfile"); - -- const char *const dirpath[] = { "", DATADIR "/findfile/test_0/keymaps/**", 0 }; -+ const char *const dirpath[] = { "", DATADIR "/findfile/test_0/keymaps/i386/**", 0 }; - const char *const suffixes[] = { "", ".map", ".kmap", 0 }; - -- const char *expect = ABS_DATADIR "/findfile/test_0/keymaps/i386/qwerty/test0.map"; -+ const char *expect = DATADIR "/findfile/test_0/keymaps/i386/qwerty/test0.map"; - - int rc = 0; - -- rc = kbdfile_find((char *)(ABS_DATADIR "/findfile/test_0/keymaps/i386/qwerty/test0"), (char **) dirpath, (char **) suffixes, fp); -+ rc = kbdfile_find((char *)"test0", (char **) dirpath, (char **) suffixes, fp); - - if (rc != 0) - kbd_error(EXIT_FAILURE, 0, "unable to find file"); --- -2.24.1 - diff --git a/poky/meta/recipes-core/kbd/kbd/0001-analyze.l-add-missing-string-format.patch b/poky/meta/recipes-core/kbd/kbd/0001-analyze.l-add-missing-string-format.patch deleted file mode 100644 index 8916fd9f5..000000000 --- a/poky/meta/recipes-core/kbd/kbd/0001-analyze.l-add-missing-string-format.patch +++ /dev/null @@ -1,24 +0,0 @@ -From dc6bf2ae0835c6569b270e8e1f26a3173f3927d9 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Wed, 4 Dec 2019 13:14:01 +0100 -Subject: [PATCH] analyze.l: add missing string format - -Upstream-Status: Submitted [https://github.com/legionus/kbd/pull/35] -Signed-off-by: Alexander Kanavin ---- - src/libkeymap/analyze.l | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/libkeymap/analyze.l b/src/libkeymap/analyze.l -index e32ace6..de62f46 100644 ---- a/src/libkeymap/analyze.l -+++ b/src/libkeymap/analyze.l -@@ -463,7 +463,7 @@ To to|To|TO - - strerror_r(errno, buf, sizeof(buf)); - -- ERR(yyextra, buf); -+ ERR(yyextra, "%s", buf); - return(ERROR); - } - diff --git a/poky/meta/recipes-core/kbd/kbd/fix_cflags.patch b/poky/meta/recipes-core/kbd/kbd/fix_cflags.patch deleted file mode 100644 index 37220960a..000000000 --- a/poky/meta/recipes-core/kbd/kbd/fix_cflags.patch +++ /dev/null @@ -1,25 +0,0 @@ -We need to ensure our CFLAGS are preserved as well as whatever tweak configure -tries to make. Without these, the debug prefix changes get lost and we lose -build reproducibility, likely with other side effects. 
- -Signed-off-by: Richard Purdie -Upstream-Status: Pending -2020/1/27 - -Index: kbd-2.2.0/configure.ac -=================================================================== ---- kbd-2.2.0.orig/configure.ac -+++ kbd-2.2.0/configure.ac -@@ -72,9 +72,9 @@ if test "$enable_code_coverage" = yes; t - fi - - case "$GCC,$ac_cv_prog_cc_g" in -- yes,yes) CFLAGS="-g $CC_O_LEVEL $FORTIFY_SOURCE" ;; -- yes,) CFLAGS="$CC_O_LEVEL $FORTIFY_SOURCE" ;; -- ,yes) CFLAGS="-g" ;; -+ yes,yes) CFLAGS="-g $CC_O_LEVEL $FORTIFY_SOURCE $CFLAGS" ;; -+ yes,) CFLAGS="$CC_O_LEVEL $FORTIFY_SOURCE $CFLAGS" ;; -+ ,yes) CFLAGS="-g $CFLAGS" ;; - esac - - CC_CHECK_CFLAGS_APPEND([\ diff --git a/poky/meta/recipes-core/kbd/kbd/run-ptest b/poky/meta/recipes-core/kbd/kbd/run-ptest deleted file mode 100644 index 7a2d205f5..000000000 --- a/poky/meta/recipes-core/kbd/kbd/run-ptest +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -CURDIR=$(dirname `readlink -f $0`) -make -k -C ${CURDIR}/tests check-TESTS diff --git a/poky/meta/recipes-core/kbd/kbd/set-proper-path-of-resources.patch b/poky/meta/recipes-core/kbd/kbd/set-proper-path-of-resources.patch deleted file mode 100644 index 4b1e5b8ee..000000000 --- a/poky/meta/recipes-core/kbd/kbd/set-proper-path-of-resources.patch +++ /dev/null @@ -1,99 +0,0 @@ -From cb3af8fb072f8999dbb5160bdc95a102b02fd37a Mon Sep 17 00:00:00 2001 -From: Kai Kang -Date: Fri, 30 Sep 2016 16:49:55 +0800 -Subject: [PATCH] kbd: create ptest sub-package - -Upstream-Status: Inappropriate [embedded specific] - -kbd is out of source built, then the value of $(srcdir) is relative path of -${S}/tests to ${B}/tests. Macro DATADIR is defined with $(srcdir) and replaced -in .c files by compiler, and string @DATADIR@ is replaced with $(srdir) by rule -"%: %.in" in Makefile. - -But kbd-ptest puts test cases and resource files in same directory, then some -ptest cases fail to find resources. - -Replace DATADIR and @DATADIR@ with current directory(dot) to make test cases -run as expected. - -Signed-off-by: Kai Kang - ---- - tests/Makefile.am | 4 ++-- - tests/alt-is-meta.in | 2 +- - tests/dumpkeys-bkeymap.in | 4 ++-- - tests/dumpkeys-fulltable.in | 2 +- - tests/dumpkeys-mktable.in | 4 ++-- - 5 files changed, 8 insertions(+), 8 deletions(-) - -diff --git a/tests/Makefile.am b/tests/Makefile.am -index 8d0ab69..5147c28 100644 ---- a/tests/Makefile.am -+++ b/tests/Makefile.am -@@ -7,8 +7,8 @@ AM_CPPFLAGS = \ - -I$(builddir)/../src/libkeymap \ - -I$(srcdir)/../src/libkbdfile \ - -I$(builddir)/../src/libkbdfile \ -- -DDATADIR=\"$(srcdir)\" \ -- -DABS_DATADIR=\"$(realpath $(srcdir))\" \ -+ -DDATADIR=\".\" \ -+ -DABS_DATADIR=\"/usr/lib/kbd/ptest/tests\" \ - -DBUILDDIR=\"$(builddir)\" - - AM_CFLAGS = $(CHECK_CFLAGS) $(CODE_COVERAGE_CFLAGS) -diff --git a/tests/alt-is-meta.in b/tests/alt-is-meta.in -index 3a1441f..d829f2e 100755 ---- a/tests/alt-is-meta.in -+++ b/tests/alt-is-meta.in -@@ -7,7 +7,7 @@ cd "$cwd" - rc=0 - temp="$(mktemp "@BUILDDIR@/temp.XXXXXXXXX")" - --datadir="@DATADIR@/data/alt-is-meta" -+datadir="./data/alt-is-meta" - - ./libkeymap-showmaps "$datadir"/alt-is-meta.map > "$temp" || rc=$? - cmp -s "$datadir/alt-is-meta.output" "$temp" || rc=$? -diff --git a/tests/dumpkeys-bkeymap.in b/tests/dumpkeys-bkeymap.in -index 03d4ca2..d1d0e26 100755 ---- a/tests/dumpkeys-bkeymap.in -+++ b/tests/dumpkeys-bkeymap.in -@@ -9,8 +9,8 @@ temp="$(mktemp "@BUILDDIR@/temp.XXXXXXXXX")" - - datadir="@DATADIR@" - --./libkeymap-bkeymap "$datadir/"../data/keymaps/i386/qwerty/defkeymap.map > "$temp" || rc=$? 
--cmp -s "$datadir/data/dumpkeys-bkeymap/bkeymap.bin" "$temp" || rc=$? -+./libkeymap-bkeymap ../data/keymaps/i386/qwerty/defkeymap.map > "$temp" || rc=$? -+cmp -s "./data/dumpkeys-bkeymap/bkeymap.bin" "$temp" || rc=$? - - if [ "$rc" != 0 ]; then - printf 'failed\n' -diff --git a/tests/dumpkeys-fulltable.in b/tests/dumpkeys-fulltable.in -index a3a5ece..67a73ef 100755 ---- a/tests/dumpkeys-fulltable.in -+++ b/tests/dumpkeys-fulltable.in -@@ -5,7 +5,7 @@ cwd="$(readlink -ev "${0%/*}")" - cd "$cwd" - - BUILDDIR="@BUILDDIR@" --DATADIR="@DATADIR@" -+DATADIR="." - - check_keymap() { - local kmap temp rc -diff --git a/tests/dumpkeys-mktable.in b/tests/dumpkeys-mktable.in -index 0f17c40..7e5161d 100755 ---- a/tests/dumpkeys-mktable.in -+++ b/tests/dumpkeys-mktable.in -@@ -7,8 +7,8 @@ cd "$cwd" - rc=0 - temp="$(mktemp "@BUILDDIR@/temp.XXXXXXXXX")" - --./libkeymap-mktable "@DATADIR@/"../data/keymaps/i386/qwerty/defkeymap.map > "$temp" || rc=$? --cmp -s "@DATADIR@/data/dumpkeys-mktable/defkeymap.c" "$temp" || rc=$? -+./libkeymap-mktable ../data/keymaps/i386/qwerty/defkeymap.map > "$temp" || rc=$? -+cmp -s "./data/dumpkeys-mktable/defkeymap.c" "$temp" || rc=$? - - if [ "$rc" != 0 ]; then - printf 'failed\n' diff --git a/poky/meta/recipes-core/kbd/kbd_2.2.0.bb b/poky/meta/recipes-core/kbd/kbd_2.2.0.bb deleted file mode 100644 index e5700ff57..000000000 --- a/poky/meta/recipes-core/kbd/kbd_2.2.0.bb +++ /dev/null @@ -1,72 +0,0 @@ -SUMMARY = "Keytable files and keyboard utilities" -HOMEPAGE = "http://www.kbd-project.org/" -# everything minus console-fonts is GPLv2+ -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=892f569a555ba9c07a568a7c0c4fa63a" - -inherit autotools gettext ptest pkgconfig - -DEPENDS += "flex-native" - -RREPLACES_${PN} = "console-tools" -RPROVIDES_${PN} = "console-tools" -RCONFLICTS_${PN} = "console-tools" - -SRC_URI = "${KERNELORG_MIRROR}/linux/utils/${BPN}/${BP}.tar.xz \ - file://run-ptest \ - ${@bb.utils.contains('DISTRO_FEATURES', 'ptest', 'file://set-proper-path-of-resources.patch', '', d)} \ - file://0001-analyze.l-add-missing-string-format.patch \ - file://0001-Use-DATADIR-and-append-i386-to-fix-libkbdfile-test08.patch \ - file://fix_cflags.patch \ - " - -SRC_URI[md5sum] = "d1d7ae0b5fb875dc082731e09cd0c8bc" -SRC_URI[sha256sum] = "21a1bc5f6fb3b18ce9fdd717e4533368060a3182a39c7155eaf7ec0f5f83e9f7" - -PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)} \ - ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests','', d)} \ - " - -PACKAGECONFIG[pam] = "--enable-vlock, --disable-vlock, libpam," -PACKAGECONFIG[tests] = "--enable-tests, --disable-tests, libcheck" - -do_compile_ptest() { - oe_runmake -C ${B}/tests alt-is-meta dumpkeys-bkeymap dumpkeys-fulltable dumpkeys-mktable -} - -do_install_ptest() { - install -D ${B}/tests/Makefile ${D}${PTEST_PATH}/tests/Makefile - sed -i -e '/Makefile:/,/^$/d' -e '/%: %.in/,/^$/d' \ - -e 's:--sysroot=${STAGING_DIR_TARGET}::g' \ - -e 's:${DEBUG_PREFIX_MAP}::g' \ - -e 's:${HOSTTOOLS_DIR}/::g' \ - -e 's:${RECIPE_SYSROOT_NATIVE}::g' \ - -e 's:${RECIPE_SYSROOT}::g' \ - -e 's:${S}/config/missing::g' \ - -e 's:${WORKDIR}::g' \ - -e '/^lib.*_SOURCES =/d' -e '/$(EXEEXT):/,/^$/d' ${D}${PTEST_PATH}/tests/Makefile - - find ${B}/tests -executable -exec install {} ${D}${PTEST_PATH}/tests \; - cp -rf ${S}/tests/data ${D}${PTEST_PATH}/tests - cp -rf ${S}/tests/findfile ${D}${PTEST_PATH}/tests - cp -rf ${S}/data ${D}${PTEST_PATH} - - install -D -m 755 ${S}/config/test-driver ${D}${PTEST_PATH}/config/test-driver -} - -PACKAGES += "${PN}-consolefonts 
${PN}-keymaps ${PN}-unimaps ${PN}-consoletrans" - -FILES_${PN}-consolefonts = "${datadir}/consolefonts" -FILES_${PN}-consoletrans = "${datadir}/consoletrans" -FILES_${PN}-keymaps = "${datadir}/keymaps" -FILES_${PN}-unimaps = "${datadir}/unimaps" - -RDEPENDS_${PN}-ptest = "make" - -inherit update-alternatives - -ALTERNATIVE_${PN} = "chvt deallocvt fgconsole openvt showkey \ - ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'vlock','', d)}" -ALTERNATIVE_PRIORITY = "100" - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-core/kbd/kbd_2.3.0.bb b/poky/meta/recipes-core/kbd/kbd_2.3.0.bb new file mode 100644 index 000000000..529f49bef --- /dev/null +++ b/poky/meta/recipes-core/kbd/kbd_2.3.0.bb @@ -0,0 +1,38 @@ +SUMMARY = "Keytable files and keyboard utilities" +HOMEPAGE = "http://www.kbd-project.org/" +# everything minus console-fonts is GPLv2+ +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=892f569a555ba9c07a568a7c0c4fa63a" + +inherit autotools gettext pkgconfig + +DEPENDS += "flex-native" + +RREPLACES_${PN} = "console-tools" +RPROVIDES_${PN} = "console-tools" +RCONFLICTS_${PN} = "console-tools" + +SRC_URI = "${KERNELORG_MIRROR}/linux/utils/${BPN}/${BP}.tar.xz \ + " + +SRC_URI[sha256sum] = "685056143cb8effd0a1d44b5c391eb50d80dcfd014b1a4d6e2650a28d61cb82a" + +PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)} \ + " + +PACKAGECONFIG[pam] = "--enable-vlock, --disable-vlock, libpam," + +PACKAGES += "${PN}-consolefonts ${PN}-keymaps ${PN}-unimaps ${PN}-consoletrans" + +FILES_${PN}-consolefonts = "${datadir}/consolefonts" +FILES_${PN}-consoletrans = "${datadir}/consoletrans" +FILES_${PN}-keymaps = "${datadir}/keymaps" +FILES_${PN}-unimaps = "${datadir}/unimaps" + +inherit update-alternatives + +ALTERNATIVE_${PN} = "chvt deallocvt fgconsole openvt showkey \ + ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'vlock','', d)}" +ALTERNATIVE_PRIORITY = "100" + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-core/meta/buildtools-tarball.bb b/poky/meta/recipes-core/meta/buildtools-tarball.bb index d0f8dd7d7..3785941c2 100644 --- a/poky/meta/recipes-core/meta/buildtools-tarball.bb +++ b/poky/meta/recipes-core/meta/buildtools-tarball.bb @@ -66,16 +66,19 @@ create_sdk_files_append () { script=${1:-${SDK_OUTPUT}/${SDKPATH}/environment-setup-${SDK_SYS}} touch $script echo 'export PATH=${SDKPATHNATIVE}${bindir_nativesdk}:$PATH' >> $script - # In order for the self-extraction script to correctly extract and set up things, - # we need a 'OECORE_NATIVE_SYSROOT=xxx' line in environment setup script. - # However, buildtools-tarball is inherently a tool set instead of a fully functional SDK, - # so instead of exporting the variable, we use a comment here. 
- echo '#OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script - toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${SDK_SYS} - + echo 'export OECORE_NATIVE_SYSROOT="${SDKPATHNATIVE}"' >> $script echo 'export GIT_SSL_CAINFO="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script echo 'export SSL_CERT_FILE="${SDKPATHNATIVE}${sysconfdir}/ssl/certs/ca-certificates.crt"' >>$script - echo 'export OPENSSL_CONF="${SDKPATHNATIVE}${sysconfdir}/ssl/openssl.cnf"' >>$script + + toolchain_create_sdk_version ${SDK_OUTPUT}/${SDKPATH}/version-${SDK_SYS} + + cat >> $script <>$script diff --git a/poky/meta/recipes-core/meta/cve-update-db-native.bb b/poky/meta/recipes-core/meta/cve-update-db-native.bb index f27ade40d..32d6dbdff 100644 --- a/poky/meta/recipes-core/meta/cve-update-db-native.bb +++ b/poky/meta/recipes-core/meta/cve-update-db-native.bb @@ -176,15 +176,20 @@ def update_db(c, jsondata): if not elt['impact']: continue + accessVector = None cveId = elt['cve']['CVE_data_meta']['ID'] cveDesc = elt['cve']['description']['description_data'][0]['value'] date = elt['lastModifiedDate'] - accessVector = elt['impact']['baseMetricV2']['cvssV2']['accessVector'] - cvssv2 = elt['impact']['baseMetricV2']['cvssV2']['baseScore'] - try: + accessVector = elt['impact']['baseMetricV2']['cvssV2']['accessVector'] + cvssv2 = elt['impact']['baseMetricV2']['cvssV2']['baseScore'] + except KeyError: + cvssv2 = 0.0 + try: + accessVector = accessVector or elt['impact']['baseMetricV3']['cvssV3']['attackVector'] cvssv3 = elt['impact']['baseMetricV3']['cvssV3']['baseScore'] - except: + except KeyError: + accessVector = accessVector or "UNKNOWN" cvssv3 = 0.0 c.execute("insert or replace into NVD values (?, ?, ?, ?, ?, ?)", diff --git a/poky/meta/recipes-core/musl/musl_git.bb b/poky/meta/recipes-core/musl/musl_git.bb index ed2178b5a..51b19e89a 100644 --- a/poky/meta/recipes-core/musl/musl_git.bb +++ b/poky/meta/recipes-core/musl/musl_git.bb @@ -4,9 +4,9 @@ require musl.inc inherit linuxloader -SRCREV = "0a005f499cf39822166dd4db3d2d31f0639f1b1b" +SRCREV = "73cc775bee53300c7cf759f37580220b18ac13d3" -BASEVER = "1.2.0" +BASEVER = "1.2.1" PV = "${BASEVER}+git${SRCPV}" diff --git a/poky/meta/recipes-core/systemd/systemd-boot_245.6.bb b/poky/meta/recipes-core/systemd/systemd-boot_245.6.bb deleted file mode 100644 index f92c63981..000000000 --- a/poky/meta/recipes-core/systemd/systemd-boot_245.6.bb +++ /dev/null @@ -1,70 +0,0 @@ -require systemd.inc -FILESEXTRAPATHS =. 
"${FILE_DIRNAME}/systemd:" - -require conf/image-uefi.conf - -DEPENDS = "intltool-native libcap util-linux gnu-efi gperf-native" - -inherit meson pkgconfig gettext -inherit deploy - -LDFLAGS_prepend = "${@ " ".join(d.getVar('LD').split()[1:])} " - -do_write_config[vardeps] += "CC OBJCOPY" -do_write_config_append() { - cat >${WORKDIR}/meson-${PN}.cross <${WORKDIR}/meson-${PN}.cross < src/shared/userdb.c | 6 ++++++ 3 files changed, 30 insertions(+) -diff --git a/src/shared/group-record-nss.c b/src/shared/group-record-nss.c -index 77924f1c4067..c64490253ff3 100644 ---- a/src/shared/group-record-nss.c -+++ b/src/shared/group-record-nss.c +Index: systemd-stable/src/shared/group-record-nss.c +=================================================================== +--- systemd-stable.orig/src/shared/group-record-nss.c ++++ systemd-stable/src/shared/group-record-nss.c @@ -19,8 +19,10 @@ int nss_group_to_group_record( if (isempty(grp->gr_name)) return -EINVAL; @@ -45,7 +45,7 @@ index 77924f1c4067..c64490253ff3 100644 r = json_build(&g->json, JSON_BUILD_OBJECT( JSON_BUILD_PAIR("groupName", JSON_BUILD_STRING(g->group_name)), -@@ -76,6 +80,7 @@ int nss_sgrp_for_group(const struct group *grp, struct sgrp *ret_sgrp, char **re +@@ -76,6 +80,7 @@ int nss_sgrp_for_group(const struct grou assert(ret_sgrp); assert(ret_buffer); @@ -53,7 +53,7 @@ index 77924f1c4067..c64490253ff3 100644 for (;;) { _cleanup_free_ char *buf = NULL; struct sgrp sgrp, *result; -@@ -104,6 +109,9 @@ int nss_sgrp_for_group(const struct group *grp, struct sgrp *ret_sgrp, char **re +@@ -104,6 +109,9 @@ int nss_sgrp_for_group(const struct grou buflen *= 2; buf = mfree(buf); } @@ -62,67 +62,67 @@ index 77924f1c4067..c64490253ff3 100644 +#endif } - int nss_group_record_by_name(const char *name, GroupRecord **ret) { -@@ -111,7 +119,9 @@ int nss_group_record_by_name(const char *name, GroupRecord **ret) { + int nss_group_record_by_name( +@@ -115,7 +123,9 @@ int nss_group_record_by_name( struct group grp, *result; bool incomplete = false; size_t buflen = 4096; +#if ENABLE_GSHADOW - struct sgrp sgrp; + struct sgrp sgrp, *sresult = NULL; +#endif int r; assert(name); -@@ -141,6 +151,7 @@ int nss_group_record_by_name(const char *name, GroupRecord **ret) { +@@ -145,6 +155,7 @@ int nss_group_record_by_name( buf = mfree(buf); } +#if ENABLE_GSHADOW - r = nss_sgrp_for_group(result, &sgrp, &sbuf); - if (r < 0) { - log_debug_errno(r, "Failed to do shadow lookup for group %s, ignoring: %m", result->gr_name); -@@ -148,6 +159,9 @@ int nss_group_record_by_name(const char *name, GroupRecord **ret) { - } + if (with_shadow) { + r = nss_sgrp_for_group(result, &sgrp, &sbuf); + if (r < 0) { +@@ -156,6 +167,9 @@ int nss_group_record_by_name( + incomplete = true; - r = nss_group_to_group_record(result, r >= 0 ? 
&sgrp : NULL, ret); + r = nss_group_to_group_record(result, sresult, ret); +#else + r = nss_group_to_group_record(result, NULL, ret); +#endif if (r < 0) return r; -@@ -160,7 +174,9 @@ int nss_group_record_by_gid(gid_t gid, GroupRecord **ret) { +@@ -172,7 +186,9 @@ int nss_group_record_by_gid( struct group grp, *result; bool incomplete = false; size_t buflen = 4096; +#if ENABLE_GSHADOW - struct sgrp sgrp; + struct sgrp sgrp, *sresult = NULL; +#endif int r; assert(ret); -@@ -188,6 +204,7 @@ int nss_group_record_by_gid(gid_t gid, GroupRecord **ret) { +@@ -200,6 +216,7 @@ int nss_group_record_by_gid( buf = mfree(buf); } +#if ENABLE_GSHADOW - r = nss_sgrp_for_group(result, &sgrp, &sbuf); - if (r < 0) { - log_debug_errno(r, "Failed to do shadow lookup for group %s, ignoring: %m", result->gr_name); -@@ -195,6 +212,9 @@ int nss_group_record_by_gid(gid_t gid, GroupRecord **ret) { - } + if (with_shadow) { + r = nss_sgrp_for_group(result, &sgrp, &sbuf); + if (r < 0) { +@@ -211,6 +228,9 @@ int nss_group_record_by_gid( + incomplete = true; - r = nss_group_to_group_record(result, r >= 0 ? &sgrp : NULL, ret); + r = nss_group_to_group_record(result, sresult, ret); +#else + r = nss_group_to_group_record(result, NULL, ret); +#endif if (r < 0) return r; -diff --git a/src/shared/group-record-nss.h b/src/shared/group-record-nss.h -index 38b2995178ff..d7d95c44cf11 100644 ---- a/src/shared/group-record-nss.h -+++ b/src/shared/group-record-nss.h +Index: systemd-stable/src/shared/group-record-nss.h +=================================================================== +--- systemd-stable.orig/src/shared/group-record-nss.h ++++ systemd-stable/src/shared/group-record-nss.h @@ -2,7 +2,11 @@ #pragma once @@ -135,11 +135,11 @@ index 38b2995178ff..d7d95c44cf11 100644 #include "group-record.h" -diff --git a/src/shared/userdb.c b/src/shared/userdb.c -index 92f8796768d7..5d912862f85c 100644 ---- a/src/shared/userdb.c -+++ b/src/shared/userdb.c -@@ -924,13 +924,16 @@ int groupdb_iterator_get(UserDBIterator *iterator, GroupRecord **ret) { +Index: systemd-stable/src/shared/userdb.c +=================================================================== +--- systemd-stable.orig/src/shared/userdb.c ++++ systemd-stable/src/shared/userdb.c +@@ -930,13 +930,16 @@ int groupdb_iterator_get(UserDBIterator if (gr) { _cleanup_free_ char *buffer = NULL; bool incomplete = false; @@ -156,7 +156,7 @@ index 92f8796768d7..5d912862f85c 100644 r = nss_sgrp_for_group(gr, &sgrp, &buffer); if (r < 0) { log_debug_errno(r, "Failed to acquire shadow entry for group %s, ignoring: %m", gr->gr_name); -@@ -938,6 +941,9 @@ int groupdb_iterator_get(UserDBIterator *iterator, GroupRecord **ret) { +@@ -944,6 +947,9 @@ int groupdb_iterator_get(UserDBIterator } r = nss_group_to_group_record(gr, r >= 0 ? 
&sgrp : NULL, ret); @@ -166,6 +166,3 @@ index 92f8796768d7..5d912862f85c 100644 if (r < 0) return r; --- -2.17.1 - diff --git a/poky/meta/recipes-core/systemd/systemd/0001-Use-PREFIX-ROOTPREFIX-correctly.patch b/poky/meta/recipes-core/systemd/systemd/0001-Use-PREFIX-ROOTPREFIX-correctly.patch new file mode 100644 index 000000000..c61941df9 --- /dev/null +++ b/poky/meta/recipes-core/systemd/systemd/0001-Use-PREFIX-ROOTPREFIX-correctly.patch @@ -0,0 +1,81 @@ +From 2868e3b72d4ac02860e380d70c9af0d61a985790 Mon Sep 17 00:00:00 2001 +From: Alex Kiernan +Date: Sun, 16 Aug 2020 16:07:12 +0000 +Subject: [PATCH] Use PREFIX/ROOTPREFIX correctly + +Signed-off-by: Alex Kiernan +Upstream-status: Pending [https://github.com/systemd/systemd/issues/16773] +--- + meson.build | 1 + + src/core/systemd.pc.in | 8 ++++---- + src/libsystemd/sd-path/sd-path.c | 8 ++++---- + 3 files changed, 9 insertions(+), 8 deletions(-) + +diff --git a/meson.build b/meson.build +index dbbddb68e232..18618dba0623 100644 +--- a/meson.build ++++ b/meson.build +@@ -226,6 +226,7 @@ conf.set_quoted('SYSTEMD_SHUTDOWN_BINARY_PATH', join_paths(rootlib + conf.set_quoted('SYSTEMCTL_BINARY_PATH', join_paths(rootbindir, 'systemctl')) + conf.set_quoted('SYSTEMD_TTY_ASK_PASSWORD_AGENT_BINARY_PATH', join_paths(rootbindir, 'systemd-tty-ask-password-agent')) + conf.set_quoted('SYSTEMD_STDIO_BRIDGE_BINARY_PATH', join_paths(bindir, 'systemd-stdio-bridge')) ++conf.set_quoted('PREFIX', prefixdir) + conf.set_quoted('ROOTPREFIX', rootprefixdir) + conf.set_quoted('RANDOM_SEED_DIR', randomseeddir) + conf.set_quoted('RANDOM_SEED', join_paths(randomseeddir, 'random-seed')) +diff --git a/src/core/systemd.pc.in b/src/core/systemd.pc.in +index 8424837824b5..410a126317ad 100644 +--- a/src/core/systemd.pc.in ++++ b/src/core/systemd.pc.in +@@ -65,16 +65,16 @@ systemdshutdowndir=${systemd_shutdown_dir} + tmpfiles_dir=${prefix}/lib/tmpfiles.d + tmpfilesdir=${tmpfiles_dir} + +-sysusers_dir=${rootprefix}/lib/sysusers.d ++sysusers_dir=${prefix}/lib/sysusers.d + sysusersdir=${sysusers_dir} + +-sysctl_dir=${rootprefix}/lib/sysctl.d ++sysctl_dir=${prefix}/lib/sysctl.d + sysctldir=${sysctl_dir} + +-binfmt_dir=${rootprefix}/lib/binfmt.d ++binfmt_dir=${prefix}/lib/binfmt.d + binfmtdir=${binfmt_dir} + +-modules_load_dir=${rootprefix}/lib/modules-load.d ++modules_load_dir=${prefix}/lib/modules-load.d + modulesloaddir=${modules_load_dir} + + catalog_dir=${prefix}/lib/systemd/catalog +diff --git a/src/libsystemd/sd-path/sd-path.c b/src/libsystemd/sd-path/sd-path.c +index 736795d1d797..3572916dc073 100644 +--- a/src/libsystemd/sd-path/sd-path.c ++++ b/src/libsystemd/sd-path/sd-path.c +@@ -371,19 +371,19 @@ static int get_path(uint64_t type, char **buffer, const char **ret) { + return 0; + + case SD_PATH_SYSUSERS: +- *ret = ROOTPREFIX "/lib/sysusers.d"; ++ *ret = PREFIX "/lib/sysusers.d"; + return 0; + + case SD_PATH_SYSCTL: +- *ret = ROOTPREFIX "/lib/sysctl.d"; ++ *ret = PREFIX "/lib/sysctl.d"; + return 0; + + case SD_PATH_BINFMT: +- *ret = ROOTPREFIX "/lib/binfmt.d"; ++ *ret = PREFIX "/lib/binfmt.d"; + return 0; + + case SD_PATH_MODULES_LOAD: +- *ret = ROOTPREFIX "/lib/modules-load.d"; ++ *ret = PREFIX "/lib/modules-load.d"; + return 0; + + case SD_PATH_CATALOG: +-- +2.17.1 + diff --git a/poky/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch b/poky/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch index 26f70b35d..2676c144f 100644 --- 
a/poky/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch +++ b/poky/meta/recipes-core/systemd/systemd/0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch @@ -25,21 +25,21 @@ Signed-off-by: Scott Murray units/systemd-binfmt.service.in | 4 ++++ 3 files changed, 9 insertions(+), 4 deletions(-) -diff --git a/units/meson.build b/units/meson.build -index ea91f0cc9ea7..25186f88dfeb 100644 ---- a/units/meson.build -+++ b/units/meson.build -@@ -52,8 +52,7 @@ units = [ +Index: systemd-stable/units/meson.build +=================================================================== +--- systemd-stable.orig/units/meson.build ++++ systemd-stable/units/meson.build +@@ -54,8 +54,7 @@ units = [ ['poweroff.target', '', - 'runlevel0.target'], + (with_runlevels ? 'runlevel0.target' : '')], ['printer.target', ''], - ['proc-sys-fs-binfmt_misc.automount', 'ENABLE_BINFMT', - 'sysinit.target.wants/'], + ['proc-sys-fs-binfmt_misc.automount', 'ENABLE_BINFMT'], ['proc-sys-fs-binfmt_misc.mount', 'ENABLE_BINFMT'], ['reboot.target', '', - 'runlevel6.target ctrl-alt-del.target'], -@@ -161,8 +160,7 @@ in_units = [ + (with_runlevels ? 'runlevel6.target ctrl-alt-del.target' : 'ctrl-alt-del.target')], +@@ -162,8 +161,7 @@ in_units = [ ['rc-local.service', 'HAVE_SYSV_COMPAT'], ['rescue.service', ''], ['systemd-backlight@.service', 'ENABLE_BACKLIGHT'], @@ -49,10 +49,10 @@ index ea91f0cc9ea7..25186f88dfeb 100644 ['systemd-bless-boot.service', 'ENABLE_EFI HAVE_BLKID'], ['systemd-boot-check-no-failures.service', ''], ['systemd-coredump@.service', 'ENABLE_COREDUMP'], -diff --git a/units/proc-sys-fs-binfmt_misc.automount b/units/proc-sys-fs-binfmt_misc.automount -index 30a6bc991844..4231f3b70fe9 100644 ---- a/units/proc-sys-fs-binfmt_misc.automount -+++ b/units/proc-sys-fs-binfmt_misc.automount +Index: systemd-stable/units/proc-sys-fs-binfmt_misc.automount +=================================================================== +--- systemd-stable.orig/units/proc-sys-fs-binfmt_misc.automount ++++ systemd-stable/units/proc-sys-fs-binfmt_misc.automount @@ -18,3 +18,6 @@ ConditionPathIsReadWrite=/proc/sys/ [Automount] @@ -60,11 +60,11 @@ index 30a6bc991844..4231f3b70fe9 100644 + +[Install] +WantedBy=sysinit.target -diff --git a/units/systemd-binfmt.service.in b/units/systemd-binfmt.service.in -index e54e95e11d5d..372a598614d3 100644 ---- a/units/systemd-binfmt.service.in -+++ b/units/systemd-binfmt.service.in -@@ -14,6 +14,7 @@ Documentation=https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.htm +Index: systemd-stable/units/systemd-binfmt.service.in +=================================================================== +--- systemd-stable.orig/units/systemd-binfmt.service.in ++++ systemd-stable/units/systemd-binfmt.service.in +@@ -14,6 +14,7 @@ Documentation=https://www.kernel.org/doc Documentation=https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems DefaultDependencies=no Conflicts=shutdown.target diff --git a/poky/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch b/poky/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch index 4eeec7b7d..39a975319 100644 --- a/poky/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch +++ b/poky/meta/recipes-core/systemd/systemd/0001-do-not-disable-buffer-in-writing-files.patch @@ -40,11 +40,11 @@ Signed-off-by: Scott Murray src/vconsole/vconsole-setup.c | 2 +- 18 files changed, 35 insertions(+), 35 deletions(-) -diff --git 
a/src/basic/cgroup-util.c b/src/basic/cgroup-util.c -index a5141f4cbedd..04c06e7a55cb 100644 ---- a/src/basic/cgroup-util.c -+++ b/src/basic/cgroup-util.c -@@ -739,7 +739,7 @@ int cg_install_release_agent(const char *controller, const char *agent) { +Index: systemd-stable/src/basic/cgroup-util.c +=================================================================== +--- systemd-stable.orig/src/basic/cgroup-util.c ++++ systemd-stable/src/basic/cgroup-util.c +@@ -769,7 +769,7 @@ int cg_install_release_agent(const char sc = strstrip(contents); if (isempty(sc)) { @@ -53,7 +53,7 @@ index a5141f4cbedd..04c06e7a55cb 100644 if (r < 0) return r; } else if (!path_equal(sc, agent)) -@@ -757,7 +757,7 @@ int cg_install_release_agent(const char *controller, const char *agent) { +@@ -787,7 +787,7 @@ int cg_install_release_agent(const char sc = strstrip(contents); if (streq(sc, "0")) { @@ -62,7 +62,7 @@ index a5141f4cbedd..04c06e7a55cb 100644 if (r < 0) return r; -@@ -784,7 +784,7 @@ int cg_uninstall_release_agent(const char *controller) { +@@ -814,7 +814,7 @@ int cg_uninstall_release_agent(const cha if (r < 0) return r; @@ -71,7 +71,7 @@ index a5141f4cbedd..04c06e7a55cb 100644 if (r < 0) return r; -@@ -794,7 +794,7 @@ int cg_uninstall_release_agent(const char *controller) { +@@ -824,7 +824,7 @@ int cg_uninstall_release_agent(const cha if (r < 0) return r; @@ -80,7 +80,7 @@ index a5141f4cbedd..04c06e7a55cb 100644 if (r < 0) return r; -@@ -1650,7 +1650,7 @@ int cg_set_attribute(const char *controller, const char *path, const char *attri +@@ -1656,7 +1656,7 @@ int cg_set_attribute(const char *control if (r < 0) return r; @@ -89,11 +89,11 @@ index a5141f4cbedd..04c06e7a55cb 100644 } int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret) { -diff --git a/src/basic/procfs-util.c b/src/basic/procfs-util.c -index da7e836f143e..2138f20bcc03 100644 ---- a/src/basic/procfs-util.c -+++ b/src/basic/procfs-util.c -@@ -86,13 +86,13 @@ int procfs_tasks_set_limit(uint64_t limit) { +Index: systemd-stable/src/basic/procfs-util.c +=================================================================== +--- systemd-stable.orig/src/basic/procfs-util.c ++++ systemd-stable/src/basic/procfs-util.c +@@ -86,13 +86,13 @@ int procfs_tasks_set_limit(uint64_t limi * decrease it, as threads-max is the much more relevant sysctl. 
*/ if (limit > pid_max-1) { sprintf(buffer, "%" PRIu64, limit+1); /* Add one, since PID 0 is not a valid PID */ @@ -109,11 +109,11 @@ index da7e836f143e..2138f20bcc03 100644 if (r < 0) { uint64_t threads_max; -diff --git a/src/basic/smack-util.c b/src/basic/smack-util.c -index da9a2139d31a..5e91f5b8f5d9 100644 ---- a/src/basic/smack-util.c -+++ b/src/basic/smack-util.c -@@ -114,7 +114,7 @@ int mac_smack_apply_pid(pid_t pid, const char *label) { +Index: systemd-stable/src/basic/smack-util.c +=================================================================== +--- systemd-stable.orig/src/basic/smack-util.c ++++ systemd-stable/src/basic/smack-util.c +@@ -114,7 +114,7 @@ int mac_smack_apply_pid(pid_t pid, const return 0; p = procfs_file_alloca(pid, "attr/current"); @@ -122,10 +122,10 @@ index da9a2139d31a..5e91f5b8f5d9 100644 if (r < 0) return r; -diff --git a/src/basic/util.c b/src/basic/util.c -index 2b3b3918a32f..aff8d0fcd473 100644 ---- a/src/basic/util.c -+++ b/src/basic/util.c +Index: systemd-stable/src/basic/util.c +=================================================================== +--- systemd-stable.orig/src/basic/util.c ++++ systemd-stable/src/basic/util.c @@ -267,7 +267,7 @@ void disable_coredumps(void) { if (detect_container() > 0) return; @@ -135,11 +135,11 @@ index 2b3b3918a32f..aff8d0fcd473 100644 if (r < 0) log_debug_errno(r, "Failed to turn off coredumps, ignoring: %m"); } -diff --git a/src/binfmt/binfmt.c b/src/binfmt/binfmt.c -index 7ff844c78c3a..5c5721d7c2f7 100644 ---- a/src/binfmt/binfmt.c -+++ b/src/binfmt/binfmt.c -@@ -47,7 +47,7 @@ static int delete_rule(const char *rule) { +Index: systemd-stable/src/binfmt/binfmt.c +=================================================================== +--- systemd-stable.orig/src/binfmt/binfmt.c ++++ systemd-stable/src/binfmt/binfmt.c +@@ -48,7 +48,7 @@ static int delete_rule(const char *rule) if (!fn) return log_oom(); @@ -148,7 +148,7 @@ index 7ff844c78c3a..5c5721d7c2f7 100644 } static int apply_rule(const char *rule) { -@@ -55,7 +55,7 @@ static int apply_rule(const char *rule) { +@@ -56,7 +56,7 @@ static int apply_rule(const char *rule) (void) delete_rule(rule); @@ -157,7 +157,7 @@ index 7ff844c78c3a..5c5721d7c2f7 100644 if (r < 0) return log_error_errno(r, "Failed to add binary format: %m"); -@@ -212,7 +212,7 @@ static int run(int argc, char *argv[]) { +@@ -223,7 +223,7 @@ static int run(int argc, char *argv[]) { } /* Flush out all rules */ @@ -166,11 +166,11 @@ index 7ff844c78c3a..5c5721d7c2f7 100644 STRV_FOREACH(f, files) { k = apply_file(*f, true); -diff --git a/src/core/main.c b/src/core/main.c -index 3c6b66e89c8e..c39ebe56a5b3 100644 ---- a/src/core/main.c -+++ b/src/core/main.c -@@ -1312,7 +1312,7 @@ static int bump_unix_max_dgram_qlen(void) { +Index: systemd-stable/src/core/main.c +=================================================================== +--- systemd-stable.orig/src/core/main.c ++++ systemd-stable/src/core/main.c +@@ -1382,7 +1382,7 @@ static int bump_unix_max_dgram_qlen(void if (v >= DEFAULT_UNIX_MAX_DGRAM_QLEN) return 0; @@ -179,7 +179,7 @@ index 3c6b66e89c8e..c39ebe56a5b3 100644 if (r < 0) return log_full_errno(IN_SET(r, -EROFS, -EPERM, -EACCES) ? 
LOG_DEBUG : LOG_WARNING, r, "Failed to bump AF_UNIX datagram queue length, ignoring: %m"); -@@ -1536,7 +1536,7 @@ static void initialize_core_pattern(bool skip_setup) { +@@ -1668,7 +1668,7 @@ static void initialize_core_pattern(bool if (getpid_cached() != 1) return; @@ -188,11 +188,11 @@ index 3c6b66e89c8e..c39ebe56a5b3 100644 if (r < 0) log_warning_errno(r, "Failed to write '%s' to /proc/sys/kernel/core_pattern, ignoring: %m", arg_early_core_pattern); } -diff --git a/src/core/smack-setup.c b/src/core/smack-setup.c -index 4427397f2715..8aeb5c829513 100644 ---- a/src/core/smack-setup.c -+++ b/src/core/smack-setup.c -@@ -325,17 +325,17 @@ int mac_smack_setup(bool *loaded_policy) { +Index: systemd-stable/src/core/smack-setup.c +=================================================================== +--- systemd-stable.orig/src/core/smack-setup.c ++++ systemd-stable/src/core/smack-setup.c +@@ -325,17 +325,17 @@ int mac_smack_setup(bool *loaded_policy) } #ifdef SMACK_RUN_LABEL @@ -214,10 +214,10 @@ index 4427397f2715..8aeb5c829513 100644 if (r < 0) log_warning_errno(r, "Failed to set SMACK netlabel rule \"127.0.0.1 -CIPSO\": %m"); #endif -diff --git a/src/hibernate-resume/hibernate-resume.c b/src/hibernate-resume/hibernate-resume.c -index 17e7cd1a009b..87a766771663 100644 ---- a/src/hibernate-resume/hibernate-resume.c -+++ b/src/hibernate-resume/hibernate-resume.c +Index: systemd-stable/src/hibernate-resume/hibernate-resume.c +=================================================================== +--- systemd-stable.orig/src/hibernate-resume/hibernate-resume.c ++++ systemd-stable/src/hibernate-resume/hibernate-resume.c @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } @@ -227,11 +227,11 @@ index 17e7cd1a009b..87a766771663 100644 if (r < 0) { log_error_errno(r, "Failed to write '%s' to /sys/power/resume: %m", major_minor); return EXIT_FAILURE; -diff --git a/src/libsystemd/sd-device/sd-device.c b/src/libsystemd/sd-device/sd-device.c -index 1f2451f8e1b4..3f676ec2841a 100644 ---- a/src/libsystemd/sd-device/sd-device.c -+++ b/src/libsystemd/sd-device/sd-device.c -@@ -1849,7 +1849,7 @@ _public_ int sd_device_set_sysattr_value(sd_device *device, const char *sysattr, +Index: systemd-stable/src/libsystemd/sd-device/sd-device.c +=================================================================== +--- systemd-stable.orig/src/libsystemd/sd-device/sd-device.c ++++ systemd-stable/src/libsystemd/sd-device/sd-device.c +@@ -1877,7 +1877,7 @@ _public_ int sd_device_set_sysattr_value if (!value) return -ENOMEM; @@ -240,11 +240,11 @@ index 1f2451f8e1b4..3f676ec2841a 100644 if (r < 0) { if (r == -ELOOP) return -EINVAL; -diff --git a/src/login/logind-dbus.c b/src/login/logind-dbus.c -index 52a7ea3c77e9..9703de0dabee 100644 ---- a/src/login/logind-dbus.c -+++ b/src/login/logind-dbus.c -@@ -1339,7 +1339,7 @@ static int trigger_device(Manager *m, sd_device *d) { +Index: systemd-stable/src/login/logind-dbus.c +=================================================================== +--- systemd-stable.orig/src/login/logind-dbus.c ++++ systemd-stable/src/login/logind-dbus.c +@@ -1341,7 +1341,7 @@ static int trigger_device(Manager *m, sd if (!t) return -ENOMEM; @@ -253,11 +253,11 @@ index 52a7ea3c77e9..9703de0dabee 100644 } return 0; -diff --git a/src/nspawn/nspawn-cgroup.c b/src/nspawn/nspawn-cgroup.c -index f5048d9473cb..b6383ab5c97e 100644 ---- a/src/nspawn/nspawn-cgroup.c -+++ b/src/nspawn/nspawn-cgroup.c -@@ -124,7 +124,7 @@ int sync_cgroup(pid_t pid, CGroupUnified unified_requested, uid_t uid_shift) { 
+Index: systemd-stable/src/nspawn/nspawn-cgroup.c +=================================================================== +--- systemd-stable.orig/src/nspawn/nspawn-cgroup.c ++++ systemd-stable/src/nspawn/nspawn-cgroup.c +@@ -124,7 +124,7 @@ int sync_cgroup(pid_t pid, CGroupUnified fn = strjoina(tree, cgroup, "/cgroup.procs"); sprintf(pid_string, PID_FMT, pid); @@ -266,11 +266,11 @@ index f5048d9473cb..b6383ab5c97e 100644 if (r < 0) { log_error_errno(r, "Failed to move process: %m"); goto finish; -diff --git a/src/nspawn/nspawn.c b/src/nspawn/nspawn.c -index 734dee1130e0..71add9a055d2 100644 ---- a/src/nspawn/nspawn.c -+++ b/src/nspawn/nspawn.c -@@ -2440,7 +2440,7 @@ static int reset_audit_loginuid(void) { +Index: systemd-stable/src/nspawn/nspawn.c +=================================================================== +--- systemd-stable.orig/src/nspawn/nspawn.c ++++ systemd-stable/src/nspawn/nspawn.c +@@ -2493,7 +2493,7 @@ static int reset_audit_loginuid(void) { if (streq(p, "4294967295")) return 0; @@ -279,7 +279,7 @@ index 734dee1130e0..71add9a055d2 100644 if (r < 0) { log_error_errno(r, "Failed to reset audit login UID. This probably means that your kernel is too\n" -@@ -3665,13 +3665,13 @@ static int setup_uid_map(pid_t pid) { +@@ -3726,13 +3726,13 @@ static int setup_uid_map(pid_t pid) { xsprintf(uid_map, "/proc/" PID_FMT "/uid_map", pid); xsprintf(line, UID_FMT " " UID_FMT " " UID_FMT "\n", 0, arg_uid_shift, arg_uid_range); @@ -295,11 +295,11 @@ index 734dee1130e0..71add9a055d2 100644 if (r < 0) return log_error_errno(r, "Failed to write GID map: %m"); -diff --git a/src/shared/cgroup-setup.c b/src/shared/cgroup-setup.c -index e8398cbde5ba..ba682ec0c9e7 100644 ---- a/src/shared/cgroup-setup.c -+++ b/src/shared/cgroup-setup.c -@@ -267,7 +267,7 @@ int cg_attach(const char *controller, const char *path, pid_t pid) { +Index: systemd-stable/src/shared/cgroup-setup.c +=================================================================== +--- systemd-stable.orig/src/shared/cgroup-setup.c ++++ systemd-stable/src/shared/cgroup-setup.c +@@ -267,7 +267,7 @@ int cg_attach(const char *controller, co xsprintf(c, PID_FMT "\n", pid); @@ -317,11 +317,11 @@ index e8398cbde5ba..ba682ec0c9e7 100644 if (r < 0) { log_debug_errno(r, "Failed to %s controller %s for %s (%s): %m", FLAGS_SET(mask, bit) ? 
"enable" : "disable", n, p, fs); -diff --git a/src/shared/sysctl-util.c b/src/shared/sysctl-util.c -index 8543dbd2d05f..76162599817e 100644 ---- a/src/shared/sysctl-util.c -+++ b/src/shared/sysctl-util.c -@@ -93,7 +93,7 @@ int sysctl_write_ip_property(int af, const char *ifname, const char *property, c +Index: systemd-stable/src/shared/sysctl-util.c +=================================================================== +--- systemd-stable.orig/src/shared/sysctl-util.c ++++ systemd-stable/src/shared/sysctl-util.c +@@ -93,7 +93,7 @@ int sysctl_write_ip_property(int af, con log_debug("Setting '%s' to '%s'", p, value); @@ -330,11 +330,11 @@ index 8543dbd2d05f..76162599817e 100644 } int sysctl_read(const char *property, char **content) { -diff --git a/src/sleep/sleep.c b/src/sleep/sleep.c -index fbfddc0262fc..7cc2902154e9 100644 ---- a/src/sleep/sleep.c -+++ b/src/sleep/sleep.c -@@ -47,7 +47,7 @@ static int write_hibernate_location_info(const HibernateLocation *hibernate_loca +Index: systemd-stable/src/sleep/sleep.c +=================================================================== +--- systemd-stable.orig/src/sleep/sleep.c ++++ systemd-stable/src/sleep/sleep.c +@@ -48,7 +48,7 @@ static int write_hibernate_location_info assert(hibernate_location->swap); xsprintf(resume_str, "%u:%u", major(hibernate_location->devno), minor(hibernate_location->devno)); @@ -343,7 +343,7 @@ index fbfddc0262fc..7cc2902154e9 100644 if (r < 0) return log_debug_errno(r, "Failed to write partition device to /sys/power/resume for '%s': '%s': %m", hibernate_location->swap->device, resume_str); -@@ -74,7 +74,7 @@ static int write_hibernate_location_info(const HibernateLocation *hibernate_loca +@@ -75,7 +75,7 @@ static int write_hibernate_location_info } xsprintf(offset_str, "%" PRIu64, hibernate_location->offset); @@ -352,7 +352,7 @@ index fbfddc0262fc..7cc2902154e9 100644 if (r < 0) return log_debug_errno(r, "Failed to write swap file offset to /sys/power/resume_offset for '%s': '%s': %m", hibernate_location->swap->device, offset_str); -@@ -91,7 +91,7 @@ static int write_mode(char **modes) { +@@ -92,7 +92,7 @@ static int write_mode(char **modes) { STRV_FOREACH(mode, modes) { int k; @@ -361,7 +361,7 @@ index fbfddc0262fc..7cc2902154e9 100644 if (k >= 0) return 0; -@@ -110,7 +110,7 @@ static int write_state(FILE **f, char **states) { +@@ -114,7 +114,7 @@ static int write_state(FILE **f, char ** STRV_FOREACH(state, states) { int k; @@ -370,11 +370,11 @@ index fbfddc0262fc..7cc2902154e9 100644 if (k >= 0) return 0; log_debug_errno(k, "Failed to write '%s' to /sys/power/state: %m", *state); -diff --git a/src/udev/udevadm-trigger.c b/src/udev/udevadm-trigger.c -index 60c68b5029cf..fdca03d3d42c 100644 ---- a/src/udev/udevadm-trigger.c -+++ b/src/udev/udevadm-trigger.c -@@ -43,7 +43,7 @@ static int exec_list(sd_device_enumerator *e, const char *action, Set *settle_se +Index: systemd-stable/src/udev/udevadm-trigger.c +=================================================================== +--- systemd-stable.orig/src/udev/udevadm-trigger.c ++++ systemd-stable/src/udev/udevadm-trigger.c +@@ -43,7 +43,7 @@ static int exec_list(sd_device_enumerato if (!filename) return log_oom(); @@ -383,11 +383,11 @@ index 60c68b5029cf..fdca03d3d42c 100644 if (r < 0) { bool ignore = IN_SET(r, -ENOENT, -EACCES, -ENODEV, -EROFS); -diff --git a/src/udev/udevd.c b/src/udev/udevd.c -index ca65474f2763..38780681431a 100644 ---- a/src/udev/udevd.c -+++ b/src/udev/udevd.c -@@ -1089,7 +1089,7 @@ static int synthesize_change_one(sd_device *dev, const char 
*syspath) { +Index: systemd-stable/src/udev/udevd.c +=================================================================== +--- systemd-stable.orig/src/udev/udevd.c ++++ systemd-stable/src/udev/udevd.c +@@ -1153,7 +1153,7 @@ static int synthesize_change_one(sd_devi filename = strjoina(syspath, "/uevent"); log_device_debug(dev, "device is closed, synthesising 'change' on %s", syspath); @@ -396,11 +396,11 @@ index ca65474f2763..38780681431a 100644 if (r < 0) return log_device_debug_errno(dev, r, "Failed to write 'change' to %s: %m", filename); return 0; -diff --git a/src/vconsole/vconsole-setup.c b/src/vconsole/vconsole-setup.c -index 9d706085fb47..30dcfa86f4d0 100644 ---- a/src/vconsole/vconsole-setup.c -+++ b/src/vconsole/vconsole-setup.c -@@ -116,7 +116,7 @@ static int toggle_utf8_vc(const char *name, int fd, bool utf8) { +Index: systemd-stable/src/vconsole/vconsole-setup.c +=================================================================== +--- systemd-stable.orig/src/vconsole/vconsole-setup.c ++++ systemd-stable/src/vconsole/vconsole-setup.c +@@ -116,7 +116,7 @@ static int toggle_utf8_vc(const char *na static int toggle_utf8_sysfs(bool utf8) { int r; diff --git a/poky/meta/recipes-core/systemd/systemd/0002-don-t-use-glibc-specific-qsort_r.patch b/poky/meta/recipes-core/systemd/systemd/0002-don-t-use-glibc-specific-qsort_r.patch index a5e41bfab..30fe9a14d 100644 --- a/poky/meta/recipes-core/systemd/systemd/0002-don-t-use-glibc-specific-qsort_r.patch +++ b/poky/meta/recipes-core/systemd/systemd/0002-don-t-use-glibc-specific-qsort_r.patch @@ -17,13 +17,13 @@ Signed-off-by: Andrej Valek src/shared/format-table.c | 36 ++++++++++++++++++++---------- 3 files changed, 38 insertions(+), 31 deletions(-) -diff --git a/src/basic/sort-util.h b/src/basic/sort-util.h -index e029f8646eb0..27d68b341cf3 100644 ---- a/src/basic/sort-util.h -+++ b/src/basic/sort-util.h -@@ -54,17 +54,3 @@ static inline void qsort_safe(void *base, size_t nmemb, size_t size, __compar_fn +Index: systemd-stable/src/basic/sort-util.h +=================================================================== +--- systemd-stable.orig/src/basic/sort-util.h ++++ systemd-stable/src/basic/sort-util.h +@@ -54,17 +54,3 @@ static inline void _qsort_safe(void *bas int (*_func_)(const typeof(p[0])*, const typeof(p[0])*) = func; \ - qsort_safe((p), (n), sizeof((p)[0]), (__compar_fn_t) _func_); \ + _qsort_safe((p), (n), sizeof((p)[0]), (__compar_fn_t) _func_); \ }) - -static inline void qsort_r_safe(void *base, size_t nmemb, size_t size, __compar_d_fn_t compar, void *userdata) { @@ -39,11 +39,11 @@ index e029f8646eb0..27d68b341cf3 100644 - int (*_func_)(const typeof(p[0])*, const typeof(p[0])*, typeof(userdata)) = func; \ - qsort_r_safe((p), (n), sizeof((p)[0]), (__compar_d_fn_t) _func_, userdata); \ - }) -diff --git a/src/libsystemd/sd-hwdb/hwdb-util.c b/src/libsystemd/sd-hwdb/hwdb-util.c -index d790e8fd0b19..42e0fd7c9b3c 100644 ---- a/src/libsystemd/sd-hwdb/hwdb-util.c -+++ b/src/libsystemd/sd-hwdb/hwdb-util.c -@@ -128,9 +128,13 @@ static void trie_free(struct trie *trie) { +Index: systemd-stable/src/libsystemd/sd-hwdb/hwdb-util.c +=================================================================== +--- systemd-stable.orig/src/libsystemd/sd-hwdb/hwdb-util.c ++++ systemd-stable/src/libsystemd/sd-hwdb/hwdb-util.c +@@ -128,9 +128,13 @@ static void trie_free(struct trie *trie) DEFINE_TRIVIAL_CLEANUP_FUNC(struct trie*, trie_free); @@ -60,7 +60,7 @@ index d790e8fd0b19..42e0fd7c9b3c 100644 } static int trie_node_add_value(struct trie *trie, struct 
trie_node *node, -@@ -158,7 +162,10 @@ static int trie_node_add_value(struct trie *trie, struct trie_node *node, +@@ -158,7 +162,10 @@ static int trie_node_add_value(struct tr .value_off = v, }; @@ -72,7 +72,7 @@ index d790e8fd0b19..42e0fd7c9b3c 100644 if (val) { /* At this point we have 2 identical properties on the same match-string. * Since we process files in order, we just replace the previous value. */ -@@ -184,7 +191,9 @@ static int trie_node_add_value(struct trie *trie, struct trie_node *node, +@@ -184,7 +191,9 @@ static int trie_node_add_value(struct tr .line_number = line_number, }; node->values_count++; @@ -83,11 +83,11 @@ index d790e8fd0b19..42e0fd7c9b3c 100644 return 0; } -diff --git a/src/shared/format-table.c b/src/shared/format-table.c -index 425013046491..33c1c5a12d43 100644 ---- a/src/shared/format-table.c -+++ b/src/shared/format-table.c -@@ -1164,31 +1164,33 @@ static int cell_data_compare(TableData *a, size_t index_a, TableData *b, size_t +Index: systemd-stable/src/shared/format-table.c +=================================================================== +--- systemd-stable.orig/src/shared/format-table.c ++++ systemd-stable/src/shared/format-table.c +@@ -1246,31 +1246,33 @@ static int cell_data_compare(TableData * return CMP(index_a, index_b); } @@ -131,7 +131,7 @@ index 425013046491..33c1c5a12d43 100644 } /* Order identical lines by the order there were originally added in */ -@@ -1690,7 +1692,12 @@ int table_print(Table *t, FILE *f) { +@@ -1798,7 +1800,12 @@ int table_print(Table *t, FILE *f) { for (i = 0; i < n_rows; i++) sorted[i] = i * t->n_columns; @@ -145,7 +145,7 @@ index 425013046491..33c1c5a12d43 100644 } if (t->display_map) -@@ -2236,7 +2243,12 @@ int table_to_json(Table *t, JsonVariant **ret) { +@@ -2375,7 +2382,12 @@ int table_to_json(Table *t, JsonVariant for (i = 0; i < n_rows; i++) sorted[i] = i * t->n_columns; diff --git a/poky/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch b/poky/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch index 0dea93327..e65c54361 100644 --- a/poky/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch +++ b/poky/meta/recipes-core/systemd/systemd/0004-add-fallback-parse_printf_format-implementation.patch @@ -22,11 +22,11 @@ Signed-off-by: Scott Murray create mode 100644 src/basic/parse-printf-format.c create mode 100644 src/basic/parse-printf-format.h -diff --git a/meson.build b/meson.build -index fc216d22da24..a25996803d64 100644 ---- a/meson.build -+++ b/meson.build -@@ -640,6 +640,7 @@ endif +Index: systemd-stable/meson.build +=================================================================== +--- systemd-stable.orig/meson.build ++++ systemd-stable/meson.build +@@ -638,6 +638,7 @@ endif foreach header : ['crypt.h', 'linux/memfd.h', 'linux/vm_sockets.h', @@ -34,11 +34,11 @@ index fc216d22da24..a25996803d64 100644 'sys/auxv.h', 'valgrind/memcheck.h', 'valgrind/valgrind.h', -diff --git a/src/basic/meson.build b/src/basic/meson.build -index ccb22e159505..25c77ea6bc0e 100644 ---- a/src/basic/meson.build -+++ b/src/basic/meson.build -@@ -313,6 +313,11 @@ foreach item : [['af', af_list_txt, 'af', ''], +Index: systemd-stable/src/basic/meson.build +=================================================================== +--- systemd-stable.orig/src/basic/meson.build ++++ systemd-stable/src/basic/meson.build +@@ -317,6 +317,11 @@ foreach item : [['af', af_list_txt, endforeach basic_sources += 
generated_gperf_headers @@ -50,11 +50,10 @@ index ccb22e159505..25c77ea6bc0e 100644 basic_gcrypt_sources = files( 'gcrypt-util.c', 'gcrypt-util.h') -diff --git a/src/basic/parse-printf-format.c b/src/basic/parse-printf-format.c -new file mode 100644 -index 000000000000..49437e544540 +Index: systemd-stable/src/basic/parse-printf-format.c +=================================================================== --- /dev/null -+++ b/src/basic/parse-printf-format.c ++++ systemd-stable/src/basic/parse-printf-format.c @@ -0,0 +1,273 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + @@ -329,11 +328,10 @@ index 000000000000..49437e544540 + + return last; +} -diff --git a/src/basic/parse-printf-format.h b/src/basic/parse-printf-format.h -new file mode 100644 -index 000000000000..47be7522d7fa +Index: systemd-stable/src/basic/parse-printf-format.h +=================================================================== --- /dev/null -+++ b/src/basic/parse-printf-format.h ++++ systemd-stable/src/basic/parse-printf-format.h @@ -0,0 +1,57 @@ +/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/ + @@ -392,10 +390,10 @@ index 000000000000..47be7522d7fa +size_t parse_printf_format(const char *fmt, size_t n, int *types); + +#endif /* HAVE_PRINTF_H */ -diff --git a/src/basic/stdio-util.h b/src/basic/stdio-util.h -index c3b9448d4f4f..2937aa13b178 100644 ---- a/src/basic/stdio-util.h -+++ b/src/basic/stdio-util.h +Index: systemd-stable/src/basic/stdio-util.h +=================================================================== +--- systemd-stable.orig/src/basic/stdio-util.h ++++ systemd-stable/src/basic/stdio-util.h @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ #pragma once @@ -411,10 +409,10 @@ index c3b9448d4f4f..2937aa13b178 100644 #define snprintf_ok(buf, len, fmt, ...) 
\ ((size_t) snprintf(buf, len, fmt, __VA_ARGS__) < (len)) -diff --git a/src/journal/journal-send.c b/src/journal/journal-send.c -index 912ecef73cce..43ed756bda53 100644 ---- a/src/journal/journal-send.c -+++ b/src/journal/journal-send.c +Index: systemd-stable/src/journal/journal-send.c +=================================================================== +--- systemd-stable.orig/src/journal/journal-send.c ++++ systemd-stable/src/journal/journal-send.c @@ -2,7 +2,6 @@ #include @@ -423,7 +421,7 @@ index 912ecef73cce..43ed756bda53 100644 #include #include #include -@@ -20,6 +19,7 @@ +@@ -21,6 +20,7 @@ #include "stdio-util.h" #include "string-util.h" #include "tmpfile-util.h" diff --git a/poky/meta/recipes-core/systemd/systemd/0005-src-basic-missing.h-check-for-missing-strndupa.patch b/poky/meta/recipes-core/systemd/systemd/0005-src-basic-missing.h-check-for-missing-strndupa.patch index ca4f0d5d6..8e7a2fb6e 100644 --- a/poky/meta/recipes-core/systemd/systemd/0005-src-basic-missing.h-check-for-missing-strndupa.patch +++ b/poky/meta/recipes-core/systemd/systemd/0005-src-basic-missing.h-check-for-missing-strndupa.patch @@ -1,4 +1,4 @@ -From 85dcaad8f38521ec3dc580794072b601900eed84 Mon Sep 17 00:00:00 2001 +From 7e771de87cf728a8678f1f28f391bba3589e2496 Mon Sep 17 00:00:00 2001 From: Chen Qi Date: Mon, 25 Feb 2019 14:18:21 +0800 Subject: [PATCH] src/basic/missing.h: check for missing strndupa @@ -25,6 +25,7 @@ Signed-off-by: Alex Kiernan src/basic/missing_stdlib.h | 12 ++++++++++++ src/basic/mkdir.c | 1 + src/basic/parse-util.c | 1 + + src/basic/path-lookup.c | 1 + src/basic/proc-cmdline.c | 1 + src/basic/procfs-util.c | 1 + src/basic/selinux-util.c | 1 + @@ -52,6 +53,7 @@ Signed-off-by: Alex Kiernan src/nss-mymachines/nss-mymachines.c | 1 + src/portable/portable.c | 1 + src/resolve/resolvectl.c | 1 + + src/shared/bus-get-properties.c | 1 + src/shared/bus-unit-procs.c | 1 + src/shared/bus-unit-util.c | 1 + src/shared/bus-util.c | 1 + @@ -59,20 +61,19 @@ Signed-off-by: Alex Kiernan src/shared/journal-importer.c | 1 + src/shared/logs-show.c | 1 + src/shared/pager.c | 1 + - src/shared/path-lookup.c | 1 + src/shared/uid-range.c | 1 + src/socket-proxy/socket-proxyd.c | 1 + src/test/test-hexdecoct.c | 1 + src/udev/udev-builtin-path_id.c | 1 + src/udev/udev-event.c | 1 + src/udev/udev-rules.c | 1 + - 49 files changed, 60 insertions(+) + 50 files changed, 61 insertions(+) diff --git a/meson.build b/meson.build -index a25996803d64..72b305b5ab58 100644 +index 9187439bdd..bea9935a91 100644 --- a/meson.build +++ b/meson.build -@@ -529,6 +529,7 @@ foreach ident : [ +@@ -527,6 +527,7 @@ foreach ident : [ #include #include #include '''], @@ -81,7 +82,7 @@ index a25996803d64..72b305b5ab58 100644 have = cc.has_function(ident[0], prefix : ident[1], args : '-D_GNU_SOURCE') diff --git a/src/backlight/backlight.c b/src/backlight/backlight.c -index 048441429025..01d74ea0ed4e 100644 +index 3a644363e1..73946a829b 100644 --- a/src/backlight/backlight.c +++ b/src/backlight/backlight.c @@ -17,6 +17,7 @@ @@ -93,19 +94,19 @@ index 048441429025..01d74ea0ed4e 100644 static int find_pci_or_platform_parent(sd_device *device, sd_device **ret) { const char *subsystem, *sysname, *value; diff --git a/src/basic/cgroup-util.c b/src/basic/cgroup-util.c -index 54fc6ecf8b20..a5141f4cbedd 100644 +index e94fcfad02..856a7068b0 100644 --- a/src/basic/cgroup-util.c +++ b/src/basic/cgroup-util.c -@@ -37,6 +37,7 @@ - #include "strv.h" +@@ -38,6 +38,7 @@ #include "unit-name.h" #include "user-util.h" + #include "xattr-util.h" +#include 
"missing_stdlib.h" static int cg_enumerate_items(const char *controller, const char *path, FILE **_f, const char *item) { _cleanup_free_ char *fs = NULL; diff --git a/src/basic/env-util.c b/src/basic/env-util.c -index b8dc98915f81..5049b37594bc 100644 +index b8dc98915f..5049b37594 100644 --- a/src/basic/env-util.c +++ b/src/basic/env-util.c @@ -15,6 +15,7 @@ @@ -117,10 +118,10 @@ index b8dc98915f81..5049b37594bc 100644 #define VALID_CHARS_ENV_NAME \ DIGITS LETTERS \ diff --git a/src/basic/log.c b/src/basic/log.c -index 17557e1844b2..6cec4d324aab 100644 +index c6fe203808..b7ef932d28 100644 --- a/src/basic/log.c +++ b/src/basic/log.c -@@ -34,6 +34,7 @@ +@@ -35,6 +35,7 @@ #include "terminal-util.h" #include "time-util.h" #include "utf8.h" @@ -129,7 +130,7 @@ index 17557e1844b2..6cec4d324aab 100644 #define SNDBUF_SIZE (8*1024*1024) diff --git a/src/basic/missing_stdlib.h b/src/basic/missing_stdlib.h -index 188a8d44066a..1e16ec287aad 100644 +index 188a8d4406..1e16ec287a 100644 --- a/src/basic/missing_stdlib.h +++ b/src/basic/missing_stdlib.h @@ -11,3 +11,15 @@ @@ -149,7 +150,7 @@ index 188a8d44066a..1e16ec287aad 100644 + }) +#endif diff --git a/src/basic/mkdir.c b/src/basic/mkdir.c -index fa682d4c438e..37902551490a 100644 +index 6ebc2b95fd..88f4359bab 100644 --- a/src/basic/mkdir.c +++ b/src/basic/mkdir.c @@ -13,6 +13,7 @@ @@ -158,22 +159,34 @@ index fa682d4c438e..37902551490a 100644 #include "user-util.h" +#include "missing_stdlib.h" - int mkdir_safe_internal(const char *path, mode_t mode, uid_t uid, gid_t gid, MkdirFlags flags, mkdir_func_t _mkdir) { - struct stat st; + int mkdir_safe_internal( + const char *path, diff --git a/src/basic/parse-util.c b/src/basic/parse-util.c -index e0094b0f370a..00da6518124b 100644 +index 44f0438cf4..54b4133343 100644 --- a/src/basic/parse-util.c +++ b/src/basic/parse-util.c -@@ -18,6 +18,7 @@ - #include "process-util.h" +@@ -19,6 +19,7 @@ #include "stat-util.h" #include "string-util.h" + #include "strv.h" +#include "missing_stdlib.h" int parse_boolean(const char *v) { if (!v) +diff --git a/src/basic/path-lookup.c b/src/basic/path-lookup.c +index 52968dee34..2f4f7e3dcd 100644 +--- a/src/basic/path-lookup.c ++++ b/src/basic/path-lookup.c +@@ -15,6 +15,7 @@ + #include "strv.h" + #include "tmpfile-util.h" + #include "user-util.h" ++#include "missing_stdlib.h" + + int xdg_user_runtime_dir(char **ret, const char *suffix) { + const char *e; diff --git a/src/basic/proc-cmdline.c b/src/basic/proc-cmdline.c -index 1af58717c686..c1020f4611d4 100644 +index ba47ca5812..8baf728fde 100644 --- a/src/basic/proc-cmdline.c +++ b/src/basic/proc-cmdline.c @@ -15,6 +15,7 @@ @@ -185,7 +198,7 @@ index 1af58717c686..c1020f4611d4 100644 int proc_cmdline(char **ret) { const char *e; diff --git a/src/basic/procfs-util.c b/src/basic/procfs-util.c -index 7aaf95bfced2..da7e836f143e 100644 +index 7aaf95bfce..da7e836f14 100644 --- a/src/basic/procfs-util.c +++ b/src/basic/procfs-util.c @@ -11,6 +11,7 @@ @@ -197,10 +210,10 @@ index 7aaf95bfced2..da7e836f143e 100644 int procfs_tasks_get_limit(uint64_t *ret) { _cleanup_free_ char *value = NULL; diff --git a/src/basic/selinux-util.c b/src/basic/selinux-util.c -index 1095cb426cce..806ef4bd97a9 100644 +index c94ee26bd9..14e35b4653 100644 --- a/src/basic/selinux-util.c +++ b/src/basic/selinux-util.c -@@ -26,6 +26,7 @@ +@@ -27,6 +27,7 @@ #include "selinux-util.h" #include "stdio-util.h" #include "time-util.h" @@ -209,7 +222,7 @@ index 1095cb426cce..806ef4bd97a9 100644 #if HAVE_SELINUX DEFINE_TRIVIAL_CLEANUP_FUNC(context_t, context_free); diff --git 
a/src/basic/time-util.c b/src/basic/time-util.c -index 105584e2e72f..eb0bed47dac3 100644 +index 15cc1b8851..02bb3f01f9 100644 --- a/src/basic/time-util.c +++ b/src/basic/time-util.c @@ -26,6 +26,7 @@ @@ -221,7 +234,7 @@ index 105584e2e72f..eb0bed47dac3 100644 static clockid_t map_clock_id(clockid_t c) { diff --git a/src/boot/bless-boot.c b/src/boot/bless-boot.c -index b96e1f927fff..cba979baca3e 100644 +index b96e1f927f..cba979baca 100644 --- a/src/boot/bless-boot.c +++ b/src/boot/bless-boot.c @@ -18,6 +18,7 @@ @@ -233,7 +246,7 @@ index b96e1f927fff..cba979baca3e 100644 static char **arg_path = NULL; diff --git a/src/core/dbus-cgroup.c b/src/core/dbus-cgroup.c -index 27dc9e43c3e2..b1a83023600b 100644 +index b7d2e32639..fdbc1df95e 100644 --- a/src/core/dbus-cgroup.c +++ b/src/core/dbus-cgroup.c @@ -15,6 +15,7 @@ @@ -245,7 +258,7 @@ index 27dc9e43c3e2..b1a83023600b 100644 BUS_DEFINE_PROPERTY_GET(bus_property_get_tasks_max, "t", TasksMax, tasks_max_resolve); diff --git a/src/core/dbus-execute.c b/src/core/dbus-execute.c -index d8ba3e5d9241..729e13fda64c 100644 +index 50f7ada8ce..5c760ee487 100644 --- a/src/core/dbus-execute.c +++ b/src/core/dbus-execute.c @@ -41,6 +41,7 @@ @@ -257,7 +270,7 @@ index d8ba3e5d9241..729e13fda64c 100644 BUS_DEFINE_PROPERTY_GET_ENUM(bus_property_get_exec_output, exec_output, ExecOutput); static BUS_DEFINE_PROPERTY_GET_ENUM(property_get_exec_input, exec_input, ExecInput); diff --git a/src/core/dbus-util.c b/src/core/dbus-util.c -index 7862beaacb6d..3b1ea53a5f0d 100644 +index 951450e53d..50d134e9a1 100644 --- a/src/core/dbus-util.c +++ b/src/core/dbus-util.c @@ -7,6 +7,7 @@ @@ -269,10 +282,10 @@ index 7862beaacb6d..3b1ea53a5f0d 100644 int bus_property_get_triggered_unit( sd_bus *bus, diff --git a/src/core/execute.c b/src/core/execute.c -index 89dbf6fbd2c1..9762dc57443c 100644 +index 2a4840a3a9..d3f1e0e0f8 100644 --- a/src/core/execute.c +++ b/src/core/execute.c -@@ -88,6 +88,7 @@ +@@ -89,6 +89,7 @@ #include "unit.h" #include "user-util.h" #include "utmp-wtmp.h" @@ -281,7 +294,7 @@ index 89dbf6fbd2c1..9762dc57443c 100644 #define IDLE_TIMEOUT_USEC (5*USEC_PER_SEC) #define IDLE_TIMEOUT2_USEC (1*USEC_PER_SEC) diff --git a/src/core/kmod-setup.c b/src/core/kmod-setup.c -index 09ccd613e32c..f4e64fa283e9 100644 +index 09ccd613e3..f4e64fa283 100644 --- a/src/core/kmod-setup.c +++ b/src/core/kmod-setup.c @@ -11,6 +11,7 @@ @@ -293,7 +306,7 @@ index 09ccd613e32c..f4e64fa283e9 100644 #if HAVE_KMOD #include "module-util.h" diff --git a/src/core/service.c b/src/core/service.c -index 17f27a4abce3..e5dcc532d0ce 100644 +index 00e61945ba..1ecab28354 100644 --- a/src/core/service.c +++ b/src/core/service.c @@ -41,6 +41,7 @@ @@ -305,7 +318,7 @@ index 17f27a4abce3..e5dcc532d0ce 100644 static const UnitActiveState state_translation_table[_SERVICE_STATE_MAX] = { [SERVICE_DEAD] = UNIT_INACTIVE, diff --git a/src/coredump/coredump-vacuum.c b/src/coredump/coredump-vacuum.c -index 35885dfb47c4..bb9f0660a6a0 100644 +index 35885dfb47..bb9f0660a6 100644 --- a/src/coredump/coredump-vacuum.c +++ b/src/coredump/coredump-vacuum.c @@ -16,6 +16,7 @@ @@ -317,7 +330,7 @@ index 35885dfb47c4..bb9f0660a6a0 100644 #define DEFAULT_MAX_USE_LOWER (uint64_t) (1ULL*1024ULL*1024ULL) /* 1 MiB */ #define DEFAULT_MAX_USE_UPPER (uint64_t) (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */ diff --git a/src/journal-remote/journal-remote-main.c b/src/journal-remote/journal-remote-main.c -index 88e42d3a984b..0f08376e5399 100644 +index 77dfdefd64..e21ecbeff8 100644 --- a/src/journal-remote/journal-remote-main.c +++ 
b/src/journal-remote/journal-remote-main.c @@ -22,6 +22,7 @@ @@ -329,7 +342,7 @@ index 88e42d3a984b..0f08376e5399 100644 #define PRIV_KEY_FILE CERTIFICATE_ROOT "/private/journal-remote.pem" #define CERT_FILE CERTIFICATE_ROOT "/certs/journal-remote.pem" diff --git a/src/journal/journalctl.c b/src/journal/journalctl.c -index e5feec83bce6..c3aec1e219d7 100644 +index 8d4897b942..15476b3c83 100644 --- a/src/journal/journalctl.c +++ b/src/journal/journalctl.c @@ -69,6 +69,7 @@ @@ -341,7 +354,7 @@ index e5feec83bce6..c3aec1e219d7 100644 #define DEFAULT_FSS_INTERVAL_USEC (15*USEC_PER_MINUTE) #define PROCESS_INOTIFY_INTERVAL 1024 /* Every 1,024 messages processed */ diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c -index 3fa98dfda237..e655d77e714a 100644 +index 6fb0abb419..2d94d9938e 100644 --- a/src/journal/sd-journal.c +++ b/src/journal/sd-journal.c @@ -40,6 +40,7 @@ @@ -353,7 +366,7 @@ index 3fa98dfda237..e655d77e714a 100644 #define JOURNAL_FILES_MAX 7168 diff --git a/src/libsystemd/sd-bus/bus-message.c b/src/libsystemd/sd-bus/bus-message.c -index 73127dfe0253..cc8635dea591 100644 +index 55e35cd902..0ed98f9224 100644 --- a/src/libsystemd/sd-bus/bus-message.c +++ b/src/libsystemd/sd-bus/bus-message.c @@ -21,6 +21,7 @@ @@ -365,7 +378,7 @@ index 73127dfe0253..cc8635dea591 100644 static int message_append_basic(sd_bus_message *m, char type, const void *p, const void **stored); diff --git a/src/libsystemd/sd-bus/bus-objects.c b/src/libsystemd/sd-bus/bus-objects.c -index 6d140348ec4c..9126b8801bc5 100644 +index 6abac8822c..c74c9cd7fa 100644 --- a/src/libsystemd/sd-bus/bus-objects.c +++ b/src/libsystemd/sd-bus/bus-objects.c @@ -13,6 +13,7 @@ @@ -377,7 +390,7 @@ index 6d140348ec4c..9126b8801bc5 100644 static int node_vtable_get_userdata( sd_bus *bus, diff --git a/src/libsystemd/sd-bus/bus-socket.c b/src/libsystemd/sd-bus/bus-socket.c -index 18d30d010a20..be2ab703f8ed 100644 +index fc7e8e844a..7af4dd2712 100644 --- a/src/libsystemd/sd-bus/bus-socket.c +++ b/src/libsystemd/sd-bus/bus-socket.c @@ -28,6 +28,7 @@ @@ -389,7 +402,7 @@ index 18d30d010a20..be2ab703f8ed 100644 #define SNDBUF_SIZE (8*1024*1024) diff --git a/src/libsystemd/sd-bus/sd-bus.c b/src/libsystemd/sd-bus/sd-bus.c -index 7ad03680f48d..b9d2181e4910 100644 +index 9de5e454a6..fe86c93c63 100644 --- a/src/libsystemd/sd-bus/sd-bus.c +++ b/src/libsystemd/sd-bus/sd-bus.c @@ -41,6 +41,7 @@ @@ -401,7 +414,7 @@ index 7ad03680f48d..b9d2181e4910 100644 #define log_debug_bus_message(m) \ do { \ diff --git a/src/libsystemd/sd-bus/test-bus-benchmark.c b/src/libsystemd/sd-bus/test-bus-benchmark.c -index 8de0a859ee94..58044b6ba908 100644 +index 8de0a859ee..58044b6ba9 100644 --- a/src/libsystemd/sd-bus/test-bus-benchmark.c +++ b/src/libsystemd/sd-bus/test-bus-benchmark.c @@ -14,6 +14,7 @@ @@ -413,7 +426,7 @@ index 8de0a859ee94..58044b6ba908 100644 #define MAX_SIZE (2*1024*1024) diff --git a/src/locale/keymap-util.c b/src/locale/keymap-util.c -index 30669a9359e5..6544b3722099 100644 +index 233d081300..40a32b9700 100644 --- a/src/locale/keymap-util.c +++ b/src/locale/keymap-util.c @@ -21,6 +21,7 @@ @@ -425,7 +438,7 @@ index 30669a9359e5..6544b3722099 100644 static bool startswith_comma(const char *s, const char *prefix) { s = startswith(s, prefix); diff --git a/src/login/pam_systemd.c b/src/login/pam_systemd.c -index 84bea21ab7be..49720c7f742e 100644 +index 16f4289585..6c5e438b36 100644 --- a/src/login/pam_systemd.c +++ b/src/login/pam_systemd.c @@ -31,6 +31,7 @@ @@ -437,7 +450,7 @@ index 84bea21ab7be..49720c7f742e 100644 #include "parse-util.h" 
#include "path-util.h" diff --git a/src/network/generator/network-generator.c b/src/network/generator/network-generator.c -index bed1e42697c4..e4847c2beea2 100644 +index bed1e42697..e4847c2bee 100644 --- a/src/network/generator/network-generator.c +++ b/src/network/generator/network-generator.c @@ -13,6 +13,7 @@ @@ -449,7 +462,7 @@ index bed1e42697c4..e4847c2beea2 100644 /* # .network diff --git a/src/nspawn/nspawn-settings.c b/src/nspawn/nspawn-settings.c -index 5fb5b49bbcc3..785ccc2da307 100644 +index d341fa25aa..91646bc2c2 100644 --- a/src/nspawn/nspawn-settings.c +++ b/src/nspawn/nspawn-settings.c @@ -16,6 +16,7 @@ @@ -461,19 +474,19 @@ index 5fb5b49bbcc3..785ccc2da307 100644 Settings *settings_new(void) { Settings *s; diff --git a/src/nss-mymachines/nss-mymachines.c b/src/nss-mymachines/nss-mymachines.c -index 364356da5622..47d4ea44e40f 100644 +index 5db0dcef76..681f8677e4 100644 --- a/src/nss-mymachines/nss-mymachines.c +++ b/src/nss-mymachines/nss-mymachines.c @@ -19,6 +19,7 @@ + #include "nss-util.h" #include "signal-util.h" #include "string-util.h" - #include "user-util.h" +#include "missing_stdlib.h" NSS_GETHOSTBYNAME_PROTOTYPES(mymachines); NSS_GETPW_PROTOTYPES(mymachines); diff --git a/src/portable/portable.c b/src/portable/portable.c -index e18826ab2685..d9f4b81d8937 100644 +index 3a1367ec2b..f29336cb1e 100644 --- a/src/portable/portable.c +++ b/src/portable/portable.c @@ -31,6 +31,7 @@ @@ -485,10 +498,10 @@ index e18826ab2685..d9f4b81d8937 100644 static const char profile_dirs[] = CONF_PATHS_NULSTR("systemd/portable/profile"); diff --git a/src/resolve/resolvectl.c b/src/resolve/resolvectl.c -index f20e8c44b8bc..9f6c4e8f49a7 100644 +index 3072b984e5..c46ae374bf 100644 --- a/src/resolve/resolvectl.c +++ b/src/resolve/resolvectl.c -@@ -33,6 +33,7 @@ +@@ -36,6 +36,7 @@ #include "strv.h" #include "terminal-util.h" #include "verbs.h" @@ -496,8 +509,20 @@ index f20e8c44b8bc..9f6c4e8f49a7 100644 static int arg_family = AF_UNSPEC; static int arg_ifindex = 0; +diff --git a/src/shared/bus-get-properties.c b/src/shared/bus-get-properties.c +index 8ad4694046..da5082c02a 100644 +--- a/src/shared/bus-get-properties.c ++++ b/src/shared/bus-get-properties.c +@@ -3,6 +3,7 @@ + #include "bus-get-properties.h" + #include "rlimit-util.h" + #include "string-util.h" ++#include "missing_stdlib.h" + + int bus_property_get_bool( + sd_bus *bus, diff --git a/src/shared/bus-unit-procs.c b/src/shared/bus-unit-procs.c -index b21fe393265f..af2640005c1d 100644 +index b21fe39326..af2640005c 100644 --- a/src/shared/bus-unit-procs.c +++ b/src/shared/bus-unit-procs.c @@ -10,6 +10,7 @@ @@ -509,10 +534,10 @@ index b21fe393265f..af2640005c1d 100644 struct CGroupInfo { char *cgroup_path; diff --git a/src/shared/bus-unit-util.c b/src/shared/bus-unit-util.c -index 28d85944a8a7..4743a84a417e 100644 +index f2652ed9a5..eb019fc89f 100644 --- a/src/shared/bus-unit-util.c +++ b/src/shared/bus-unit-util.c -@@ -34,6 +34,7 @@ +@@ -39,6 +39,7 @@ #include "unit-def.h" #include "user-util.h" #include "utf8.h" @@ -521,19 +546,19 @@ index 28d85944a8a7..4743a84a417e 100644 int bus_parse_unit_info(sd_bus_message *message, UnitInfo *u) { assert(message); diff --git a/src/shared/bus-util.c b/src/shared/bus-util.c -index 8e6a6e2ce2de..0cbf4b1997df 100644 +index 77c1c62182..5cd31f3c15 100644 --- a/src/shared/bus-util.c +++ b/src/shared/bus-util.c -@@ -30,6 +30,7 @@ +@@ -22,6 +22,7 @@ + #include "socket-util.h" #include "stdio-util.h" - #include "strv.h" - #include "user-util.h" + /* #include "string-util.h" */ +#include 
"missing_stdlib.h" static int name_owner_change_callback(sd_bus_message *m, void *userdata, sd_bus_error *ret_error) { sd_event *e = userdata; diff --git a/src/shared/dns-domain.c b/src/shared/dns-domain.c -index b812665315f6..8e68f7f8fc6c 100644 +index b812665315..8e68f7f8fc 100644 --- a/src/shared/dns-domain.c +++ b/src/shared/dns-domain.c @@ -23,6 +23,7 @@ @@ -545,7 +570,7 @@ index b812665315f6..8e68f7f8fc6c 100644 int dns_label_unescape(const char **name, char *dest, size_t sz, DNSLabelFlags flags) { const char *n; diff --git a/src/shared/journal-importer.c b/src/shared/journal-importer.c -index 7c4fc7021dec..3fbaf5a63969 100644 +index 7c4fc7021d..3fbaf5a639 100644 --- a/src/shared/journal-importer.c +++ b/src/shared/journal-importer.c @@ -14,6 +14,7 @@ @@ -557,19 +582,19 @@ index 7c4fc7021dec..3fbaf5a63969 100644 enum { IMPORTER_STATE_LINE = 0, /* waiting to read, or reading line */ diff --git a/src/shared/logs-show.c b/src/shared/logs-show.c -index 2bfd0b60c26b..6a1bb3a0760f 100644 +index 899e894ab7..628854ac9c 100644 --- a/src/shared/logs-show.c +++ b/src/shared/logs-show.c -@@ -39,6 +39,7 @@ - #include "time-util.h" +@@ -41,6 +41,7 @@ #include "utf8.h" #include "util.h" + #include "web-util.h" +#include "missing_stdlib.h" /* up to three lines (each up to 100 characters) or 300 characters, whichever is less */ #define PRINT_LINE_THRESHOLD 3 diff --git a/src/shared/pager.c b/src/shared/pager.c -index 1fe9db179176..67954b5cab93 100644 +index e03be6d23b..50e3d1f75c 100644 --- a/src/shared/pager.c +++ b/src/shared/pager.c @@ -23,6 +23,7 @@ @@ -580,20 +605,8 @@ index 1fe9db179176..67954b5cab93 100644 static pid_t pager_pid = 0; -diff --git a/src/shared/path-lookup.c b/src/shared/path-lookup.c -index 5b1620974536..0e7cd1c2af12 100644 ---- a/src/shared/path-lookup.c -+++ b/src/shared/path-lookup.c -@@ -19,6 +19,7 @@ - #include "tmpfile-util.h" - #include "user-util.h" - #include "util.h" -+#include "missing_stdlib.h" - - int xdg_user_runtime_dir(char **ret, const char *suffix) { - const char *e; diff --git a/src/shared/uid-range.c b/src/shared/uid-range.c -index 7cb7d8a477e9..8e7d7f9e7ca6 100644 +index 7cb7d8a477..8e7d7f9e7c 100644 --- a/src/shared/uid-range.c +++ b/src/shared/uid-range.c @@ -9,6 +9,7 @@ @@ -605,7 +618,7 @@ index 7cb7d8a477e9..8e7d7f9e7ca6 100644 static bool uid_range_intersect(UidRange *range, uid_t start, uid_t nr) { assert(range); diff --git a/src/socket-proxy/socket-proxyd.c b/src/socket-proxy/socket-proxyd.c -index 2ee6fc2f0a6a..4a9934f9c14d 100644 +index b461aead60..9941695ed9 100644 --- a/src/socket-proxy/socket-proxyd.c +++ b/src/socket-proxy/socket-proxyd.c @@ -26,6 +26,7 @@ @@ -617,7 +630,7 @@ index 2ee6fc2f0a6a..4a9934f9c14d 100644 #define BUFFER_SIZE (256 * 1024) diff --git a/src/test/test-hexdecoct.c b/src/test/test-hexdecoct.c -index 52217429b154..70708dedf318 100644 +index 52217429b1..70708dedf3 100644 --- a/src/test/test-hexdecoct.c +++ b/src/test/test-hexdecoct.c @@ -6,6 +6,7 @@ @@ -629,7 +642,7 @@ index 52217429b154..70708dedf318 100644 static void test_hexchar(void) { assert_se(hexchar(0xa) == 'a'); diff --git a/src/udev/udev-builtin-path_id.c b/src/udev/udev-builtin-path_id.c -index ca38f5608791..9d8cf4d2807b 100644 +index 6c020ac0ed..10723ec46c 100644 --- a/src/udev/udev-builtin-path_id.c +++ b/src/udev/udev-builtin-path_id.c @@ -22,6 +22,7 @@ @@ -641,7 +654,7 @@ index ca38f5608791..9d8cf4d2807b 100644 _printf_(2,3) static void path_prepend(char **path, const char *fmt, ...) 
{ diff --git a/src/udev/udev-event.c b/src/udev/udev-event.c -index eb51139e519c..977cc16e9d7c 100644 +index e1c2baf7f2..62d4086802 100644 --- a/src/udev/udev-event.c +++ b/src/udev/udev-event.c @@ -34,6 +34,7 @@ @@ -653,7 +666,7 @@ index eb51139e519c..977cc16e9d7c 100644 typedef struct Spawn { sd_device *device; diff --git a/src/udev/udev-rules.c b/src/udev/udev-rules.c -index b9b350d1ef7a..2c114cc77572 100644 +index c36f032f66..36970813d8 100644 --- a/src/udev/udev-rules.c +++ b/src/udev/udev-rules.c @@ -30,6 +30,7 @@ diff --git a/poky/meta/recipes-core/systemd/systemd/0006-Include-netinet-if_ether.h.patch b/poky/meta/recipes-core/systemd/systemd/0006-Include-netinet-if_ether.h.patch index 9142d7b45..abc438e4b 100644 --- a/poky/meta/recipes-core/systemd/systemd/0006-Include-netinet-if_ether.h.patch +++ b/poky/meta/recipes-core/systemd/systemd/0006-Include-netinet-if_ether.h.patch @@ -52,10 +52,10 @@ Signed-off-by: Scott Murray src/udev/udev-builtin-net_setup_link.c | 1 + 19 files changed, 18 insertions(+), 4 deletions(-) -diff --git a/src/libsystemd-network/sd-dhcp6-client.c b/src/libsystemd-network/sd-dhcp6-client.c -index eac2e725cce7..1beae7ba91cc 100644 ---- a/src/libsystemd-network/sd-dhcp6-client.c -+++ b/src/libsystemd-network/sd-dhcp6-client.c +Index: systemd-stable/src/libsystemd-network/sd-dhcp6-client.c +=================================================================== +--- systemd-stable.orig/src/libsystemd-network/sd-dhcp6-client.c ++++ systemd-stable/src/libsystemd-network/sd-dhcp6-client.c @@ -5,7 +5,6 @@ #include @@ -64,10 +64,10 @@ index eac2e725cce7..1beae7ba91cc 100644 #include #include "sd-dhcp6-client.h" -diff --git a/src/libsystemd/sd-netlink/netlink-types.c b/src/libsystemd/sd-netlink/netlink-types.c -index e35127a4cd2e..4f6ad9ef5886 100644 ---- a/src/libsystemd/sd-netlink/netlink-types.c -+++ b/src/libsystemd/sd-netlink/netlink-types.c +Index: systemd-stable/src/libsystemd/sd-netlink/netlink-types.c +=================================================================== +--- systemd-stable.orig/src/libsystemd/sd-netlink/netlink-types.c ++++ systemd-stable/src/libsystemd/sd-netlink/netlink-types.c @@ -3,6 +3,7 @@ #include #include @@ -76,10 +76,10 @@ index e35127a4cd2e..4f6ad9ef5886 100644 #include #include #include -diff --git a/src/machine/machine-dbus.c b/src/machine/machine-dbus.c -index a2990452af17..5af350883c28 100644 ---- a/src/machine/machine-dbus.c -+++ b/src/machine/machine-dbus.c +Index: systemd-stable/src/machine/machine-dbus.c +=================================================================== +--- systemd-stable.orig/src/machine/machine-dbus.c ++++ systemd-stable/src/machine/machine-dbus.c @@ -3,6 +3,7 @@ #include #include @@ -88,21 +88,21 @@ index a2990452af17..5af350883c28 100644 /* When we include libgen.h because we need dirname() we immediately * undefine basename() since libgen.h defines it as a macro to the POSIX -diff --git a/src/network/netdev/bond.c b/src/network/netdev/bond.c -index 8df39e35843f..8d697894f970 100644 ---- a/src/network/netdev/bond.c -+++ b/src/network/netdev/bond.c +Index: systemd-stable/src/network/netdev/bond.c +=================================================================== +--- systemd-stable.orig/src/network/netdev/bond.c ++++ systemd-stable/src/network/netdev/bond.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ +#include #include "alloc-util.h" #include "bond.h" - #include "conf-parser.h" -diff --git a/src/network/netdev/bridge.c b/src/network/netdev/bridge.c -index 6b8f9944612e..7f81ec25c407 100644 
---- a/src/network/netdev/bridge.c -+++ b/src/network/netdev/bridge.c + #include "bond-util.h" +Index: systemd-stable/src/network/netdev/bridge.c +=================================================================== +--- systemd-stable.orig/src/network/netdev/bridge.c ++++ systemd-stable/src/network/netdev/bridge.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -110,10 +110,10 @@ index 6b8f9944612e..7f81ec25c407 100644 #include #include "bridge.h" -diff --git a/src/network/netdev/macsec.c b/src/network/netdev/macsec.c -index 7d1fec3afe6d..e948a335336d 100644 ---- a/src/network/netdev/macsec.c -+++ b/src/network/netdev/macsec.c +Index: systemd-stable/src/network/netdev/macsec.c +=================================================================== +--- systemd-stable.orig/src/network/netdev/macsec.c ++++ systemd-stable/src/network/netdev/macsec.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -121,10 +121,10 @@ index 7d1fec3afe6d..e948a335336d 100644 #include #include #include -diff --git a/src/network/netdev/netdev-gperf.gperf b/src/network/netdev/netdev-gperf.gperf -index 09a5f4822e03..873299b1f98a 100644 ---- a/src/network/netdev/netdev-gperf.gperf -+++ b/src/network/netdev/netdev-gperf.gperf +Index: systemd-stable/src/network/netdev/netdev-gperf.gperf +=================================================================== +--- systemd-stable.orig/src/network/netdev/netdev-gperf.gperf ++++ systemd-stable/src/network/netdev/netdev-gperf.gperf @@ -2,6 +2,7 @@ #if __GNUC__ >= 7 _Pragma("GCC diagnostic ignored \"-Wimplicit-fallthrough\"") @@ -133,21 +133,21 @@ index 09a5f4822e03..873299b1f98a 100644 #include #include "bond.h" #include "bridge.h" -diff --git a/src/network/netdev/netdev.c b/src/network/netdev/netdev.c -index f8121a48ed92..437f411c61e8 100644 ---- a/src/network/netdev/netdev.c -+++ b/src/network/netdev/netdev.c +Index: systemd-stable/src/network/netdev/netdev.c +=================================================================== +--- systemd-stable.orig/src/network/netdev/netdev.c ++++ systemd-stable/src/network/netdev/netdev.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ +#include #include #include - -diff --git a/src/network/networkd-brvlan.c b/src/network/networkd-brvlan.c -index 41f09287f2b7..b67ce4fc8844 100644 ---- a/src/network/networkd-brvlan.c -+++ b/src/network/networkd-brvlan.c + #include +Index: systemd-stable/src/network/networkd-brvlan.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-brvlan.c ++++ systemd-stable/src/network/networkd-brvlan.c @@ -4,6 +4,7 @@ ***/ @@ -156,11 +156,11 @@ index 41f09287f2b7..b67ce4fc8844 100644 #include #include -diff --git a/src/network/networkd-dhcp-common.c b/src/network/networkd-dhcp-common.c -index 8664d8cdc0d4..e9f91f74c1a1 100644 ---- a/src/network/networkd-dhcp-common.c -+++ b/src/network/networkd-dhcp-common.c -@@ -4,6 +4,7 @@ +Index: systemd-stable/src/network/networkd-dhcp-common.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-dhcp-common.c ++++ systemd-stable/src/network/networkd-dhcp-common.c +@@ -5,6 +5,7 @@ #include "escape.h" #include "in-addr-util.h" #include "networkd-dhcp-common.h" @@ -168,10 +168,10 @@ index 8664d8cdc0d4..e9f91f74c1a1 100644 #include "networkd-network.h" #include "parse-util.h" #include "string-table.h" -diff --git a/src/network/networkd-dhcp4.c b/src/network/networkd-dhcp4.c -index 13e3e32f40e8..5394399c9150 100644 ---- 
a/src/network/networkd-dhcp4.c -+++ b/src/network/networkd-dhcp4.c +Index: systemd-stable/src/network/networkd-dhcp4.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-dhcp4.c ++++ systemd-stable/src/network/networkd-dhcp4.c @@ -1,9 +1,9 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -181,12 +181,12 @@ index 13e3e32f40e8..5394399c9150 100644 #include -#include + #include "escape.h" #include "alloc-util.h" - #include "dhcp-client-internal.h" -diff --git a/src/network/networkd-dhcp6.c b/src/network/networkd-dhcp6.c -index 7304270c60b1..099064f64715 100644 ---- a/src/network/networkd-dhcp6.c -+++ b/src/network/networkd-dhcp6.c +Index: systemd-stable/src/network/networkd-dhcp6.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-dhcp6.c ++++ systemd-stable/src/network/networkd-dhcp6.c @@ -3,9 +3,9 @@ Copyright © 2014 Intel Corporation. All rights reserved. ***/ @@ -195,13 +195,13 @@ index 7304270c60b1..099064f64715 100644 #include #include -#include - #include "sd-radv.h" #include "sd-dhcp6-client.h" -diff --git a/src/network/networkd-link.c b/src/network/networkd-link.c -index 99d4b29c31ec..e8b467d6ac09 100644 ---- a/src/network/networkd-link.c -+++ b/src/network/networkd-link.c + +Index: systemd-stable/src/network/networkd-link.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-link.c ++++ systemd-stable/src/network/networkd-link.c @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -209,13 +209,13 @@ index 99d4b29c31ec..e8b467d6ac09 100644 #include #include -#include + #include #include - #include "alloc-util.h" -diff --git a/src/network/networkd-network.c b/src/network/networkd-network.c -index 2e716b291e97..56f18cea57fe 100644 ---- a/src/network/networkd-network.c -+++ b/src/network/networkd-network.c +Index: systemd-stable/src/network/networkd-network.c +=================================================================== +--- systemd-stable.orig/src/network/networkd-network.c ++++ systemd-stable/src/network/networkd-network.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -223,19 +223,19 @@ index 2e716b291e97..56f18cea57fe 100644 #include #include #include -diff --git a/src/network/test-network-tables.c b/src/network/test-network-tables.c -index 25b939639775..530e4928835c 100644 ---- a/src/network/test-network-tables.c -+++ b/src/network/test-network-tables.c +Index: systemd-stable/src/network/test-network-tables.c +=================================================================== +--- systemd-stable.orig/src/network/test-network-tables.c ++++ systemd-stable/src/network/test-network-tables.c @@ -1,3 +1,4 @@ +#include #include "bond.h" #include "dhcp6-internal.h" #include "dhcp6-protocol.h" -diff --git a/src/shared/ethtool-util.c b/src/shared/ethtool-util.c -index 00a71d64a638..4593e89120b8 100644 ---- a/src/shared/ethtool-util.c -+++ b/src/shared/ethtool-util.c +Index: systemd-stable/src/shared/ethtool-util.c +=================================================================== +--- systemd-stable.orig/src/shared/ethtool-util.c ++++ systemd-stable/src/shared/ethtool-util.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ @@ -243,10 +243,10 @@ index 00a71d64a638..4593e89120b8 100644 #include #include #include -diff --git a/src/shared/ethtool-util.h b/src/shared/ethtool-util.h -index c1d5d7590ef9..b3e018bf76e9 100644 ---- a/src/shared/ethtool-util.h -+++ 
b/src/shared/ethtool-util.h +Index: systemd-stable/src/shared/ethtool-util.h +=================================================================== +--- systemd-stable.orig/src/shared/ethtool-util.h ++++ systemd-stable/src/shared/ethtool-util.h @@ -3,6 +3,7 @@ #include @@ -255,21 +255,21 @@ index c1d5d7590ef9..b3e018bf76e9 100644 #include #include "conf-parser.h" -diff --git a/src/udev/net/link-config.c b/src/udev/net/link-config.c -index 0332e99269c9..ff3aead4a779 100644 ---- a/src/udev/net/link-config.c -+++ b/src/udev/net/link-config.c +Index: systemd-stable/src/udev/net/link-config.c +=================================================================== +--- systemd-stable.orig/src/udev/net/link-config.c ++++ systemd-stable/src/udev/net/link-config.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ +#include #include #include - -diff --git a/src/udev/udev-builtin-net_setup_link.c b/src/udev/udev-builtin-net_setup_link.c -index ee3ca9fa3846..9aa4e828741f 100644 ---- a/src/udev/udev-builtin-net_setup_link.c -+++ b/src/udev/udev-builtin-net_setup_link.c + #include +Index: systemd-stable/src/udev/udev-builtin-net_setup_link.c +=================================================================== +--- systemd-stable.orig/src/udev/udev-builtin-net_setup_link.c ++++ systemd-stable/src/udev/udev-builtin-net_setup_link.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: LGPL-2.1+ */ diff --git a/poky/meta/recipes-core/systemd/systemd/0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch b/poky/meta/recipes-core/systemd/systemd/0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch index 580c49fec..dd6ecebeb 100644 --- a/poky/meta/recipes-core/systemd/systemd/0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch +++ b/poky/meta/recipes-core/systemd/systemd/0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch @@ -23,10 +23,10 @@ Signed-off-by: Scott Murray src/tmpfiles/tmpfiles.c | 10 ++++++++++ 3 files changed, 38 insertions(+) -diff --git a/src/basic/glob-util.c b/src/basic/glob-util.c -index e3aa6c2e152b..38070b79c83a 100644 ---- a/src/basic/glob-util.c -+++ b/src/basic/glob-util.c +Index: systemd-stable/src/basic/glob-util.c +=================================================================== +--- systemd-stable.orig/src/basic/glob-util.c ++++ systemd-stable/src/basic/glob-util.c @@ -12,6 +12,12 @@ #include "path-util.h" #include "strv.h" @@ -48,7 +48,7 @@ index e3aa6c2e152b..38070b79c83a 100644 /* We want to set GLOB_ALTDIRFUNC ourselves, don't allow it to be set. 
*/ assert(!(flags & GLOB_ALTDIRFUNC)); -@@ -32,9 +39,14 @@ int safe_glob(const char *path, int flags, glob_t *pglob) { +@@ -32,9 +39,14 @@ int safe_glob(const char *path, int flag pglob->gl_lstat = lstat; if (!pglob->gl_stat) pglob->gl_stat = stat; @@ -63,10 +63,10 @@ index e3aa6c2e152b..38070b79c83a 100644 if (k == GLOB_NOMATCH) return -ENOENT; if (k == GLOB_NOSPACE) -diff --git a/src/test/test-glob-util.c b/src/test/test-glob-util.c -index 667d15335fbf..76a84443aacf 100644 ---- a/src/test/test-glob-util.c -+++ b/src/test/test-glob-util.c +Index: systemd-stable/src/test/test-glob-util.c +=================================================================== +--- systemd-stable.orig/src/test/test-glob-util.c ++++ systemd-stable/src/test/test-glob-util.c @@ -12,6 +12,12 @@ #include "rm-rf.h" #include "tmpfile-util.h" @@ -114,11 +114,11 @@ index 667d15335fbf..76a84443aacf 100644 assert_se(r == GLOB_NOMATCH); (void) rm_rf(template, REMOVE_ROOT|REMOVE_PHYSICAL); -diff --git a/src/tmpfiles/tmpfiles.c b/src/tmpfiles/tmpfiles.c -index 193ed0bc781b..2f94fd2efd8f 100644 ---- a/src/tmpfiles/tmpfiles.c -+++ b/src/tmpfiles/tmpfiles.c -@@ -58,6 +58,12 @@ +Index: systemd-stable/src/tmpfiles/tmpfiles.c +=================================================================== +--- systemd-stable.orig/src/tmpfiles/tmpfiles.c ++++ systemd-stable/src/tmpfiles/tmpfiles.c +@@ -59,6 +59,12 @@ #include "umask-util.h" #include "user-util.h" @@ -131,7 +131,7 @@ index 193ed0bc781b..2f94fd2efd8f 100644 /* This reads all files listed in /etc/tmpfiles.d/?*.conf and creates * them in the file system. This is intended to be used to create * properly owned directories beneath /tmp, /var/tmp, /run, which are -@@ -1850,7 +1856,9 @@ finish: +@@ -1867,7 +1873,9 @@ finish: static int glob_item(Item *i, action_t action) { _cleanup_globfree_ glob_t g = { @@ -141,7 +141,7 @@ index 193ed0bc781b..2f94fd2efd8f 100644 }; int r = 0, k; char **fn; -@@ -1870,7 +1878,9 @@ static int glob_item(Item *i, action_t action) { +@@ -1887,7 +1895,9 @@ static int glob_item(Item *i, action_t a static int glob_item_recursively(Item *i, fdaction_t action) { _cleanup_globfree_ glob_t g = { diff --git a/poky/meta/recipes-core/systemd/systemd/0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch b/poky/meta/recipes-core/systemd/systemd/0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch index 5ee501f23..15055161f 100644 --- a/poky/meta/recipes-core/systemd/systemd/0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch +++ b/poky/meta/recipes-core/systemd/systemd/0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch @@ -11,10 +11,10 @@ Signed-off-by: Chen Qi src/basic/process-util.c | 7 +++++++ 1 file changed, 7 insertions(+) -diff --git a/src/basic/process-util.c b/src/basic/process-util.c -index 5de366f830e8..644f53aee005 100644 ---- a/src/basic/process-util.c -+++ b/src/basic/process-util.c +Index: systemd-stable/src/basic/process-util.c +=================================================================== +--- systemd-stable.orig/src/basic/process-util.c ++++ systemd-stable/src/basic/process-util.c @@ -18,6 +18,9 @@ #if HAVE_VALGRIND_VALGRIND_H #include @@ -25,7 +25,7 @@ index 5de366f830e8..644f53aee005 100644 #include "alloc-util.h" #include "architecture.h" -@@ -1116,11 +1119,15 @@ void reset_cached_pid(void) { +@@ -1143,11 +1146,15 @@ void reset_cached_pid(void) { cached_pid = CACHED_PID_UNSET; } diff --git a/poky/meta/recipes-core/systemd/systemd/0011-Use-uintmax_t-for-handling-rlim_t.patch 
b/poky/meta/recipes-core/systemd/systemd/0011-Use-uintmax_t-for-handling-rlim_t.patch index e5d9515e8..a6fcd2f5d 100644 --- a/poky/meta/recipes-core/systemd/systemd/0011-Use-uintmax_t-for-handling-rlim_t.patch +++ b/poky/meta/recipes-core/systemd/systemd/0011-Use-uintmax_t-for-handling-rlim_t.patch @@ -27,11 +27,11 @@ Signed-off-by: Chen Qi src/core/execute.c | 4 ++-- 3 files changed, 8 insertions(+), 14 deletions(-) -diff --git a/src/basic/format-util.h b/src/basic/format-util.h -index c47fa76ea8ff..14a78d9f5fd0 100644 ---- a/src/basic/format-util.h -+++ b/src/basic/format-util.h -@@ -32,13 +32,7 @@ assert_cc(sizeof(gid_t) == sizeof(uint32_t)); +Index: systemd-stable/src/basic/format-util.h +=================================================================== +--- systemd-stable.orig/src/basic/format-util.h ++++ systemd-stable/src/basic/format-util.h +@@ -32,13 +32,7 @@ assert_cc(sizeof(gid_t) == sizeof(uint32 # define PRI_TIMEX "li" #endif @@ -46,11 +46,11 @@ index c47fa76ea8ff..14a78d9f5fd0 100644 #if SIZEOF_DEV_T == 8 # define DEV_FMT "%" PRIu64 -diff --git a/src/basic/rlimit-util.c b/src/basic/rlimit-util.c -index 2dc13eabc30d..0633cc67f417 100644 ---- a/src/basic/rlimit-util.c -+++ b/src/basic/rlimit-util.c -@@ -306,13 +306,13 @@ int rlimit_format(const struct rlimit *rl, char **ret) { +Index: systemd-stable/src/basic/rlimit-util.c +=================================================================== +--- systemd-stable.orig/src/basic/rlimit-util.c ++++ systemd-stable/src/basic/rlimit-util.c +@@ -306,13 +306,13 @@ int rlimit_format(const struct rlimit *r if (rl->rlim_cur >= RLIM_INFINITY && rl->rlim_max >= RLIM_INFINITY) s = strdup("infinity"); else if (rl->rlim_cur >= RLIM_INFINITY) @@ -77,11 +77,11 @@ index 2dc13eabc30d..0633cc67f417 100644 return 1; } -diff --git a/src/core/execute.c b/src/core/execute.c -index 9762dc57443c..4a3421bb3ee6 100644 ---- a/src/core/execute.c -+++ b/src/core/execute.c -@@ -4567,9 +4567,9 @@ void exec_context_dump(const ExecContext *c, FILE* f, const char *prefix) { +Index: systemd-stable/src/core/execute.c +=================================================================== +--- systemd-stable.orig/src/core/execute.c ++++ systemd-stable/src/core/execute.c +@@ -4686,9 +4686,9 @@ void exec_context_dump(const ExecContext for (i = 0; i < RLIM_NLIMITS; i++) if (c->rlimit[i]) { fprintf(f, "%sLimit%s: " RLIM_FMT "\n", diff --git a/poky/meta/recipes-core/systemd/systemd/0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch b/poky/meta/recipes-core/systemd/systemd/0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch index efeef0729..0a7594c06 100644 --- a/poky/meta/recipes-core/systemd/systemd/0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch +++ b/poky/meta/recipes-core/systemd/systemd/0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch @@ -31,13 +31,13 @@ Signed-off-by: Andre McCurdy src/shared/base-filesystem.c | 6 +++--- 2 files changed, 24 insertions(+), 4 deletions(-) -diff --git a/src/basic/fs-util.h b/src/basic/fs-util.h -index 78d68be9fd85..c5dc84d41868 100644 ---- a/src/basic/fs-util.h -+++ b/src/basic/fs-util.h -@@ -40,7 +40,27 @@ int fchmod_opath(int fd, mode_t m); - +Index: systemd-stable/src/basic/fs-util.h +=================================================================== +--- systemd-stable.orig/src/basic/fs-util.h ++++ systemd-stable/src/basic/fs-util.h +@@ -42,7 +42,27 @@ int fchmod_opath(int fd, mode_t m); int fd_warn_permissions(const char *path, int fd); + int stat_warn_permissions(const char *path, const 
struct stat *st); -#define laccess(path, mode) faccessat(AT_FDCWD, (path), (mode), AT_SYMLINK_NOFOLLOW) +/* @@ -64,11 +64,11 @@ index 78d68be9fd85..c5dc84d41868 100644 int touch_file(const char *path, bool parents, usec_t stamp, uid_t uid, gid_t gid, mode_t mode); int touch(const char *path); -diff --git a/src/shared/base-filesystem.c b/src/shared/base-filesystem.c -index 657407da2d37..fbd5782d84fc 100644 ---- a/src/shared/base-filesystem.c -+++ b/src/shared/base-filesystem.c -@@ -54,7 +54,7 @@ int base_filesystem_create(const char *root, uid_t uid, gid_t gid) { +Index: systemd-stable/src/shared/base-filesystem.c +=================================================================== +--- systemd-stable.orig/src/shared/base-filesystem.c ++++ systemd-stable/src/shared/base-filesystem.c +@@ -54,7 +54,7 @@ int base_filesystem_create(const char *r return log_error_errno(errno, "Failed to open root file system: %m"); for (i = 0; i < ELEMENTSOF(table); i ++) { @@ -77,7 +77,7 @@ index 657407da2d37..fbd5782d84fc 100644 continue; if (table[i].target) { -@@ -62,7 +62,7 @@ int base_filesystem_create(const char *root, uid_t uid, gid_t gid) { +@@ -62,7 +62,7 @@ int base_filesystem_create(const char *r /* check if one of the targets exists */ NULSTR_FOREACH(s, table[i].target) { @@ -86,7 +86,7 @@ index 657407da2d37..fbd5782d84fc 100644 continue; /* check if a specific file exists at the target path */ -@@ -73,7 +73,7 @@ int base_filesystem_create(const char *root, uid_t uid, gid_t gid) { +@@ -73,7 +73,7 @@ int base_filesystem_create(const char *r if (!p) return log_oom(); diff --git a/poky/meta/recipes-core/systemd/systemd/0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch b/poky/meta/recipes-core/systemd/systemd/0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch index 19a3eef84..67d504191 100644 --- a/poky/meta/recipes-core/systemd/systemd/0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch +++ b/poky/meta/recipes-core/systemd/systemd/0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch @@ -14,10 +14,10 @@ Signed-off-by: Khem Raj src/machine/machine-dbus.c | 5 +++++ 1 file changed, 5 insertions(+) -diff --git a/src/machine/machine-dbus.c b/src/machine/machine-dbus.c -index 760ccb445cd0..0df20f3864b3 100644 ---- a/src/machine/machine-dbus.c -+++ b/src/machine/machine-dbus.c +Index: systemd-stable/src/machine/machine-dbus.c +=================================================================== +--- systemd-stable.orig/src/machine/machine-dbus.c ++++ systemd-stable/src/machine/machine-dbus.c @@ -11,6 +11,11 @@ #include #undef basename @@ -29,4 +29,4 @@ index 760ccb445cd0..0df20f3864b3 100644 + #include "alloc-util.h" #include "bus-common-errors.h" - #include "bus-internal.h" + #include "bus-get-properties.h" diff --git a/poky/meta/recipes-core/systemd/systemd/0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch b/poky/meta/recipes-core/systemd/systemd/0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch index 1934b783d..3d456ec83 100644 --- a/poky/meta/recipes-core/systemd/systemd/0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch +++ b/poky/meta/recipes-core/systemd/systemd/0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch @@ -24,11 +24,11 @@ Signed-off-by: Scott Murray src/basic/process-util.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/src/basic/process-util.c b/src/basic/process-util.c -index 644f53aee005..acaf13591396 100644 ---- a/src/basic/process-util.c -+++ 
b/src/basic/process-util.c -@@ -1500,7 +1500,7 @@ int set_oom_score_adjust(int value) { +Index: systemd-stable/src/basic/process-util.c +=================================================================== +--- systemd-stable.orig/src/basic/process-util.c ++++ systemd-stable/src/basic/process-util.c +@@ -1536,7 +1536,7 @@ int set_oom_score_adjust(int value) { sprintf(t, "%i", value); return write_string_file("/proc/self/oom_score_adj", t, diff --git a/poky/meta/recipes-core/systemd/systemd/0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch b/poky/meta/recipes-core/systemd/systemd/0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch index 84a492f29..48fd007e1 100644 --- a/poky/meta/recipes-core/systemd/systemd/0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch +++ b/poky/meta/recipes-core/systemd/systemd/0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch @@ -24,11 +24,11 @@ Signed-off-by: Chen Qi src/libsystemd/sd-bus/bus-error.c | 5 +++++ 2 files changed, 10 insertions(+) -diff --git a/src/journal/journal-send.c b/src/journal/journal-send.c -index 43ed756bda53..227ea64dbb48 100644 ---- a/src/journal/journal-send.c -+++ b/src/journal/journal-send.c -@@ -336,7 +336,12 @@ static int fill_iovec_perror_and_send(const char *message, int skip, struct iove +Index: systemd-stable/src/journal/journal-send.c +=================================================================== +--- systemd-stable.orig/src/journal/journal-send.c ++++ systemd-stable/src/journal/journal-send.c +@@ -348,7 +348,12 @@ static int fill_iovec_perror_and_send(co char* j; errno = 0; @@ -41,11 +41,11 @@ index 43ed756bda53..227ea64dbb48 100644 if (errno == 0) { char error[STRLEN("ERRNO=") + DECIMAL_STR_MAX(int) + 1]; -diff --git a/src/libsystemd/sd-bus/bus-error.c b/src/libsystemd/sd-bus/bus-error.c -index f760f0fdd21c..28a5159c4480 100644 ---- a/src/libsystemd/sd-bus/bus-error.c -+++ b/src/libsystemd/sd-bus/bus-error.c -@@ -379,7 +379,12 @@ static void bus_error_strerror(sd_bus_error *e, int error) { +Index: systemd-stable/src/libsystemd/sd-bus/bus-error.c +=================================================================== +--- systemd-stable.orig/src/libsystemd/sd-bus/bus-error.c ++++ systemd-stable/src/libsystemd/sd-bus/bus-error.c +@@ -379,7 +379,12 @@ static void bus_error_strerror(sd_bus_er return; errno = 0; diff --git a/poky/meta/recipes-core/systemd/systemd/0021-Handle-missing-LOCK_EX.patch b/poky/meta/recipes-core/systemd/systemd/0021-Handle-missing-LOCK_EX.patch new file mode 100644 index 000000000..67d9162c0 --- /dev/null +++ b/poky/meta/recipes-core/systemd/systemd/0021-Handle-missing-LOCK_EX.patch @@ -0,0 +1,23 @@ +From 190854c2114dc6e74c8859dc251e3737e3c0f353 Mon Sep 17 00:00:00 2001 +From: Alex Kiernan +Date: Fri, 7 Aug 2020 15:19:27 +0000 +Subject: [PATCH] Handle missing LOCK_EX + +Upstream-Status: Inappropriate [musl specific] +Signed-off-by: Alex Kiernan +--- + src/partition/makefs.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/partition/makefs.c b/src/partition/makefs.c +index 97f50c9033..7f55acd229 100644 +--- a/src/partition/makefs.c ++++ b/src/partition/makefs.c +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + #include "alloc-util.h" + #include "blockdev-util.h" diff --git a/poky/meta/recipes-core/systemd/systemd/0022-Fix-incompatible-pointer-type-struct-sockaddr_un.patch b/poky/meta/recipes-core/systemd/systemd/0022-Fix-incompatible-pointer-type-struct-sockaddr_un.patch new file mode 100644 index 000000000..d57ca1fd3 --- 
/dev/null +++ b/poky/meta/recipes-core/systemd/systemd/0022-Fix-incompatible-pointer-type-struct-sockaddr_un.patch @@ -0,0 +1,37 @@ +From 328c39fae2631deb5737dd56f46159dd6b4cdbed Mon Sep 17 00:00:00 2001 +From: Alex Kiernan +Date: Fri, 7 Aug 2020 15:20:17 +0000 +Subject: [PATCH] Fix incompatible pointer type struct sockaddr_un * + +| ../../../../../../workspace/sources/systemd/src/nspawn/nspawn.c: In function 'cant_be_in_netns': +| ../../../../../../workspace/sources/systemd/src/nspawn/nspawn.c:4893:25: error: passing argument 2 of 'connect' from incompatible pointer type [-Werror=incompatible-pointer-types] +| 4893 | if (connect(fd, &sa.un, SOCKADDR_UN_LEN(sa.un)) < 0) { +| | ^~~~~~ +| | | +| | struct sockaddr_un * +| In file included from ../../../../../../workspace/sources/systemd/src/systemd/sd-daemon.h:22, +| from ../../../../../../workspace/sources/systemd/src/nspawn/nspawn.c:21: +| /home/ubuntu/poky/build/tmp/work/core2-64-poky-linux-musl/systemd/1_246-r0/recipe-sysroot/usr/include/sys/socket.h:384:19: note: expected 'const struct sockaddr *' but argument is of type 'struct sockaddr_un *' +| 384 | int connect (int, const struct sockaddr *, socklen_t); +| | ^~~~~~~~~~~~~~~~~~~~~~~ +| cc1: some warnings being treated as errors + +Upstream-Status: Inappropriate [musl specific] +Signed-off-by: Alex Kiernan +--- + src/nspawn/nspawn.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/nspawn/nspawn.c b/src/nspawn/nspawn.c +index 0450c9f795..7305db04ef 100644 +--- a/src/nspawn/nspawn.c ++++ b/src/nspawn/nspawn.c +@@ -4890,7 +4890,7 @@ static int cant_be_in_netns(void) { + if (fd < 0) + return log_error_errno(errno, "Failed to allocate udev control socket: %m"); + +- if (connect(fd, &sa.un, SOCKADDR_UN_LEN(sa.un)) < 0) { ++ if (connect(fd, (struct sockaddr *)&sa.un, SOCKADDR_UN_LEN(sa.un)) < 0) { + + if (errno == ENOENT || ERRNO_IS_DISCONNECT(errno)) + return log_error_errno(SYNTHETIC_ERRNO(EOPNOTSUPP), diff --git a/poky/meta/recipes-core/systemd/systemd/0023-Fix-field-efi_loader_entry_one_shot_stat-has-incompl.patch b/poky/meta/recipes-core/systemd/systemd/0023-Fix-field-efi_loader_entry_one_shot_stat-has-incompl.patch new file mode 100644 index 000000000..78660691d --- /dev/null +++ b/poky/meta/recipes-core/systemd/systemd/0023-Fix-field-efi_loader_entry_one_shot_stat-has-incompl.patch @@ -0,0 +1,31 @@ +From f939d3e07e1175caac5cf4cbf54bd8a58c2f198a Mon Sep 17 00:00:00 2001 +From: Alex Kiernan +Date: Fri, 7 Aug 2020 15:30:52 +0000 +Subject: [PATCH] Fix field 'efi_loader_entry_one_shot_stat' has incomplete + type + +| In file included from ../../../../../../workspace/sources/systemd/src/login/logind-gperf.gperf:7: +| ../../../../../../workspace/sources/systemd/src/login/logind.h:131:21: error: field 'efi_loader_entry_one_shot_stat' has incomplete type +| 131 | struct stat efi_loader_entry_one_shot_stat; +| | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Upstream-Status: Inappropriate [musl specific] +Signed-off-by: Alex Kiernan +--- + src/login/logind.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/src/login/logind.h b/src/login/logind.h +index e64ecce8e2..2d4007d84b 100644 +--- a/src/login/logind.h ++++ b/src/login/logind.h +@@ -2,6 +2,9 @@ + #pragma once + + #include ++#include ++#include ++#include + + #include "sd-bus.h" + #include "sd-device.h" diff --git a/poky/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch b/poky/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch deleted file mode 100644 index 7b5e3e7f7..000000000 --- 
a/poky/meta/recipes-core/systemd/systemd/CVE-2020-13776.patch +++ /dev/null @@ -1,96 +0,0 @@ -From 156a5fd297b61bce31630d7a52c15614bf784843 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Zbigniew=20J=C4=99drzejewski-Szmek?= -Date: Sun, 31 May 2020 18:21:09 +0200 -Subject: [PATCH 1/1] basic/user-util: always use base 10 for user/group - numbers - -We would parse numbers with base prefixes as user identifiers. For example, -"0x2b3bfa0" would be interpreted as UID==45334432 and "01750" would be -interpreted as UID==1000. This parsing was used also in cases where either a -user/group name or number may be specified. This means that names like -0x2b3bfa0 would be ambiguous: they are a valid user name according to our -documented relaxed rules, but they would also be parsed as numeric uids. - -This behaviour is definitely not expected by users, since tools generally only -accept decimal numbers (e.g. id, getent passwd), while other tools only accept -user names and thus will interpret such strings as user names without even -attempting to convert them to numbers (su, ssh). So let's follow suit and only -accept numbers in decimal notation. Effectively this means that we will reject -such strings as a username/uid/groupname/gid where strict mode is used, and try -to look up a user/group with such a name in relaxed mode. - -Since the function changed is fairly low-level and fairly widely used, this -affects multiple tools: loginctl show-user/enable-linger/disable-linger foo', -the third argument in sysusers.d, fourth and fifth arguments in tmpfiles.d, -etc. - -Fixes #15985. ---- - src/basic/user-util.c | 2 +- - src/test/test-user-util.c | 10 ++++++++++ - 2 files changed, 11 insertions(+), 1 deletion(-) - ---- end of commit 156a5fd297b61bce31630d7a52c15614bf784843 --- - - -Add definition of safe_atou32_full() from commit b934ac3d6e7dcad114776ef30ee9098693e7ab7e - -CVE: CVE-2020-13776 - -Upstream-Status: Backport [https://github.com/systemd/systemd.git] - -Signed-off-by: Joe Slater - - - ---- git.orig/src/basic/user-util.c -+++ git/src/basic/user-util.c -@@ -49,7 +49,7 @@ int parse_uid(const char *s, uid_t *ret) - assert(s); - - assert_cc(sizeof(uid_t) == sizeof(uint32_t)); -- r = safe_atou32(s, &uid); -+ r = safe_atou32_full(s, 10, &uid); - if (r < 0) - return r; - ---- git.orig/src/test/test-user-util.c -+++ git/src/test/test-user-util.c -@@ -48,9 +48,19 @@ static void test_parse_uid(void) { - - r = parse_uid("65535", &uid); - assert_se(r == -ENXIO); -+ assert_se(uid == 100); -+ -+ r = parse_uid("0x1234", &uid); -+ assert_se(r == -EINVAL); -+ assert_se(uid == 100); -+ -+ r = parse_uid("01234", &uid); -+ assert_se(r == 0); -+ assert_se(uid == 1234); - - r = parse_uid("asdsdas", &uid); - assert_se(r == -EINVAL); -+ assert_se(uid == 1234); - } - - static void test_uid_ptr(void) { ---- git.orig/src/basic/parse-util.h -+++ git/src/basic/parse-util.h -@@ -45,9 +45,13 @@ static inline int safe_atoux16(const cha - - int safe_atoi16(const char *s, int16_t *ret); - --static inline int safe_atou32(const char *s, uint32_t *ret_u) { -+static inline int safe_atou32_full(const char *s, unsigned base, uint32_t *ret_u) { - assert_cc(sizeof(uint32_t) == sizeof(unsigned)); -- return safe_atou(s, (unsigned*) ret_u); -+ return safe_atou_full(s, base, (unsigned*) ret_u); -+} -+ -+static inline int safe_atou32(const char *s, uint32_t *ret_u) { -+ return safe_atou32_full(s, 0, (unsigned*) ret_u); - } - - static inline int safe_atoi32(const char *s, int32_t *ret_i) { diff --git a/poky/meta/recipes-core/systemd/systemd_245.6.bb 
b/poky/meta/recipes-core/systemd/systemd_245.6.bb deleted file mode 100644 index cdafb9824..000000000 --- a/poky/meta/recipes-core/systemd/systemd_245.6.bb +++ /dev/null @@ -1,715 +0,0 @@ -require systemd.inc - -PROVIDES = "udev" - -PE = "1" - -DEPENDS = "intltool-native gperf-native libcap util-linux" - -SECTION = "base/shell" - -inherit useradd pkgconfig meson perlnative update-rc.d update-alternatives qemu systemd gettext bash-completion manpages features_check - -# As this recipe builds udev, respect systemd being in DISTRO_FEATURES so -# that we don't build both udev and systemd in world builds. -REQUIRED_DISTRO_FEATURES = "systemd" - -SRC_URI += "file://touchscreen.rules \ - file://00-create-volatile.conf \ - file://init \ - file://99-default.preset \ - file://0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch \ - file://0003-implment-systemd-sysv-install-for-OE.patch \ - file://CVE-2020-13776.patch \ - " - -# patches needed by musl -SRC_URI_append_libc-musl = " ${SRC_URI_MUSL}" -SRC_URI_MUSL = "\ - file://0002-don-t-use-glibc-specific-qsort_r.patch \ - file://0003-missing_type.h-add-__compare_fn_t-and-comparison_fn_.patch \ - file://0004-add-fallback-parse_printf_format-implementation.patch \ - file://0005-src-basic-missing.h-check-for-missing-strndupa.patch \ - file://0006-Include-netinet-if_ether.h.patch \ - file://0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch \ - file://0008-add-missing-FTW_-macros-for-musl.patch \ - file://0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch \ - file://0011-Use-uintmax_t-for-handling-rlim_t.patch \ - file://0014-test-sizeof.c-Disable-tests-for-missing-typedefs-in-.patch \ - file://0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch \ - file://0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch \ - file://0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch \ - file://0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch \ - file://0019-Hide-__start_BUS_ERROR_MAP-and-__stop_BUS_ERROR_MAP.patch \ - file://0020-missing_type.h-add-__compar_d_fn_t-definition.patch \ - file://0021-avoid-redefinition-of-prctl_mm_map-structure.patch \ - file://0024-test-json.c-define-M_PIl.patch \ - file://0001-do-not-disable-buffer-in-writing-files.patch \ - file://0002-src-login-brightness.c-include-sys-wait.h.patch \ - file://0003-src-basic-copy.c-include-signal.h.patch \ - file://0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch \ - file://0001-Handle-missing-gshadow.patch \ - " - -PAM_PLUGINS = " \ - pam-plugin-unix \ - pam-plugin-loginuid \ - pam-plugin-keyinit \ -" - -PACKAGECONFIG ??= " \ - ${@bb.utils.filter('DISTRO_FEATURES', 'acl audit efi ldconfig pam selinux smack usrmerge polkit', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'wifi', 'rfkill', '', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'xkbcommon', '', d)} \ - backlight \ - binfmt \ - gshadow \ - hibernate \ - hostnamed \ - idn \ - ima \ - kmod \ - localed \ - logind \ - machined \ - myhostname \ - networkd \ - nss \ - nss-mymachines \ - nss-resolve \ - quotacheck \ - randomseed \ - resolved \ - set-time-epoch \ - sysusers \ - sysvinit \ - timedated \ - timesyncd \ - userdb \ - utmp \ - vconsole \ - xz \ -" - -PACKAGECONFIG_remove_libc-musl = " \ - gshadow \ - idn \ - localed \ - myhostname \ - nss \ - nss-mymachines \ - nss-resolve \ - sysusers \ - userdb \ - utmp \ -" - -CFLAGS_append_libc-musl = " -D__UAPI_DEF_ETHHDR=0 " - -# Use the upstream systemd serial-getty@.service and rely on -# 
systemd-getty-generator instead of using the OE-core specific -# systemd-serialgetty.bb - not enabled by default. -PACKAGECONFIG[serial-getty-generator] = "" - -PACKAGECONFIG[acl] = "-Dacl=true,-Dacl=false,acl" -PACKAGECONFIG[audit] = "-Daudit=true,-Daudit=false,audit" -PACKAGECONFIG[backlight] = "-Dbacklight=true,-Dbacklight=false" -PACKAGECONFIG[binfmt] = "-Dbinfmt=true,-Dbinfmt=false" -PACKAGECONFIG[bzip2] = "-Dbzip2=true,-Dbzip2=false,bzip2" -PACKAGECONFIG[cgroupv2] = "-Ddefault-hierarchy=unified,-Ddefault-hierarchy=hybrid" -PACKAGECONFIG[coredump] = "-Dcoredump=true,-Dcoredump=false" -PACKAGECONFIG[cryptsetup] = "-Dlibcryptsetup=true,-Dlibcryptsetup=false,cryptsetup" -PACKAGECONFIG[dbus] = "-Ddbus=true,-Ddbus=false,dbus" -PACKAGECONFIG[efi] = "-Defi=true,-Defi=false" -PACKAGECONFIG[gnu-efi] = "-Dgnu-efi=true -Defi-libdir=${STAGING_LIBDIR} -Defi-includedir=${STAGING_INCDIR}/efi,-Dgnu-efi=false,gnu-efi" -PACKAGECONFIG[elfutils] = "-Delfutils=true,-Delfutils=false,elfutils" -PACKAGECONFIG[firstboot] = "-Dfirstboot=true,-Dfirstboot=false" -# Sign the journal for anti-tampering -PACKAGECONFIG[gcrypt] = "-Dgcrypt=true,-Dgcrypt=false,libgcrypt" -PACKAGECONFIG[gnutls] = "-Dgnutls=true,-Dgnutls=false,gnutls" -PACKAGECONFIG[gshadow] = "-Dgshadow=true,-Dgshadow=false" -PACKAGECONFIG[hibernate] = "-Dhibernate=true,-Dhibernate=false" -PACKAGECONFIG[hostnamed] = "-Dhostnamed=true,-Dhostnamed=false" -PACKAGECONFIG[idn] = "-Didn=true,-Didn=false" -PACKAGECONFIG[ima] = "-Dima=true,-Dima=false" -# importd requires curl/xz/zlib/bzip2/gcrypt -PACKAGECONFIG[importd] = "-Dimportd=true,-Dimportd=false" -# Update NAT firewall rules -PACKAGECONFIG[iptc] = "-Dlibiptc=true,-Dlibiptc=false,iptables" -PACKAGECONFIG[journal-upload] = "-Dlibcurl=true,-Dlibcurl=false,curl" -PACKAGECONFIG[kmod] = "-Dkmod=true,-Dkmod=false,kmod" -PACKAGECONFIG[ldconfig] = "-Dldconfig=true,-Dldconfig=false,,ldconfig" -PACKAGECONFIG[libidn] = "-Dlibidn=true,-Dlibidn=false,libidn" -PACKAGECONFIG[libidn2] = "-Dlibidn2=true,-Dlibidn2=false,libidn2" -PACKAGECONFIG[localed] = "-Dlocaled=true,-Dlocaled=false" -PACKAGECONFIG[logind] = "-Dlogind=true,-Dlogind=false" -PACKAGECONFIG[lz4] = "-Dlz4=true,-Dlz4=false,lz4" -PACKAGECONFIG[machined] = "-Dmachined=true,-Dmachined=false" -PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native xmlto-native docbook-xml-dtd4-native docbook-xsl-stylesheets-native" -PACKAGECONFIG[microhttpd] = "-Dmicrohttpd=true,-Dmicrohttpd=false,libmicrohttpd" -PACKAGECONFIG[myhostname] = "-Dnss-myhostname=true,-Dnss-myhostname=false,,libnss-myhostname" -PACKAGECONFIG[networkd] = "-Dnetworkd=true,-Dnetworkd=false" -PACKAGECONFIG[nss] = "-Dnss-systemd=true,-Dnss-systemd=false" -PACKAGECONFIG[nss-mymachines] = "-Dnss-mymachines=true,-Dnss-mymachines=false" -PACKAGECONFIG[nss-resolve] = "-Dnss-resolve=true,-Dnss-resolve=false" -PACKAGECONFIG[openssl] = "-Dopenssl=true,-Dopenssl=false,openssl" -PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam,${PAM_PLUGINS}" -PACKAGECONFIG[pcre2] = "-Dpcre2=true,-Dpcre2=false,libpcre2" -PACKAGECONFIG[polkit] = "-Dpolkit=true,-Dpolkit=false" -PACKAGECONFIG[portabled] = "-Dportabled=true,-Dportabled=false" -PACKAGECONFIG[qrencode] = "-Dqrencode=true,-Dqrencode=false,qrencode" -PACKAGECONFIG[quotacheck] = "-Dquotacheck=true,-Dquotacheck=false" -PACKAGECONFIG[randomseed] = "-Drandomseed=true,-Drandomseed=false" -PACKAGECONFIG[resolved] = "-Dresolve=true,-Dresolve=false" -PACKAGECONFIG[rfkill] = "-Drfkill=true,-Drfkill=false" -# libseccomp is found in meta-security 
-PACKAGECONFIG[seccomp] = "-Dseccomp=true,-Dseccomp=false,libseccomp" -PACKAGECONFIG[selinux] = "-Dselinux=true,-Dselinux=false,libselinux,initscripts-sushell" -PACKAGECONFIG[smack] = "-Dsmack=true,-Dsmack=false" -PACKAGECONFIG[sysusers] = "-Dsysusers=true,-Dsysusers=false" -PACKAGECONFIG[sysvinit] = "-Dsysvinit-path=${sysconfdir}/init.d -Dsysvrcnd-path=${sysconfdir},-Dsysvinit-path= -Dsysvrcnd-path=,,systemd-compat-units update-rc.d" -# When enabled use reproducble build timestamp if set as time epoch, -# or build time if not. When disabled, time epoch is unset. -def build_epoch(d): - epoch = d.getVar('SOURCE_DATE_EPOCH') or "-1" - return '-Dtime-epoch=%d' % int(epoch) -PACKAGECONFIG[set-time-epoch] = "${@build_epoch(d)},-Dtime-epoch=0" -PACKAGECONFIG[timedated] = "-Dtimedated=true,-Dtimedated=false" -PACKAGECONFIG[timesyncd] = "-Dtimesyncd=true,-Dtimesyncd=false" -PACKAGECONFIG[usrmerge] = "-Dsplit-usr=false,-Dsplit-usr=true" -PACKAGECONFIG[sbinmerge] = "-Dsplit-bin=false,-Dsplit-bin=true" -PACKAGECONFIG[userdb] = "-Duserdb=true,-Duserdb=false" -PACKAGECONFIG[utmp] = "-Dutmp=true,-Dutmp=false" -PACKAGECONFIG[valgrind] = "-DVALGRIND=1,,valgrind" -PACKAGECONFIG[vconsole] = "-Dvconsole=true,-Dvconsole=false,,${PN}-vconsole-setup" -# Verify keymaps on locale change -PACKAGECONFIG[xkbcommon] = "-Dxkbcommon=true,-Dxkbcommon=false,libxkbcommon" -PACKAGECONFIG[xz] = "-Dxz=true,-Dxz=false,xz" -PACKAGECONFIG[zlib] = "-Dzlib=true,-Dzlib=false,zlib" - -# Helper variables to clarify locations. This mirrors the logic in systemd's -# build system. -rootprefix ?= "${root_prefix}" -rootlibdir ?= "${base_libdir}" -rootlibexecdir = "${rootprefix}/lib" - -# This links udev statically with systemd helper library. -# Otherwise udev package would depend on systemd package (which has the needed shared library), -# and always pull it into images. -EXTRA_OEMESON += "-Dlink-udev-shared=false" - -EXTRA_OEMESON += "-Dnobody-user=nobody \ - -Dnobody-group=nobody \ - -Drootlibdir=${rootlibdir} \ - -Drootprefix=${rootprefix} \ - -Ddefault-locale=C \ - " - -# Hardcode target binary paths to avoid using paths from sysroot -EXTRA_OEMESON += "-Dkexec-path=${sbindir}/kexec \ - -Dkmod-path=${base_bindir}/kmod \ - -Dmount-path=${base_bindir}/mount \ - -Dquotacheck-path=${sbindir}/quotacheck \ - -Dquotaon-path=${sbindir}/quotaon \ - -Dsulogin-path=${base_sbindir}/sulogin \ - -Dnologin-path=${base_sbindir}/nologin \ - -Dumount-path=${base_bindir}/umount" - -do_install() { - meson_do_install - install -d ${D}/${base_sbindir} - if ${@bb.utils.contains('PACKAGECONFIG', 'serial-getty-generator', 'false', 'true', d)}; then - # Provided by a separate recipe - rm ${D}${systemd_unitdir}/system/serial-getty* -f - fi - - # Provide support for initramfs - [ ! -e ${D}/init ] && ln -s ${rootlibexecdir}/systemd/systemd ${D}/init - [ ! 
-e ${D}/${base_sbindir}/udevd ] && ln -s ${rootlibexecdir}/systemd/systemd-udevd ${D}/${base_sbindir}/udevd - - install -d ${D}${sysconfdir}/udev/rules.d/ - install -d ${D}${sysconfdir}/tmpfiles.d - install -m 0644 ${WORKDIR}/*.rules ${D}${sysconfdir}/udev/rules.d/ - - install -m 0644 ${WORKDIR}/00-create-volatile.conf ${D}${sysconfdir}/tmpfiles.d/ - - if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','true','false',d)}; then - install -d ${D}${sysconfdir}/init.d - install -m 0755 ${WORKDIR}/init ${D}${sysconfdir}/init.d/systemd-udevd - sed -i s%@UDEVD@%${rootlibexecdir}/systemd/systemd-udevd% ${D}${sysconfdir}/init.d/systemd-udevd - install -Dm 0755 ${S}/src/systemctl/systemd-sysv-install.SKELETON ${D}${systemd_unitdir}/systemd-sysv-install - fi - - chown root:systemd-journal ${D}/${localstatedir}/log/journal - - # Delete journal README, as log can be symlinked inside volatile. - rm -f ${D}/${localstatedir}/log/README - - # journal-remote creates this at start - rm -rf ${D}/${localstatedir}/log/journal/remote - - install -d ${D}${systemd_unitdir}/system/graphical.target.wants - install -d ${D}${systemd_unitdir}/system/multi-user.target.wants - install -d ${D}${systemd_unitdir}/system/poweroff.target.wants - install -d ${D}${systemd_unitdir}/system/reboot.target.wants - install -d ${D}${systemd_unitdir}/system/rescue.target.wants - - # Create symlinks for systemd-update-utmp-runlevel.service - if ${@bb.utils.contains('PACKAGECONFIG', 'utmp', 'true', 'false', d)}; then - ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/graphical.target.wants/systemd-update-utmp-runlevel.service - ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/multi-user.target.wants/systemd-update-utmp-runlevel.service - ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/poweroff.target.wants/systemd-update-utmp-runlevel.service - ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/reboot.target.wants/systemd-update-utmp-runlevel.service - ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/rescue.target.wants/systemd-update-utmp-runlevel.service - fi - - # this file is needed to exist if networkd is disabled but timesyncd is still in use since timesyncd checks it - # for existence else it fails - if [ -s ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf ]; then - ${@bb.utils.contains('PACKAGECONFIG', 'networkd', ':', 'sed -i -e "\$ad /run/systemd/netif/links 0755 root root -" ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf', d)} - fi - if ! ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'true', 'false', d)}; then - echo 'L! ${sysconfdir}/resolv.conf - - - - ../run/systemd/resolve/resolv.conf' >>${D}${exec_prefix}/lib/tmpfiles.d/etc.conf - echo 'd /run/systemd/resolve 0755 root root -' >>${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf - echo 'f /run/systemd/resolve/resolv.conf 0644 root root' >>${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf - ln -s ../run/systemd/resolve/resolv.conf ${D}${sysconfdir}/resolv-conf.systemd - else - sed -i -e "s%^L! /etc/resolv.conf.*$%L! 
/etc/resolv.conf - - - - ../run/systemd/resolve/resolv.conf%g" ${D}${exec_prefix}/lib/tmpfiles.d/etc.conf - ln -s ../run/systemd/resolve/resolv.conf ${D}${sysconfdir}/resolv-conf.systemd - fi - if ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'false', 'true', d)}; then - rm ${D}${exec_prefix}/lib/tmpfiles.d/x11.conf - rm -r ${D}${sysconfdir}/X11 - fi - - # If polkit is setup fixup permissions and ownership - if ${@bb.utils.contains('PACKAGECONFIG', 'polkit', 'true', 'false', d)}; then - if [ -d ${D}${datadir}/polkit-1/rules.d ]; then - chmod 700 ${D}${datadir}/polkit-1/rules.d - chown polkitd:root ${D}${datadir}/polkit-1/rules.d - fi - fi - - # create link for existing udev rules - ln -s ${base_bindir}/udevadm ${D}${base_sbindir}/udevadm - - # duplicate udevadm for postinst script - install -d ${D}${libexecdir} - ln ${D}${base_bindir}/udevadm ${D}${libexecdir}/${MLPREFIX}udevadm - - # install default policy for presets - # https://www.freedesktop.org/wiki/Software/systemd/Preset/#howto - install -Dm 0644 ${WORKDIR}/99-default.preset ${D}${systemd_unitdir}/system-preset/99-default.preset -} - -python populate_packages_prepend (){ - systemdlibdir = d.getVar("rootlibdir") - do_split_packages(d, systemdlibdir, '^lib(.*)\.so\.*', 'lib%s', 'Systemd %s library', extra_depends='', allow_links=True) -} -PACKAGES_DYNAMIC += "^lib(udev|systemd|nss).*" - -PACKAGE_BEFORE_PN = "\ - ${PN}-gui \ - ${PN}-vconsole-setup \ - ${PN}-initramfs \ - ${PN}-analyze \ - ${PN}-kernel-install \ - ${PN}-rpm-macros \ - ${PN}-binfmt \ - ${PN}-zsh-completion \ - ${PN}-container \ - ${PN}-journal-gatewayd \ - ${PN}-journal-upload \ - ${PN}-journal-remote \ - ${PN}-extra-utils \ - udev \ - udev-hwdb \ -" - -SUMMARY_${PN}-container = "Tools for containers and VMs" -DESCRIPTION_${PN}-container = "Systemd tools to spawn and manage containers and virtual machines." - -SUMMARY_${PN}-journal-gatewayd = "HTTP server for journal events" -DESCRIPTION_${PN}-journal-gatewayd = "systemd-journal-gatewayd serves journal events over the network. Clients must connect using HTTP. The server listens on port 19531 by default." - -SUMMARY_${PN}-journal-upload = "Send journal messages over the network" -DESCRIPTION_${PN}-journal-upload = "systemd-journal-upload uploads journal entries to a specified URL." - -SUMMARY_${PN}-journal-remote = "Receive journal messages over the network" -DESCRIPTION_${PN}-journal-remote = "systemd-journal-remote is a command to receive serialized journal events and store them to journal files." 
- -SYSTEMD_PACKAGES = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', '${PN}-binfmt', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-gatewayd', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-remote', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'journal-upload', '${PN}-journal-upload', '', d)} \ -" -SYSTEMD_SERVICE_${PN}-binfmt = "systemd-binfmt.service" - -USERADD_PACKAGES = "${PN} ${PN}-extra-utils \ - ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-gateway', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-remote', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'journal-upload', '${PN}-journal-upload', '', d)} \ -" -GROUPADD_PARAM_${PN} = "-r systemd-journal" -USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'coredump', '--system -d / -M --shell /bin/nologin systemd-coredump;', '', d)}" -USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'networkd', '--system -d / -M --shell /bin/nologin systemd-network;', '', d)}" -USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'polkit', '--system --no-create-home --user-group --home-dir ${sysconfdir}/polkit-1 polkitd;', '', d)}" -USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'resolved', '--system -d / -M --shell /bin/nologin systemd-resolve;', '', d)}" -USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'timesyncd', '--system -d / -M --shell /bin/nologin systemd-timesync;', '', d)}" -USERADD_PARAM_${PN}-extra-utils = "--system -d / -M --shell /bin/nologin systemd-bus-proxy" -USERADD_PARAM_${PN}-journal-gateway = "--system -d / -M --shell /bin/nologin systemd-journal-gateway" -USERADD_PARAM_${PN}-journal-remote = "--system -d / -M --shell /bin/nologin systemd-journal-remote" -USERADD_PARAM_${PN}-journal-upload = "--system -d / -M --shell /bin/nologin systemd-journal-upload" - -FILES_${PN}-analyze = "${bindir}/systemd-analyze" - -FILES_${PN}-initramfs = "/init" -RDEPENDS_${PN}-initramfs = "${PN}" - -FILES_${PN}-gui = "${bindir}/systemadm" - -FILES_${PN}-vconsole-setup = "${rootlibexecdir}/systemd/systemd-vconsole-setup \ - ${systemd_unitdir}/system/systemd-vconsole-setup.service \ - ${systemd_unitdir}/system/sysinit.target.wants/systemd-vconsole-setup.service" - -RDEPENDS_${PN}-kernel-install += "bash" -FILES_${PN}-kernel-install = "${bindir}/kernel-install \ - ${sysconfdir}/kernel/ \ - ${exec_prefix}/lib/kernel \ - " -FILES_${PN}-rpm-macros = "${exec_prefix}/lib/rpm \ - " - -FILES_${PN}-zsh-completion = "${datadir}/zsh/site-functions" - -FILES_${PN}-binfmt = "${sysconfdir}/binfmt.d/ \ - ${exec_prefix}/lib/binfmt.d \ - ${rootlibexecdir}/systemd/systemd-binfmt \ - ${systemd_unitdir}/system/proc-sys-fs-binfmt_misc.* \ - ${systemd_unitdir}/system/systemd-binfmt.service" -RRECOMMENDS_${PN}-binfmt = "kernel-module-binfmt-misc" - -RRECOMMENDS_${PN}-vconsole-setup = "kbd kbd-consolefonts kbd-keymaps" - - -FILES_${PN}-journal-gatewayd = "${rootlibexecdir}/systemd/systemd-journal-gatewayd \ - ${systemd_system_unitdir}/systemd-journal-gatewayd.service \ - ${systemd_system_unitdir}/systemd-journal-gatewayd.socket \ - ${systemd_system_unitdir}/sockets.target.wants/systemd-journal-gatewayd.socket \ - ${datadir}/systemd/gatewayd/browse.html \ - " -SYSTEMD_SERVICE_${PN}-journal-gatewayd = "systemd-journal-gatewayd.socket" - -FILES_${PN}-journal-upload = "${rootlibexecdir}/systemd/systemd-journal-upload \ - ${systemd_system_unitdir}/systemd-journal-upload.service \ - 
${sysconfdir}/systemd/journal-upload.conf \ - " -SYSTEMD_SERVICE_${PN}-journal-upload = "systemd-journal-upload.service" - -FILES_${PN}-journal-remote = "${rootlibexecdir}/systemd/systemd-journal-remote \ - ${sysconfdir}/systemd/journal-remote.conf \ - ${systemd_system_unitdir}/systemd-journal-remote.service \ - ${systemd_system_unitdir}/systemd-journal-remote.socket \ - " -SYSTEMD_SERVICE_${PN}-journal-remote = "systemd-journal-remote.socket" - - -FILES_${PN}-container = "${sysconfdir}/dbus-1/system.d/org.freedesktop.import1.conf \ - ${sysconfdir}/dbus-1/system.d/org.freedesktop.machine1.conf \ - ${sysconfdir}/systemd/system/multi-user.target.wants/machines.target \ - ${base_bindir}/machinectl \ - ${bindir}/systemd-nspawn \ - ${nonarch_libdir}/systemd/import-pubring.gpg \ - ${systemd_system_unitdir}/busnames.target.wants/org.freedesktop.import1.busname \ - ${systemd_system_unitdir}/busnames.target.wants/org.freedesktop.machine1.busname \ - ${systemd_system_unitdir}/local-fs.target.wants/var-lib-machines.mount \ - ${systemd_system_unitdir}/machines.target.wants/var-lib-machines.mount \ - ${systemd_system_unitdir}/remote-fs.target.wants/var-lib-machines.mount \ - ${systemd_system_unitdir}/machine.slice \ - ${systemd_system_unitdir}/machines.target \ - ${systemd_system_unitdir}/org.freedesktop.import1.busname \ - ${systemd_system_unitdir}/org.freedesktop.machine1.busname \ - ${systemd_system_unitdir}/systemd-importd.service \ - ${systemd_system_unitdir}/systemd-machined.service \ - ${systemd_system_unitdir}/dbus-org.freedesktop.machine1.service \ - ${systemd_system_unitdir}/var-lib-machines.mount \ - ${rootlibexecdir}/systemd/systemd-import \ - ${rootlibexecdir}/systemd/systemd-importd \ - ${rootlibexecdir}/systemd/systemd-machined \ - ${rootlibexecdir}/systemd/systemd-pull \ - ${exec_prefix}/lib/tmpfiles.d/systemd-nspawn.conf \ - ${systemd_system_unitdir}/systemd-nspawn@.service \ - ${libdir}/libnss_mymachines.so.2 \ - ${datadir}/dbus-1/system-services/org.freedesktop.import1.service \ - ${datadir}/dbus-1/system-services/org.freedesktop.machine1.service \ - ${datadir}/dbus-1/system.d/org.freedesktop.import1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.machine1.conf \ - ${datadir}/polkit-1/actions/org.freedesktop.import1.policy \ - ${datadir}/polkit-1/actions/org.freedesktop.machine1.policy \ - " - -RRECOMMENDS_${PN}-container += "\ - ${PN}-journal-upload \ - ${PN}-journal-remote \ - ${PN}-journal-gatewayd \ - " - -FILES_${PN}-extra-utils = "\ - ${base_bindir}/systemd-escape \ - ${base_bindir}/systemd-inhibit \ - ${bindir}/systemd-detect-virt \ - ${bindir}/systemd-path \ - ${bindir}/systemd-run \ - ${bindir}/systemd-cat \ - ${bindir}/systemd-delta \ - ${bindir}/systemd-cgls \ - ${bindir}/systemd-cgtop \ - ${bindir}/systemd-stdio-bridge \ - ${base_bindir}/systemd-ask-password \ - ${base_bindir}/systemd-tty-ask-password-agent \ - ${systemd_unitdir}/system/systemd-ask-password-console.path \ - ${systemd_unitdir}/system/systemd-ask-password-console.service \ - ${systemd_unitdir}/system/systemd-ask-password-wall.path \ - ${systemd_unitdir}/system/systemd-ask-password-wall.service \ - ${systemd_unitdir}/system/sysinit.target.wants/systemd-ask-password-console.path \ - ${systemd_unitdir}/system/sysinit.target.wants/systemd-ask-password-wall.path \ - ${systemd_unitdir}/system/multi-user.target.wants/systemd-ask-password-wall.path \ - ${rootlibexecdir}/systemd/systemd-resolve-host \ - ${rootlibexecdir}/systemd/systemd-ac-power \ - ${rootlibexecdir}/systemd/systemd-activate \ - 
${rootlibexecdir}/systemd/systemd-bus-proxyd \ - ${systemd_unitdir}/system/systemd-bus-proxyd.service \ - ${systemd_unitdir}/system/systemd-bus-proxyd.socket \ - ${rootlibexecdir}/systemd/systemd-socket-proxyd \ - ${rootlibexecdir}/systemd/systemd-reply-password \ - ${rootlibexecdir}/systemd/systemd-sleep \ - ${rootlibexecdir}/systemd/system-sleep \ - ${systemd_unitdir}/system/systemd-hibernate.service \ - ${systemd_unitdir}/system/systemd-hybrid-sleep.service \ - ${systemd_unitdir}/system/systemd-suspend.service \ - ${systemd_unitdir}/system/sleep.target \ - ${rootlibexecdir}/systemd/systemd-initctl \ - ${systemd_unitdir}/system/systemd-initctl.service \ - ${systemd_unitdir}/system/systemd-initctl.socket \ - ${systemd_unitdir}/system/sockets.target.wants/systemd-initctl.socket \ - ${rootlibexecdir}/systemd/system-generators/systemd-gpt-auto-generator \ - ${rootlibexecdir}/systemd/systemd-cgroups-agent \ -" - -CONFFILES_${PN} = "${sysconfdir}/systemd/coredump.conf \ - ${sysconfdir}/systemd/journald.conf \ - ${sysconfdir}/systemd/logind.conf \ - ${sysconfdir}/systemd/networkd.conf \ - ${sysconfdir}/systemd/pstore.conf \ - ${sysconfdir}/systemd/resolved.conf \ - ${sysconfdir}/systemd/sleep.conf \ - ${sysconfdir}/systemd/system.conf \ - ${sysconfdir}/systemd/timesyncd.conf \ - ${sysconfdir}/systemd/user.conf \ -" - -FILES_${PN} = " ${base_bindir}/* \ - ${base_sbindir}/shutdown \ - ${base_sbindir}/halt \ - ${base_sbindir}/poweroff \ - ${base_sbindir}/runlevel \ - ${base_sbindir}/telinit \ - ${base_sbindir}/resolvconf \ - ${base_sbindir}/reboot \ - ${base_sbindir}/init \ - ${datadir}/dbus-1/services \ - ${datadir}/dbus-1/system-services \ - ${datadir}/polkit-1 \ - ${datadir}/${BPN} \ - ${datadir}/factory \ - ${sysconfdir}/dbus-1/ \ - ${sysconfdir}/modules-load.d/ \ - ${sysconfdir}/pam.d/ \ - ${sysconfdir}/sysctl.d/ \ - ${sysconfdir}/systemd/ \ - ${sysconfdir}/tmpfiles.d/ \ - ${sysconfdir}/xdg/ \ - ${sysconfdir}/init.d/README \ - ${sysconfdir}/resolv-conf.systemd \ - ${sysconfdir}/X11/xinit/xinitrc.d/* \ - ${rootlibexecdir}/systemd/* \ - ${systemd_unitdir}/* \ - ${base_libdir}/security/*.so \ - /cgroup \ - ${bindir}/systemd* \ - ${bindir}/busctl \ - ${bindir}/coredumpctl \ - ${bindir}/localectl \ - ${bindir}/hostnamectl \ - ${bindir}/resolvectl \ - ${bindir}/timedatectl \ - ${bindir}/bootctl \ - ${exec_prefix}/lib/tmpfiles.d/*.conf \ - ${exec_prefix}/lib/systemd \ - ${exec_prefix}/lib/modules-load.d \ - ${exec_prefix}/lib/sysctl.d \ - ${exec_prefix}/lib/sysusers.d \ - ${exec_prefix}/lib/environment.d \ - ${localstatedir} \ - ${rootlibexecdir}/udev/rules.d/70-uaccess.rules \ - ${rootlibexecdir}/udev/rules.d/71-seat.rules \ - ${rootlibexecdir}/udev/rules.d/73-seat-late.rules \ - ${rootlibexecdir}/udev/rules.d/99-systemd.rules \ - ${rootlibexecdir}/modprobe.d/systemd.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.timedate1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.locale1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.network1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.resolve1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.systemd1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.hostname1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.login1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.timesync1.conf \ - ${datadir}/dbus-1/system.d/org.freedesktop.portable1.conf \ - " - -FILES_${PN}-dev += "${base_libdir}/security/*.la ${datadir}/dbus-1/interfaces/ ${sysconfdir}/rpm/macros.systemd" - -RDEPENDS_${PN} += "kmod dbus util-linux-mount util-linux-umount 
udev (= ${EXTENDPKGV}) util-linux-agetty util-linux-fsck" -RDEPENDS_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'serial-getty-generator', '', 'systemd-serialgetty', d)}" -RDEPENDS_${PN} += "volatile-binds" - -RRECOMMENDS_${PN} += "systemd-extra-utils \ - udev-hwdb \ - e2fsprogs-e2fsck \ - kernel-module-autofs4 kernel-module-unix kernel-module-ipv6 kernel-module-sch-fq-codel \ - os-release \ - systemd-conf \ -" - -INSANE_SKIP_${PN} += "dev-so libdir" -INSANE_SKIP_${PN}-dbg += "libdir" -INSANE_SKIP_${PN}-doc += " libdir" - -RPROVIDES_udev = "hotplug" - -RDEPENDS_udev-hwdb += "udev" - -FILES_udev += "${base_sbindir}/udevd \ - ${rootlibexecdir}/systemd/network/99-default.link \ - ${rootlibexecdir}/systemd/systemd-udevd \ - ${rootlibexecdir}/udev/accelerometer \ - ${rootlibexecdir}/udev/ata_id \ - ${rootlibexecdir}/udev/cdrom_id \ - ${rootlibexecdir}/udev/collect \ - ${rootlibexecdir}/udev/fido_id \ - ${rootlibexecdir}/udev/findkeyboards \ - ${rootlibexecdir}/udev/keyboard-force-release.sh \ - ${rootlibexecdir}/udev/keymap \ - ${rootlibexecdir}/udev/mtd_probe \ - ${rootlibexecdir}/udev/scsi_id \ - ${rootlibexecdir}/udev/v4l_id \ - ${rootlibexecdir}/udev/keymaps \ - ${rootlibexecdir}/udev/rules.d/50-udev-default.rules \ - ${rootlibexecdir}/udev/rules.d/60-autosuspend-chromiumos.rules \ - ${rootlibexecdir}/udev/rules.d/60-block.rules \ - ${rootlibexecdir}/udev/rules.d/60-cdrom_id.rules \ - ${rootlibexecdir}/udev/rules.d/60-drm.rules \ - ${rootlibexecdir}/udev/rules.d/60-evdev.rules \ - ${rootlibexecdir}/udev/rules.d/60-fido-id.rules \ - ${rootlibexecdir}/udev/rules.d/60-input-id.rules \ - ${rootlibexecdir}/udev/rules.d/60-persistent-alsa.rules \ - ${rootlibexecdir}/udev/rules.d/60-persistent-input.rules \ - ${rootlibexecdir}/udev/rules.d/60-persistent-storage.rules \ - ${rootlibexecdir}/udev/rules.d/60-persistent-storage-tape.rules \ - ${rootlibexecdir}/udev/rules.d/60-persistent-v4l.rules \ - ${rootlibexecdir}/udev/rules.d/60-sensor.rules \ - ${rootlibexecdir}/udev/rules.d/60-serial.rules \ - ${rootlibexecdir}/udev/rules.d/61-autosuspend-manual.rules \ - ${rootlibexecdir}/udev/rules.d/64-btrfs.rules \ - ${rootlibexecdir}/udev/rules.d/70-joystick.rules \ - ${rootlibexecdir}/udev/rules.d/70-mouse.rules \ - ${rootlibexecdir}/udev/rules.d/70-power-switch.rules \ - ${rootlibexecdir}/udev/rules.d/70-touchpad.rules \ - ${rootlibexecdir}/udev/rules.d/75-net-description.rules \ - ${rootlibexecdir}/udev/rules.d/75-probe_mtd.rules \ - ${rootlibexecdir}/udev/rules.d/78-sound-card.rules \ - ${rootlibexecdir}/udev/rules.d/80-drivers.rules \ - ${rootlibexecdir}/udev/rules.d/80-net-setup-link.rules \ - ${rootlibexecdir}/udev/rules.d/90-vconsole.rules \ - ${sysconfdir}/udev \ - ${sysconfdir}/init.d/systemd-udevd \ - ${systemd_unitdir}/system/*udev* \ - ${systemd_unitdir}/system/*.wants/*udev* \ - ${base_bindir}/systemd-hwdb \ - ${base_bindir}/udevadm \ - ${base_sbindir}/udevadm \ - ${libexecdir}/${MLPREFIX}udevadm \ - ${datadir}/bash-completion/completions/udevadm \ - ${systemd_unitdir}/system/systemd-hwdb-update.service \ - " - -FILES_udev-hwdb = "${rootlibexecdir}/udev/hwdb.d \ - " - -RCONFLICTS_${PN} = "tiny-init ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'resolvconf', '', d)}" - -INITSCRIPT_PACKAGES = "udev" -INITSCRIPT_NAME_udev = "systemd-udevd" -INITSCRIPT_PARAMS_udev = "start 03 S ." 
- -python __anonymous() { - if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d): - d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1") -} - -python do_warn_musl() { - if d.getVar('TCLIBC') == "musl": - bb.warn("Using systemd with musl is not recommended since it is not supported upstream and some patches are known to be problematic.") -} -addtask warn_musl before do_configure - -ALTERNATIVE_${PN} = "halt reboot shutdown poweroff runlevel ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'resolv-conf', '', d)}" - -ALTERNATIVE_TARGET[resolv-conf] = "${sysconfdir}/resolv-conf.systemd" -ALTERNATIVE_LINK_NAME[resolv-conf] = "${sysconfdir}/resolv.conf" -ALTERNATIVE_PRIORITY[resolv-conf] ?= "50" - -ALTERNATIVE_TARGET[halt] = "${base_bindir}/systemctl" -ALTERNATIVE_LINK_NAME[halt] = "${base_sbindir}/halt" -ALTERNATIVE_PRIORITY[halt] ?= "300" - -ALTERNATIVE_TARGET[reboot] = "${base_bindir}/systemctl" -ALTERNATIVE_LINK_NAME[reboot] = "${base_sbindir}/reboot" -ALTERNATIVE_PRIORITY[reboot] ?= "300" - -ALTERNATIVE_TARGET[shutdown] = "${base_bindir}/systemctl" -ALTERNATIVE_LINK_NAME[shutdown] = "${base_sbindir}/shutdown" -ALTERNATIVE_PRIORITY[shutdown] ?= "300" - -ALTERNATIVE_TARGET[poweroff] = "${base_bindir}/systemctl" -ALTERNATIVE_LINK_NAME[poweroff] = "${base_sbindir}/poweroff" -ALTERNATIVE_PRIORITY[poweroff] ?= "300" - -ALTERNATIVE_TARGET[runlevel] = "${base_bindir}/systemctl" -ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel" -ALTERNATIVE_PRIORITY[runlevel] ?= "300" - -pkg_postinst_${PN}_libc-glibc () { - sed -e '/^hosts:/s/\s*\//' \ - -e 's/\(^hosts:.*\)\(\\)\(.*\)\(\\)\(.*\)/\1\2 myhostname \3\4\5/' \ - -i $D${sysconfdir}/nsswitch.conf -} - -pkg_prerm_${PN}_libc-glibc () { - sed -e '/^hosts:/s/\s*\//' \ - -e '/^hosts:/s/\s*myhostname//' \ - -i $D${sysconfdir}/nsswitch.conf -} - -PACKAGE_WRITE_DEPS += "qemu-native" -pkg_postinst_udev-hwdb () { - if test -n "$D"; then - $INTERCEPT_DIR/postinst_intercept update_udev_hwdb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} rootlibexecdir="${rootlibexecdir}" PREFERRED_PROVIDER_udev="${PREFERRED_PROVIDER_udev}" - else - udevadm hwdb --update - fi -} - -pkg_prerm_udev-hwdb () { - rm -f $D${sysconfdir}/udev/hwdb.bin -} diff --git a/poky/meta/recipes-core/systemd/systemd_246.2.bb b/poky/meta/recipes-core/systemd/systemd_246.2.bb new file mode 100644 index 000000000..704a36e75 --- /dev/null +++ b/poky/meta/recipes-core/systemd/systemd_246.2.bb @@ -0,0 +1,719 @@ +require systemd.inc + +PROVIDES = "udev" + +PE = "1" + +DEPENDS = "intltool-native gperf-native libcap util-linux" + +SECTION = "base/shell" + +inherit useradd pkgconfig meson perlnative update-rc.d update-alternatives qemu systemd gettext bash-completion manpages features_check + +# As this recipe builds udev, respect systemd being in DISTRO_FEATURES so +# that we don't build both udev and systemd in world builds. 
+REQUIRED_DISTRO_FEATURES = "systemd" + +SRC_URI += "file://touchscreen.rules \ + file://00-create-volatile.conf \ + file://init \ + file://99-default.preset \ + file://0001-binfmt-Don-t-install-dependency-links-at-install-tim.patch \ + file://0003-implment-systemd-sysv-install-for-OE.patch \ + file://0001-Use-PREFIX-ROOTPREFIX-correctly.patch \ + " + +# patches needed by musl +SRC_URI_append_libc-musl = " ${SRC_URI_MUSL}" +SRC_URI_MUSL = "\ + file://0002-don-t-use-glibc-specific-qsort_r.patch \ + file://0003-missing_type.h-add-__compare_fn_t-and-comparison_fn_.patch \ + file://0004-add-fallback-parse_printf_format-implementation.patch \ + file://0005-src-basic-missing.h-check-for-missing-strndupa.patch \ + file://0006-Include-netinet-if_ether.h.patch \ + file://0007-don-t-fail-if-GLOB_BRACE-and-GLOB_ALTDIRFUNC-is-not.patch \ + file://0008-add-missing-FTW_-macros-for-musl.patch \ + file://0010-fix-missing-of-__register_atfork-for-non-glibc-build.patch \ + file://0011-Use-uintmax_t-for-handling-rlim_t.patch \ + file://0014-test-sizeof.c-Disable-tests-for-missing-typedefs-in-.patch \ + file://0015-don-t-pass-AT_SYMLINK_NOFOLLOW-flag-to-faccessat.patch \ + file://0016-Define-glibc-compatible-basename-for-non-glibc-syste.patch \ + file://0017-Do-not-disable-buffering-when-writing-to-oom_score_a.patch \ + file://0018-distinguish-XSI-compliant-strerror_r-from-GNU-specif.patch \ + file://0019-Hide-__start_BUS_ERROR_MAP-and-__stop_BUS_ERROR_MAP.patch \ + file://0020-missing_type.h-add-__compar_d_fn_t-definition.patch \ + file://0021-avoid-redefinition-of-prctl_mm_map-structure.patch \ + file://0021-Handle-missing-LOCK_EX.patch \ + file://0022-Fix-incompatible-pointer-type-struct-sockaddr_un.patch \ + file://0023-Fix-field-efi_loader_entry_one_shot_stat-has-incompl.patch \ + file://0024-test-json.c-define-M_PIl.patch \ + file://0001-do-not-disable-buffer-in-writing-files.patch \ + file://0002-src-login-brightness.c-include-sys-wait.h.patch \ + file://0003-src-basic-copy.c-include-signal.h.patch \ + file://0004-src-shared-cpu-set-util.h-add-__cpu_mask-definition.patch \ + file://0001-Handle-missing-gshadow.patch \ + " + +PAM_PLUGINS = " \ + pam-plugin-unix \ + pam-plugin-loginuid \ + pam-plugin-keyinit \ +" + +PACKAGECONFIG ??= " \ + ${@bb.utils.filter('DISTRO_FEATURES', 'acl audit efi ldconfig pam selinux smack usrmerge polkit', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'wifi', 'rfkill', '', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'xkbcommon', '', d)} \ + backlight \ + binfmt \ + gshadow \ + hibernate \ + hostnamed \ + idn \ + ima \ + kmod \ + localed \ + logind \ + machined \ + myhostname \ + networkd \ + nss \ + nss-mymachines \ + nss-resolve \ + quotacheck \ + randomseed \ + resolved \ + set-time-epoch \ + sysusers \ + sysvinit \ + timedated \ + timesyncd \ + userdb \ + utmp \ + vconsole \ + xz \ +" + +PACKAGECONFIG_remove_libc-musl = " \ + gshadow \ + idn \ + localed \ + myhostname \ + nss \ + nss-mymachines \ + nss-resolve \ + sysusers \ + userdb \ + utmp \ +" + +CFLAGS_append_libc-musl = " -D__UAPI_DEF_ETHHDR=0 " + +# Use the upstream systemd serial-getty@.service and rely on +# systemd-getty-generator instead of using the OE-core specific +# systemd-serialgetty.bb - not enabled by default. 
+PACKAGECONFIG[serial-getty-generator] = "" + +PACKAGECONFIG[acl] = "-Dacl=true,-Dacl=false,acl" +PACKAGECONFIG[audit] = "-Daudit=true,-Daudit=false,audit" +PACKAGECONFIG[backlight] = "-Dbacklight=true,-Dbacklight=false" +PACKAGECONFIG[binfmt] = "-Dbinfmt=true,-Dbinfmt=false" +PACKAGECONFIG[bzip2] = "-Dbzip2=true,-Dbzip2=false,bzip2" +PACKAGECONFIG[cgroupv2] = "-Ddefault-hierarchy=unified,-Ddefault-hierarchy=hybrid" +PACKAGECONFIG[coredump] = "-Dcoredump=true,-Dcoredump=false" +PACKAGECONFIG[cryptsetup] = "-Dlibcryptsetup=true,-Dlibcryptsetup=false,cryptsetup" +PACKAGECONFIG[dbus] = "-Ddbus=true,-Ddbus=false,dbus" +PACKAGECONFIG[efi] = "-Defi=true,-Defi=false" +PACKAGECONFIG[gnu-efi] = "-Dgnu-efi=true -Defi-libdir=${STAGING_LIBDIR} -Defi-includedir=${STAGING_INCDIR}/efi,-Dgnu-efi=false,gnu-efi" +PACKAGECONFIG[elfutils] = "-Delfutils=true,-Delfutils=false,elfutils" +PACKAGECONFIG[firstboot] = "-Dfirstboot=true,-Dfirstboot=false" +# Sign the journal for anti-tampering +PACKAGECONFIG[gcrypt] = "-Dgcrypt=true,-Dgcrypt=false,libgcrypt" +PACKAGECONFIG[gnutls] = "-Dgnutls=true,-Dgnutls=false,gnutls" +PACKAGECONFIG[gshadow] = "-Dgshadow=true,-Dgshadow=false" +PACKAGECONFIG[hibernate] = "-Dhibernate=true,-Dhibernate=false" +PACKAGECONFIG[hostnamed] = "-Dhostnamed=true,-Dhostnamed=false" +PACKAGECONFIG[idn] = "-Didn=true,-Didn=false" +PACKAGECONFIG[ima] = "-Dima=true,-Dima=false" +# importd requires curl/xz/zlib/bzip2/gcrypt +PACKAGECONFIG[importd] = "-Dimportd=true,-Dimportd=false" +# Update NAT firewall rules +PACKAGECONFIG[iptc] = "-Dlibiptc=true,-Dlibiptc=false,iptables" +PACKAGECONFIG[journal-upload] = "-Dlibcurl=true,-Dlibcurl=false,curl" +PACKAGECONFIG[kmod] = "-Dkmod=true,-Dkmod=false,kmod" +PACKAGECONFIG[ldconfig] = "-Dldconfig=true,-Dldconfig=false,,ldconfig" +PACKAGECONFIG[libidn] = "-Dlibidn=true,-Dlibidn=false,libidn" +PACKAGECONFIG[libidn2] = "-Dlibidn2=true,-Dlibidn2=false,libidn2" +PACKAGECONFIG[localed] = "-Dlocaled=true,-Dlocaled=false" +PACKAGECONFIG[logind] = "-Dlogind=true,-Dlogind=false" +PACKAGECONFIG[lz4] = "-Dlz4=true,-Dlz4=false,lz4" +PACKAGECONFIG[machined] = "-Dmachined=true,-Dmachined=false" +PACKAGECONFIG[manpages] = "-Dman=true,-Dman=false,libxslt-native xmlto-native docbook-xml-dtd4-native docbook-xsl-stylesheets-native" +PACKAGECONFIG[microhttpd] = "-Dmicrohttpd=true,-Dmicrohttpd=false,libmicrohttpd" +PACKAGECONFIG[myhostname] = "-Dnss-myhostname=true,-Dnss-myhostname=false,,libnss-myhostname" +PACKAGECONFIG[networkd] = "-Dnetworkd=true,-Dnetworkd=false" +PACKAGECONFIG[nss] = "-Dnss-systemd=true,-Dnss-systemd=false" +PACKAGECONFIG[nss-mymachines] = "-Dnss-mymachines=true,-Dnss-mymachines=false" +PACKAGECONFIG[nss-resolve] = "-Dnss-resolve=true,-Dnss-resolve=false" +PACKAGECONFIG[openssl] = "-Dopenssl=true,-Dopenssl=false,openssl" +PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam,${PAM_PLUGINS}" +PACKAGECONFIG[pcre2] = "-Dpcre2=true,-Dpcre2=false,libpcre2" +PACKAGECONFIG[polkit] = "-Dpolkit=true,-Dpolkit=false" +PACKAGECONFIG[portabled] = "-Dportabled=true,-Dportabled=false" +PACKAGECONFIG[qrencode] = "-Dqrencode=true,-Dqrencode=false,qrencode" +PACKAGECONFIG[quotacheck] = "-Dquotacheck=true,-Dquotacheck=false" +PACKAGECONFIG[randomseed] = "-Drandomseed=true,-Drandomseed=false" +PACKAGECONFIG[resolved] = "-Dresolve=true,-Dresolve=false" +PACKAGECONFIG[rfkill] = "-Drfkill=true,-Drfkill=false" +# libseccomp is found in meta-security +PACKAGECONFIG[seccomp] = "-Dseccomp=true,-Dseccomp=false,libseccomp" +PACKAGECONFIG[selinux] = 
"-Dselinux=true,-Dselinux=false,libselinux,initscripts-sushell" +PACKAGECONFIG[smack] = "-Dsmack=true,-Dsmack=false" +PACKAGECONFIG[sysusers] = "-Dsysusers=true,-Dsysusers=false" +PACKAGECONFIG[sysvinit] = "-Dsysvinit-path=${sysconfdir}/init.d -Dsysvrcnd-path=${sysconfdir},-Dsysvinit-path= -Dsysvrcnd-path=,,systemd-compat-units update-rc.d" +# When enabled use reproducble build timestamp if set as time epoch, +# or build time if not. When disabled, time epoch is unset. +def build_epoch(d): + epoch = d.getVar('SOURCE_DATE_EPOCH') or "-1" + return '-Dtime-epoch=%d' % int(epoch) +PACKAGECONFIG[set-time-epoch] = "${@build_epoch(d)},-Dtime-epoch=0" +PACKAGECONFIG[timedated] = "-Dtimedated=true,-Dtimedated=false" +PACKAGECONFIG[timesyncd] = "-Dtimesyncd=true,-Dtimesyncd=false" +PACKAGECONFIG[usrmerge] = "-Dsplit-usr=false,-Dsplit-usr=true" +PACKAGECONFIG[sbinmerge] = "-Dsplit-bin=false,-Dsplit-bin=true" +PACKAGECONFIG[userdb] = "-Duserdb=true,-Duserdb=false" +PACKAGECONFIG[utmp] = "-Dutmp=true,-Dutmp=false" +PACKAGECONFIG[valgrind] = "-DVALGRIND=1,,valgrind" +PACKAGECONFIG[vconsole] = "-Dvconsole=true,-Dvconsole=false,,${PN}-vconsole-setup" +# Verify keymaps on locale change +PACKAGECONFIG[xkbcommon] = "-Dxkbcommon=true,-Dxkbcommon=false,libxkbcommon" +PACKAGECONFIG[xz] = "-Dxz=true,-Dxz=false,xz" +PACKAGECONFIG[zlib] = "-Dzlib=true,-Dzlib=false,zlib" + +# Helper variables to clarify locations. This mirrors the logic in systemd's +# build system. +rootprefix ?= "${root_prefix}" +rootlibdir ?= "${base_libdir}" +rootlibexecdir = "${rootprefix}/lib" + +# This links udev statically with systemd helper library. +# Otherwise udev package would depend on systemd package (which has the needed shared library), +# and always pull it into images. +EXTRA_OEMESON += "-Dlink-udev-shared=false" + +EXTRA_OEMESON += "-Dnobody-user=nobody \ + -Dnobody-group=nobody \ + -Drootlibdir=${rootlibdir} \ + -Drootprefix=${rootprefix} \ + -Ddefault-locale=C \ + " + +# Hardcode target binary paths to avoid using paths from sysroot +EXTRA_OEMESON += "-Dkexec-path=${sbindir}/kexec \ + -Dkmod-path=${base_bindir}/kmod \ + -Dmount-path=${base_bindir}/mount \ + -Dquotacheck-path=${sbindir}/quotacheck \ + -Dquotaon-path=${sbindir}/quotaon \ + -Dsulogin-path=${base_sbindir}/sulogin \ + -Dnologin-path=${base_sbindir}/nologin \ + -Dumount-path=${base_bindir}/umount" + +do_install() { + meson_do_install + install -d ${D}/${base_sbindir} + if ${@bb.utils.contains('PACKAGECONFIG', 'serial-getty-generator', 'false', 'true', d)}; then + # Provided by a separate recipe + rm ${D}${systemd_unitdir}/system/serial-getty* -f + fi + + # Provide support for initramfs + [ ! -e ${D}/init ] && ln -s ${rootlibexecdir}/systemd/systemd ${D}/init + [ ! 
-e ${D}/${base_sbindir}/udevd ] && ln -s ${rootlibexecdir}/systemd/systemd-udevd ${D}/${base_sbindir}/udevd + + install -d ${D}${sysconfdir}/udev/rules.d/ + install -d ${D}${sysconfdir}/tmpfiles.d + install -m 0644 ${WORKDIR}/*.rules ${D}${sysconfdir}/udev/rules.d/ + + install -m 0644 ${WORKDIR}/00-create-volatile.conf ${D}${sysconfdir}/tmpfiles.d/ + + if ${@bb.utils.contains('DISTRO_FEATURES','sysvinit','true','false',d)}; then + install -d ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/init ${D}${sysconfdir}/init.d/systemd-udevd + sed -i s%@UDEVD@%${rootlibexecdir}/systemd/systemd-udevd% ${D}${sysconfdir}/init.d/systemd-udevd + install -Dm 0755 ${S}/src/systemctl/systemd-sysv-install.SKELETON ${D}${systemd_unitdir}/systemd-sysv-install + fi + + chown root:systemd-journal ${D}/${localstatedir}/log/journal + + # Delete journal README, as log can be symlinked inside volatile. + rm -f ${D}/${localstatedir}/log/README + + # journal-remote creates this at start + rm -rf ${D}/${localstatedir}/log/journal/remote + + install -d ${D}${systemd_unitdir}/system/graphical.target.wants + install -d ${D}${systemd_unitdir}/system/multi-user.target.wants + install -d ${D}${systemd_unitdir}/system/poweroff.target.wants + install -d ${D}${systemd_unitdir}/system/reboot.target.wants + install -d ${D}${systemd_unitdir}/system/rescue.target.wants + + # Create symlinks for systemd-update-utmp-runlevel.service + if ${@bb.utils.contains('PACKAGECONFIG', 'utmp', 'true', 'false', d)}; then + ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/graphical.target.wants/systemd-update-utmp-runlevel.service + ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/multi-user.target.wants/systemd-update-utmp-runlevel.service + ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/poweroff.target.wants/systemd-update-utmp-runlevel.service + ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/reboot.target.wants/systemd-update-utmp-runlevel.service + ln -sf ../systemd-update-utmp-runlevel.service ${D}${systemd_unitdir}/system/rescue.target.wants/systemd-update-utmp-runlevel.service + fi + + # this file is needed to exist if networkd is disabled but timesyncd is still in use since timesyncd checks it + # for existence else it fails + if [ -s ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf ]; then + ${@bb.utils.contains('PACKAGECONFIG', 'networkd', ':', 'sed -i -e "\$ad /run/systemd/netif/links 0755 root root -" ${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf', d)} + fi + if ! ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'true', 'false', d)}; then + echo 'L! ${sysconfdir}/resolv.conf - - - - ../run/systemd/resolve/resolv.conf' >>${D}${exec_prefix}/lib/tmpfiles.d/etc.conf + echo 'd /run/systemd/resolve 0755 root root -' >>${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf + echo 'f /run/systemd/resolve/resolv.conf 0644 root root' >>${D}${exec_prefix}/lib/tmpfiles.d/systemd.conf + ln -s ../run/systemd/resolve/resolv.conf ${D}${sysconfdir}/resolv-conf.systemd + else + sed -i -e "s%^L! /etc/resolv.conf.*$%L! 
/etc/resolv.conf - - - - ../run/systemd/resolve/resolv.conf%g" ${D}${exec_prefix}/lib/tmpfiles.d/etc.conf + ln -s ../run/systemd/resolve/resolv.conf ${D}${sysconfdir}/resolv-conf.systemd + fi + if ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'false', 'true', d)}; then + rm ${D}${exec_prefix}/lib/tmpfiles.d/x11.conf + rm -r ${D}${sysconfdir}/X11 + fi + + # If polkit is setup fixup permissions and ownership + if ${@bb.utils.contains('PACKAGECONFIG', 'polkit', 'true', 'false', d)}; then + if [ -d ${D}${datadir}/polkit-1/rules.d ]; then + chmod 700 ${D}${datadir}/polkit-1/rules.d + chown polkitd:root ${D}${datadir}/polkit-1/rules.d + fi + fi + + # create link for existing udev rules + ln -s ${base_bindir}/udevadm ${D}${base_sbindir}/udevadm + + # duplicate udevadm for postinst script + install -d ${D}${libexecdir} + ln ${D}${base_bindir}/udevadm ${D}${libexecdir}/${MLPREFIX}udevadm + + # install default policy for presets + # https://www.freedesktop.org/wiki/Software/systemd/Preset/#howto + install -Dm 0644 ${WORKDIR}/99-default.preset ${D}${systemd_unitdir}/system-preset/99-default.preset +} + +python populate_packages_prepend (){ + systemdlibdir = d.getVar("rootlibdir") + do_split_packages(d, systemdlibdir, '^lib(.*)\.so\.*', 'lib%s', 'Systemd %s library', extra_depends='', allow_links=True) +} +PACKAGES_DYNAMIC += "^lib(udev|systemd|nss).*" + +PACKAGE_BEFORE_PN = "\ + ${PN}-gui \ + ${PN}-vconsole-setup \ + ${PN}-initramfs \ + ${PN}-analyze \ + ${PN}-kernel-install \ + ${PN}-rpm-macros \ + ${PN}-binfmt \ + ${PN}-zsh-completion \ + ${PN}-container \ + ${PN}-journal-gatewayd \ + ${PN}-journal-upload \ + ${PN}-journal-remote \ + ${PN}-extra-utils \ + udev \ + udev-hwdb \ +" + +SUMMARY_${PN}-container = "Tools for containers and VMs" +DESCRIPTION_${PN}-container = "Systemd tools to spawn and manage containers and virtual machines." + +SUMMARY_${PN}-journal-gatewayd = "HTTP server for journal events" +DESCRIPTION_${PN}-journal-gatewayd = "systemd-journal-gatewayd serves journal events over the network. Clients must connect using HTTP. The server listens on port 19531 by default." + +SUMMARY_${PN}-journal-upload = "Send journal messages over the network" +DESCRIPTION_${PN}-journal-upload = "systemd-journal-upload uploads journal entries to a specified URL." + +SUMMARY_${PN}-journal-remote = "Receive journal messages over the network" +DESCRIPTION_${PN}-journal-remote = "systemd-journal-remote is a command to receive serialized journal events and store them to journal files." 
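The populate_packages_prepend hook above hands do_split_packages() a file-name regex ('^lib(.*)\.so\.*') plus 'lib%s' naming and 'Systemd %s library' description templates, so each shared library found in ${rootlibdir} lands in its own runtime package (which is what the PACKAGES_DYNAMIC pattern then admits). A rough standalone sketch of just the name mapping, using plain re.match and made-up file names rather than the real do_split_packages() machinery (which also creates the packages, follows symlinks and wires up dependencies):

import re

# Same pattern and templates as the recipe; the library file names are illustrative.
pattern = r'^lib(.*)\.so\.*'
output_pattern = 'lib%s'
description = 'Systemd %s library'

for filename in ['libsystemd.so.0.29.0', 'libudev.so.1', 'libnss_myhostname.so.2']:
    m = re.match(pattern, filename)
    if m:
        name = m.group(1)
        print(output_pattern % name, '-', description % name)
# libsystemd - Systemd systemd library
# libudev - Systemd udev library
# libnss_myhostname - Systemd nss_myhostname library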
+ +SYSTEMD_PACKAGES = "${@bb.utils.contains('PACKAGECONFIG', 'binfmt', '${PN}-binfmt', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-gatewayd', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-remote', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'journal-upload', '${PN}-journal-upload', '', d)} \ +" +SYSTEMD_SERVICE_${PN}-binfmt = "systemd-binfmt.service" + +USERADD_PACKAGES = "${PN} ${PN}-extra-utils \ + ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-gateway', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'microhttpd', '${PN}-journal-remote', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'journal-upload', '${PN}-journal-upload', '', d)} \ +" +GROUPADD_PARAM_${PN} = "-r systemd-journal" +USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'coredump', '--system -d / -M --shell /bin/nologin systemd-coredump;', '', d)}" +USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'networkd', '--system -d / -M --shell /bin/nologin systemd-network;', '', d)}" +USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'polkit', '--system --no-create-home --user-group --home-dir ${sysconfdir}/polkit-1 polkitd;', '', d)}" +USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'resolved', '--system -d / -M --shell /bin/nologin systemd-resolve;', '', d)}" +USERADD_PARAM_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'timesyncd', '--system -d / -M --shell /bin/nologin systemd-timesync;', '', d)}" +USERADD_PARAM_${PN}-extra-utils = "--system -d / -M --shell /bin/nologin systemd-bus-proxy" +USERADD_PARAM_${PN}-journal-gateway = "--system -d / -M --shell /bin/nologin systemd-journal-gateway" +USERADD_PARAM_${PN}-journal-remote = "--system -d / -M --shell /bin/nologin systemd-journal-remote" +USERADD_PARAM_${PN}-journal-upload = "--system -d / -M --shell /bin/nologin systemd-journal-upload" + +FILES_${PN}-analyze = "${bindir}/systemd-analyze" + +FILES_${PN}-initramfs = "/init" +RDEPENDS_${PN}-initramfs = "${PN}" + +FILES_${PN}-gui = "${bindir}/systemadm" + +FILES_${PN}-vconsole-setup = "${rootlibexecdir}/systemd/systemd-vconsole-setup \ + ${systemd_unitdir}/system/systemd-vconsole-setup.service \ + ${systemd_unitdir}/system/sysinit.target.wants/systemd-vconsole-setup.service" + +RDEPENDS_${PN}-kernel-install += "bash" +FILES_${PN}-kernel-install = "${bindir}/kernel-install \ + ${sysconfdir}/kernel/ \ + ${exec_prefix}/lib/kernel \ + " +FILES_${PN}-rpm-macros = "${exec_prefix}/lib/rpm \ + " + +FILES_${PN}-zsh-completion = "${datadir}/zsh/site-functions" + +FILES_${PN}-binfmt = "${sysconfdir}/binfmt.d/ \ + ${exec_prefix}/lib/binfmt.d \ + ${rootlibexecdir}/systemd/systemd-binfmt \ + ${systemd_unitdir}/system/proc-sys-fs-binfmt_misc.* \ + ${systemd_unitdir}/system/systemd-binfmt.service" +RRECOMMENDS_${PN}-binfmt = "kernel-module-binfmt-misc" + +RRECOMMENDS_${PN}-vconsole-setup = "kbd kbd-consolefonts kbd-keymaps" + + +FILES_${PN}-journal-gatewayd = "${rootlibexecdir}/systemd/systemd-journal-gatewayd \ + ${systemd_system_unitdir}/systemd-journal-gatewayd.service \ + ${systemd_system_unitdir}/systemd-journal-gatewayd.socket \ + ${systemd_system_unitdir}/sockets.target.wants/systemd-journal-gatewayd.socket \ + ${datadir}/systemd/gatewayd/browse.html \ + " +SYSTEMD_SERVICE_${PN}-journal-gatewayd = "systemd-journal-gatewayd.socket" + +FILES_${PN}-journal-upload = "${rootlibexecdir}/systemd/systemd-journal-upload \ + ${systemd_system_unitdir}/systemd-journal-upload.service \ + 
${sysconfdir}/systemd/journal-upload.conf \ + " +SYSTEMD_SERVICE_${PN}-journal-upload = "systemd-journal-upload.service" + +FILES_${PN}-journal-remote = "${rootlibexecdir}/systemd/systemd-journal-remote \ + ${sysconfdir}/systemd/journal-remote.conf \ + ${systemd_system_unitdir}/systemd-journal-remote.service \ + ${systemd_system_unitdir}/systemd-journal-remote.socket \ + " +SYSTEMD_SERVICE_${PN}-journal-remote = "systemd-journal-remote.socket" + + +FILES_${PN}-container = "${sysconfdir}/dbus-1/system.d/org.freedesktop.import1.conf \ + ${sysconfdir}/dbus-1/system.d/org.freedesktop.machine1.conf \ + ${sysconfdir}/systemd/system/multi-user.target.wants/machines.target \ + ${base_bindir}/machinectl \ + ${bindir}/systemd-nspawn \ + ${nonarch_libdir}/systemd/import-pubring.gpg \ + ${systemd_system_unitdir}/busnames.target.wants/org.freedesktop.import1.busname \ + ${systemd_system_unitdir}/busnames.target.wants/org.freedesktop.machine1.busname \ + ${systemd_system_unitdir}/local-fs.target.wants/var-lib-machines.mount \ + ${systemd_system_unitdir}/machines.target.wants/var-lib-machines.mount \ + ${systemd_system_unitdir}/remote-fs.target.wants/var-lib-machines.mount \ + ${systemd_system_unitdir}/machine.slice \ + ${systemd_system_unitdir}/machines.target \ + ${systemd_system_unitdir}/org.freedesktop.import1.busname \ + ${systemd_system_unitdir}/org.freedesktop.machine1.busname \ + ${systemd_system_unitdir}/systemd-importd.service \ + ${systemd_system_unitdir}/systemd-machined.service \ + ${systemd_system_unitdir}/dbus-org.freedesktop.machine1.service \ + ${systemd_system_unitdir}/var-lib-machines.mount \ + ${rootlibexecdir}/systemd/systemd-import \ + ${rootlibexecdir}/systemd/systemd-importd \ + ${rootlibexecdir}/systemd/systemd-machined \ + ${rootlibexecdir}/systemd/systemd-pull \ + ${exec_prefix}/lib/tmpfiles.d/systemd-nspawn.conf \ + ${systemd_system_unitdir}/systemd-nspawn@.service \ + ${libdir}/libnss_mymachines.so.2 \ + ${datadir}/dbus-1/system-services/org.freedesktop.import1.service \ + ${datadir}/dbus-1/system-services/org.freedesktop.machine1.service \ + ${datadir}/dbus-1/system.d/org.freedesktop.import1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.machine1.conf \ + ${datadir}/polkit-1/actions/org.freedesktop.import1.policy \ + ${datadir}/polkit-1/actions/org.freedesktop.machine1.policy \ + " + +RRECOMMENDS_${PN}-container += "\ + ${PN}-journal-upload \ + ${PN}-journal-remote \ + ${PN}-journal-gatewayd \ + " + +FILES_${PN}-extra-utils = "\ + ${base_bindir}/systemd-escape \ + ${base_bindir}/systemd-inhibit \ + ${bindir}/systemd-detect-virt \ + ${bindir}/systemd-path \ + ${bindir}/systemd-run \ + ${bindir}/systemd-cat \ + ${bindir}/systemd-delta \ + ${bindir}/systemd-cgls \ + ${bindir}/systemd-cgtop \ + ${bindir}/systemd-stdio-bridge \ + ${base_bindir}/systemd-ask-password \ + ${base_bindir}/systemd-tty-ask-password-agent \ + ${systemd_unitdir}/system/systemd-ask-password-console.path \ + ${systemd_unitdir}/system/systemd-ask-password-console.service \ + ${systemd_unitdir}/system/systemd-ask-password-wall.path \ + ${systemd_unitdir}/system/systemd-ask-password-wall.service \ + ${systemd_unitdir}/system/sysinit.target.wants/systemd-ask-password-console.path \ + ${systemd_unitdir}/system/sysinit.target.wants/systemd-ask-password-wall.path \ + ${systemd_unitdir}/system/multi-user.target.wants/systemd-ask-password-wall.path \ + ${rootlibexecdir}/systemd/systemd-resolve-host \ + ${rootlibexecdir}/systemd/systemd-ac-power \ + ${rootlibexecdir}/systemd/systemd-activate \ + 
${rootlibexecdir}/systemd/systemd-bus-proxyd \ + ${systemd_unitdir}/system/systemd-bus-proxyd.service \ + ${systemd_unitdir}/system/systemd-bus-proxyd.socket \ + ${rootlibexecdir}/systemd/systemd-socket-proxyd \ + ${rootlibexecdir}/systemd/systemd-reply-password \ + ${rootlibexecdir}/systemd/systemd-sleep \ + ${rootlibexecdir}/systemd/system-sleep \ + ${systemd_unitdir}/system/systemd-hibernate.service \ + ${systemd_unitdir}/system/systemd-hybrid-sleep.service \ + ${systemd_unitdir}/system/systemd-suspend.service \ + ${systemd_unitdir}/system/sleep.target \ + ${rootlibexecdir}/systemd/systemd-initctl \ + ${systemd_unitdir}/system/systemd-initctl.service \ + ${systemd_unitdir}/system/systemd-initctl.socket \ + ${systemd_unitdir}/system/sockets.target.wants/systemd-initctl.socket \ + ${rootlibexecdir}/systemd/system-generators/systemd-gpt-auto-generator \ + ${rootlibexecdir}/systemd/systemd-cgroups-agent \ +" + +CONFFILES_${PN} = "${sysconfdir}/systemd/coredump.conf \ + ${sysconfdir}/systemd/journald.conf \ + ${sysconfdir}/systemd/logind.conf \ + ${sysconfdir}/systemd/networkd.conf \ + ${sysconfdir}/systemd/pstore.conf \ + ${sysconfdir}/systemd/resolved.conf \ + ${sysconfdir}/systemd/sleep.conf \ + ${sysconfdir}/systemd/system.conf \ + ${sysconfdir}/systemd/timesyncd.conf \ + ${sysconfdir}/systemd/user.conf \ +" + +FILES_${PN} = " ${base_bindir}/* \ + ${base_sbindir}/shutdown \ + ${base_sbindir}/halt \ + ${base_sbindir}/poweroff \ + ${base_sbindir}/runlevel \ + ${base_sbindir}/telinit \ + ${base_sbindir}/resolvconf \ + ${base_sbindir}/reboot \ + ${base_sbindir}/init \ + ${datadir}/dbus-1/services \ + ${datadir}/dbus-1/system-services \ + ${datadir}/polkit-1 \ + ${datadir}/${BPN} \ + ${datadir}/factory \ + ${sysconfdir}/dbus-1/ \ + ${sysconfdir}/modules-load.d/ \ + ${sysconfdir}/pam.d/ \ + ${sysconfdir}/sysctl.d/ \ + ${sysconfdir}/systemd/ \ + ${sysconfdir}/tmpfiles.d/ \ + ${sysconfdir}/xdg/ \ + ${sysconfdir}/init.d/README \ + ${sysconfdir}/resolv-conf.systemd \ + ${sysconfdir}/X11/xinit/xinitrc.d/* \ + ${rootlibexecdir}/systemd/* \ + ${systemd_unitdir}/* \ + ${base_libdir}/security/*.so \ + /cgroup \ + ${bindir}/systemd* \ + ${bindir}/busctl \ + ${bindir}/coredumpctl \ + ${bindir}/localectl \ + ${bindir}/hostnamectl \ + ${bindir}/resolvectl \ + ${bindir}/timedatectl \ + ${bindir}/bootctl \ + ${exec_prefix}/lib/tmpfiles.d/*.conf \ + ${exec_prefix}/lib/systemd \ + ${exec_prefix}/lib/modules-load.d \ + ${exec_prefix}/lib/sysctl.d \ + ${exec_prefix}/lib/sysusers.d \ + ${exec_prefix}/lib/environment.d \ + ${localstatedir} \ + ${rootlibexecdir}/udev/rules.d/70-uaccess.rules \ + ${rootlibexecdir}/udev/rules.d/71-seat.rules \ + ${rootlibexecdir}/udev/rules.d/73-seat-late.rules \ + ${rootlibexecdir}/udev/rules.d/99-systemd.rules \ + ${rootlibexecdir}/modprobe.d/systemd.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.timedate1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.locale1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.network1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.resolve1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.systemd1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.hostname1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.login1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.timesync1.conf \ + ${datadir}/dbus-1/system.d/org.freedesktop.portable1.conf \ + " + +FILES_${PN}-dev += "${base_libdir}/security/*.la ${datadir}/dbus-1/interfaces/ ${sysconfdir}/rpm/macros.systemd" + +RDEPENDS_${PN} += "kmod dbus util-linux-mount util-linux-umount 
udev (= ${EXTENDPKGV}) util-linux-agetty util-linux-fsck" +RDEPENDS_${PN} += "${@bb.utils.contains('PACKAGECONFIG', 'serial-getty-generator', '', 'systemd-serialgetty', d)}" +RDEPENDS_${PN} += "volatile-binds" + +RRECOMMENDS_${PN} += "systemd-extra-utils \ + udev-hwdb \ + e2fsprogs-e2fsck \ + kernel-module-autofs4 kernel-module-unix kernel-module-ipv6 kernel-module-sch-fq-codel \ + os-release \ + systemd-conf \ +" + +INSANE_SKIP_${PN} += "dev-so libdir" +INSANE_SKIP_${PN}-dbg += "libdir" +INSANE_SKIP_${PN}-doc += " libdir" + +RPROVIDES_udev = "hotplug" + +RDEPENDS_udev-hwdb += "udev" + +FILES_udev += "${base_sbindir}/udevd \ + ${rootlibexecdir}/systemd/network/99-default.link \ + ${rootlibexecdir}/systemd/systemd-udevd \ + ${rootlibexecdir}/udev/accelerometer \ + ${rootlibexecdir}/udev/ata_id \ + ${rootlibexecdir}/udev/cdrom_id \ + ${rootlibexecdir}/udev/collect \ + ${rootlibexecdir}/udev/fido_id \ + ${rootlibexecdir}/udev/findkeyboards \ + ${rootlibexecdir}/udev/keyboard-force-release.sh \ + ${rootlibexecdir}/udev/keymap \ + ${rootlibexecdir}/udev/mtd_probe \ + ${rootlibexecdir}/udev/scsi_id \ + ${rootlibexecdir}/udev/v4l_id \ + ${rootlibexecdir}/udev/keymaps \ + ${rootlibexecdir}/udev/rules.d/50-udev-default.rules \ + ${rootlibexecdir}/udev/rules.d/60-autosuspend.rules \ + ${rootlibexecdir}/udev/rules.d/60-autosuspend-chromiumos.rules \ + ${rootlibexecdir}/udev/rules.d/60-block.rules \ + ${rootlibexecdir}/udev/rules.d/60-cdrom_id.rules \ + ${rootlibexecdir}/udev/rules.d/60-drm.rules \ + ${rootlibexecdir}/udev/rules.d/60-evdev.rules \ + ${rootlibexecdir}/udev/rules.d/60-fido-id.rules \ + ${rootlibexecdir}/udev/rules.d/60-input-id.rules \ + ${rootlibexecdir}/udev/rules.d/60-persistent-alsa.rules \ + ${rootlibexecdir}/udev/rules.d/60-persistent-input.rules \ + ${rootlibexecdir}/udev/rules.d/60-persistent-storage.rules \ + ${rootlibexecdir}/udev/rules.d/60-persistent-storage-tape.rules \ + ${rootlibexecdir}/udev/rules.d/60-persistent-v4l.rules \ + ${rootlibexecdir}/udev/rules.d/60-sensor.rules \ + ${rootlibexecdir}/udev/rules.d/60-serial.rules \ + ${rootlibexecdir}/udev/rules.d/61-autosuspend-manual.rules \ + ${rootlibexecdir}/udev/rules.d/64-btrfs.rules \ + ${rootlibexecdir}/udev/rules.d/70-joystick.rules \ + ${rootlibexecdir}/udev/rules.d/70-mouse.rules \ + ${rootlibexecdir}/udev/rules.d/70-power-switch.rules \ + ${rootlibexecdir}/udev/rules.d/70-touchpad.rules \ + ${rootlibexecdir}/udev/rules.d/75-net-description.rules \ + ${rootlibexecdir}/udev/rules.d/75-probe_mtd.rules \ + ${rootlibexecdir}/udev/rules.d/78-sound-card.rules \ + ${rootlibexecdir}/udev/rules.d/80-drivers.rules \ + ${rootlibexecdir}/udev/rules.d/80-net-setup-link.rules \ + ${rootlibexecdir}/udev/rules.d/90-vconsole.rules \ + ${sysconfdir}/udev \ + ${sysconfdir}/init.d/systemd-udevd \ + ${systemd_unitdir}/system/*udev* \ + ${systemd_unitdir}/system/*.wants/*udev* \ + ${base_bindir}/systemd-hwdb \ + ${base_bindir}/udevadm \ + ${base_sbindir}/udevadm \ + ${libexecdir}/${MLPREFIX}udevadm \ + ${datadir}/bash-completion/completions/udevadm \ + ${systemd_unitdir}/system/systemd-hwdb-update.service \ + " + +FILES_udev-hwdb = "${rootlibexecdir}/udev/hwdb.d \ + " + +RCONFLICTS_${PN} = "tiny-init ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'resolvconf', '', d)}" + +INITSCRIPT_PACKAGES = "udev" +INITSCRIPT_NAME_udev = "systemd-udevd" +INITSCRIPT_PARAMS_udev = "start 03 S ." 
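Nearly every conditional in this recipe - SYSTEMD_PACKAGES, the USERADD_PARAM entries, RCONFLICTS_${PN} just above and the ALTERNATIVE_${PN} list below - is an inline ${@bb.utils.contains(...)} expansion: the named variable's value is split on whitespace and the true branch is returned only when every requested item is present. A minimal sketch of that behaviour (illustration only, not the real BitBake implementation, which takes the datastore 'd' and looks the variable up itself):

# Simplified stand-in for bb.utils.contains(); for illustration only.
def contains(variable_value, checkvalues, truevalue, falsevalue):
    present = set(variable_value.split())
    wanted = set(checkvalues.split())
    return truevalue if wanted <= present else falsevalue

# Hypothetical PACKAGECONFIG value: with "resolved" enabled, ${PN} conflicts
# with resolvconf and registers the resolv-conf alternative; a feature that is
# absent simply expands to the false branch (here, an empty string).
packageconfig = "pam binfmt resolved timesyncd"
print(contains(packageconfig, "resolved", "resolvconf", ""))                  # -> resolvconf
print(contains(packageconfig, "microhttpd", "systemd-journal-gatewayd", ""))  # -> (empty)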
+ +python __anonymous() { + if not bb.utils.contains('DISTRO_FEATURES', 'sysvinit', True, False, d): + d.setVar("INHIBIT_UPDATERCD_BBCLASS", "1") +} + +python do_warn_musl() { + if d.getVar('TCLIBC') == "musl": + bb.warn("Using systemd with musl is not recommended since it is not supported upstream and some patches are known to be problematic.") +} +addtask warn_musl before do_configure + +ALTERNATIVE_${PN} = "halt reboot shutdown poweroff runlevel ${@bb.utils.contains('PACKAGECONFIG', 'resolved', 'resolv-conf', '', d)}" + +ALTERNATIVE_TARGET[resolv-conf] = "${sysconfdir}/resolv-conf.systemd" +ALTERNATIVE_LINK_NAME[resolv-conf] = "${sysconfdir}/resolv.conf" +ALTERNATIVE_PRIORITY[resolv-conf] ?= "50" + +ALTERNATIVE_TARGET[halt] = "${base_bindir}/systemctl" +ALTERNATIVE_LINK_NAME[halt] = "${base_sbindir}/halt" +ALTERNATIVE_PRIORITY[halt] ?= "300" + +ALTERNATIVE_TARGET[reboot] = "${base_bindir}/systemctl" +ALTERNATIVE_LINK_NAME[reboot] = "${base_sbindir}/reboot" +ALTERNATIVE_PRIORITY[reboot] ?= "300" + +ALTERNATIVE_TARGET[shutdown] = "${base_bindir}/systemctl" +ALTERNATIVE_LINK_NAME[shutdown] = "${base_sbindir}/shutdown" +ALTERNATIVE_PRIORITY[shutdown] ?= "300" + +ALTERNATIVE_TARGET[poweroff] = "${base_bindir}/systemctl" +ALTERNATIVE_LINK_NAME[poweroff] = "${base_sbindir}/poweroff" +ALTERNATIVE_PRIORITY[poweroff] ?= "300" + +ALTERNATIVE_TARGET[runlevel] = "${base_bindir}/systemctl" +ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel" +ALTERNATIVE_PRIORITY[runlevel] ?= "300" + +pkg_postinst_${PN}_libc-glibc () { + sed -e '/^hosts:/s/\s*\//' \ + -e 's/\(^hosts:.*\)\(\\)\(.*\)\(\\)\(.*\)/\1\2 myhostname \3\4\5/' \ + -i $D${sysconfdir}/nsswitch.conf +} + +pkg_prerm_${PN}_libc-glibc () { + sed -e '/^hosts:/s/\s*\//' \ + -e '/^hosts:/s/\s*myhostname//' \ + -i $D${sysconfdir}/nsswitch.conf +} + +PACKAGE_WRITE_DEPS += "qemu-native" +pkg_postinst_udev-hwdb () { + if test -n "$D"; then + $INTERCEPT_DIR/postinst_intercept update_udev_hwdb ${PKG} mlprefix=${MLPREFIX} binprefix=${MLPREFIX} rootlibexecdir="${rootlibexecdir}" PREFERRED_PROVIDER_udev="${PREFERRED_PROVIDER_udev}" + else + udevadm hwdb --update + fi +} + +pkg_prerm_udev-hwdb () { + rm -f $D${sysconfdir}/udev/hwdb.bin +} diff --git a/poky/meta/recipes-core/sysvinit/sysvinit/crypt-lib.patch b/poky/meta/recipes-core/sysvinit/sysvinit/crypt-lib.patch index bd4444b81..5c39fb001 100644 --- a/poky/meta/recipes-core/sysvinit/sysvinit/crypt-lib.patch +++ b/poky/meta/recipes-core/sysvinit/sysvinit/crypt-lib.patch @@ -1,4 +1,4 @@ -From 7276275d9a08d5ae268fb263027bbc60bc0ab2e8 Mon Sep 17 00:00:00 2001 +From c3a068cf24a22bea7349849ec111ae8d91a54db4 Mon Sep 17 00:00:00 2001 From: Jeff Dike Date: Wed, 14 Jul 2010 14:35:52 -0400 Subject: [PATCH] sysvinit - Remove sulogin dependency on /usr/lib*/libcrypt.a @@ -11,24 +11,26 @@ Upstream-Status: Inappropriate [configuration] # - jdike@linux.intel.com --- - src/Makefile | 9 ++------- - 1 file changed, 2 insertions(+), 7 deletions(-) + src/Makefile | 11 ++--------- + 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/src/Makefile b/src/Makefile -index 1380d7f..628e77f 100644 +index 5b1a5a2..b686394 100644 --- a/src/Makefile +++ b/src/Makefile -@@ -97,13 +97,8 @@ else +@@ -97,15 +97,8 @@ else endif # Additional libs for GNU libc. --ifneq ($(wildcard /usr/lib*/libcrypt.*),) +-ifneq ($(wildcard $(ROOT)/usr/lib*/libcrypt.*),) - SULOGINLIBS += -lcrypt -endif - -# Additional libs for GNU libc / multiarch on Debian based systems. 
--ifneq ($(wildcard /usr/lib/*/libcrypt.*),) +-ifneq ($(wildcard $(ROOT)/usr/lib/*/libcrypt.*),) +-ifneq ($(findstring -lcrypt, $(SULOGINLIBS)), -lcrypt) - SULOGINLIBS += -lcrypt +-endif +ifneq ($(LCRYPT),) + SULOGINLIBS += $(LCRYPT) endif diff --git a/poky/meta/recipes-core/sysvinit/sysvinit/install.patch b/poky/meta/recipes-core/sysvinit/sysvinit/install.patch index 2930fb019..90563a629 100644 --- a/poky/meta/recipes-core/sysvinit/sysvinit/install.patch +++ b/poky/meta/recipes-core/sysvinit/sysvinit/install.patch @@ -1,4 +1,4 @@ -From b6cc66ab245ceb6bca0116dff7a41f6d7677b96a Mon Sep 17 00:00:00 2001 +From 5e35aa105e7a2e85db2a89fee4114090b1ac55be Mon Sep 17 00:00:00 2001 From: Qing He Date: Fri, 18 Jun 2010 09:40:30 +0800 Subject: [PATCH] sysvinit: upgrade to version 2.88dsf @@ -10,7 +10,7 @@ Upstream-Status: Pending 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/src/Makefile b/src/Makefile -index 9f9f09e..1380d7f 100644 +index 1b368dc..5b1a5a2 100644 --- a/src/Makefile +++ b/src/Makefile @@ -77,7 +77,14 @@ else @@ -29,7 +29,7 @@ index 9f9f09e..1380d7f 100644 ifeq ($(WITH_SELINUX),yes) SELINUX_DEF = -DWITH_SELINUX -@@ -189,42 +196,42 @@ clobber: cleanobjs +@@ -191,43 +198,43 @@ clobber: cleanobjs distclean: clobber install: all @@ -50,6 +50,7 @@ index 9f9f09e..1380d7f 100644 + $(INSTALL_EXEC) $$i $(ROOT)$(bindir)/ ; \ done # $(INSTALL_DIR) $(ROOT)/etc/ + $(INSTALL_DIR) $(ROOT)/etc/inittab.d # $(INSTALL_EXEC) ../doc/initscript.sample $(ROOT)/etc/ - ln -sf halt $(ROOT)/sbin/reboot - ln -sf halt $(ROOT)/sbin/poweroff diff --git a/poky/meta/recipes-core/sysvinit/sysvinit_2.96.bb b/poky/meta/recipes-core/sysvinit/sysvinit_2.96.bb deleted file mode 100644 index d2b85ed9c..000000000 --- a/poky/meta/recipes-core/sysvinit/sysvinit_2.96.bb +++ /dev/null @@ -1,114 +0,0 @@ -SUMMARY = "System-V like init" -DESCRIPTION = "This package is required to boot in most configurations. It provides the /sbin/init program. This is the first process started on boot, and the last process terminated before the system halts." 
-HOMEPAGE = "http://savannah.nongnu.org/projects/sysvinit/" -SECTION = "base" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \ - file://COPYRIGHT;endline=15;md5=a1d3b3526501d3546d530bbe6ab6cdbe \ - " - -RDEPENDS_${PN} = "${PN}-inittab" - -SRC_URI = "${SAVANNAH_GNU_MIRROR}/sysvinit/sysvinit-${PV}.tar.xz \ - file://install.patch \ - file://crypt-lib.patch \ - file://pidof-add-m-option.patch \ - file://realpath.patch \ - file://0001-include-sys-sysmacros.h-for-major-minor-defines-in-g.patch \ - file://rcS-default \ - file://rc \ - file://rcS \ - file://bootlogd.init \ - file://01_bootlogd \ - " -SRC_URI[md5sum] = "48cebffebf2a96ab09bec14bf9976016" -SRC_URI[sha256sum] = "2a2e26b72aa235a23ab1c8471005f890309ce1196c83fbc9413c57b9ab62b587" - -S = "${WORKDIR}/sysvinit-${PV}" -B = "${S}/src" - -inherit update-alternatives features_check -DEPENDS_append = " update-rc.d-native base-passwd virtual/crypt" -do_package_setscene[depends] = "${MLPREFIX}base-passwd:do_populate_sysroot" - -REQUIRED_DISTRO_FEATURES = "sysvinit" - -ALTERNATIVE_${PN} = "init mountpoint halt reboot runlevel shutdown poweroff last lastb mesg utmpdump wall" - -ALTERNATIVE_PRIORITY = "200" - -ALTERNATIVE_LINK_NAME[init] = "${base_sbindir}/init" -ALTERNATIVE_PRIORITY[init] = "50" - -ALTERNATIVE_LINK_NAME[mountpoint] = "${base_bindir}/mountpoint" -ALTERNATIVE_PRIORITY[mountpoint] = "20" - -ALTERNATIVE_LINK_NAME[halt] = "${base_sbindir}/halt" -ALTERNATIVE_LINK_NAME[reboot] = "${base_sbindir}/reboot" -ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel" -ALTERNATIVE_LINK_NAME[shutdown] = "${base_sbindir}/shutdown" -ALTERNATIVE_LINK_NAME[poweroff] = "${base_sbindir}/poweroff" - -ALTERNATIVE_${PN}-pidof = "pidof" -ALTERNATIVE_LINK_NAME[pidof] = "${base_bindir}/pidof" - -ALTERNATIVE_${PN}-sulogin = "sulogin" -ALTERNATIVE_LINK_NAME[sulogin] = "${base_sbindir}/sulogin" - -ALTERNATIVE_${PN}-doc = "mountpoint.1 last.1 lastb.1 mesg.1 wall.1 sulogin.8 utmpdump.1" - -ALTERNATIVE_LINK_NAME[last.1] = "${mandir}/man1/last.1" -ALTERNATIVE_LINK_NAME[lastb.1] = "${mandir}/man1/lastb.1" -ALTERNATIVE_LINK_NAME[mesg.1] = "${mandir}/man1/mesg.1" -ALTERNATIVE_LINK_NAME[mountpoint.1] = "${mandir}/man1/mountpoint.1" -ALTERNATIVE_LINK_NAME[sulogin.8] = "${mandir}/man8/sulogin.8" -ALTERNATIVE_LINK_NAME[utmpdump.1] = "${mandir}/man1/utmpdump.1" -ALTERNATIVE_LINK_NAME[wall.1] = "${mandir}/man1/wall.1" - -PACKAGES =+ "sysvinit-pidof sysvinit-sulogin" -FILES_${PN} += "${base_sbindir}/* ${base_bindir}/*" -FILES_sysvinit-pidof = "${base_bindir}/pidof.sysvinit ${base_sbindir}/killall5" -FILES_sysvinit-sulogin = "${base_sbindir}/sulogin.sysvinit" - -RDEPENDS_${PN} += "sysvinit-pidof initd-functions base-passwd" - -CFLAGS_prepend = "-D_GNU_SOURCE " -export LCRYPT = "-lcrypt" -EXTRA_OEMAKE += "'base_bindir=${base_bindir}' \ - 'base_sbindir=${base_sbindir}' \ - 'bindir=${bindir}' \ - 'sbindir=${sbindir}' \ - 'sysconfdir=${sysconfdir}' \ - 'includedir=${includedir}' \ - 'mandir=${mandir}' \ - MNTPOINT=yes" - -do_install () { - oe_runmake 'ROOT=${D}' install - - install -d ${D}${sysconfdir} \ - ${D}${sysconfdir}/default \ - ${D}${sysconfdir}/init.d - for level in S 0 1 2 3 4 5 6; do - install -d ${D}${sysconfdir}/rc$level.d - done - - install -m 0644 ${WORKDIR}/rcS-default ${D}${sysconfdir}/default/rcS - install -m 0755 ${WORKDIR}/rc ${D}${sysconfdir}/init.d - install -m 0755 ${WORKDIR}/rcS ${D}${sysconfdir}/init.d - install -m 0755 ${WORKDIR}/bootlogd.init ${D}${sysconfdir}/init.d/bootlogd - ln -sf bootlogd 
${D}${sysconfdir}/init.d/stop-bootlogd - - update-rc.d -r ${D} bootlogd start 07 S . - update-rc.d -r ${D} stop-bootlogd start 99 2 3 4 5 . - - install -d ${D}${sysconfdir}/default/volatiles - install -m 0644 ${WORKDIR}/01_bootlogd ${D}${sysconfdir}/default/volatiles - - chown root:shutdown ${D}${base_sbindir}/halt ${D}${base_sbindir}/shutdown - chmod o-x,u+s ${D}${base_sbindir}/halt ${D}${base_sbindir}/shutdown - - # Already provided by e2fsprogs; sysvinit's version is a copy from there - rm ${D}${base_sbindir}/logsave - rm ${D}${mandir}/man8/logsave.8 -} diff --git a/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb b/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb new file mode 100644 index 000000000..80ab9d70e --- /dev/null +++ b/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb @@ -0,0 +1,113 @@ +SUMMARY = "System-V like init" +DESCRIPTION = "This package is required to boot in most configurations. It provides the /sbin/init program. This is the first process started on boot, and the last process terminated before the system halts." +HOMEPAGE = "http://savannah.nongnu.org/projects/sysvinit/" +SECTION = "base" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \ + file://COPYRIGHT;endline=15;md5=a1d3b3526501d3546d530bbe6ab6cdbe \ + " + +RDEPENDS_${PN} = "${PN}-inittab" + +SRC_URI = "${SAVANNAH_GNU_MIRROR}/sysvinit/sysvinit-${PV}.tar.xz \ + file://install.patch \ + file://crypt-lib.patch \ + file://pidof-add-m-option.patch \ + file://realpath.patch \ + file://0001-include-sys-sysmacros.h-for-major-minor-defines-in-g.patch \ + file://rcS-default \ + file://rc \ + file://rcS \ + file://bootlogd.init \ + file://01_bootlogd \ + " +SRC_URI[sha256sum] = "2d5996857519bfd8634d2e1debabb3238fb38440f65fbfdc46420ee8bdf25110" + +S = "${WORKDIR}/sysvinit-${PV}" +B = "${S}/src" + +inherit update-alternatives features_check +DEPENDS_append = " update-rc.d-native base-passwd virtual/crypt" +do_package_setscene[depends] = "${MLPREFIX}base-passwd:do_populate_sysroot" + +REQUIRED_DISTRO_FEATURES = "sysvinit" + +ALTERNATIVE_${PN} = "init mountpoint halt reboot runlevel shutdown poweroff last lastb mesg utmpdump wall" + +ALTERNATIVE_PRIORITY = "200" + +ALTERNATIVE_LINK_NAME[init] = "${base_sbindir}/init" +ALTERNATIVE_PRIORITY[init] = "50" + +ALTERNATIVE_LINK_NAME[mountpoint] = "${base_bindir}/mountpoint" +ALTERNATIVE_PRIORITY[mountpoint] = "20" + +ALTERNATIVE_LINK_NAME[halt] = "${base_sbindir}/halt" +ALTERNATIVE_LINK_NAME[reboot] = "${base_sbindir}/reboot" +ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel" +ALTERNATIVE_LINK_NAME[shutdown] = "${base_sbindir}/shutdown" +ALTERNATIVE_LINK_NAME[poweroff] = "${base_sbindir}/poweroff" + +ALTERNATIVE_${PN}-pidof = "pidof" +ALTERNATIVE_LINK_NAME[pidof] = "${base_bindir}/pidof" + +ALTERNATIVE_${PN}-sulogin = "sulogin" +ALTERNATIVE_LINK_NAME[sulogin] = "${base_sbindir}/sulogin" + +ALTERNATIVE_${PN}-doc = "mountpoint.1 last.1 lastb.1 mesg.1 wall.1 sulogin.8 utmpdump.1" + +ALTERNATIVE_LINK_NAME[last.1] = "${mandir}/man1/last.1" +ALTERNATIVE_LINK_NAME[lastb.1] = "${mandir}/man1/lastb.1" +ALTERNATIVE_LINK_NAME[mesg.1] = "${mandir}/man1/mesg.1" +ALTERNATIVE_LINK_NAME[mountpoint.1] = "${mandir}/man1/mountpoint.1" +ALTERNATIVE_LINK_NAME[sulogin.8] = "${mandir}/man8/sulogin.8" +ALTERNATIVE_LINK_NAME[utmpdump.1] = "${mandir}/man1/utmpdump.1" +ALTERNATIVE_LINK_NAME[wall.1] = "${mandir}/man1/wall.1" + +PACKAGES =+ "sysvinit-pidof sysvinit-sulogin" +FILES_${PN} += "${base_sbindir}/* ${base_bindir}/*" +FILES_sysvinit-pidof = 
"${base_bindir}/pidof.sysvinit ${base_sbindir}/killall5" +FILES_sysvinit-sulogin = "${base_sbindir}/sulogin.sysvinit" + +RDEPENDS_${PN} += "sysvinit-pidof initd-functions base-passwd" + +CFLAGS_prepend = "-D_GNU_SOURCE " +export LCRYPT = "-lcrypt" +EXTRA_OEMAKE += "'base_bindir=${base_bindir}' \ + 'base_sbindir=${base_sbindir}' \ + 'bindir=${bindir}' \ + 'sbindir=${sbindir}' \ + 'sysconfdir=${sysconfdir}' \ + 'includedir=${includedir}' \ + 'mandir=${mandir}' \ + MNTPOINT=yes" + +do_install () { + oe_runmake 'ROOT=${D}' install + + install -d ${D}${sysconfdir} \ + ${D}${sysconfdir}/default \ + ${D}${sysconfdir}/init.d + for level in S 0 1 2 3 4 5 6; do + install -d ${D}${sysconfdir}/rc$level.d + done + + install -m 0644 ${WORKDIR}/rcS-default ${D}${sysconfdir}/default/rcS + install -m 0755 ${WORKDIR}/rc ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/rcS ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/bootlogd.init ${D}${sysconfdir}/init.d/bootlogd + ln -sf bootlogd ${D}${sysconfdir}/init.d/stop-bootlogd + + update-rc.d -r ${D} bootlogd start 07 S . + update-rc.d -r ${D} stop-bootlogd start 99 2 3 4 5 . + + install -d ${D}${sysconfdir}/default/volatiles + install -m 0644 ${WORKDIR}/01_bootlogd ${D}${sysconfdir}/default/volatiles + + chown root:shutdown ${D}${base_sbindir}/halt ${D}${base_sbindir}/shutdown + chmod o-x,u+s ${D}${base_sbindir}/halt ${D}${base_sbindir}/shutdown + + # Already provided by e2fsprogs; sysvinit's version is a copy from there + rm ${D}${base_sbindir}/logsave + rm ${D}${mandir}/man8/logsave.8 +} diff --git a/poky/meta/recipes-core/volatile-binds/files/volatile-binds.service.in b/poky/meta/recipes-core/volatile-binds/files/volatile-binds.service.in index b23355a71..e2ad39f25 100644 --- a/poky/meta/recipes-core/volatile-binds/files/volatile-binds.service.in +++ b/poky/meta/recipes-core/volatile-binds/files/volatile-binds.service.in @@ -10,7 +10,6 @@ ConditionPathIsReadWrite=!@where@ [Service] Type=oneshot RemainAfterExit=Yes -StandardOutput=syslog TimeoutSec=0 ExecStart=/sbin/mount-copybind @what@ @where@ ExecStop=/bin/umount @where@ diff --git a/poky/meta/recipes-devtools/binutils/binutils-2.34.inc b/poky/meta/recipes-devtools/binutils/binutils-2.34.inc deleted file mode 100644 index f65fdb732..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils-2.34.inc +++ /dev/null @@ -1,47 +0,0 @@ -LIC_FILES_CHKSUM="\ - file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552\ - file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674\ - file://COPYING3;md5=d32239bcb673463ab874e80d47fae504\ - file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6\ - file://gas/COPYING;md5=d32239bcb673463ab874e80d47fae504\ - file://include/COPYING;md5=59530bdf33659b29e73d4adb9f9f6552\ - file://include/COPYING3;md5=d32239bcb673463ab874e80d47fae504\ - file://libiberty/COPYING.LIB;md5=a916467b91076e631dd8edb7424769c7\ - file://bfd/COPYING;md5=d32239bcb673463ab874e80d47fae504\ - " - -def binutils_branch_version(d): - pvsplit = d.getVar('PV').split('.') - return pvsplit[0] + "_" + pvsplit[1] - -# When upgrading to 2.35, please make sure there is no trailing .0, so -# that upstream version check can work correctly. 
-PV = "2.34" -CVE_VERSION = "2.34" -BINUPV = "${@binutils_branch_version(d)}" -#BRANCH = "binutils-${BINUPV}-branch" -BRANCH ?= "binutils-2_34-branch" - -UPSTREAM_CHECK_GITTAGREGEX = "binutils-(?P\d+_(\d_?)*)" - -SRCREV ?= "d4b50999b3b287b5f984ade2f8734aa8c9359440" -BINUTILS_GIT_URI ?= "git://sourceware.org/git/binutils-gdb.git;branch=${BRANCH};protocol=git" -SRC_URI = "\ - ${BINUTILS_GIT_URI} \ - file://0004-configure-widen-the-regexp-for-SH-architectures.patch \ - file://0005-Point-scripts-location-to-libdir.patch \ - file://0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch \ - file://0007-Use-libtool-2.4.patch \ - file://0008-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch \ - file://0009-warn-for-uses-of-system-directories-when-cross-linki.patch \ - file://0010-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch \ - file://0011-Change-default-emulation-for-mips64-linux.patch \ - file://0012-Add-support-for-Netlogic-XLP.patch \ - file://0013-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch \ - file://0014-Detect-64-bit-MIPS-targets.patch \ - file://0015-sync-with-OE-libtool-changes.patch \ - file://0016-Check-for-clang-before-checking-gcc-version.patch \ - file://0017-binutils-drop-redundant-program_name-definition-fno-.patch \ - file://CVE-2020-0551.patch \ -" -S = "${WORKDIR}/git" diff --git a/poky/meta/recipes-devtools/binutils/binutils-2.35.inc b/poky/meta/recipes-devtools/binutils/binutils-2.35.inc new file mode 100644 index 000000000..5ae9ec422 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils-2.35.inc @@ -0,0 +1,45 @@ +LIC_FILES_CHKSUM="\ + file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552\ + file://COPYING.LIB;md5=9f604d8a4f8e74f4f5140845a21b6674\ + file://COPYING3;md5=d32239bcb673463ab874e80d47fae504\ + file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6\ + file://gas/COPYING;md5=d32239bcb673463ab874e80d47fae504\ + file://include/COPYING;md5=59530bdf33659b29e73d4adb9f9f6552\ + file://include/COPYING3;md5=d32239bcb673463ab874e80d47fae504\ + file://libiberty/COPYING.LIB;md5=a916467b91076e631dd8edb7424769c7\ + file://bfd/COPYING;md5=d32239bcb673463ab874e80d47fae504\ + " + +def binutils_branch_version(d): + pvsplit = d.getVar('PV').split('.') + return pvsplit[0] + "_" + pvsplit[1] + +# When upgrading to 2.35, please make sure there is no trailing .0, so +# that upstream version check can work correctly. 
+PV = "2.35" +CVE_VERSION = "2.35" +BINUPV = "${@binutils_branch_version(d)}" +#BRANCH = "binutils-${BINUPV}-branch" +BRANCH ?= "binutils-2_35-branch" + +UPSTREAM_CHECK_GITTAGREGEX = "binutils-(?P\d+_(\d_?)*)" + +SRCREV ?= "89a9065674a14a8bd94bb326b27d19a2f3583efb" +BINUTILS_GIT_URI ?= "git://sourceware.org/git/binutils-gdb.git;branch=${BRANCH};protocol=git" +SRC_URI = "\ + ${BINUTILS_GIT_URI} \ + file://0004-configure-widen-the-regexp-for-SH-architectures.patch \ + file://0005-Point-scripts-location-to-libdir.patch \ + file://0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch \ + file://0007-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch \ + file://0008-warn-for-uses-of-system-directories-when-cross-linki.patch \ + file://0009-Change-default-emulation-for-mips64-linux.patch \ + file://0010-Add-support-for-Netlogic-XLP.patch \ + file://0011-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch \ + file://0012-Detect-64-bit-MIPS-targets.patch \ + file://0013-Use-libtool-2.4.patch \ + file://0014-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch \ + file://0015-sync-with-OE-libtool-changes.patch \ + file://0016-Check-for-clang-before-checking-gcc-version.patch \ +" +S = "${WORKDIR}/git" diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.34.bb b/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.34.bb deleted file mode 100644 index 5dbaa0301..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.34.bb +++ /dev/null @@ -1,3 +0,0 @@ -require binutils.inc -require binutils-${PV}.inc -require binutils-cross-canadian.inc diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.35.bb b/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.35.bb new file mode 100644 index 000000000..5dbaa0301 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils-cross-canadian_2.35.bb @@ -0,0 +1,3 @@ +require binutils.inc +require binutils-${PV}.inc +require binutils-cross-canadian.inc diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.34.bb b/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.34.bb deleted file mode 100644 index 07a8e7c41..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.34.bb +++ /dev/null @@ -1,85 +0,0 @@ -require binutils.inc -require binutils-${PV}.inc - -BPN = "binutils" - -DEPENDS += "dejagnu-native expect-native" -DEPENDS += "binutils-native" - -deltask do_compile -deltask do_install - -inherit nopackages - -do_configure[dirs] += "${B}/ld ${B}/bfd" -do_configure() { - # create config.h, oe enables initfini-array by default - echo "#define HAVE_INITFINI_ARRAY" > ${B}/ld/config.h - # use the bfd_stdint.h from binutils-native, this is the same of the one - # generated by binutils-cross - cp ${RECIPE_SYSROOT_NATIVE}/usr/include/bfd_stdint.h ${B}/bfd/ -} - -# target depends -DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils" -DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}gcc" -DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}compilerlibs" -DEPENDS += "virtual/${MLPREFIX}libc" - -python check_prepare() { - def suffix_sys(sys): - if sys.endswith("-linux"): - return sys + "-gnu" - return sys - - def generate_site_exp(d, suite): - content = [] - content.append('set srcdir "{0}/{1}"'.format(d.getVar("S"), suite)) - content.append('set objdir "{0}/{1}"'.format(d.getVar("B"), suite)) - content.append('set build_alias "{0}"'.format(d.getVar("BUILD_SYS"))) - content.append('set build_triplet 
{0}'.format(d.getVar("BUILD_SYS"))) - # use BUILD here since HOST=TARGET - content.append('set host_alias "{0}"'.format(d.getVar("BUILD_SYS"))) - content.append('set host_triplet {0}'.format(d.getVar("BUILD_SYS"))) - content.append('set target_alias "{0}"'.format(d.getVar("TARGET_SYS"))) - content.append('set target_triplet {0}'.format(suffix_sys(d.getVar("TARGET_SYS")))) - content.append("set development true") - content.append("set experimental false") - - content.append(d.expand('set CXXFILT "${TARGET_PREFIX}c++filt"')) - content.append(d.expand('set CC "${TARGET_PREFIX}gcc --sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) - content.append(d.expand('set CXX "${TARGET_PREFIX}g++ --sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) - content.append(d.expand('set CFLAGS_FOR_TARGET "--sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) - - if suite == "ld" and d.getVar("TUNE_ARCH") == "mips64": - # oe patches binutils to have the default mips64 abi as 64bit, but - # skips gas causing issues with the ld test suite (which uses gas) - content.append('set ASFLAGS "-64"') - - return "\n".join(content) - - for i in ["binutils", "gas", "ld"]: - builddir = os.path.join(d.getVar("B"), i) - if not os.path.isdir(builddir): - os.makedirs(builddir) - with open(os.path.join(builddir, "site.exp"), "w") as f: - f.write(generate_site_exp(d, i)) -} - -CHECK_TARGETS ??= "binutils gas ld" - -do_check[dirs] = "${B} ${B}/binutils ${B}/gas ${B}/ld" -do_check[prefuncs] += "check_prepare" -do_check[nostamp] = "1" -do_check() { - export LC_ALL=C - for i in ${CHECK_TARGETS}; do - (cd ${B}/$i; runtest \ - --tool $i \ - --srcdir ${S}/$i/testsuite \ - --ignore 'plugin.exp' \ - || true) - done -} -addtask check after do_configure - diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.35.bb b/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.35.bb new file mode 100644 index 000000000..07a8e7c41 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils-cross-testsuite_2.35.bb @@ -0,0 +1,85 @@ +require binutils.inc +require binutils-${PV}.inc + +BPN = "binutils" + +DEPENDS += "dejagnu-native expect-native" +DEPENDS += "binutils-native" + +deltask do_compile +deltask do_install + +inherit nopackages + +do_configure[dirs] += "${B}/ld ${B}/bfd" +do_configure() { + # create config.h, oe enables initfini-array by default + echo "#define HAVE_INITFINI_ARRAY" > ${B}/ld/config.h + # use the bfd_stdint.h from binutils-native, this is the same of the one + # generated by binutils-cross + cp ${RECIPE_SYSROOT_NATIVE}/usr/include/bfd_stdint.h ${B}/bfd/ +} + +# target depends +DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}binutils" +DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}gcc" +DEPENDS += "virtual/${MLPREFIX}${TARGET_PREFIX}compilerlibs" +DEPENDS += "virtual/${MLPREFIX}libc" + +python check_prepare() { + def suffix_sys(sys): + if sys.endswith("-linux"): + return sys + "-gnu" + return sys + + def generate_site_exp(d, suite): + content = [] + content.append('set srcdir "{0}/{1}"'.format(d.getVar("S"), suite)) + content.append('set objdir "{0}/{1}"'.format(d.getVar("B"), suite)) + content.append('set build_alias "{0}"'.format(d.getVar("BUILD_SYS"))) + content.append('set build_triplet {0}'.format(d.getVar("BUILD_SYS"))) + # use BUILD here since HOST=TARGET + content.append('set host_alias "{0}"'.format(d.getVar("BUILD_SYS"))) + content.append('set host_triplet {0}'.format(d.getVar("BUILD_SYS"))) + content.append('set target_alias "{0}"'.format(d.getVar("TARGET_SYS"))) + content.append('set 
target_triplet {0}'.format(suffix_sys(d.getVar("TARGET_SYS")))) + content.append("set development true") + content.append("set experimental false") + + content.append(d.expand('set CXXFILT "${TARGET_PREFIX}c++filt"')) + content.append(d.expand('set CC "${TARGET_PREFIX}gcc --sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) + content.append(d.expand('set CXX "${TARGET_PREFIX}g++ --sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) + content.append(d.expand('set CFLAGS_FOR_TARGET "--sysroot=${STAGING_DIR_TARGET} ${TUNE_CCARGS}"')) + + if suite == "ld" and d.getVar("TUNE_ARCH") == "mips64": + # oe patches binutils to have the default mips64 abi as 64bit, but + # skips gas causing issues with the ld test suite (which uses gas) + content.append('set ASFLAGS "-64"') + + return "\n".join(content) + + for i in ["binutils", "gas", "ld"]: + builddir = os.path.join(d.getVar("B"), i) + if not os.path.isdir(builddir): + os.makedirs(builddir) + with open(os.path.join(builddir, "site.exp"), "w") as f: + f.write(generate_site_exp(d, i)) +} + +CHECK_TARGETS ??= "binutils gas ld" + +do_check[dirs] = "${B} ${B}/binutils ${B}/gas ${B}/ld" +do_check[prefuncs] += "check_prepare" +do_check[nostamp] = "1" +do_check() { + export LC_ALL=C + for i in ${CHECK_TARGETS}; do + (cd ${B}/$i; runtest \ + --tool $i \ + --srcdir ${S}/$i/testsuite \ + --ignore 'plugin.exp' \ + || true) + done +} +addtask check after do_configure + diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross_2.34.bb b/poky/meta/recipes-devtools/binutils/binutils-cross_2.34.bb deleted file mode 100644 index fbd1f7d25..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils-cross_2.34.bb +++ /dev/null @@ -1,3 +0,0 @@ -require binutils.inc -require binutils-${PV}.inc -require binutils-cross.inc diff --git a/poky/meta/recipes-devtools/binutils/binutils-cross_2.35.bb b/poky/meta/recipes-devtools/binutils/binutils-cross_2.35.bb new file mode 100644 index 000000000..fbd1f7d25 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils-cross_2.35.bb @@ -0,0 +1,3 @@ +require binutils.inc +require binutils-${PV}.inc +require binutils-cross.inc diff --git a/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.34.bb b/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.34.bb deleted file mode 100644 index 37f4d6d2e..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.34.bb +++ /dev/null @@ -1,13 +0,0 @@ -require binutils-cross_${PV}.bb - -inherit crosssdk - -PN = "binutils-crosssdk-${SDK_SYS}" - -PROVIDES = "virtual/${TARGET_PREFIX}binutils-crosssdk" - -SRC_URI += "file://0001-binutils-crosssdk-Generate-relocatable-SDKs.patch" - -do_configure_prepend () { - sed -i 's#/usr/local/lib /lib /usr/lib#${SDKPATHNATIVE}/lib ${SDKPATHNATIVE}/usr/lib /usr/local/lib /lib /usr/lib#' ${S}/ld/configure.tgt -} diff --git a/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.35.bb b/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.35.bb new file mode 100644 index 000000000..37f4d6d2e --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils-crosssdk_2.35.bb @@ -0,0 +1,13 @@ +require binutils-cross_${PV}.bb + +inherit crosssdk + +PN = "binutils-crosssdk-${SDK_SYS}" + +PROVIDES = "virtual/${TARGET_PREFIX}binutils-crosssdk" + +SRC_URI += "file://0001-binutils-crosssdk-Generate-relocatable-SDKs.patch" + +do_configure_prepend () { + sed -i 's#/usr/local/lib /lib /usr/lib#${SDKPATHNATIVE}/lib ${SDKPATHNATIVE}/usr/lib /usr/local/lib /lib /usr/lib#' ${S}/ld/configure.tgt +} diff --git 
a/poky/meta/recipes-devtools/binutils/binutils/0001-binutils-crosssdk-Generate-relocatable-SDKs.patch b/poky/meta/recipes-devtools/binutils/binutils/0001-binutils-crosssdk-Generate-relocatable-SDKs.patch index 14e84215e..4c1d11291 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0001-binutils-crosssdk-Generate-relocatable-SDKs.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0001-binutils-crosssdk-Generate-relocatable-SDKs.patch @@ -1,24 +1,40 @@ -From b05f0be13aadf0b26a0b39dfe7daf2c47a300338 Mon Sep 17 00:00:00 2001 +From a0b23b160d6cfa7be4437c6e623633d76395f2ad Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Mon, 2 Mar 2015 01:58:54 +0000 -Subject: [PATCH] binutils-crosssdk: Generate relocatable SDKs +Subject: [PATCH 01/16] binutils-crosssdk: Generate relocatable SDKs This patch will modify the ELF linker scripts so that the crosssdk linker will generate binaries with a 4096 bytes PT_INTERP section. When the binaries will be relocated, at SDK install time, the interpreter path can be easily changed by the relocating script. +generate larger .interp section for gold linker as well + Upstream-Status: Inappropriate [SDK specific] Signed-off-by: Laurentiu Palcu Signed-off-by: Khem Raj --- + gold/layout.cc | 2 +- ld/genscripts.sh | 3 +++ ld/scripttempl/elf.sc | 4 ++-- - 2 files changed, 5 insertions(+), 2 deletions(-) + 3 files changed, 6 insertions(+), 3 deletions(-) +diff --git a/gold/layout.cc b/gold/layout.cc +index 13e533aaf21..b0afff16e2e 100644 +--- a/gold/layout.cc ++++ b/gold/layout.cc +@@ -5019,7 +5019,7 @@ Layout::create_interp(const Target* target) + gold_assert(interp != NULL); + } + +- size_t len = strlen(interp) + 1; ++ size_t len = 4096; + + Output_section_data* odata = new Output_data_const(interp, len, 1); + diff --git a/ld/genscripts.sh b/ld/genscripts.sh -index 03392d265c..435689ea14 100755 +index 03392d265c7..435689ea144 100755 --- a/ld/genscripts.sh +++ b/ld/genscripts.sh @@ -304,6 +304,7 @@ DATA_ALIGNMENT_u="${DATA_ALIGNMENT_u-${DATA_ALIGNMENT_r}}" @@ -43,10 +59,10 @@ index 03392d265c..435689ea14 100755 DATA_ALIGNMENT=${DATA_ALIGNMENT_} RELOCATING=" " diff --git a/ld/scripttempl/elf.sc b/ld/scripttempl/elf.sc -index 0b8b32a440..ee6b71075d 100644 +index eb74743e5c3..c9a8a47615f 100644 --- a/ld/scripttempl/elf.sc +++ b/ld/scripttempl/elf.sc -@@ -140,8 +140,8 @@ if test -z "$DATA_SEGMENT_ALIGN"; then +@@ -143,8 +143,8 @@ if test -z "$DATA_SEGMENT_ALIGN"; then DATA_SEGMENT_RELRO_END=". 
= DATA_SEGMENT_RELRO_END (${SEPARATE_GOTPLT-0}, .);" fi fi @@ -57,3 +73,6 @@ index 0b8b32a440..ee6b71075d 100644 fi if test -z "$PLT"; then IPLT=".iplt ${RELOCATING-0} : { *(.iplt) }" +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0002-binutils-cross-Do-not-generate-linker-script-directo.patch b/poky/meta/recipes-devtools/binutils/binutils/0002-binutils-cross-Do-not-generate-linker-script-directo.patch index 91de94b76..41dff05b2 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0002-binutils-cross-Do-not-generate-linker-script-directo.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0002-binutils-cross-Do-not-generate-linker-script-directo.patch @@ -1,7 +1,8 @@ -From 6d79f81336e7cbe727b5a51134f5d314a455379d Mon Sep 17 00:00:00 2001 +From 0d14f8f333a9b519202246ce779f3e380491826c Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Mon, 6 Mar 2017 23:37:05 -0800 -Subject: [PATCH] binutils-cross: Do not generate linker script directories +Subject: [PATCH 02/17] binutils-cross: Do not generate linker script + directories We don't place target libraries within ${exec_prefix}, we'd always place these within the target sysroot within the standard library directories. Worse, the @@ -22,7 +23,7 @@ Signed-off-by: Khem Raj 1 file changed, 25 deletions(-) diff --git a/ld/genscripts.sh b/ld/genscripts.sh -index 435689ea14..cff8a1467f 100755 +index 435689ea144..cff8a1467f9 100755 --- a/ld/genscripts.sh +++ b/ld/genscripts.sh @@ -235,31 +235,6 @@ append_to_lib_path() @@ -57,3 +58,6 @@ index 435689ea14..cff8a1467f 100755 if [ "x${LIB_PATH}" = "x" ] && [ "x${USE_LIBPATH}" = xyes ] ; then libs=${NATIVE_LIB_DIRS} if [ "x${NATIVE}" = "xyes" ] ; then +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch b/poky/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch index 4b4d73d21..28daf58f7 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch @@ -1,8 +1,8 @@ -From d057b215f114f6158b4010ab44a19cae9dcc8386 Mon Sep 17 00:00:00 2001 +From 3bbec749ec9fe681ade6812c48c541a752fcffd5 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Wed, 19 Feb 2020 09:51:16 -0800 -Subject: [PATCH] binutils-nativesdk: Search for alternative ld.so.conf in SDK - installation +Subject: [PATCH 03/17] binutils-nativesdk: Search for alternative ld.so.conf + in SDK installation We need binutils to look at our ld.so.conf file within the SDK to ensure we search the SDK's libdirs as well as those from the host system. 
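
The patch whose description ends here (0003, searching an alternative ld.so.conf in the SDK installation) boils down to having the linker read an ld.so.conf that lives under the SDK's sysconfdir rather than one tied to the build-time sysroot. For orientation only, the sketch below shows roughly what such a lookup amounts to; it mirrors the general ld.so.conf format, is not the ldelf.c implementation, and the SDK path in the example is hypothetical.

    import glob, os

    def read_ld_so_conf(path):
        """Collect library search directories from an ld.so.conf-style file (sketch)."""
        dirs = []
        if not os.path.exists(path):
            return dirs
        with open(path) as conf:
            for line in conf:
                line = line.split("#", 1)[0].strip()      # drop comments and blanks
                if not line:
                    continue
                if line.startswith("include "):           # follow include globs
                    pattern = line[len("include "):].strip()
                    for included in sorted(glob.glob(pattern)):
                        dirs.extend(read_ld_so_conf(included))
                else:
                    dirs.append(line)
        return dirs

    # Hypothetical path of an SDK-installed ld.so.conf:
    print(read_ld_so_conf("/opt/poky/sdk/sysroots/x86_64-pokysdk-linux/etc/ld.so.conf"))
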
@@ -29,7 +29,7 @@ Signed-off-by: Khem Raj 5 files changed, 7 insertions(+), 3 deletions(-) diff --git a/ld/Makefile.am b/ld/Makefile.am -index 4a9b8404b7..1c132d3ce4 100644 +index 02c4fc16395..a5762227f0a 100644 --- a/ld/Makefile.am +++ b/ld/Makefile.am @@ -42,7 +42,8 @@ ZLIBINC = @zlibinc@ @@ -43,10 +43,10 @@ index 4a9b8404b7..1c132d3ce4 100644 NO_WERROR = @NO_WERROR@ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS) diff --git a/ld/Makefile.in b/ld/Makefile.in -index 46d9b14077..e453bc1b33 100644 +index 2fe12e14f63..8f0c83ac8d2 100644 --- a/ld/Makefile.in +++ b/ld/Makefile.in -@@ -555,7 +555,8 @@ ZLIB = @zlibdir@ -lz +@@ -548,7 +548,8 @@ ZLIB = @zlibdir@ -lz ZLIBINC = @zlibinc@ ELF_CLFAGS = -DELF_LIST_OPTIONS=@elf_list_options@ \ -DELF_SHLIB_LIST_OPTIONS=@elf_shlib_list_options@ \ @@ -55,25 +55,25 @@ index 46d9b14077..e453bc1b33 100644 + -DSYSCONFDIR="\"$(sysconfdir)\"" AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS) - @ENABLE_PLUGINS_FALSE@PLUGIN_C = + diff --git a/ld/ldelf.c b/ld/ldelf.c -index 2e27cf48a8..a095d6aac5 100644 +index bada3ade2d7..b4784009d7a 100644 --- a/ld/ldelf.c +++ b/ld/ldelf.c -@@ -907,7 +907,7 @@ ldelf_check_ld_so_conf (const struct bfd_link_needed_list *l, int force, +@@ -911,7 +911,7 @@ ldelf_check_ld_so_conf (const struct bfd_link_needed_list *l, int force, info.path = NULL; info.len = info.alloc = 0; - tmppath = concat (ld_sysroot, prefix, "/etc/ld.so.conf", -+ tmppath = concat (ld_sysconfdir, "/ld.so.conf", ++ tmppath = concat (ld_sysconfdir, "/etc/ld.so.conf", (const char *) NULL); if (!ldelf_parse_ld_so_conf (&info, tmppath)) { diff --git a/ld/ldmain.c b/ld/ldmain.c -index c4af10f4e9..da1ad17763 100644 +index 08be9030cb5..f5c5a336320 100644 --- a/ld/ldmain.c +++ b/ld/ldmain.c -@@ -69,6 +69,7 @@ char *program_name; +@@ -70,6 +70,7 @@ char *program_name; /* The prefix for system library directories. */ const char *ld_sysroot; @@ -82,7 +82,7 @@ index c4af10f4e9..da1ad17763 100644 /* The canonical representation of ld_sysroot. 
*/ char *ld_canon_sysroot; diff --git a/ld/ldmain.h b/ld/ldmain.h -index 0f05821d1e..54c36a94ce 100644 +index ac7db5720d5..1cbe1771912 100644 --- a/ld/ldmain.h +++ b/ld/ldmain.h @@ -23,6 +23,7 @@ @@ -93,3 +93,6 @@ index 0f05821d1e..54c36a94ce 100644 extern char *ld_canon_sysroot; extern int ld_canon_sysroot_len; extern FILE *saved_script_handle; +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0004-configure-widen-the-regexp-for-SH-architectures.patch b/poky/meta/recipes-devtools/binutils/binutils/0004-configure-widen-the-regexp-for-SH-architectures.patch index d10a144ed..2381c7b85 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0004-configure-widen-the-regexp-for-SH-architectures.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0004-configure-widen-the-regexp-for-SH-architectures.patch @@ -1,7 +1,7 @@ -From f7ad8a44c10f01e03680f093fd7af71c9f5e8606 Mon Sep 17 00:00:00 2001 +From 361c2c313196c095d12d17cecf0a069107dd629b Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Mon, 2 Mar 2015 01:07:33 +0000 -Subject: [PATCH] configure: widen the regexp for SH architectures +Subject: [PATCH 04/17] configure: widen the regexp for SH architectures gprof needs to know about uclibc @@ -14,10 +14,10 @@ Signed-off-by: Khem Raj 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configure b/configure -index 91dc42f6c7..7c1b747e7c 100755 +index 54d0339ab9e..6782f8b6ab8 100755 --- a/configure +++ b/configure -@@ -3915,7 +3915,7 @@ case "${target}" in +@@ -3937,7 +3937,7 @@ case "${target}" in nvptx*-*-*) noconfigdirs="$noconfigdirs target-libssp target-libstdc++-v3 target-libobjc" ;; @@ -27,10 +27,10 @@ index 91dc42f6c7..7c1b747e7c 100755 sh*-*-elf) ;; diff --git a/configure.ac b/configure.ac -index 4bd869a63a..7187b34dfc 100644 +index a910c4fd6ba..55beb1dea46 100644 --- a/configure.ac +++ b/configure.ac -@@ -1159,7 +1159,7 @@ case "${target}" in +@@ -1178,7 +1178,7 @@ case "${target}" in nvptx*-*-*) noconfigdirs="$noconfigdirs target-libssp target-libstdc++-v3 target-libobjc" ;; @@ -39,3 +39,6 @@ index 4bd869a63a..7187b34dfc 100644 case "${target}" in sh*-*-elf) ;; +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0005-Point-scripts-location-to-libdir.patch b/poky/meta/recipes-devtools/binutils/binutils/0005-Point-scripts-location-to-libdir.patch index 3ba0ba63f..c31a0bc9a 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0005-Point-scripts-location-to-libdir.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0005-Point-scripts-location-to-libdir.patch @@ -1,7 +1,7 @@ -From 08b088b7dd3d9707ed66948a7271ffb438eeddf5 Mon Sep 17 00:00:00 2001 +From 39b478b7e52fb1b892a392f3c3750b9023cfabc8 Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Mon, 2 Mar 2015 01:09:58 +0000 -Subject: [PATCH] Point scripts location to libdir +Subject: [PATCH 05/17] Point scripts location to libdir Upstream-Status: Inappropriate [debian patch] @@ -12,10 +12,10 @@ Signed-off-by: Khem Raj 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ld/Makefile.am b/ld/Makefile.am -index 1c132d3ce4..8d82cf2e16 100644 +index a5762227f0a..e1c665898f3 100644 --- a/ld/Makefile.am +++ b/ld/Makefile.am -@@ -64,7 +64,7 @@ endif +@@ -51,7 +51,7 @@ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS) # We put the scripts in the directory $(scriptdir)/ldscripts. # We can't put the scripts in $(datadir) because the SEARCH_DIR # directives need to be different for native and cross linkers. 
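
Patch 0005, whose header appears just above (its remaining hunks follow), points scriptdir at $(libdir) per its subject, so the emulation scripts referred to by the quoted Makefile comment end up installed and searched under <libdir>/ldscripts. A small illustration of the resulting lookup path; the directory and emulation values are example assumptions.

    import os

    libdir = "/usr/lib"          # example value of $(libdir)
    scriptdir = libdir           # where the patch points scriptdir, per its subject
    emulation = "elf_x86_64"     # hypothetical emulation name

    # ld's default script for an emulation lives at <scriptdir>/ldscripts/<emulation>.x
    print(os.path.join(scriptdir, "ldscripts", emulation + ".x"))
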
@@ -25,10 +25,10 @@ index 1c132d3ce4..8d82cf2e16 100644 EMUL = @EMUL@ EMULATION_OFILES = @EMULATION_OFILES@ diff --git a/ld/Makefile.in b/ld/Makefile.in -index e453bc1b33..2dcd72f809 100644 +index 8f0c83ac8d2..acc24ec4130 100644 --- a/ld/Makefile.in +++ b/ld/Makefile.in -@@ -573,7 +573,7 @@ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS) +@@ -556,7 +556,7 @@ AM_CFLAGS = $(WARN_CFLAGS) $(ELF_CLFAGS) # We put the scripts in the directory $(scriptdir)/ldscripts. # We can't put the scripts in $(datadir) because the SEARCH_DIR # directives need to be different for native and cross linkers. @@ -37,3 +37,6 @@ index e453bc1b33..2dcd72f809 100644 BASEDIR = $(srcdir)/.. BFDDIR = $(BASEDIR)/bfd INCDIR = $(BASEDIR)/include +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch b/poky/meta/recipes-devtools/binutils/binutils/0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch index d92072920..654b64ff7 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0006-Only-generate-an-RPATH-entry-if-LD_RUN_PATH-is-not-e.patch @@ -1,7 +1,8 @@ -From 13791636abf518f0db209dc51c29445d80421f2e Mon Sep 17 00:00:00 2001 +From 59640ddf11104a604ccf7c078a48359d711c2f9c Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Mon, 2 Mar 2015 01:27:17 +0000 -Subject: [PATCH] Only generate an RPATH entry if LD_RUN_PATH is not empty +Subject: [PATCH 06/17] Only generate an RPATH entry if LD_RUN_PATH is not + empty for cases where -rpath isn't specified. debian (#151024) @@ -14,10 +15,10 @@ Signed-off-by: Khem Raj 1 file changed, 4 insertions(+) diff --git a/ld/ldelf.c b/ld/ldelf.c -index a095d6aac5..a2455a8793 100644 +index b4784009d7a..541772f4604 100644 --- a/ld/ldelf.c +++ b/ld/ldelf.c -@@ -1229,6 +1229,8 @@ ldelf_after_open (int use_libpath, int native, int is_linux, int is_freebsd, +@@ -1247,6 +1247,8 @@ ldelf_after_open (int use_libpath, int native, int is_linux, int is_freebsd, && command_line.rpath == NULL) { path = (const char *) getenv ("LD_RUN_PATH"); @@ -26,7 +27,7 @@ index a095d6aac5..a2455a8793 100644 if (path && ldelf_search_needed (path, &n, force, is_linux, elfsize)) -@@ -1573,6 +1575,8 @@ ldelf_before_allocation (char *audit, char *depaudit, +@@ -1605,6 +1607,8 @@ ldelf_before_allocation (char *audit, char *depaudit, rpath = command_line.rpath; if (rpath == NULL) rpath = (const char *) getenv ("LD_RUN_PATH"); @@ -35,3 +36,6 @@ index a095d6aac5..a2455a8793 100644 for (abfd = link_info.input_bfds; abfd; abfd = abfd->link.next) if (bfd_get_flavour (abfd) == bfd_target_elf_flavour) +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0007-Use-libtool-2.4.patch b/poky/meta/recipes-devtools/binutils/binutils/0007-Use-libtool-2.4.patch deleted file mode 100644 index 89da62fbf..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0007-Use-libtool-2.4.patch +++ /dev/null @@ -1,21234 +0,0 @@ -From b8fafd8ce6c47c9a551ed796176d884a34930086 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Sun, 14 Feb 2016 17:04:07 +0000 -Subject: [PATCH 07/15] Use libtool 2.4 - -get libtool sysroot support - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - bfd/configure | 1322 +++++++++++++++----- - bfd/configure.ac | 2 +- - binutils/configure | 1320 +++++++++++++++----- - gas/configure | 1320 +++++++++++++++----- - gprof/configure | 1320 +++++++++++++++----- - ld/configure | 1695 ++++++++++++++++++------- - 
libctf/configure | 1319 +++++++++++++++----- - libtool.m4 | 1080 +++++++++++----- - ltmain.sh | 2925 +++++++++++++++++++++++++++++--------------- - ltoptions.m4 | 2 +- - ltversion.m4 | 12 +- - lt~obsolete.m4 | 2 +- - opcodes/configure | 1320 +++++++++++++++----- - 13 files changed, 9939 insertions(+), 3700 deletions(-) - -diff --git a/bfd/configure b/bfd/configure -index bc576b7894a..cd036638081 100755 ---- a/bfd/configure -+++ b/bfd/configure -@@ -704,6 +704,9 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL -+ac_ct_AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -822,6 +825,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_plugins - enable_largefile -@@ -1504,6 +1508,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - --with-mmap try using mmap for BFD input files if available - --with-separate-debug-dir=DIR - Look for global separate debug info in DIR -@@ -5693,8 +5699,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5734,7 +5740,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6420,8 +6426,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6470,6 +6476,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... 
" >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6486,6 +6566,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6654,7 +6739,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. 
-+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6808,6 +6894,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6821,11 +6922,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6841,7 +7095,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6861,11 +7115,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. -+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... 
" >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6881,7 +7139,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6900,6 +7158,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6911,16 +7173,72 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ - -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a - -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } - -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi - - - -@@ -7262,8 +7580,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -7299,6 +7617,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -7340,6 +7659,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -7351,7 +7682,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -7377,8 +7708,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -7388,8 +7719,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7426,6 +7757,16 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ - - - -@@ -7442,6 +7783,45 @@ fi - - - -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7653,6 +8033,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... 
" >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -8216,6 +8713,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -8380,7 +8879,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8469,7 +8969,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8767,8 +9267,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8934,6 +9432,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8996,7 +9500,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -9053,13 +9557,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -9120,6 +9628,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9470,7 +9983,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9569,12 +10083,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9588,8 +10102,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -9607,8 +10121,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9654,8 +10168,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9785,7 +10299,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9798,22 +10318,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. 
-+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9825,7 +10352,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9838,22 +10371,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9898,20 +10438,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. 
-- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9972,7 +10555,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9980,7 +10563,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9996,7 +10579,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -10020,10 +10603,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -10102,23 +10685,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -10203,7 +10799,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -10222,9 +10818,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10800,8 
+11396,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10834,13 +11431,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10932,7 +11587,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11728,7 +12383,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11731 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11772,10 +12427,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11834,7 +12489,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11837 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11878,10 +12533,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -14554,7 +15209,7 @@ SHARED_LDFLAGS= - if test "$enable_shared" = "yes"; then - x=`sed -n -e 's/^[ ]*PICFLAG[ ]*=[ ]*//p' < ../libiberty/Makefile | sed -n '$p'` - if test -n "$x"; then -- SHARED_LIBADD="-L`pwd`/../libiberty/pic -liberty" -+ SHARED_LIBADD="`pwd`/../libiberty/pic/libiberty.a" - fi - - case "${host}" in -@@ -17176,13 +17831,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED 
"$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ -17197,14 +17859,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -17237,12 +17902,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -17297,8 +17962,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -17308,12 +17978,14 @@ 
lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -17329,7 +18001,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -17365,6 +18036,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -18153,7 +18825,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -18256,19 +18929,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -18298,6 +18994,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -18307,6 +19009,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -18421,12 +19126,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. 
- no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. - link_static_flag=$lt_lt_prog_compiler_static - -@@ -18513,9 +19218,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -18531,6 +19233,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -18563,210 +19268,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. 
-- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. 
--func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? 
|| _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -diff --git a/bfd/configure.ac b/bfd/configure.ac -index c5bfbd5d129..e666b1cc106 100644 ---- a/bfd/configure.ac -+++ b/bfd/configure.ac -@@ -290,7 +290,7 @@ changequote(,)dnl - x=`sed -n -e 's/^[ ]*PICFLAG[ ]*=[ ]*//p' < ../libiberty/Makefile | sed -n '$p'` - changequote([,])dnl - if test -n "$x"; then -- SHARED_LIBADD="-L`pwd`/../libiberty/pic -liberty" -+ SHARED_LIBADD="`pwd`/../libiberty/pic/libiberty.a" - fi - - case "${host}" in -diff --git a/binutils/configure b/binutils/configure -index a8dfd4bd68c..82316496953 100755 ---- a/binutils/configure -+++ b/binutils/configure -@@ -690,8 +690,11 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL - RANLIB -+ac_ct_AR - AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -808,6 +811,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_plugins - enable_largefile -@@ -1485,6 +1489,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - --with-debuginfod Enable debuginfo lookups with debuginfod - (auto/yes/no) - --with-system-zlib use installed libz -@@ -5461,8 +5467,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5502,7 +5508,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6188,8 +6194,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6238,6 +6244,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... 
" >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6254,6 +6334,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6422,7 +6507,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. 
-+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6576,6 +6662,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6591,9 +6692,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6609,7 +6863,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6629,11 +6883,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. 
-+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6649,7 +6907,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6668,6 +6926,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6679,12 +6941,10 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} - - - -@@ -6696,6 +6956,64 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ -+ -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a -+ -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } -+ -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
- set dummy ${ac_tool_prefix}strip; ac_word=$2 -@@ -7030,8 +7348,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -7067,6 +7385,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -7108,6 +7427,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -7119,7 +7450,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -7145,8 +7476,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -7156,8 +7487,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7194,6 +7525,21 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. 
-+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -7210,6 +7556,40 @@ fi - - - -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7421,6 +7801,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -7984,6 +8481,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -8179,7 +8678,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8268,7 +8768,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8566,8 +9066,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8733,6 +9231,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8795,7 +9299,7 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -8852,13 +9356,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -8919,6 +9427,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9269,7 +9782,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9368,12 +9882,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9387,8 +9901,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ 
-9406,8 +9920,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9453,8 +9967,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9584,7 +10098,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9597,22 +10117,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9624,7 +10151,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. 
-- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9637,22 +10170,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9697,20 +10237,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. -- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. 
-+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9771,7 +10354,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9779,7 +10362,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9795,7 +10378,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -9819,10 +10402,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname 
${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -9901,23 +10484,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. 
*/ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -10002,7 +10598,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -10021,9 +10617,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10599,8 +11195,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by 
postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10633,13 +11230,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10731,7 +11386,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11527,7 +12182,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11530 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11571,10 +12226,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11633,7 +12288,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11636 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11677,10 +12332,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -16024,13 +16679,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ 
-16045,14 +16707,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -16085,12 +16750,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -16145,8 +16810,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -16156,12 +16826,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - 
lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -16177,7 +16849,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -16213,6 +16884,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -16979,7 +17651,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -17082,19 +17755,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -17124,6 +17820,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -17133,6 +17835,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -17247,12 +17952,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. 
- link_static_flag=$lt_lt_prog_compiler_static - -@@ -17339,9 +18044,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -17357,6 +18059,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -17389,210 +18094,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. 
--func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -diff --git a/gas/configure b/gas/configure -index 60c1a055ae2..0e0ce4c0e23 100755 ---- a/gas/configure -+++ b/gas/configure -@@ -681,8 +681,11 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL - RANLIB -+ac_ct_AR - AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -799,6 +802,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_plugins - enable_largefile -@@ -1490,6 +1494,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - --with-cpu=CPU default cpu variant is CPU (currently only supported - on ARC) - --with-system-zlib use installed libz -@@ -5277,8 +5283,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5318,7 +5324,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6004,8 +6010,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6054,6 +6060,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6070,6 +6150,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6238,7 +6323,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. 
-+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6392,6 +6478,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6407,9 +6508,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6425,7 +6679,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6445,11 +6699,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. 
-+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6465,7 +6723,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6484,6 +6742,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6495,12 +6757,10 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} - - - -@@ -6512,6 +6772,64 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ -+ -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a -+ -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } -+ -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
- set dummy ${ac_tool_prefix}strip; ac_word=$2 -@@ -6846,8 +7164,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -6883,6 +7201,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -6924,6 +7243,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -6935,7 +7266,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -6961,8 +7292,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -6972,8 +7303,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7010,6 +7341,21 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. 
-+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -7026,6 +7372,40 @@ fi - - - -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7237,6 +7617,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -7800,6 +8297,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -7995,7 +8494,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8084,7 +8584,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8382,8 +8882,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8549,6 +9047,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8611,7 +9115,7 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -8668,13 +9172,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -8735,6 +9243,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9085,7 +9598,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9184,12 +9698,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9203,8 +9717,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ 
-9222,8 +9736,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9269,8 +9783,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9400,7 +9914,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9413,22 +9933,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9440,7 +9967,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. 
-- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9453,22 +9986,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9513,20 +10053,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. -- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. 
-+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9587,7 +10170,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9595,7 +10178,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9611,7 +10194,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -9635,10 +10218,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname 
${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -9717,23 +10300,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. 
*/ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -9818,7 +10414,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -9837,9 +10433,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10415,8 +11011,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by 
postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10449,13 +11046,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10547,7 +11202,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11343,7 +11998,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11346 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11387,10 +12042,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11449,7 +12104,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11452 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11493,10 +12148,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -15928,13 +16583,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ 
-15949,14 +16611,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -15989,12 +16654,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -16049,8 +16714,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -16060,12 +16730,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - 
lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -16081,7 +16753,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -16117,6 +16788,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -16890,7 +17562,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -16993,19 +17666,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -17035,6 +17731,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -17044,6 +17746,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -17158,12 +17863,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. 
- link_static_flag=$lt_lt_prog_compiler_static - -@@ -17250,9 +17955,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -17268,6 +17970,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -17300,210 +18005,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. 
--func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -diff --git a/gprof/configure b/gprof/configure -index c23692e5c3d..9f936a5b03d 100755 ---- a/gprof/configure -+++ b/gprof/configure -@@ -662,8 +662,11 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL - RANLIB -+ac_ct_AR - AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -780,6 +783,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_plugins - enable_largefile -@@ -1442,6 +1446,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - - Some influential environment variables: - CC C compiler command -@@ -5124,8 +5130,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5165,7 +5171,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -5851,8 +5857,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -5901,6 +5907,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -5917,6 +5997,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6085,7 +6170,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. 
-+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6239,6 +6325,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6252,11 +6353,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6272,7 +6526,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6292,11 +6546,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. -+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... 
" >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6312,7 +6570,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6331,6 +6589,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6342,16 +6604,72 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ - -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a - -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } - -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi - - - -@@ -6693,8 +7011,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -6730,6 +7048,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -6771,6 +7090,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -6782,7 +7113,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -6808,8 +7139,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -6819,8 +7150,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -6857,6 +7188,20 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ -+ -+ -+ -+ - - - -@@ -6873,6 +7218,41 @@ fi - - - -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7084,6 +7464,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... 
" >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -7647,6 +8144,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -7842,7 +8341,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -7931,7 +8431,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8229,8 +8729,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8396,6 +8894,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8458,7 +8962,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -8515,13 +9019,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -8582,6 +9090,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -8932,7 +9445,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9031,12 +9545,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9050,8 +9564,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -9069,8 +9583,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9116,8 +9630,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9247,7 +9761,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9260,22 +9780,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. 
-+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9287,7 +9814,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9300,22 +9833,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9360,20 +9900,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. 
-- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9434,7 +10017,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9442,7 +10025,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9458,7 +10041,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -9482,10 +10065,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -9564,23 +10147,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -9665,7 +10261,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -9684,9 +10280,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10262,8 +10858,9 
@@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10296,13 +10893,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10394,7 +11049,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11190,7 +11845,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11193 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11234,10 +11889,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11296,7 +11951,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11299 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11340,10 +11995,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -13309,13 +13964,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ 
-13330,14 +13992,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -13370,12 +14035,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -13430,8 +14095,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -13441,12 +14111,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - 
lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -13462,7 +14134,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -13498,6 +14169,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -14263,7 +14935,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -14366,19 +15039,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -14408,6 +15104,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -14417,6 +15119,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -14531,12 +15236,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. 
- link_static_flag=$lt_lt_prog_compiler_static - -@@ -14623,9 +15328,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -14641,6 +15343,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -14673,210 +15378,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. 
--func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -diff --git a/ld/configure b/ld/configure -index 811134a503b..f432f4637d0 100755 ---- a/ld/configure -+++ b/ld/configure -@@ -691,8 +691,11 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL - RANLIB -+ac_ct_AR - AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -819,6 +822,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_plugins - enable_largefile -@@ -1512,6 +1516,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - --with-lib-path=dir1:dir2... set default LIB_PATH - --with-sysroot=DIR Search for usr/lib et al within DIR. - --with-system-zlib use installed libz -@@ -5965,8 +5971,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -6006,7 +6012,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6692,8 +6698,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6742,6 +6748,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6758,6 +6838,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6926,7 +7011,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. 
-+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -7080,6 +7166,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -7095,9 +7196,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -7113,7 +7367,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -7133,11 +7387,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. 
-+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -7153,7 +7411,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -7172,6 +7430,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -7183,12 +7445,12 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+ -+ - - - -@@ -7198,6 +7460,62 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ -+ -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a -+ -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } -+ -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi -+ -+ -+ -+ -+ - - - if test -n "$ac_tool_prefix"; then -@@ -7534,8 +7852,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -7571,6 +7889,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -7612,6 +7931,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -7623,7 +7954,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -7649,8 +7980,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -7660,8 +7991,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7698,6 +8029,19 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ -+ -+ -+ - - - -@@ -7714,6 +8058,42 @@ fi - - - -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7925,6 +8305,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... 
" >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -8488,6 +8985,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -8556,6 +9055,16 @@ done - - - -+func_stripname_cnf () -+{ -+ case ${2} in -+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -+ esac -+} # func_stripname_cnf -+ -+ -+ - - - # Set options -@@ -8684,7 +9193,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8773,7 +9283,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -9071,8 +9581,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -9238,6 +9746,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -9300,7 +9814,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -9357,13 +9871,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -9424,6 +9942,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9774,7 +10297,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9873,12 +10397,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9892,8 +10416,8 @@ 
_LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -9911,8 +10435,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9958,8 +10482,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -10089,7 +10613,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -10102,22 +10632,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. 
-+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -10129,7 +10666,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -10142,22 +10685,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -10201,21 +10751,64 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is -- # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. 
-- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ # no search path for DLLs. -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -10276,7 +10869,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -10284,7 +10877,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -10300,7 +10893,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -10324,10 +10917,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -10406,23 +10999,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -10507,7 +11113,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -10526,9 +11132,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -11104,8 
+11710,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -11138,13 +11745,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -11236,7 +11901,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -12032,7 +12697,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 12035 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -12076,10 +12741,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -12138,7 +12803,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 12141 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -12182,10 +12847,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -12577,6 +13242,7 @@ $RM -r conftest* - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC -+ lt_save_CFLAGS=$CFLAGS - lt_save_LD=$LD - lt_save_GCC=$GCC - GCC=$GXX -@@ -12594,6 +13260,7 @@ $RM -r conftest* - fi - test -z "${LDCXX+set}" || LD=$LDCXX - CC=${CXX-"c++"} -+ CFLAGS=$CXXFLAGS - compiler=$CC - compiler_CXX=$CC - for cc_temp in $compiler""; do -@@ -12876,7 +13543,13 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie - allow_undefined_flag_CXX='-berok' - # Determine the default libpath from the value encoded in an empty - # executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath__CXX+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -12889,22 +13562,29 @@ main () - _ACEOF - if ac_fn_cxx_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. 
--if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath__CXX"; then -+ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath__CXX"; then -+ lt_cv_aix_libpath__CXX="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath__CXX -+fi - - hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" - -@@ -12917,7 +13597,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath__CXX+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -12930,22 +13616,29 @@ main () - _ACEOF - if ac_fn_cxx_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath__CXX"; then -+ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath__CXX"; then -+ lt_cv_aix_libpath__CXX="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath__CXX -+fi - - hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -12988,29 +13681,75 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - ;; - - cygwin* | mingw* | pw32* | cegcc*) -- # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, -- # as there is no search path for DLLs. 
-- hardcode_libdir_flag_spec_CXX='-L$libdir' -- export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' -- allow_undefined_flag_CXX=unsupported -- always_export_symbols_CXX=no -- enable_shared_with_static_runtimes_CXX=yes -- -- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then -- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -- # If the export-symbols file already is a .def file (1st line -- # is EXPORTS), use it as is; otherwise, prepend... -- archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -- cp $export_symbols $output_objdir/$soname.def; -- else -- echo EXPORTS > $output_objdir/$soname.def; -- cat $export_symbols >> $output_objdir/$soname.def; -- fi~ -- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -- else -- ld_shlibs_CXX=no -- fi -- ;; -+ case $GXX,$cc_basename in -+ ,cl* | no,cl*) -+ # Native MSVC -+ # hardcode_libdir_flag_spec is actually meaningless, as there is -+ # no search path for DLLs. -+ hardcode_libdir_flag_spec_CXX=' ' -+ allow_undefined_flag_CXX=unsupported -+ always_export_symbols_CXX=yes -+ file_list_spec_CXX='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' -+ enable_shared_with_static_runtimes_CXX=yes -+ # Don't use ranlib -+ old_postinstall_cmds_CXX='chmod 644 $oldlib' -+ postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ func_to_tool_file "$lt_outputfile"~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # g++ -+ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, -+ # as there is no search path for DLLs. 
-+ hardcode_libdir_flag_spec_CXX='-L$libdir' -+ export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' -+ allow_undefined_flag_CXX=unsupported -+ always_export_symbols_CXX=no -+ enable_shared_with_static_runtimes_CXX=yes -+ -+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then -+ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -+ # If the export-symbols file already is a .def file (1st line -+ # is EXPORTS), use it as is; otherwise, prepend... -+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ cp $export_symbols $output_objdir/$soname.def; -+ else -+ echo EXPORTS > $output_objdir/$soname.def; -+ cat $export_symbols >> $output_objdir/$soname.def; -+ fi~ -+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -+ else -+ ld_shlibs_CXX=no -+ fi -+ ;; -+ esac -+ ;; - darwin* | rhapsody*) - - -@@ -13116,7 +13855,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - ;; - *) - if test "$GXX" = yes; then -- archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - ld_shlibs_CXX=no -@@ -13187,10 +13926,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) -- archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' -+ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) -- archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' -+ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi -@@ -13231,9 +13970,9 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then -- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry 
${wl}${output_objdir}/so_locations -o $lib' - else -- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' -+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' - fi - fi - link_all_deplibs_CXX=yes -@@ -13303,20 +14042,20 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - prelink_cmds_CXX='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ -- compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' -+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' - old_archive_cmds_CXX='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ -- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ -+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ - $RANLIB $oldlib' - archive_cmds_CXX='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ -- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' -+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - archive_expsym_cmds_CXX='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ -- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' -+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - ;; - *) # Version 6 and above use weak symbols - archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' -@@ -13511,7 +14250,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - ;; - *) -- archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry 
${wl}${output_objdir}/so_locations -o $lib' - ;; - esac - -@@ -13557,7 +14296,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - archive_cmds_need_lc_CXX=yes - no_undefined_flag_CXX=' -zdefs' -@@ -13598,9 +14337,9 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - no_undefined_flag_CXX=' ${wl}-z ${wl}defs' - if $CC --version | $GREP -v '^2\.7' > /dev/null; then -- archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' -+ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when -@@ -13735,6 +14474,13 @@ private: - }; - _LT_EOF - -+ -+_lt_libdeps_save_CFLAGS=$CFLAGS -+case "$CC $CFLAGS " in #( -+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; -+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; -+esac -+ - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? -@@ -13748,7 +14494,7 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - pre_test_object_deps_done=no - - for p in `eval "$output_verbose_link_cmd"`; do -- case $p in -+ case ${prev}${p} in - - -L* | -R* | -l*) - # Some compilers place space between "-{L,R}" and the path. -@@ -13757,13 +14503,22 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - test $p = "-R"; then - prev=$p - continue -- else -- prev= - fi - -+ # Expand the sysroot to ease extracting the directories later. -+ if test -z "$prev"; then -+ case $p in -+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; -+ esac -+ fi -+ case $p in -+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; -+ esac - if test "$pre_test_object_deps_done" = no; then -- case $p in -- -L* | -R*) -+ case ${prev} in -+ -L | -R) - # Internal compiler library paths should come after those - # provided the user. The postdeps already come after the - # user supplied libs so there is no need to process them. -@@ -13783,8 +14538,10 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - postdeps_CXX="${postdeps_CXX} ${prev}${p}" - fi - fi -+ prev= - ;; - -+ *.lto.$objext) ;; # Ignore GCC LTO objects - *.$objext) - # This assumes that the test object file only shows up - # once in the compiler output. 
-@@ -13820,6 +14577,7 @@ else - fi - - $RM -f confest.$objext -+CFLAGS=$_lt_libdeps_save_CFLAGS - - # PORTME: override above test on systems where it is broken - case $host_os in -@@ -13855,7 +14613,7 @@ linux*) - - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as -@@ -13920,8 +14678,6 @@ fi - lt_prog_compiler_pic_CXX= - lt_prog_compiler_static_CXX= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then -@@ -14026,6 +14782,11 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - ;; - esac - ;; -+ mingw* | cygwin* | os2* | pw32* | cegcc*) -+ # This hack is so that the source file can tell whether it is being -+ # built for inclusion in a dll (and should export symbols for example). -+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' -+ ;; - dgux*) - case $cc_basename in - ec++*) -@@ -14178,7 +14939,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - ;; - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - lt_prog_compiler_pic_CXX='-KPIC' - lt_prog_compiler_static_CXX='-Bstatic' -@@ -14243,10 +15004,17 @@ case $host_os in - lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic_CXX" >&5 --$as_echo "$lt_prog_compiler_pic_CXX" >&6; } -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic_CXX+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 -+$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } -+lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX - - # - # Check to make sure the PIC flag actually works. -@@ -14304,6 +15072,8 @@ fi - - - -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -14481,6 +15251,7 @@ fi - $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } - - export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' - case $host_os in - aix[4-9]*) - # If we're using GNU nm, then we don't want the "-C" option. 
-@@ -14495,15 +15266,20 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie - ;; - pw32*) - export_symbols_cmds_CXX="$ltdll_cmds" -- ;; -+ ;; - cygwin* | mingw* | cegcc*) -- export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -- ;; -+ case $cc_basename in -+ cl*) ;; -+ *) -+ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' -+ ;; -+ esac -+ ;; - *) - export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' -- ;; -+ ;; - esac -- exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 - $as_echo "$ld_shlibs_CXX" >&6; } -@@ -14766,8 +15542,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -14799,13 +15576,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. 
"Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -14896,7 +15731,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -15355,6 +16190,7 @@ fi - fi # test -n "$compiler" - - CC=$lt_save_CC -+ CFLAGS=$lt_save_CFLAGS - LDCXX=$LD - LD=$lt_save_LD - GCC=$lt_save_GCC -@@ -18477,13 +19313,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ -18498,14 +19341,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' 
-+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -18538,12 +19384,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -18582,8 +19428,8 @@ old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote - compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' - GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' - archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' -@@ -18610,12 +19456,12 @@ hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_ - hardcode_automatic_CXX='`$ECHO 
"$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' - inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' - link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path_CXX='`$ECHO "$fix_srcfile_path_CXX" | $SED "$delay_single_quote_subst"`' - always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' - exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' - include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' - prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' -+postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' - file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' - hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' - compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' -@@ -18653,8 +19499,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -18664,12 +19515,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -18685,7 +19538,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -18707,8 +19559,8 @@ LD_CXX \ - reload_flag_CXX \ - compiler_CXX \ - lt_prog_compiler_no_builtin_flag_CXX \ --lt_prog_compiler_wl_CXX \ - lt_prog_compiler_pic_CXX \ -+lt_prog_compiler_wl_CXX \ - lt_prog_compiler_static_CXX \ - lt_cv_prog_compiler_c_o_CXX \ - export_dynamic_flag_spec_CXX \ -@@ -18720,7 +19572,6 @@ no_undefined_flag_CXX \ - hardcode_libdir_flag_spec_CXX \ - hardcode_libdir_flag_spec_ld_CXX \ - hardcode_libdir_separator_CXX \ --fix_srcfile_path_CXX \ - exclude_expsyms_CXX \ - include_expsyms_CXX \ - file_list_spec_CXX \ -@@ -18754,6 +19605,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -18768,7 +19620,8 @@ archive_expsym_cmds_CXX \ - module_cmds_CXX \ - module_expsym_cmds_CXX \ - export_symbols_cmds_CXX \ --prelink_cmds_CXX; do -+prelink_cmds_CXX \ -+postlink_cmds_CXX; do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[\\\\\\\`\\"\\\$]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" -@@ -19533,7 +20386,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. 
-+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -19636,19 +20490,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -19678,6 +20555,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -19687,6 +20570,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -19801,12 +20687,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. - link_static_flag=$lt_lt_prog_compiler_static - -@@ -19893,9 +20779,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -19911,6 +20794,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. 
- file_list_spec=$lt_file_list_spec - -@@ -19957,210 +20843,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. 
--# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -@@ -20188,12 +21033,12 @@ with_gcc=$GCC_CXX - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl_CXX -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic_CXX - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl_CXX -+ - # Compiler flag to prevent dynamic linking. - link_static_flag=$lt_lt_prog_compiler_static_CXX - -@@ -20280,9 +21125,6 @@ inherit_rpath=$inherit_rpath_CXX - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs_CXX - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path_CXX -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols_CXX - -@@ -20298,6 +21140,9 @@ include_expsyms=$lt_include_expsyms_CXX - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds_CXX - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds_CXX -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec_CXX - -diff --git a/libctf/configure b/libctf/configure -index e5493b31691..9977e4f7a42 100755 ---- a/libctf/configure -+++ b/libctf/configure -@@ -663,6 +663,8 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -778,6 +780,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_largefile - enable_werror_always -@@ -1436,6 +1439,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). 
- --with-system-zlib use installed libz - - Some influential environment variables: -@@ -5324,8 +5329,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5436,7 +5441,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6122,8 +6127,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6172,6 +6177,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... 
" >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6188,6 +6267,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6356,7 +6440,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. -+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6510,6 +6595,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6525,9 +6625,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6543,7 +6796,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6563,11 +6816,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. 
-+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6583,7 +6840,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6602,6 +6859,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6613,16 +6874,72 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. */ -+ -+int -+main () -+{ - -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a - -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } - -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi - - - -@@ -6964,8 +7281,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -7001,6 +7318,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -7042,6 +7360,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -7053,7 +7383,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -7079,8 +7409,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -7090,8 +7420,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7128,6 +7458,16 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ - - - -@@ -7144,6 +7484,45 @@ fi - - - -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7355,6 +7734,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. -+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... 
" >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -7918,6 +8414,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -8083,7 +8581,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8172,7 +8671,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8470,8 +8969,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8637,6 +9134,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8699,7 +9202,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -8756,13 +9259,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -8823,6 +9330,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9173,7 +9685,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9272,12 +9785,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9291,8 +9804,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -9310,8 +9823,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9357,8 +9870,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9488,7 +10001,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9501,22 +10020,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. 
-+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9528,7 +10054,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9541,22 +10073,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9601,20 +10140,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. 
-- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9675,7 +10257,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9683,7 +10265,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9699,7 +10281,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -9723,10 +10305,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -9805,23 +10387,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -9906,7 +10501,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -9925,9 +10520,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10503,8 +11098,9 
@@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10537,13 +11133,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10635,7 +11289,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11431,7 +12085,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11434 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11475,10 +12129,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11537,7 +12191,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11540 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11581,10 +12235,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -13994,13 +14648,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ 
-14015,14 +14676,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -14055,12 +14719,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -14115,8 +14779,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -14126,12 +14795,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - 
lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -14147,7 +14818,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -14183,6 +14853,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -14939,7 +15610,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -15042,19 +15714,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -15084,6 +15779,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -15093,6 +15794,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -15207,12 +15911,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. 
- link_static_flag=$lt_lt_prog_compiler_static - -@@ -15299,9 +16003,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -15317,6 +16018,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -15349,210 +16053,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. 
--func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - -diff --git a/libtool.m4 b/libtool.m4 -index 434530059fa..e45fdc6998c 100644 ---- a/libtool.m4 -+++ b/libtool.m4 -@@ -1,7 +1,8 @@ - # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is free software; the Free Software Foundation gives -@@ -10,7 +11,8 @@ - - m4_define([_LT_COPYING], [dnl - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -37,7 +39,7 @@ m4_define([_LT_COPYING], [dnl - # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - ]) - --# serial 56 LT_INIT -+# serial 57 LT_INIT - - - # LT_PREREQ(VERSION) -@@ -92,7 +94,8 @@ _LT_SET_OPTIONS([$0], [$1]) - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. 
--LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - AC_SUBST(LIBTOOL)dnl - - _LT_SETUP -@@ -166,10 +169,13 @@ _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl - dnl - m4_require([_LT_FILEUTILS_DEFAULTS])dnl - m4_require([_LT_CHECK_SHELL_FEATURES])dnl -+m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl - m4_require([_LT_CMD_RELOAD])dnl - m4_require([_LT_CHECK_MAGIC_METHOD])dnl -+m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl - m4_require([_LT_CMD_OLD_ARCHIVE])dnl - m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl -+m4_require([_LT_WITH_SYSROOT])dnl - - _LT_CONFIG_LIBTOOL_INIT([ - # See if we are running on zsh, and set the options which allow our -@@ -199,7 +205,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -632,7 +638,7 @@ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl - m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) - configured by $[0], generated by m4_PACKAGE_STRING. - --Copyright (C) 2009 Free Software Foundation, Inc. -+Copyright (C) 2010 Free Software Foundation, Inc. - This config.lt script is free software; the Free Software Foundation - gives unlimited permision to copy, distribute and modify it." - -@@ -746,15 +752,12 @@ _LT_EOF - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) - -- _LT_PROG_XSI_SHELLFNS -+ _LT_PROG_REPLACE_SHELLFNS - -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - ], -@@ -980,6 +983,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD - echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD - $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD -+ echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD -+ $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -1069,30 +1074,41 @@ m4_defun([_LT_DARWIN_LINKER_FEATURES], - fi - ]) - --# _LT_SYS_MODULE_PATH_AIX --# ----------------------- -+# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) -+# ---------------------------------- - # Links a minimal program and checks the executable - # for the system default hardcoded library path. In most cases, - # this is /usr/lib:/lib, but when the MPI compilers are used - # the location of the communication and MPI libs are included too. - # If we don't find anything, use the default library path according - # to the aix ld manual. -+# Store the results from the different compilers for each TAGNAME. -+# Allow to override them for all tags through lt_cv_aix_libpath. 
- m4_defun([_LT_SYS_MODULE_PATH_AIX], - [m4_require([_LT_DECL_SED])dnl --AC_LINK_IFELSE([AC_LANG_SOURCE([AC_LANG_PROGRAM])],[ --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi],[]) --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], -+ [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ -+ lt_aix_libpath_sed='[ -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }]' -+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then -+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi],[]) -+ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then -+ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" -+ fi -+ ]) -+ aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) -+fi - ])# _LT_SYS_MODULE_PATH_AIX - - -@@ -1117,7 +1133,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - - AC_MSG_CHECKING([how to print strings]) - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -1161,6 +1177,39 @@ _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) - ])# _LT_PROG_ECHO_BACKSLASH - - -+# _LT_WITH_SYSROOT -+# ---------------- -+AC_DEFUN([_LT_WITH_SYSROOT], -+[AC_MSG_CHECKING([for sysroot]) -+AC_ARG_WITH([libtool-sysroot], -+[ --with-libtool-sysroot[=DIR] Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified).], -+[], [with_libtool_sysroot=no]) -+ -+dnl lt_sysroot will always be passed unquoted. We quote it here -+dnl in case the user passed a directory name. 
-+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ AC_MSG_RESULT([${with_libtool_sysroot}]) -+ AC_MSG_ERROR([The sysroot must be an absolute path.]) -+ ;; -+esac -+ -+ AC_MSG_RESULT([${lt_sysroot:-no}]) -+_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl -+[dependent libraries, and in which our libraries should be installed.])]) -+ - # _LT_ENABLE_LOCK - # --------------- - m4_defun([_LT_ENABLE_LOCK], -@@ -1320,14 +1369,47 @@ need_locks="$enable_libtool_lock" - ])# _LT_ENABLE_LOCK - - -+# _LT_PROG_AR -+# ----------- -+m4_defun([_LT_PROG_AR], -+[AC_CHECK_TOOLS(AR, [ar], false) -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+_LT_DECL([], [AR], [1], [The archiver]) -+_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) -+ -+AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], -+ [lt_cv_ar_at_file=no -+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM], -+ [echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' -+ AC_TRY_EVAL([lt_ar_try]) -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ AC_TRY_EVAL([lt_ar_try]) -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a -+ ]) -+ ]) -+ -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi -+_LT_DECL([], [archiver_list_spec], [1], -+ [How to feed a file listing to the archiver]) -+])# _LT_PROG_AR -+ -+ - # _LT_CMD_OLD_ARCHIVE - # ------------------- - m4_defun([_LT_CMD_OLD_ARCHIVE], --[AC_CHECK_TOOL(AR, ar, false) --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru --_LT_DECL([], [AR], [1], [The archiver]) --_LT_DECL([], [AR_FLAGS], [1]) -+[_LT_PROG_AR - - AC_CHECK_TOOL(STRIP, strip, :) - test -z "$STRIP" && STRIP=: -@@ -1623,7 +1705,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --[#line __oline__ "configure" -+[#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -1667,10 +1749,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. 
*/ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -2210,8 +2292,9 @@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -2244,13 +2327,71 @@ m4_if([$1], [],[ - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -2342,7 +2483,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -2950,6 +3091,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -3016,7 +3162,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. -+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -3167,6 +3314,21 @@ tpf*) - ;; - esac - ]) -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -3174,7 +3336,11 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - _LT_DECL([], [deplibs_check_method], [1], - [Method to check whether dependent libraries are shared objects]) - _LT_DECL([], [file_magic_cmd], [1], -- [Command to use when deplibs_check_method == "file_magic"]) -+ [Command to use when deplibs_check_method = "file_magic"]) -+_LT_DECL([], [file_magic_glob], [1], -+ [How to find potential files when deplibs_check_method = "file_magic"]) -+_LT_DECL([], [want_nocaseglob], [1], -+ [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) - ])# _LT_CHECK_MAGIC_METHOD - - -@@ -3277,6 +3443,67 @@ dnl aclocal-1.4 backwards compatibility: - dnl AC_DEFUN([AM_PROG_NM], []) - dnl AC_DEFUN([AC_PROG_NM], []) - -+# _LT_CHECK_SHAREDLIB_FROM_LINKLIB -+# -------------------------------- -+# how to determine the name of the shared library -+# associated with a specific link library. 
-+# -- PORTME fill in with the dynamic library characteristics -+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], -+[m4_require([_LT_DECL_EGREP]) -+m4_require([_LT_DECL_OBJDUMP]) -+m4_require([_LT_DECL_DLLTOOL]) -+AC_CACHE_CHECK([how to associate runtime and link libraries], -+lt_cv_sharedlib_from_linklib_cmd, -+[lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+]) -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+_LT_DECL([], [sharedlib_from_linklib_cmd], [1], -+ [Command to associate shared and link libraries]) -+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB -+ -+ -+# _LT_PATH_MANIFEST_TOOL -+# ---------------------- -+# locate the manifest tool -+m4_defun([_LT_PATH_MANIFEST_TOOL], -+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], -+ [lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&AS_MESSAGE_LOG_FD -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest*]) -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl -+])# _LT_PATH_MANIFEST_TOOL -+ - - # LT_LIB_M - # -------- -@@ -3403,8 +3630,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -3440,6 +3667,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. 
- pipe_works=no -@@ -3473,6 +3701,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT@&t@_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT@&t@_DLSYM_CONST -+#else -+# define LT@&t@_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -3484,7 +3724,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. */ --const struct { -+LT@&t@_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -3510,15 +3750,15 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" - if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD - fi -@@ -3551,6 +3791,13 @@ else - AC_MSG_RESULT(ok) - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ - _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], - [Take the output of nm and produce a listing of raw symbols and C names]) - _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], -@@ -3561,6 +3808,8 @@ _LT_DECL([global_symbol_to_c_name_address], - _LT_DECL([global_symbol_to_c_name_address_lib_prefix], - [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], - [Transform the output of nm in a C name address pair when lib prefix is needed]) -+_LT_DECL([], [nm_file_list_spec], [1], -+ [Specify filename containing input files for $NM]) - ]) # _LT_CMD_GLOBAL_SYMBOLS - - -@@ -3572,7 +3821,6 @@ _LT_TAGVAR(lt_prog_compiler_wl, $1)= - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_static, $1)= - --AC_MSG_CHECKING([for $compiler option to produce PIC]) - m4_if([$1], [CXX], [ - # C++ specific cases for pic, static, wl, etc. - if test "$GXX" = yes; then -@@ -3678,6 +3926,12 @@ m4_if([$1], [CXX], [ - ;; - esac - ;; -+ mingw* | cygwin* | os2* | pw32* | cegcc*) -+ # This hack is so that the source file can tell whether it is being -+ # built for inclusion in a dll (and should export symbols for example). 
-+ m4_if([$1], [GCJ], [], -+ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) -+ ;; - dgux*) - case $cc_basename in - ec++*) -@@ -3830,7 +4084,7 @@ m4_if([$1], [CXX], [ - ;; - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' -@@ -4053,6 +4307,12 @@ m4_if([$1], [CXX], [ - _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' - _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' -+ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' -+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -4115,7 +4375,7 @@ m4_if([$1], [CXX], [ - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; - *) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; -@@ -4172,9 +4432,11 @@ case $host_os in - _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" - ;; - esac --AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) --_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], -- [How to pass a linker flag through the compiler]) -+ -+AC_CACHE_CHECK([for $compiler option to produce PIC], -+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], -+ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) -+_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) - - # - # Check to make sure the PIC flag actually works. -@@ -4193,6 +4455,8 @@ fi - _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], - [Additional compiler flags for building library objects]) - -+_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], -+ [How to pass a linker flag through the compiler]) - # - # Check to make sure the static flag actually works. - # -@@ -4213,6 +4477,7 @@ _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], - m4_defun([_LT_LINKER_SHLIBS], - [AC_REQUIRE([LT_PATH_LD])dnl - AC_REQUIRE([LT_PATH_NM])dnl -+m4_require([_LT_PATH_MANIFEST_TOOL])dnl - m4_require([_LT_FILEUTILS_DEFAULTS])dnl - m4_require([_LT_DECL_EGREP])dnl - m4_require([_LT_DECL_SED])dnl -@@ -4221,6 +4486,7 @@ m4_require([_LT_TAG_COMPILER])dnl - AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) - m4_if([$1], [CXX], [ - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' -+ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] - case $host_os in - aix[[4-9]]*) - # If we're using GNU nm, then we don't want the "-C" option. 
-@@ -4235,15 +4501,20 @@ m4_if([$1], [CXX], [ - ;; - pw32*) - _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" -- ;; -+ ;; - cygwin* | mingw* | cegcc*) -- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' -- ;; -+ case $cc_basename in -+ cl*) ;; -+ *) -+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' -+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] -+ ;; -+ esac -+ ;; - *) - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' -- ;; -+ ;; - esac -- _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] - ], [ - runpath_var= - _LT_TAGVAR(allow_undefined_flag, $1)= -@@ -4411,7 +4682,8 @@ _LT_EOF - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' -+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' -+ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -4510,12 +4782,12 @@ _LT_EOF - _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' -- _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -4529,8 +4801,8 @@ _LT_EOF - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs 
$deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -4548,8 +4820,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi -@@ -4595,8 +4867,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi -@@ -4726,7 +4998,7 @@ _LT_EOF - _LT_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- _LT_SYS_MODULE_PATH_AIX -+ _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" - else -@@ -4737,7 +5009,7 @@ _LT_EOF - else - # Determine the default libpath from the value encoded in an - # empty executable. -- _LT_SYS_MODULE_PATH_AIX -+ _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. -@@ -4781,20 +5053,63 @@ _LT_EOF - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' -- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. 
-- _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' -- # FIXME: Should let the user specify the lib program. -- _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' -- _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' -- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' -+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -+ _LT_TAGVAR(always_export_symbols, $1)=yes -+ _LT_TAGVAR(file_list_spec, $1)='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' -+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -+ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' -+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' -+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' -+ # FIXME: Should let the user specify the lib program. -+ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -4828,7 +5143,7 @@ _LT_EOF - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no -@@ -4836,7 +5151,7 @@ _LT_EOF - - hpux9*) - if test "$GCC" = yes; then -- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -4852,7 +5167,7 @@ _LT_EOF - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -4876,10 +5191,10 @@ _LT_EOF - _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -4926,16 +5241,31 @@ _LT_EOF - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. 
-- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- AC_LINK_IFELSE([AC_LANG_SOURCE([int foo(void) {}])], -- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- ) -- LDFLAGS="$save_LDFLAGS" -+ # This should be the same for all languages, so no per-tag cache variable. -+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], -+ [lt_cv_irix_exported_symbol], -+ [save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ AC_LINK_IFELSE( -+ [AC_LANG_SOURCE( -+ [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], -+ [C++], [[int foo (void) { return 0; }]], -+ [Fortran 77], [[ -+ subroutine foo -+ end]], -+ [Fortran], [[ -+ subroutine foo -+ end]])])], -+ [lt_cv_irix_exported_symbol=yes], -+ [lt_cv_irix_exported_symbol=no]) -+ LDFLAGS="$save_LDFLAGS"]) -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -5020,7 +5350,7 @@ _LT_EOF - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' - else - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' -@@ -5039,9 +5369,9 @@ _LT_EOF - _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs 
$compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -5313,8 +5643,6 @@ _LT_TAGDECL([], [inherit_rpath], [0], - to runtime path list]) - _LT_TAGDECL([], [link_all_deplibs], [0], - [Whether libtool must link a program against all its dependency libraries]) --_LT_TAGDECL([], [fix_srcfile_path], [1], -- [Fix the shell variable $srcfile for the compiler]) - _LT_TAGDECL([], [always_export_symbols], [0], - [Set to "yes" if exported symbols are required]) - _LT_TAGDECL([], [export_symbols_cmds], [2], -@@ -5325,6 +5653,8 @@ _LT_TAGDECL([], [include_expsyms], [1], - [Symbols that must always be exported]) - _LT_TAGDECL([], [prelink_cmds], [2], - [Commands necessary for linking programs (against libraries) with templates]) -+_LT_TAGDECL([], [postlink_cmds], [2], -+ [Commands necessary for finishing linking programs]) - _LT_TAGDECL([], [file_list_spec], [1], - [Specify filename containing input files]) - dnl FIXME: Not yet implemented -@@ -5426,6 +5756,7 @@ CC="$lt_save_CC" - m4_defun([_LT_LANG_CXX_CONFIG], - [m4_require([_LT_FILEUTILS_DEFAULTS])dnl - m4_require([_LT_DECL_EGREP])dnl -+m4_require([_LT_PATH_MANIFEST_TOOL])dnl - if test -n "$CXX" && ( test "X$CXX" != "Xno" && - ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || - (test "X$CXX" != "Xg++"))) ; then -@@ -5487,6 +5818,7 @@ if test "$_lt_caught_CXX_error" != yes; then - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC -+ lt_save_CFLAGS=$CFLAGS - lt_save_LD=$LD - lt_save_GCC=$GCC - GCC=$GXX -@@ -5504,6 +5836,7 @@ if test "$_lt_caught_CXX_error" != yes; then - fi - test -z "${LDCXX+set}" || LD=$LDCXX - CC=${CXX-"c++"} -+ CFLAGS=$CXXFLAGS - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) -@@ -5667,7 +6000,7 @@ if test "$_lt_caught_CXX_error" != yes; then - _LT_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an empty - # executable. -- _LT_SYS_MODULE_PATH_AIX -+ _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -5679,7 +6012,7 @@ if test "$_lt_caught_CXX_error" != yes; then - else - # Determine the default libpath from the value encoded in an - # empty executable. -- _LT_SYS_MODULE_PATH_AIX -+ _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. -@@ -5721,29 +6054,75 @@ if test "$_lt_caught_CXX_error" != yes; then - ;; - - cygwin* | mingw* | pw32* | cegcc*) -- # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, -- # as there is no search path for DLLs. 
-- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' -- _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' -- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -- _LT_TAGVAR(always_export_symbols, $1)=no -- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -- -- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -- # If the export-symbols file already is a .def file (1st line -- # is EXPORTS), use it as is; otherwise, prepend... -- _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -- cp $export_symbols $output_objdir/$soname.def; -- else -- echo EXPORTS > $output_objdir/$soname.def; -- cat $export_symbols >> $output_objdir/$soname.def; -- fi~ -- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -- else -- _LT_TAGVAR(ld_shlibs, $1)=no -- fi -- ;; -+ case $GXX,$cc_basename in -+ ,cl* | no,cl*) -+ # Native MSVC -+ # hardcode_libdir_flag_spec is actually meaningless, as there is -+ # no search path for DLLs. -+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' -+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -+ _LT_TAGVAR(always_export_symbols, $1)=yes -+ _LT_TAGVAR(file_list_spec, $1)='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' -+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -+ # Don't use ranlib -+ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' -+ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ func_to_tool_file "$lt_outputfile"~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # g++ -+ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, -+ # as there is no search path for DLLs. 
-+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' -+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' -+ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported -+ _LT_TAGVAR(always_export_symbols, $1)=no -+ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes -+ -+ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -+ # If the export-symbols file already is a .def file (1st line -+ # is EXPORTS), use it as is; otherwise, prepend... -+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ cp $export_symbols $output_objdir/$soname.def; -+ else -+ echo EXPORTS > $output_objdir/$soname.def; -+ cat $export_symbols >> $output_objdir/$soname.def; -+ fi~ -+ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -+ else -+ _LT_TAGVAR(ld_shlibs, $1)=no -+ fi -+ ;; -+ esac -+ ;; - darwin* | rhapsody*) - _LT_DARWIN_LINKER_FEATURES($1) - ;; -@@ -5818,7 +6197,7 @@ if test "$_lt_caught_CXX_error" != yes; then - ;; - *) - if test "$GXX" = yes; then -- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no -@@ -5889,10 +6268,10 @@ if test "$_lt_caught_CXX_error" != yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi -@@ -5933,9 +6312,9 @@ if test "$_lt_caught_CXX_error" != yes; then - *) - if test "$GXX" = yes; then - if test "$with_gnu_ld" = no; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects 
$libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - else -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' - fi - fi - _LT_TAGVAR(link_all_deplibs, $1)=yes -@@ -6005,20 +6384,20 @@ if test "$_lt_caught_CXX_error" != yes; then - _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ -- compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' -+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' - _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ -- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ -+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ - $RANLIB $oldlib' - _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ -- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' -+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ -- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' -+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' - ;; - *) # Version 6 and above use weak symbols - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' -@@ -6213,7 +6592,7 @@ if test "$_lt_caught_CXX_error" != yes; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - ;; - *) -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ 
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - ;; - esac - -@@ -6259,7 +6638,7 @@ if test "$_lt_caught_CXX_error" != yes; then - - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(archive_cmds_need_lc,$1)=yes - _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' -@@ -6300,9 +6679,9 @@ if test "$_lt_caught_CXX_error" != yes; then - if test "$GXX" = yes && test "$with_gnu_ld" = no; then - _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' - if $CC --version | $GREP -v '^2\.7' > /dev/null; then -- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' -+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when -@@ -6431,6 +6810,7 @@ if test "$_lt_caught_CXX_error" != yes; then - fi # test -n "$compiler" - - CC=$lt_save_CC -+ CFLAGS=$lt_save_CFLAGS - LDCXX=$LD - LD=$lt_save_LD - GCC=$lt_save_GCC -@@ -6445,6 +6825,29 @@ AC_LANG_POP - ])# _LT_LANG_CXX_CONFIG - - -+# _LT_FUNC_STRIPNAME_CNF -+# ---------------------- -+# func_stripname_cnf prefix suffix name -+# strip PREFIX and SUFFIX off of NAME. -+# PREFIX and SUFFIX must not contain globbing or regex special -+# characters, hashes, percent signs, but SUFFIX may contain a leading -+# dot (in which case that matches only a dot). -+# -+# This function is identical to the (non-XSI) version of func_stripname, -+# except this one can be used by m4 code that may be executed by configure, -+# rather than the libtool script. -+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl -+AC_REQUIRE([_LT_DECL_SED]) -+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) -+func_stripname_cnf () -+{ -+ case ${2} in -+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -+ esac -+} # func_stripname_cnf -+])# _LT_FUNC_STRIPNAME_CNF -+ - # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) - # --------------------------------- - # Figure out "hidden" library dependencies from verbose -@@ -6453,6 +6856,7 @@ AC_LANG_POP - # objects, libraries and library flags. 
- m4_defun([_LT_SYS_HIDDEN_LIBDEPS], - [m4_require([_LT_FILEUTILS_DEFAULTS])dnl -+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl - # Dependencies to place before and after the object being linked: - _LT_TAGVAR(predep_objects, $1)= - _LT_TAGVAR(postdep_objects, $1)= -@@ -6503,6 +6907,13 @@ public class foo { - }; - _LT_EOF - ]) -+ -+_lt_libdeps_save_CFLAGS=$CFLAGS -+case "$CC $CFLAGS " in #( -+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; -+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; -+esac -+ - dnl Parse the compiler output and extract the necessary - dnl objects, libraries and library flags. - if AC_TRY_EVAL(ac_compile); then -@@ -6514,7 +6925,7 @@ if AC_TRY_EVAL(ac_compile); then - pre_test_object_deps_done=no - - for p in `eval "$output_verbose_link_cmd"`; do -- case $p in -+ case ${prev}${p} in - - -L* | -R* | -l*) - # Some compilers place space between "-{L,R}" and the path. -@@ -6523,13 +6934,22 @@ if AC_TRY_EVAL(ac_compile); then - test $p = "-R"; then - prev=$p - continue -- else -- prev= - fi - -+ # Expand the sysroot to ease extracting the directories later. -+ if test -z "$prev"; then -+ case $p in -+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; -+ esac -+ fi -+ case $p in -+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; -+ esac - if test "$pre_test_object_deps_done" = no; then -- case $p in -- -L* | -R*) -+ case ${prev} in -+ -L | -R) - # Internal compiler library paths should come after those - # provided the user. The postdeps already come after the - # user supplied libs so there is no need to process them. -@@ -6549,8 +6969,10 @@ if AC_TRY_EVAL(ac_compile); then - _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" - fi - fi -+ prev= - ;; - -+ *.lto.$objext) ;; # Ignore GCC LTO objects - *.$objext) - # This assumes that the test object file only shows up - # once in the compiler output. -@@ -6586,6 +7008,7 @@ else - fi - - $RM -f confest.$objext -+CFLAGS=$_lt_libdeps_save_CFLAGS - - # PORTME: override above test on systems where it is broken - m4_if([$1], [CXX], -@@ -6622,7 +7045,7 @@ linux*) - - solaris*) - case $cc_basename in -- CC*) -+ CC* | sunCC*) - # The more standards-conforming stlport4 library is - # incompatible with the Cstd library. Avoid specifying - # it if it's in CXXFLAGS. Ignore libCrun as -@@ -6735,7 +7158,9 @@ if test "$_lt_disable_F77" != yes; then - # Allow CC to be a program name with arguments. - lt_save_CC="$CC" - lt_save_GCC=$GCC -+ lt_save_CFLAGS=$CFLAGS - CC=${F77-"f77"} -+ CFLAGS=$FFLAGS - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) -@@ -6789,6 +7214,7 @@ if test "$_lt_disable_F77" != yes; then - - GCC=$lt_save_GCC - CC="$lt_save_CC" -+ CFLAGS="$lt_save_CFLAGS" - fi # test "$_lt_disable_F77" != yes - - AC_LANG_POP -@@ -6865,7 +7291,9 @@ if test "$_lt_disable_FC" != yes; then - # Allow CC to be a program name with arguments. 
- lt_save_CC="$CC" - lt_save_GCC=$GCC -+ lt_save_CFLAGS=$CFLAGS - CC=${FC-"f95"} -+ CFLAGS=$FCFLAGS - compiler=$CC - GCC=$ac_cv_fc_compiler_gnu - -@@ -6921,7 +7349,8 @@ if test "$_lt_disable_FC" != yes; then - fi # test -n "$compiler" - - GCC=$lt_save_GCC -- CC="$lt_save_CC" -+ CC=$lt_save_CC -+ CFLAGS=$lt_save_CFLAGS - fi # test "$_lt_disable_FC" != yes - - AC_LANG_POP -@@ -6958,10 +7387,12 @@ _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. --lt_save_CC="$CC" -+lt_save_CC=$CC -+lt_save_CFLAGS=$CFLAGS - lt_save_GCC=$GCC - GCC=yes - CC=${GCJ-"gcj"} -+CFLAGS=$GCJFLAGS - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_TAGVAR(LD, $1)="$LD" -@@ -6992,7 +7423,8 @@ fi - AC_LANG_RESTORE - - GCC=$lt_save_GCC --CC="$lt_save_CC" -+CC=$lt_save_CC -+CFLAGS=$lt_save_CFLAGS - ])# _LT_LANG_GCJ_CONFIG - - -@@ -7027,9 +7459,11 @@ _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC="$CC" -+lt_save_CFLAGS=$CFLAGS - lt_save_GCC=$GCC - GCC= - CC=${RC-"windres"} -+CFLAGS= - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) -@@ -7042,7 +7476,8 @@ fi - - GCC=$lt_save_GCC - AC_LANG_RESTORE --CC="$lt_save_CC" -+CC=$lt_save_CC -+CFLAGS=$lt_save_CFLAGS - ])# _LT_LANG_RC_CONFIG - - -@@ -7101,6 +7536,15 @@ _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) - AC_SUBST([OBJDUMP]) - ]) - -+# _LT_DECL_DLLTOOL -+# ---------------- -+# Ensure DLLTOOL variable is set. -+m4_defun([_LT_DECL_DLLTOOL], -+[AC_CHECK_TOOL(DLLTOOL, dlltool, false) -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) -+AC_SUBST([DLLTOOL]) -+]) - - # _LT_DECL_SED - # ------------ -@@ -7194,8 +7638,8 @@ m4_defun([_LT_CHECK_SHELL_FEATURES], - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -7234,206 +7678,162 @@ _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl - ])# _LT_CHECK_SHELL_FEATURES - - --# _LT_PROG_XSI_SHELLFNS --# --------------------- --# Bourne and XSI compatible variants of some useful shell functions. --m4_defun([_LT_PROG_XSI_SHELLFNS], --[case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. 
--func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $[*] )) --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=${#1} --} -+# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) -+# ------------------------------------------------------ -+# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and -+# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. -+m4_defun([_LT_PROG_FUNCTION_REPLACE], -+[dnl { -+sed -e '/^$1 ()$/,/^} # $1 /c\ -+$1 ()\ -+{\ -+m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) -+} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+]) - --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" - --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -+# _LT_PROG_REPLACE_SHELLFNS -+# ------------------------- -+# Replace existing portable implementations of several shell functions with -+# equivalent extended shell implementations where those features are available.. -+m4_defun([_LT_PROG_REPLACE_SHELLFNS], -+[if test x"$xsi_shell" = xyes; then -+ _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl -+ case ${1} in -+ */*) func_dirname_result="${1%/*}${2}" ;; -+ * ) func_dirname_result="${3}" ;; -+ esac]) -+ -+ _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl -+ func_basename_result="${1##*/}"]) -+ -+ _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl -+ case ${1} in -+ */*) func_dirname_result="${1%/*}${2}" ;; -+ * ) func_dirname_result="${3}" ;; -+ esac -+ func_basename_result="${1##*/}"]) - --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -+ _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl -+ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -+ # positional parameters, so assign one to ordinary parameter first. 
-+ func_stripname_result=${3} -+ func_stripname_result=${func_stripname_result#"${1}"} -+ func_stripname_result=${func_stripname_result%"${2}"}]) - --dnl func_dirname_and_basename --dnl A portable version of this function is already defined in general.m4sh --dnl so there is no need for it here. -+ _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl -+ func_split_long_opt_name=${1%%=*} -+ func_split_long_opt_arg=${1#*=}]) - --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -+ _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl -+ func_split_short_opt_arg=${1#??} -+ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) - --# sed scripts: --my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[[^=]]*=//' -+ _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl -+ case ${1} in -+ *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -+ *) func_lo2o_result=${1} ;; -+ esac]) - --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -+ _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) - --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -+ _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) - --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[[^.]]*$/.lo/'` --} -+ _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) -+fi - --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$[@]"` --} -+if test x"$lt_shell_append" = xyes; then -+ _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) - --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` --} -+ _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl -+ func_quote_for_eval "${2}" -+dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ -+ eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) - --_LT_EOF --esac -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi - --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -+if test x"$_lt_function_replace_fail" = x":"; then -+ AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) -+fi -+]) - --# func_append var value --# Append VALUE to the end of shell variable VAR. 
--func_append () --{ -- eval "$[1]+=\$[2]" --} --_LT_EOF -+# _LT_PATH_CONVERSION_FUNCTIONS -+# ----------------------------- -+# Determine which file name conversion functions should be used by -+# func_to_host_file (and, implicitly, by func_to_host_path). These are needed -+# for certain cross-compile configurations and native mingw. -+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], -+[AC_REQUIRE([AC_CANONICAL_HOST])dnl -+AC_REQUIRE([AC_CANONICAL_BUILD])dnl -+AC_MSG_CHECKING([how to convert $build file names to $host format]) -+AC_CACHE_VAL(lt_cv_to_host_file_cmd, -+[case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac - ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$[1]=\$$[1]\$[2]" --} -- --_LT_EOF -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac - ;; -- esac -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+]) -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) -+_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], -+ [0], [convert $build file names to $host format])dnl -+ -+AC_MSG_CHECKING([how to convert $build file names to toolchain format]) -+AC_CACHE_VAL(lt_cv_to_tool_file_cmd, -+[#assume ordinary cross tools, or native build. -+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac - ]) -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) -+_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], -+ [0], [convert $build files to toolchain format])dnl -+])# _LT_PATH_CONVERSION_FUNCTIONS -diff --git a/ltmain.sh b/ltmain.sh -index 9503ec85d70..70e856e0659 100644 ---- a/ltmain.sh -+++ b/ltmain.sh -@@ -1,10 +1,9 @@ --# Generated from ltmain.m4sh. - --# libtool (GNU libtool 1.3134 2009-11-29) 2.2.7a -+# libtool (GNU libtool) 2.4 - # Written by Gordon Matzigkeit , 1996 - - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, --# 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2007, 2008, 2009, 2010 Free Software Foundation, Inc. - # This is free software; see the source for copying conditions. There is NO - # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - -@@ -38,7 +37,6 @@ - # -n, --dry-run display commands without modifying any files - # --features display basic configuration information and exit - # --mode=MODE use operation mode MODE --# --no-finish let install mode avoid finish commands - # --preserve-dup-deps don't remove duplicate dependency libraries - # --quiet, --silent don't print informational messages - # --no-quiet, --no-silent -@@ -71,17 +69,19 @@ - # compiler: $LTCC - # compiler flags: $LTCFLAGS - # linker: $LD (gnu? 
$with_gnu_ld) --# $progname: (GNU libtool 1.3134 2009-11-29) 2.2.7a -+# $progname: (GNU libtool) 2.4 - # automake: $automake_version - # autoconf: $autoconf_version - # - # Report bugs to . -+# GNU libtool home page: . -+# General help using GNU software: . - - PROGRAM=libtool - PACKAGE=libtool --VERSION=2.2.7a --TIMESTAMP=" 1.3134 2009-11-29" --package_revision=1.3134 -+VERSION=2.4 -+TIMESTAMP="" -+package_revision=1.3293 - - # Be Bourne compatible - if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then -@@ -106,9 +106,6 @@ _LTECHO_EOF' - } - - # NLS nuisances: We save the old values to restore during execute mode. --# Only set LANG and LC_ALL to C if already set. --# These must not be set unconditionally because not all systems understand --# e.g. LANG=C (notably SCO). - lt_user_locale= - lt_safe_locale= - for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES -@@ -121,15 +118,13 @@ do - lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" - fi" - done -+LC_ALL=C -+LANGUAGE=C -+export LANGUAGE LC_ALL - - $lt_unset CDPATH - - -- -- -- -- -- - # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh - # is ksh but when the shell is invoked as "sh" and the current value of - # the _XPG environment variable is not equal to 1 (one), the special -@@ -140,7 +135,7 @@ progpath="$0" - - - : ${CP="cp -f"} --: ${ECHO=$as_echo} -+test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} - : ${EGREP="/bin/grep -E"} - : ${FGREP="/bin/grep -F"} - : ${GREP="/bin/grep"} -@@ -149,7 +144,7 @@ progpath="$0" - : ${MKDIR="mkdir"} - : ${MV="mv -f"} - : ${RM="rm -f"} --: ${SED="/mount/endor/wildenhu/local-x86_64/bin/sed"} -+: ${SED="/bin/sed"} - : ${SHELL="${CONFIG_SHELL-/bin/sh}"} - : ${Xsed="$SED -e 1s/^X//"} - -@@ -169,6 +164,27 @@ IFS=" $lt_nl" - dirname="s,/[^/]*$,," - basename="s,^.*/,," - -+# func_dirname file append nondir_replacement -+# Compute the dirname of FILE. If nonempty, add APPEND to the result, -+# otherwise set result to NONDIR_REPLACEMENT. -+func_dirname () -+{ -+ func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -+ if test "X$func_dirname_result" = "X${1}"; then -+ func_dirname_result="${3}" -+ else -+ func_dirname_result="$func_dirname_result${2}" -+ fi -+} # func_dirname may be replaced by extended shell implementation -+ -+ -+# func_basename file -+func_basename () -+{ -+ func_basename_result=`$ECHO "${1}" | $SED "$basename"` -+} # func_basename may be replaced by extended shell implementation -+ -+ - # func_dirname_and_basename file append nondir_replacement - # perform func_basename and func_dirname in a single function - # call: -@@ -183,17 +199,31 @@ basename="s,^.*/,," - # those functions but instead duplicate the functionality here. - func_dirname_and_basename () - { -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi -- func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` --} -+ # Extract subdirectory from the argument. 
-+ func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` -+ if test "X$func_dirname_result" = "X${1}"; then -+ func_dirname_result="${3}" -+ else -+ func_dirname_result="$func_dirname_result${2}" -+ fi -+ func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` -+} # func_dirname_and_basename may be replaced by extended shell implementation -+ -+ -+# func_stripname prefix suffix name -+# strip PREFIX and SUFFIX off of NAME. -+# PREFIX and SUFFIX must not contain globbing or regex special -+# characters, hashes, percent signs, but SUFFIX may contain a leading -+# dot (in which case that matches only a dot). -+# func_strip_suffix prefix name -+func_stripname () -+{ -+ case ${2} in -+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -+ esac -+} # func_stripname may be replaced by extended shell implementation - --# Generated shell functions inserted here. - - # These SED scripts presuppose an absolute path with a trailing slash. - pathcar='s,^/\([^/]*\).*$,\1,' -@@ -376,6 +406,15 @@ sed_quote_subst='s/\([`"$\\]\)/\\\1/g' - # Same as above, but do not quote variable references. - double_quote_subst='s/\(["`\\]\)/\\\1/g' - -+# Sed substitution that turns a string into a regex matching for the -+# string literally. -+sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' -+ -+# Sed substitution that converts a w32 file name or path -+# which contains forward slashes, into one that contains -+# (escaped) backslashes. A very naive implementation. -+lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' -+ - # Re-`\' parameter expansions in output of double_quote_subst that were - # `\'-ed in input to the same. If an odd number of `\' preceded a '$' - # in input to double_quote_subst, that '$' was protected from expansion. -@@ -404,7 +443,7 @@ opt_warning=: - # name if it has been set yet. - func_echo () - { -- $ECHO "$progname${mode+: }$mode: $*" -+ $ECHO "$progname: ${opt_mode+$opt_mode: }$*" - } - - # func_verbose arg... -@@ -430,14 +469,14 @@ func_echo_all () - # Echo program name prefixed message to standard error. - func_error () - { -- $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 -+ $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 - } - - # func_warning arg... - # Echo program name prefixed warning message to standard error. - func_warning () - { -- $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 -+ $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 - - # bash bug again: - : -@@ -656,19 +695,35 @@ func_show_eval_locale () - fi - } - -- -- -+# func_tr_sh -+# Turn $1 into a string suitable for a shell variable name. -+# Result is stored in $func_tr_sh_result. All characters -+# not in the set a-zA-Z0-9_ are replaced with '_'. Further, -+# if $1 begins with a digit, a '_' is prepended as well. -+func_tr_sh () -+{ -+ case $1 in -+ [0-9]* | *[!a-zA-Z0-9_]*) -+ func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` -+ ;; -+ * ) -+ func_tr_sh_result=$1 -+ ;; -+ esac -+} - - - # func_version - # Echo version message to standard output and exit. - func_version () - { -+ $opt_debug -+ - $SED -n '/(C)/!b go - :more - /\./!{ - N -- s/\n# // -+ s/\n# / / - b more - } - :go -@@ -685,7 +740,9 @@ func_version () - # Echo short help message to standard output and exit. 
- func_usage () - { -- $SED -n '/^# Usage:/,/^# *-h/ { -+ $opt_debug -+ -+ $SED -n '/^# Usage:/,/^# *.*--help/ { - s/^# // - s/^# *$// - s/\$progname/'$progname'/ -@@ -701,7 +758,10 @@ func_usage () - # unless 'noexit' is passed as argument. - func_help () - { -+ $opt_debug -+ - $SED -n '/^# Usage:/,/# Report bugs to/ { -+ :print - s/^# // - s/^# *$// - s*\$progname*'$progname'* -@@ -714,7 +774,11 @@ func_help () - s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ - s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ - p -- }' < "$progpath" -+ d -+ } -+ /^# .* home page:/b print -+ /^# General help using/b print -+ ' < "$progpath" - ret=$? - if test -z "$1"; then - exit $ret -@@ -726,12 +790,39 @@ func_help () - # exit_cmd. - func_missing_arg () - { -- func_error "missing argument for $1" -+ $opt_debug -+ -+ func_error "missing argument for $1." - exit_cmd=exit - } - --exit_cmd=: - -+# func_split_short_opt shortopt -+# Set func_split_short_opt_name and func_split_short_opt_arg shell -+# variables after splitting SHORTOPT after the 2nd character. -+func_split_short_opt () -+{ -+ my_sed_short_opt='1s/^\(..\).*$/\1/;q' -+ my_sed_short_rest='1s/^..\(.*\)$/\1/;q' -+ -+ func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` -+ func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` -+} # func_split_short_opt may be replaced by extended shell implementation -+ -+ -+# func_split_long_opt longopt -+# Set func_split_long_opt_name and func_split_long_opt_arg shell -+# variables after splitting LONGOPT at the `=' sign. -+func_split_long_opt () -+{ -+ my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' -+ my_sed_long_arg='1s/^--[^=]*=//' -+ -+ func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` -+ func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` -+} # func_split_long_opt may be replaced by extended shell implementation -+ -+exit_cmd=: - - - -@@ -741,26 +832,64 @@ magic="%%%MAGIC variable%%%" - magic_exe="%%%MAGIC EXE variable%%%" - - # Global variables. --# $mode is unset - nonopt= --execute_dlfiles= - preserve_args= - lo2o="s/\\.lo\$/.${objext}/" - o2lo="s/\\.${objext}\$/.lo/" - extracted_archives= - extracted_serial=0 - --opt_dry_run=false --opt_finish=: --opt_duplicate_deps=false --opt_silent=false --opt_debug=: -- - # If this variable is set in any of the actions, the command in it - # will be execed at the end. This prevents here-documents from being - # left over by shells. - exec_cmd= - -+# func_append var value -+# Append VALUE to the end of shell variable VAR. -+func_append () -+{ -+ eval "${1}=\$${1}\${2}" -+} # func_append may be replaced by extended shell implementation -+ -+# func_append_quoted var value -+# Quote VALUE and append to the end of shell variable VAR, separated -+# by a space. -+func_append_quoted () -+{ -+ func_quote_for_eval "${2}" -+ eval "${1}=\$${1}\\ \$func_quote_for_eval_result" -+} # func_append_quoted may be replaced by extended shell implementation -+ -+ -+# func_arith arithmetic-term... -+func_arith () -+{ -+ func_arith_result=`expr "${@}"` -+} # func_arith may be replaced by extended shell implementation -+ -+ -+# func_len string -+# STRING may not start with a hyphen. 
-+func_len () -+{ -+ func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` -+} # func_len may be replaced by extended shell implementation -+ -+ -+# func_lo2o object -+func_lo2o () -+{ -+ func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` -+} # func_lo2o may be replaced by extended shell implementation -+ -+ -+# func_xform libobj-or-source -+func_xform () -+{ -+ func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` -+} # func_xform may be replaced by extended shell implementation -+ -+ - # func_fatal_configuration arg... - # Echo program name prefixed message to standard error, followed by - # a configuration failure hint, and exit. -@@ -850,130 +979,204 @@ func_enable_tag () - esac - } - --# Parse options once, thoroughly. This comes as soon as possible in --# the script to make things like `libtool --version' happen quickly. -+# func_check_version_match -+# Ensure that we are using m4 macros, and libtool script from the same -+# release of libtool. -+func_check_version_match () - { -+ if test "$package_revision" != "$macro_revision"; then -+ if test "$VERSION" != "$macro_version"; then -+ if test -z "$macro_version"; then -+ cat >&2 <<_LT_EOF -+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the -+$progname: definition of this LT_INIT comes from an older release. -+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION -+$progname: and run autoconf again. -+_LT_EOF -+ else -+ cat >&2 <<_LT_EOF -+$progname: Version mismatch error. This is $PACKAGE $VERSION, but the -+$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. -+$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION -+$progname: and run autoconf again. -+_LT_EOF -+ fi -+ else -+ cat >&2 <<_LT_EOF -+$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, -+$progname: but the definition of this LT_INIT comes from revision $macro_revision. -+$progname: You should recreate aclocal.m4 with macros from revision $package_revision -+$progname: of $PACKAGE $VERSION and run autoconf again. 
-+_LT_EOF -+ fi - -- # Shorthand for --mode=foo, only valid as the first argument -- case $1 in -- clean|clea|cle|cl) -- shift; set dummy --mode clean ${1+"$@"}; shift -- ;; -- compile|compil|compi|comp|com|co|c) -- shift; set dummy --mode compile ${1+"$@"}; shift -- ;; -- execute|execut|execu|exec|exe|ex|e) -- shift; set dummy --mode execute ${1+"$@"}; shift -- ;; -- finish|finis|fini|fin|fi|f) -- shift; set dummy --mode finish ${1+"$@"}; shift -- ;; -- install|instal|insta|inst|ins|in|i) -- shift; set dummy --mode install ${1+"$@"}; shift -- ;; -- link|lin|li|l) -- shift; set dummy --mode link ${1+"$@"}; shift -- ;; -- uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) -- shift; set dummy --mode uninstall ${1+"$@"}; shift -- ;; -- esac -+ exit $EXIT_MISMATCH -+ fi -+} -+ -+ -+# Shorthand for --mode=foo, only valid as the first argument -+case $1 in -+clean|clea|cle|cl) -+ shift; set dummy --mode clean ${1+"$@"}; shift -+ ;; -+compile|compil|compi|comp|com|co|c) -+ shift; set dummy --mode compile ${1+"$@"}; shift -+ ;; -+execute|execut|execu|exec|exe|ex|e) -+ shift; set dummy --mode execute ${1+"$@"}; shift -+ ;; -+finish|finis|fini|fin|fi|f) -+ shift; set dummy --mode finish ${1+"$@"}; shift -+ ;; -+install|instal|insta|inst|ins|in|i) -+ shift; set dummy --mode install ${1+"$@"}; shift -+ ;; -+link|lin|li|l) -+ shift; set dummy --mode link ${1+"$@"}; shift -+ ;; -+uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) -+ shift; set dummy --mode uninstall ${1+"$@"}; shift -+ ;; -+esac - -- # Parse non-mode specific arguments: -- while test "$#" -gt 0; do -+ -+ -+# Option defaults: -+opt_debug=: -+opt_dry_run=false -+opt_config=false -+opt_preserve_dup_deps=false -+opt_features=false -+opt_finish=false -+opt_help=false -+opt_help_all=false -+opt_silent=: -+opt_verbose=: -+opt_silent=false -+opt_verbose=false -+ -+ -+# Parse options once, thoroughly. This comes as soon as possible in the -+# script to make things like `--version' happen as quickly as we can. 
-+{ -+ # this just eases exit handling -+ while test $# -gt 0; do - opt="$1" - shift -- - case $opt in -- --config) func_config ;; -- -- --debug) preserve_args="$preserve_args $opt" -+ --debug|-x) opt_debug='set -x' - func_echo "enabling shell trace mode" -- opt_debug='set -x' - $opt_debug - ;; -- -- -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break -- execute_dlfiles="$execute_dlfiles $1" -- shift -+ --dry-run|--dryrun|-n) -+ opt_dry_run=: - ;; -- -- --dry-run | -n) opt_dry_run=: ;; -- --features) func_features ;; -- --finish) mode="finish" ;; -- --no-finish) opt_finish=false ;; -- -- --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break -- case $1 in -- # Valid mode arguments: -- clean) ;; -- compile) ;; -- execute) ;; -- finish) ;; -- install) ;; -- link) ;; -- relink) ;; -- uninstall) ;; -- -- # Catch anything else as an error -- *) func_error "invalid argument for $opt" -- exit_cmd=exit -- break -- ;; -- esac -- -- mode="$1" -+ --config) -+ opt_config=: -+func_config -+ ;; -+ --dlopen|-dlopen) -+ optarg="$1" -+ opt_dlopen="${opt_dlopen+$opt_dlopen -+}$optarg" - shift - ;; -- - --preserve-dup-deps) -- opt_duplicate_deps=: ;; -- -- --quiet|--silent) preserve_args="$preserve_args $opt" -- opt_silent=: -- opt_verbose=false -+ opt_preserve_dup_deps=: - ;; -- -- --no-quiet|--no-silent) -- preserve_args="$preserve_args $opt" -- opt_silent=false -+ --features) -+ opt_features=: -+func_features - ;; -- -- --verbose| -v) preserve_args="$preserve_args $opt" -+ --finish) -+ opt_finish=: -+set dummy --mode finish ${1+"$@"}; shift -+ ;; -+ --help) -+ opt_help=: -+ ;; -+ --help-all) -+ opt_help_all=: -+opt_help=': help-all' -+ ;; -+ --mode) -+ test $# = 0 && func_missing_arg $opt && break -+ optarg="$1" -+ opt_mode="$optarg" -+case $optarg in -+ # Valid mode arguments: -+ clean|compile|execute|finish|install|link|relink|uninstall) ;; -+ -+ # Catch anything else as an error -+ *) func_error "invalid argument for $opt" -+ exit_cmd=exit -+ break -+ ;; -+esac -+ shift -+ ;; -+ --no-silent|--no-quiet) - opt_silent=false -- opt_verbose=: -+func_append preserve_args " $opt" - ;; -- -- --no-verbose) preserve_args="$preserve_args $opt" -+ --no-verbose) - opt_verbose=false -+func_append preserve_args " $opt" - ;; -- -- --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break -- preserve_args="$preserve_args $opt $1" -- func_enable_tag "$1" # tagname is set here -+ --silent|--quiet) -+ opt_silent=: -+func_append preserve_args " $opt" -+ opt_verbose=false -+ ;; -+ --verbose|-v) -+ opt_verbose=: -+func_append preserve_args " $opt" -+opt_silent=false -+ ;; -+ --tag) -+ test $# = 0 && func_missing_arg $opt && break -+ optarg="$1" -+ opt_tag="$optarg" -+func_append preserve_args " $opt $optarg" -+func_enable_tag "$optarg" - shift - ;; - -+ -\?|-h) func_usage ;; -+ --help) func_help ;; -+ --version) func_version ;; -+ - # Separate optargs to long options: -- -dlopen=*|--mode=*|--tag=*) -- func_opt_split "$opt" -- set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} -+ --*=*) -+ func_split_long_opt "$opt" -+ set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} - shift - ;; - -- -\?|-h) func_usage ;; -- --help) opt_help=: ;; -- --help-all) opt_help=': help-all' ;; -- --version) func_version ;; -- -- -*) func_fatal_help "unrecognized option \`$opt'" ;; -- -- *) nonopt="$opt" -- break -+ # Separate non-argument short options: -+ -\?*|-h*|-n*|-v*) -+ func_split_short_opt "$opt" -+ set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} -+ shift - 
;; -+ -+ --) break ;; -+ -*) func_fatal_help "unrecognized option \`$opt'" ;; -+ *) set dummy "$opt" ${1+"$@"}; shift; break ;; - esac - done - -+ # Validate options: -+ -+ # save first non-option argument -+ if test "$#" -gt 0; then -+ nonopt="$opt" -+ shift -+ fi -+ -+ # preserve --debug -+ test "$opt_debug" = : || func_append preserve_args " --debug" - - case $host in - *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* ) -@@ -981,82 +1184,44 @@ func_enable_tag () - opt_duplicate_compiler_generated_deps=: - ;; - *) -- opt_duplicate_compiler_generated_deps=$opt_duplicate_deps -+ opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps - ;; - esac - -- # Having warned about all mis-specified options, bail out if -- # anything was wrong. -- $exit_cmd $EXIT_FAILURE --} -+ $opt_help || { -+ # Sanity checks first: -+ func_check_version_match - --# func_check_version_match --# Ensure that we are using m4 macros, and libtool script from the same --# release of libtool. --func_check_version_match () --{ -- if test "$package_revision" != "$macro_revision"; then -- if test "$VERSION" != "$macro_version"; then -- if test -z "$macro_version"; then -- cat >&2 <<_LT_EOF --$progname: Version mismatch error. This is $PACKAGE $VERSION, but the --$progname: definition of this LT_INIT comes from an older release. --$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION --$progname: and run autoconf again. --_LT_EOF -- else -- cat >&2 <<_LT_EOF --$progname: Version mismatch error. This is $PACKAGE $VERSION, but the --$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. --$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION --$progname: and run autoconf again. --_LT_EOF -- fi -- else -- cat >&2 <<_LT_EOF --$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, --$progname: but the definition of this LT_INIT comes from revision $macro_revision. --$progname: You should recreate aclocal.m4 with macros from revision $package_revision --$progname: of $PACKAGE $VERSION and run autoconf again. --_LT_EOF -+ if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then -+ func_fatal_configuration "not configured to build any kind of library" - fi - -- exit $EXIT_MISMATCH -- fi --} -- -+ # Darwin sucks -+ eval std_shrext=\"$shrext_cmds\" - --## ----------- ## --## Main. ## --## ----------- ## -- --$opt_help || { -- # Sanity checks first: -- func_check_version_match -- -- if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then -- func_fatal_configuration "not configured to build any kind of library" -- fi -+ # Only execute mode is allowed to have -dlopen flags. -+ if test -n "$opt_dlopen" && test "$opt_mode" != execute; then -+ func_error "unrecognized option \`-dlopen'" -+ $ECHO "$help" 1>&2 -+ exit $EXIT_FAILURE -+ fi - -- test -z "$mode" && func_fatal_error "error: you must specify a MODE." -+ # Change the help message to a mode-specific one. -+ generic_help="$help" -+ help="Try \`$progname --help --mode=$opt_mode' for more information." -+ } - - -- # Darwin sucks -- eval "std_shrext=\"$shrext_cmds\"" -+ # Bail if the options were screwed -+ $exit_cmd $EXIT_FAILURE -+} - - -- # Only execute mode is allowed to have -dlopen flags. -- if test -n "$execute_dlfiles" && test "$mode" != execute; then -- func_error "unrecognized option \`-dlopen'" -- $ECHO "$help" 1>&2 -- exit $EXIT_FAILURE -- fi - -- # Change the help message to a mode-specific one. 
-- generic_help="$help" -- help="Try \`$progname --help --mode=$mode' for more information." --} - -+## ----------- ## -+## Main. ## -+## ----------- ## - - # func_lalib_p file - # True iff FILE is a libtool `.la' library or `.lo' object file. -@@ -1121,12 +1286,9 @@ func_ltwrapper_executable_p () - # temporary ltwrapper_script. - func_ltwrapper_scriptname () - { -- func_ltwrapper_scriptname_result="" -- if func_ltwrapper_executable_p "$1"; then -- func_dirname_and_basename "$1" "" "." -- func_stripname '' '.exe' "$func_basename_result" -- func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" -- fi -+ func_dirname_and_basename "$1" "" "." -+ func_stripname '' '.exe' "$func_basename_result" -+ func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" - } - - # func_ltwrapper_p file -@@ -1149,7 +1311,7 @@ func_execute_cmds () - save_ifs=$IFS; IFS='~' - for cmd in $1; do - IFS=$save_ifs -- eval "cmd=\"$cmd\"" -+ eval cmd=\"$cmd\" - func_show_eval "$cmd" "${2-:}" - done - IFS=$save_ifs -@@ -1172,6 +1334,37 @@ func_source () - } - - -+# func_resolve_sysroot PATH -+# Replace a leading = in PATH with a sysroot. Store the result into -+# func_resolve_sysroot_result -+func_resolve_sysroot () -+{ -+ func_resolve_sysroot_result=$1 -+ case $func_resolve_sysroot_result in -+ =*) -+ func_stripname '=' '' "$func_resolve_sysroot_result" -+ func_resolve_sysroot_result=$lt_sysroot$func_stripname_result -+ ;; -+ esac -+} -+ -+# func_replace_sysroot PATH -+# If PATH begins with the sysroot, replace it with = and -+# store the result into func_replace_sysroot_result. -+func_replace_sysroot () -+{ -+ case "$lt_sysroot:$1" in -+ ?*:"$lt_sysroot"*) -+ func_stripname "$lt_sysroot" '' "$1" -+ func_replace_sysroot_result="=$func_stripname_result" -+ ;; -+ *) -+ # Including no sysroot. -+ func_replace_sysroot_result=$1 -+ ;; -+ esac -+} -+ - # func_infer_tag arg - # Infer tagged configuration to use if any are available and - # if one wasn't chosen via the "--tag" command line option. -@@ -1184,8 +1377,7 @@ func_infer_tag () - if test -n "$available_tags" && test -z "$tagname"; then - CC_quoted= - for arg in $CC; do -- func_quote_for_eval "$arg" -- CC_quoted="$CC_quoted $func_quote_for_eval_result" -+ func_append_quoted CC_quoted "$arg" - done - CC_expanded=`func_echo_all $CC` - CC_quoted_expanded=`func_echo_all $CC_quoted` -@@ -1204,8 +1396,7 @@ func_infer_tag () - CC_quoted= - for arg in $CC; do - # Double-quote args containing other shell metacharacters. -- func_quote_for_eval "$arg" -- CC_quoted="$CC_quoted $func_quote_for_eval_result" -+ func_append_quoted CC_quoted "$arg" - done - CC_expanded=`func_echo_all $CC` - CC_quoted_expanded=`func_echo_all $CC_quoted` -@@ -1274,6 +1465,486 @@ EOF - } - } - -+ -+################################################## -+# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS # -+################################################## -+ -+# func_convert_core_file_wine_to_w32 ARG -+# Helper function used by file name conversion functions when $build is *nix, -+# and $host is mingw, cygwin, or some other w32 environment. Relies on a -+# correctly configured wine environment available, with the winepath program -+# in $build's $PATH. -+# -+# ARG is the $build file name to be converted to w32 format. 
-+# Result is available in $func_convert_core_file_wine_to_w32_result, and will -+# be empty on error (or when ARG is empty) -+func_convert_core_file_wine_to_w32 () -+{ -+ $opt_debug -+ func_convert_core_file_wine_to_w32_result="$1" -+ if test -n "$1"; then -+ # Unfortunately, winepath does not exit with a non-zero error code, so we -+ # are forced to check the contents of stdout. On the other hand, if the -+ # command is not found, the shell will set an exit code of 127 and print -+ # *an error message* to stdout. So we must check for both error code of -+ # zero AND non-empty stdout, which explains the odd construction: -+ func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null` -+ if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then -+ func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | -+ $SED -e "$lt_sed_naive_backslashify"` -+ else -+ func_convert_core_file_wine_to_w32_result= -+ fi -+ fi -+} -+# end: func_convert_core_file_wine_to_w32 -+ -+ -+# func_convert_core_path_wine_to_w32 ARG -+# Helper function used by path conversion functions when $build is *nix, and -+# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly -+# configured wine environment available, with the winepath program in $build's -+# $PATH. Assumes ARG has no leading or trailing path separator characters. -+# -+# ARG is path to be converted from $build format to win32. -+# Result is available in $func_convert_core_path_wine_to_w32_result. -+# Unconvertible file (directory) names in ARG are skipped; if no directory names -+# are convertible, then the result may be empty. -+func_convert_core_path_wine_to_w32 () -+{ -+ $opt_debug -+ # unfortunately, winepath doesn't convert paths, only file names -+ func_convert_core_path_wine_to_w32_result="" -+ if test -n "$1"; then -+ oldIFS=$IFS -+ IFS=: -+ for func_convert_core_path_wine_to_w32_f in $1; do -+ IFS=$oldIFS -+ func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" -+ if test -n "$func_convert_core_file_wine_to_w32_result" ; then -+ if test -z "$func_convert_core_path_wine_to_w32_result"; then -+ func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" -+ else -+ func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" -+ fi -+ fi -+ done -+ IFS=$oldIFS -+ fi -+} -+# end: func_convert_core_path_wine_to_w32 -+ -+ -+# func_cygpath ARGS... -+# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when -+# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) -+# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or -+# (2), returns the Cygwin file name or path in func_cygpath_result (input -+# file name or path is assumed to be in w32 format, as previously converted -+# from $build's *nix or MSYS format). In case (3), returns the w32 file name -+# or path in func_cygpath_result (input file name or path is assumed to be in -+# Cygwin format). Returns an empty string on error. -+# -+# ARGS are passed to cygpath, with the last one being the file name or path to -+# be converted. -+# -+# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH -+# environment variable; do not put it in $PATH. -+func_cygpath () -+{ -+ $opt_debug -+ if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then -+ func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` -+ if test "$?" 
-ne 0; then -+ # on failure, ensure result is empty -+ func_cygpath_result= -+ fi -+ else -+ func_cygpath_result= -+ func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" -+ fi -+} -+#end: func_cygpath -+ -+ -+# func_convert_core_msys_to_w32 ARG -+# Convert file name or path ARG from MSYS format to w32 format. Return -+# result in func_convert_core_msys_to_w32_result. -+func_convert_core_msys_to_w32 () -+{ -+ $opt_debug -+ # awkward: cmd appends spaces to result -+ func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | -+ $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` -+} -+#end: func_convert_core_msys_to_w32 -+ -+ -+# func_convert_file_check ARG1 ARG2 -+# Verify that ARG1 (a file name in $build format) was converted to $host -+# format in ARG2. Otherwise, emit an error message, but continue (resetting -+# func_to_host_file_result to ARG1). -+func_convert_file_check () -+{ -+ $opt_debug -+ if test -z "$2" && test -n "$1" ; then -+ func_error "Could not determine host file name corresponding to" -+ func_error " \`$1'" -+ func_error "Continuing, but uninstalled executables may not work." -+ # Fallback: -+ func_to_host_file_result="$1" -+ fi -+} -+# end func_convert_file_check -+ -+ -+# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH -+# Verify that FROM_PATH (a path in $build format) was converted to $host -+# format in TO_PATH. Otherwise, emit an error message, but continue, resetting -+# func_to_host_file_result to a simplistic fallback value (see below). -+func_convert_path_check () -+{ -+ $opt_debug -+ if test -z "$4" && test -n "$3"; then -+ func_error "Could not determine the host path corresponding to" -+ func_error " \`$3'" -+ func_error "Continuing, but uninstalled executables may not work." -+ # Fallback. This is a deliberately simplistic "conversion" and -+ # should not be "improved". See libtool.info. -+ if test "x$1" != "x$2"; then -+ lt_replace_pathsep_chars="s|$1|$2|g" -+ func_to_host_path_result=`echo "$3" | -+ $SED -e "$lt_replace_pathsep_chars"` -+ else -+ func_to_host_path_result="$3" -+ fi -+ fi -+} -+# end func_convert_path_check -+ -+ -+# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG -+# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT -+# and appending REPL if ORIG matches BACKPAT. -+func_convert_path_front_back_pathsep () -+{ -+ $opt_debug -+ case $4 in -+ $1 ) func_to_host_path_result="$3$func_to_host_path_result" -+ ;; -+ esac -+ case $4 in -+ $2 ) func_append func_to_host_path_result "$3" -+ ;; -+ esac -+} -+# end func_convert_path_front_back_pathsep -+ -+ -+################################################## -+# $build to $host FILE NAME CONVERSION FUNCTIONS # -+################################################## -+# invoked via `$to_host_file_cmd ARG' -+# -+# In each case, ARG is the path to be converted from $build to $host format. -+# Result will be available in $func_to_host_file_result. -+ -+ -+# func_to_host_file ARG -+# Converts the file name ARG from $build format to $host format. Return result -+# in func_to_host_file_result. -+func_to_host_file () -+{ -+ $opt_debug -+ $to_host_file_cmd "$1" -+} -+# end func_to_host_file -+ -+ -+# func_to_tool_file ARG LAZY -+# converts the file name ARG from $build format to toolchain format. Return -+# result in func_to_tool_file_result. If the conversion in use is listed -+# in (the comma separated) LAZY, no conversion takes place. 
-+func_to_tool_file () -+{ -+ $opt_debug -+ case ,$2, in -+ *,"$to_tool_file_cmd",*) -+ func_to_tool_file_result=$1 -+ ;; -+ *) -+ $to_tool_file_cmd "$1" -+ func_to_tool_file_result=$func_to_host_file_result -+ ;; -+ esac -+} -+# end func_to_tool_file -+ -+ -+# func_convert_file_noop ARG -+# Copy ARG to func_to_host_file_result. -+func_convert_file_noop () -+{ -+ func_to_host_file_result="$1" -+} -+# end func_convert_file_noop -+ -+ -+# func_convert_file_msys_to_w32 ARG -+# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic -+# conversion to w32 is not available inside the cwrapper. Returns result in -+# func_to_host_file_result. -+func_convert_file_msys_to_w32 () -+{ -+ $opt_debug -+ func_to_host_file_result="$1" -+ if test -n "$1"; then -+ func_convert_core_msys_to_w32 "$1" -+ func_to_host_file_result="$func_convert_core_msys_to_w32_result" -+ fi -+ func_convert_file_check "$1" "$func_to_host_file_result" -+} -+# end func_convert_file_msys_to_w32 -+ -+ -+# func_convert_file_cygwin_to_w32 ARG -+# Convert file name ARG from Cygwin to w32 format. Returns result in -+# func_to_host_file_result. -+func_convert_file_cygwin_to_w32 () -+{ -+ $opt_debug -+ func_to_host_file_result="$1" -+ if test -n "$1"; then -+ # because $build is cygwin, we call "the" cygpath in $PATH; no need to use -+ # LT_CYGPATH in this case. -+ func_to_host_file_result=`cygpath -m "$1"` -+ fi -+ func_convert_file_check "$1" "$func_to_host_file_result" -+} -+# end func_convert_file_cygwin_to_w32 -+ -+ -+# func_convert_file_nix_to_w32 ARG -+# Convert file name ARG from *nix to w32 format. Requires a wine environment -+# and a working winepath. Returns result in func_to_host_file_result. -+func_convert_file_nix_to_w32 () -+{ -+ $opt_debug -+ func_to_host_file_result="$1" -+ if test -n "$1"; then -+ func_convert_core_file_wine_to_w32 "$1" -+ func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" -+ fi -+ func_convert_file_check "$1" "$func_to_host_file_result" -+} -+# end func_convert_file_nix_to_w32 -+ -+ -+# func_convert_file_msys_to_cygwin ARG -+# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. -+# Returns result in func_to_host_file_result. -+func_convert_file_msys_to_cygwin () -+{ -+ $opt_debug -+ func_to_host_file_result="$1" -+ if test -n "$1"; then -+ func_convert_core_msys_to_w32 "$1" -+ func_cygpath -u "$func_convert_core_msys_to_w32_result" -+ func_to_host_file_result="$func_cygpath_result" -+ fi -+ func_convert_file_check "$1" "$func_to_host_file_result" -+} -+# end func_convert_file_msys_to_cygwin -+ -+ -+# func_convert_file_nix_to_cygwin ARG -+# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed -+# in a wine environment, working winepath, and LT_CYGPATH set. Returns result -+# in func_to_host_file_result. -+func_convert_file_nix_to_cygwin () -+{ -+ $opt_debug -+ func_to_host_file_result="$1" -+ if test -n "$1"; then -+ # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. -+ func_convert_core_file_wine_to_w32 "$1" -+ func_cygpath -u "$func_convert_core_file_wine_to_w32_result" -+ func_to_host_file_result="$func_cygpath_result" -+ fi -+ func_convert_file_check "$1" "$func_to_host_file_result" -+} -+# end func_convert_file_nix_to_cygwin -+ -+ -+############################################# -+# $build to $host PATH CONVERSION FUNCTIONS # -+############################################# -+# invoked via `$to_host_path_cmd ARG' -+# -+# In each case, ARG is the path to be converted from $build to $host format. 
-+# The result will be available in $func_to_host_path_result. -+# -+# Path separators are also converted from $build format to $host format. If -+# ARG begins or ends with a path separator character, it is preserved (but -+# converted to $host format) on output. -+# -+# All path conversion functions are named using the following convention: -+# file name conversion function : func_convert_file_X_to_Y () -+# path conversion function : func_convert_path_X_to_Y () -+# where, for any given $build/$host combination the 'X_to_Y' value is the -+# same. If conversion functions are added for new $build/$host combinations, -+# the two new functions must follow this pattern, or func_init_to_host_path_cmd -+# will break. -+ -+ -+# func_init_to_host_path_cmd -+# Ensures that function "pointer" variable $to_host_path_cmd is set to the -+# appropriate value, based on the value of $to_host_file_cmd. -+to_host_path_cmd= -+func_init_to_host_path_cmd () -+{ -+ $opt_debug -+ if test -z "$to_host_path_cmd"; then -+ func_stripname 'func_convert_file_' '' "$to_host_file_cmd" -+ to_host_path_cmd="func_convert_path_${func_stripname_result}" -+ fi -+} -+ -+ -+# func_to_host_path ARG -+# Converts the path ARG from $build format to $host format. Return result -+# in func_to_host_path_result. -+func_to_host_path () -+{ -+ $opt_debug -+ func_init_to_host_path_cmd -+ $to_host_path_cmd "$1" -+} -+# end func_to_host_path -+ -+ -+# func_convert_path_noop ARG -+# Copy ARG to func_to_host_path_result. -+func_convert_path_noop () -+{ -+ func_to_host_path_result="$1" -+} -+# end func_convert_path_noop -+ -+ -+# func_convert_path_msys_to_w32 ARG -+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic -+# conversion to w32 is not available inside the cwrapper. Returns result in -+# func_to_host_path_result. -+func_convert_path_msys_to_w32 () -+{ -+ $opt_debug -+ func_to_host_path_result="$1" -+ if test -n "$1"; then -+ # Remove leading and trailing path separator characters from ARG. MSYS -+ # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; -+ # and winepath ignores them completely. -+ func_stripname : : "$1" -+ func_to_host_path_tmp1=$func_stripname_result -+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" -+ func_to_host_path_result="$func_convert_core_msys_to_w32_result" -+ func_convert_path_check : ";" \ -+ "$func_to_host_path_tmp1" "$func_to_host_path_result" -+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" -+ fi -+} -+# end func_convert_path_msys_to_w32 -+ -+ -+# func_convert_path_cygwin_to_w32 ARG -+# Convert path ARG from Cygwin to w32 format. Returns result in -+# func_to_host_file_result. -+func_convert_path_cygwin_to_w32 () -+{ -+ $opt_debug -+ func_to_host_path_result="$1" -+ if test -n "$1"; then -+ # See func_convert_path_msys_to_w32: -+ func_stripname : : "$1" -+ func_to_host_path_tmp1=$func_stripname_result -+ func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` -+ func_convert_path_check : ";" \ -+ "$func_to_host_path_tmp1" "$func_to_host_path_result" -+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" -+ fi -+} -+# end func_convert_path_cygwin_to_w32 -+ -+ -+# func_convert_path_nix_to_w32 ARG -+# Convert path ARG from *nix to w32 format. Requires a wine environment and -+# a working winepath. Returns result in func_to_host_file_result. 
-+func_convert_path_nix_to_w32 () -+{ -+ $opt_debug -+ func_to_host_path_result="$1" -+ if test -n "$1"; then -+ # See func_convert_path_msys_to_w32: -+ func_stripname : : "$1" -+ func_to_host_path_tmp1=$func_stripname_result -+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" -+ func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" -+ func_convert_path_check : ";" \ -+ "$func_to_host_path_tmp1" "$func_to_host_path_result" -+ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" -+ fi -+} -+# end func_convert_path_nix_to_w32 -+ -+ -+# func_convert_path_msys_to_cygwin ARG -+# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. -+# Returns result in func_to_host_file_result. -+func_convert_path_msys_to_cygwin () -+{ -+ $opt_debug -+ func_to_host_path_result="$1" -+ if test -n "$1"; then -+ # See func_convert_path_msys_to_w32: -+ func_stripname : : "$1" -+ func_to_host_path_tmp1=$func_stripname_result -+ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" -+ func_cygpath -u -p "$func_convert_core_msys_to_w32_result" -+ func_to_host_path_result="$func_cygpath_result" -+ func_convert_path_check : : \ -+ "$func_to_host_path_tmp1" "$func_to_host_path_result" -+ func_convert_path_front_back_pathsep ":*" "*:" : "$1" -+ fi -+} -+# end func_convert_path_msys_to_cygwin -+ -+ -+# func_convert_path_nix_to_cygwin ARG -+# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a -+# a wine environment, working winepath, and LT_CYGPATH set. Returns result in -+# func_to_host_file_result. -+func_convert_path_nix_to_cygwin () -+{ -+ $opt_debug -+ func_to_host_path_result="$1" -+ if test -n "$1"; then -+ # Remove leading and trailing path separator characters from -+ # ARG. msys behavior is inconsistent here, cygpath turns them -+ # into '.;' and ';.', and winepath ignores them completely. -+ func_stripname : : "$1" -+ func_to_host_path_tmp1=$func_stripname_result -+ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" -+ func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" -+ func_to_host_path_result="$func_cygpath_result" -+ func_convert_path_check : : \ -+ "$func_to_host_path_tmp1" "$func_to_host_path_result" -+ func_convert_path_front_back_pathsep ":*" "*:" : "$1" -+ fi -+} -+# end func_convert_path_nix_to_cygwin -+ -+ - # func_mode_compile arg... - func_mode_compile () - { -@@ -1314,12 +1985,12 @@ func_mode_compile () - ;; - - -pie | -fpie | -fPIE) -- pie_flag="$pie_flag $arg" -+ func_append pie_flag " $arg" - continue - ;; - - -shared | -static | -prefer-pic | -prefer-non-pic) -- later="$later $arg" -+ func_append later " $arg" - continue - ;; - -@@ -1340,15 +2011,14 @@ func_mode_compile () - save_ifs="$IFS"; IFS=',' - for arg in $args; do - IFS="$save_ifs" -- func_quote_for_eval "$arg" -- lastarg="$lastarg $func_quote_for_eval_result" -+ func_append_quoted lastarg "$arg" - done - IFS="$save_ifs" - func_stripname ' ' '' "$lastarg" - lastarg=$func_stripname_result - - # Add the arguments to base_compile. -- base_compile="$base_compile $lastarg" -+ func_append base_compile " $lastarg" - continue - ;; - -@@ -1364,8 +2034,7 @@ func_mode_compile () - esac # case $arg_mode - - # Aesthetically quote the previous argument. -- func_quote_for_eval "$lastarg" -- base_compile="$base_compile $func_quote_for_eval_result" -+ func_append_quoted base_compile "$lastarg" - done # for arg - - case $arg_mode in -@@ -1496,17 +2165,16 @@ compiler." 
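The conversion helpers added above are never called directly; libtool records the chosen function name in $to_host_file_cmd / $to_host_path_cmd and dispatches through it. As an aside, here is a minimal sketch of that "function pointer" pattern in plain sh, with invented names and a hard-coded cygwin-to-w32 case; it only illustrates the idiom (and assumes cygpath is on PATH), it is not libtool's implementation:

    # Illustration only: dispatch through a variable that names a function.
    build=x86_64-pc-cygwin
    host=x86_64-w64-mingw32

    convert_file_noop () {
      to_host_file_result=$1
    }

    convert_file_cygwin_to_w32 () {
      to_host_file_result=$1
      # cygpath -m prints a mixed-mode (forward slash) Windows path.
      test -n "$1" && to_host_file_result=`cygpath -m "$1"`
    }

    case $build,$host in
      *cygwin*,*mingw*) to_host_file_cmd=convert_file_cygwin_to_w32 ;;
      *)                to_host_file_cmd=convert_file_noop ;;
    esac

    to_host_file () {
      $to_host_file_cmd "$1"          # indirect call through the variable
    }

    to_host_file /usr/include
    echo "$to_host_file_result"       # e.g. C:/cygwin64/usr/include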
- $opt_dry_run || $RM $removelist - exit $EXIT_FAILURE - fi -- removelist="$removelist $output_obj" -+ func_append removelist " $output_obj" - $ECHO "$srcfile" > "$lockfile" - fi - - $opt_dry_run || $RM $removelist -- removelist="$removelist $lockfile" -+ func_append removelist " $lockfile" - trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 - -- if test -n "$fix_srcfile_path"; then -- eval "srcfile=\"$fix_srcfile_path\"" -- fi -+ func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 -+ srcfile=$func_to_tool_file_result - func_quote_for_eval "$srcfile" - qsrcfile=$func_quote_for_eval_result - -@@ -1526,7 +2194,7 @@ compiler." - - if test -z "$output_obj"; then - # Place PIC objects in $objdir -- command="$command -o $lobj" -+ func_append command " -o $lobj" - fi - - func_show_eval_locale "$command" \ -@@ -1573,11 +2241,11 @@ compiler." - command="$base_compile $qsrcfile $pic_flag" - fi - if test "$compiler_c_o" = yes; then -- command="$command -o $obj" -+ func_append command " -o $obj" - fi - - # Suppress compiler output if we already did a PIC compilation. -- command="$command$suppress_output" -+ func_append command "$suppress_output" - func_show_eval_locale "$command" \ - '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' - -@@ -1622,13 +2290,13 @@ compiler." - } - - $opt_help || { -- test "$mode" = compile && func_mode_compile ${1+"$@"} -+ test "$opt_mode" = compile && func_mode_compile ${1+"$@"} - } - - func_mode_help () - { - # We need to display help for each of the modes. -- case $mode in -+ case $opt_mode in - "") - # Generic help is extracted from the usage comments - # at the start of this file. -@@ -1659,8 +2327,8 @@ This mode accepts the following additional options: - - -o OUTPUT-FILE set the output file name to OUTPUT-FILE - -no-suppress do not suppress compiler output for multiple passes -- -prefer-pic try to building PIC objects only -- -prefer-non-pic try to building non-PIC objects only -+ -prefer-pic try to build PIC objects only -+ -prefer-non-pic try to build non-PIC objects only - -shared do not build a \`.o' file suitable for static linking - -static only build a \`.o' file suitable for static linking - -Wc,FLAG pass FLAG directly to the compiler -@@ -1804,7 +2472,7 @@ Otherwise, only FILE itself is deleted using RM." - ;; - - *) -- func_fatal_help "invalid operation mode \`$mode'" -+ func_fatal_help "invalid operation mode \`$opt_mode'" - ;; - esac - -@@ -1819,13 +2487,13 @@ if $opt_help; then - else - { - func_help noexit -- for mode in compile link execute install finish uninstall clean; do -+ for opt_mode in compile link execute install finish uninstall clean; do - func_mode_help - done - } | sed -n '1p; 2,$s/^Usage:/ or: /p' - { - func_help noexit -- for mode in compile link execute install finish uninstall clean; do -+ for opt_mode in compile link execute install finish uninstall clean; do - echo - func_mode_help - done -@@ -1854,13 +2522,16 @@ func_mode_execute () - func_fatal_help "you must specify a COMMAND" - - # Handle -dlopen flags immediately. -- for file in $execute_dlfiles; do -+ for file in $opt_dlopen; do - test -f "$file" \ - || func_fatal_help "\`$file' is not a file" - - dir= - case $file in - *.la) -+ func_resolve_sysroot "$file" -+ file=$func_resolve_sysroot_result -+ - # Check to see that this really is a libtool archive. 
- func_lalib_unsafe_p "$file" \ - || func_fatal_help "\`$lib' is not a valid libtool archive" -@@ -1882,7 +2553,7 @@ func_mode_execute () - dir="$func_dirname_result" - - if test -f "$dir/$objdir/$dlname"; then -- dir="$dir/$objdir" -+ func_append dir "/$objdir" - else - if test ! -f "$dir/$dlname"; then - func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" -@@ -1907,10 +2578,10 @@ func_mode_execute () - test -n "$absdir" && dir="$absdir" - - # Now add the directory to shlibpath_var. -- if eval test -z \"\$$shlibpath_var\"; then -- eval $shlibpath_var=\$dir -+ if eval "test -z \"\$$shlibpath_var\""; then -+ eval "$shlibpath_var=\"\$dir\"" - else -- eval $shlibpath_var=\$dir:\$$shlibpath_var -+ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" - fi - done - -@@ -1939,8 +2610,7 @@ func_mode_execute () - ;; - esac - # Quote arguments (to preserve shell metacharacters). -- func_quote_for_eval "$file" -- args="$args $func_quote_for_eval_result" -+ func_append_quoted args "$file" - done - - if test "X$opt_dry_run" = Xfalse; then -@@ -1972,22 +2642,59 @@ func_mode_execute () - fi - } - --test "$mode" = execute && func_mode_execute ${1+"$@"} -+test "$opt_mode" = execute && func_mode_execute ${1+"$@"} - - - # func_mode_finish arg... - func_mode_finish () - { - $opt_debug -- libdirs="$nonopt" -+ libs= -+ libdirs= - admincmds= - -- if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then -- for dir -- do -- libdirs="$libdirs $dir" -- done -+ for opt in "$nonopt" ${1+"$@"} -+ do -+ if test -d "$opt"; then -+ func_append libdirs " $opt" - -+ elif test -f "$opt"; then -+ if func_lalib_unsafe_p "$opt"; then -+ func_append libs " $opt" -+ else -+ func_warning "\`$opt' is not a valid libtool archive" -+ fi -+ -+ else -+ func_fatal_error "invalid argument \`$opt'" -+ fi -+ done -+ -+ if test -n "$libs"; then -+ if test -n "$lt_sysroot"; then -+ sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` -+ sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" -+ else -+ sysroot_cmd= -+ fi -+ -+ # Remove sysroot references -+ if $opt_dry_run; then -+ for lib in $libs; do -+ echo "removing references to $lt_sysroot and \`=' prefixes from $lib" -+ done -+ else -+ tmpdir=`func_mktempdir` -+ for lib in $libs; do -+ sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ -+ > $tmpdir/tmp-la -+ mv -f $tmpdir/tmp-la $lib -+ done -+ ${RM}r "$tmpdir" -+ fi -+ fi -+ -+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then - for libdir in $libdirs; do - if test -n "$finish_cmds"; then - # Do each command in the finish commands. -@@ -1997,7 +2704,7 @@ func_mode_finish () - if test -n "$finish_eval"; then - # Do the single finish_eval. - eval cmds=\"$finish_eval\" -- $opt_dry_run || eval "$cmds" || admincmds="$admincmds -+ $opt_dry_run || eval "$cmds" || func_append admincmds " - $cmds" - fi - done -@@ -2006,53 +2713,55 @@ func_mode_finish () - # Exit here if they wanted silent mode. 
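The new finish-mode loop above rewrites each installed .la file so that the '=' markers and any literal $lt_sysroot prefix recorded at cross-build time disappear. A worked example of what that sed pipeline does to a typical dependency_libs entry; the sysroot and paths are made up, and the real code first escapes $lt_sysroot through $sed_make_literal_regex before using it as a pattern:

    # Illustration only: same substitutions as the finish-mode sed above.
    lt_sysroot=/opt/sdk/sysroot
    la="dependency_libs=' -L=/usr/lib =/usr/lib/libz.la $lt_sysroot/usr/lib/libm.la'"

    printf '%s\n' "$la" | sed \
      -e "s%\([ ']\)$lt_sysroot%\1%g" \
      -e "s/\([ ']-[LR]\)=/\1/g" \
      -e "s/\([ ']\)=/\1/g"
    # -> dependency_libs=' -L/usr/lib /usr/lib/libz.la /usr/lib/libm.la'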
- $opt_silent && exit $EXIT_SUCCESS - -- echo "----------------------------------------------------------------------" -- echo "Libraries have been installed in:" -- for libdir in $libdirs; do -- $ECHO " $libdir" -- done -- echo -- echo "If you ever happen to want to link against installed libraries" -- echo "in a given directory, LIBDIR, you must either use libtool, and" -- echo "specify the full pathname of the library, or use the \`-LLIBDIR'" -- echo "flag during linking and do at least one of the following:" -- if test -n "$shlibpath_var"; then -- echo " - add LIBDIR to the \`$shlibpath_var' environment variable" -- echo " during execution" -- fi -- if test -n "$runpath_var"; then -- echo " - add LIBDIR to the \`$runpath_var' environment variable" -- echo " during linking" -- fi -- if test -n "$hardcode_libdir_flag_spec"; then -- libdir=LIBDIR -- eval "flag=\"$hardcode_libdir_flag_spec\"" -+ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then -+ echo "----------------------------------------------------------------------" -+ echo "Libraries have been installed in:" -+ for libdir in $libdirs; do -+ $ECHO " $libdir" -+ done -+ echo -+ echo "If you ever happen to want to link against installed libraries" -+ echo "in a given directory, LIBDIR, you must either use libtool, and" -+ echo "specify the full pathname of the library, or use the \`-LLIBDIR'" -+ echo "flag during linking and do at least one of the following:" -+ if test -n "$shlibpath_var"; then -+ echo " - add LIBDIR to the \`$shlibpath_var' environment variable" -+ echo " during execution" -+ fi -+ if test -n "$runpath_var"; then -+ echo " - add LIBDIR to the \`$runpath_var' environment variable" -+ echo " during linking" -+ fi -+ if test -n "$hardcode_libdir_flag_spec"; then -+ libdir=LIBDIR -+ eval flag=\"$hardcode_libdir_flag_spec\" - -- $ECHO " - use the \`$flag' linker flag" -- fi -- if test -n "$admincmds"; then -- $ECHO " - have your system administrator run these commands:$admincmds" -- fi -- if test -f /etc/ld.so.conf; then -- echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" -- fi -- echo -+ $ECHO " - use the \`$flag' linker flag" -+ fi -+ if test -n "$admincmds"; then -+ $ECHO " - have your system administrator run these commands:$admincmds" -+ fi -+ if test -f /etc/ld.so.conf; then -+ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" -+ fi -+ echo - -- echo "See any operating system documentation about shared libraries for" -- case $host in -- solaris2.[6789]|solaris2.1[0-9]) -- echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" -- echo "pages." -- ;; -- *) -- echo "more information, such as the ld(1) and ld.so(8) manual pages." -- ;; -- esac -- echo "----------------------------------------------------------------------" -+ echo "See any operating system documentation about shared libraries for" -+ case $host in -+ solaris2.[6789]|solaris2.1[0-9]) -+ echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" -+ echo "pages." -+ ;; -+ *) -+ echo "more information, such as the ld(1) and ld.so(8) manual pages." -+ ;; -+ esac -+ echo "----------------------------------------------------------------------" -+ fi - exit $EXIT_SUCCESS - } - --test "$mode" = finish && func_mode_finish ${1+"$@"} -+test "$opt_mode" = finish && func_mode_finish ${1+"$@"} - - - # func_mode_install arg... -@@ -2077,7 +2786,7 @@ func_mode_install () - # The real first argument should be the name of the installation program. - # Aesthetically quote it. 
- func_quote_for_eval "$arg" -- install_prog="$install_prog$func_quote_for_eval_result" -+ func_append install_prog "$func_quote_for_eval_result" - install_shared_prog=$install_prog - case " $install_prog " in - *[\\\ /]cp\ *) install_cp=: ;; -@@ -2097,7 +2806,7 @@ func_mode_install () - do - arg2= - if test -n "$dest"; then -- files="$files $dest" -+ func_append files " $dest" - dest=$arg - continue - fi -@@ -2135,11 +2844,11 @@ func_mode_install () - - # Aesthetically quote the argument. - func_quote_for_eval "$arg" -- install_prog="$install_prog $func_quote_for_eval_result" -+ func_append install_prog " $func_quote_for_eval_result" - if test -n "$arg2"; then - func_quote_for_eval "$arg2" - fi -- install_shared_prog="$install_shared_prog $func_quote_for_eval_result" -+ func_append install_shared_prog " $func_quote_for_eval_result" - done - - test -z "$install_prog" && \ -@@ -2151,7 +2860,7 @@ func_mode_install () - if test -n "$install_override_mode" && $no_mode; then - if $install_cp; then :; else - func_quote_for_eval "$install_override_mode" -- install_shared_prog="$install_shared_prog -m $func_quote_for_eval_result" -+ func_append install_shared_prog " -m $func_quote_for_eval_result" - fi - fi - -@@ -2209,10 +2918,13 @@ func_mode_install () - case $file in - *.$libext) - # Do the static libraries later. -- staticlibs="$staticlibs $file" -+ func_append staticlibs " $file" - ;; - - *.la) -+ func_resolve_sysroot "$file" -+ file=$func_resolve_sysroot_result -+ - # Check to see that this really is a libtool archive. - func_lalib_unsafe_p "$file" \ - || func_fatal_help "\`$file' is not a valid libtool archive" -@@ -2226,23 +2938,30 @@ func_mode_install () - if test "X$destdir" = "X$libdir"; then - case "$current_libdirs " in - *" $libdir "*) ;; -- *) current_libdirs="$current_libdirs $libdir" ;; -+ *) func_append current_libdirs " $libdir" ;; - esac - else - # Note the libdir as a future libdir. - case "$future_libdirs " in - *" $libdir "*) ;; -- *) future_libdirs="$future_libdirs $libdir" ;; -+ *) func_append future_libdirs " $libdir" ;; - esac - fi - - func_dirname "$file" "/" "" - dir="$func_dirname_result" -- dir="$dir$objdir" -+ func_append dir "$objdir" - - if test -n "$relink_command"; then -+ # Strip any trailing slash from the destination. -+ func_stripname '' '/' "$libdir" -+ destlibdir=$func_stripname_result -+ -+ func_stripname '' '/' "$destdir" -+ s_destdir=$func_stripname_result -+ - # Determine the prefix the user has applied to our future dir. -- inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` -+ inst_prefix_dir=`$ECHO "X$s_destdir" | $Xsed -e "s%$destlibdir\$%%"` - - # Don't allow the user to place us outside of our expected - # location b/c this prevents finding dependent libraries that -@@ -2315,7 +3034,7 @@ func_mode_install () - func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' - - # Maybe install the static library, too. -- test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" -+ test -n "$old_library" && func_append staticlibs " $dir/$old_library" - ;; - - *.lo) -@@ -2503,7 +3222,7 @@ func_mode_install () - test -n "$future_libdirs" && \ - func_warning "remember to run \`$progname --finish$future_libdirs'" - -- if test -n "$current_libdirs" && $opt_finish; then -+ if test -n "$current_libdirs"; then - # Maybe just do a dry run. 
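The install-mode hunk above now strips trailing slashes from both paths before recovering the staging prefix, which is computed by deleting the recorded libdir from the end of the destination directory. A small worked example of that computation (the DESTDIR-style path is invented):

    # Illustration only: how inst_prefix_dir falls out of the sed above.
    libdir=/usr/lib                          # libdir recorded in the .la file
    destdir=/home/builder/image/usr/lib      # hypothetical staged install target

    inst_prefix_dir=`printf '%s\n' "$destdir" | sed "s%$libdir\$%%"`
    echo "$inst_prefix_dir"                  # -> /home/builder/image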
- $opt_dry_run && current_libdirs=" -n$current_libdirs" - exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' -@@ -2512,7 +3231,7 @@ func_mode_install () - fi - } - --test "$mode" = install && func_mode_install ${1+"$@"} -+test "$opt_mode" = install && func_mode_install ${1+"$@"} - - - # func_generate_dlsyms outputname originator pic_p -@@ -2559,6 +3278,18 @@ extern \"C\" { - #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" - #endif - -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - /* External symbol declarations for the compiler. */\ - " - -@@ -2570,21 +3301,22 @@ extern \"C\" { - # Add our own program objects to the symbol list. - progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` - for progfile in $progfiles; do -- func_verbose "extracting global C symbols from \`$progfile'" -- $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" -+ func_to_tool_file "$progfile" func_convert_file_msys_to_w32 -+ func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" -+ $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" - done - - if test -n "$exclude_expsyms"; then - $opt_dry_run || { -- $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T -- $MV "$nlist"T "$nlist" -+ eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' -+ eval '$MV "$nlist"T "$nlist"' - } - fi - - if test -n "$export_symbols_regex"; then - $opt_dry_run || { -- $EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T -- $MV "$nlist"T "$nlist" -+ eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' -+ eval '$MV "$nlist"T "$nlist"' - } - fi - -@@ -2593,23 +3325,23 @@ extern \"C\" { - export_symbols="$output_objdir/$outputname.exp" - $opt_dry_run || { - $RM $export_symbols -- ${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' < "$nlist" > "$export_symbols" -+ eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' - case $host in - *cygwin* | *mingw* | *cegcc* ) -- echo EXPORTS > "$output_objdir/$outputname.def" -- cat "$export_symbols" >> "$output_objdir/$outputname.def" -+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' -+ eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' - ;; - esac - } - else - $opt_dry_run || { -- ${SED} -e 's/\([].[*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/' < "$export_symbols" > "$output_objdir/$outputname.exp" -- $GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T -- $MV "$nlist"T "$nlist" -+ eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' -+ eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' -+ eval '$MV "$nlist"T "$nlist"' - case $host in - *cygwin* | *mingw* | *cegcc* ) -- echo EXPORTS > "$output_objdir/$outputname.def" -- cat "$nlist" >> "$output_objdir/$outputname.def" -+ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' -+ eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' - ;; - esac - } -@@ -2620,10 +3352,52 @@ extern \"C\" { - func_verbose 
"extracting global C symbols from \`$dlprefile'" - func_basename "$dlprefile" - name="$func_basename_result" -- $opt_dry_run || { -- $ECHO ": $name " >> "$nlist" -- eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" -- } -+ case $host in -+ *cygwin* | *mingw* | *cegcc* ) -+ # if an import library, we need to obtain dlname -+ if func_win32_import_lib_p "$dlprefile"; then -+ func_tr_sh "$dlprefile" -+ eval "curr_lafile=\$libfile_$func_tr_sh_result" -+ dlprefile_dlbasename="" -+ if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then -+ # Use subshell, to avoid clobbering current variable values -+ dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` -+ if test -n "$dlprefile_dlname" ; then -+ func_basename "$dlprefile_dlname" -+ dlprefile_dlbasename="$func_basename_result" -+ else -+ # no lafile. user explicitly requested -dlpreopen . -+ $sharedlib_from_linklib_cmd "$dlprefile" -+ dlprefile_dlbasename=$sharedlib_from_linklib_result -+ fi -+ fi -+ $opt_dry_run || { -+ if test -n "$dlprefile_dlbasename" ; then -+ eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' -+ else -+ func_warning "Could not compute DLL name from $name" -+ eval '$ECHO ": $name " >> "$nlist"' -+ fi -+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 -+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | -+ $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" -+ } -+ else # not an import lib -+ $opt_dry_run || { -+ eval '$ECHO ": $name " >> "$nlist"' -+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 -+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" -+ } -+ fi -+ ;; -+ *) -+ $opt_dry_run || { -+ eval '$ECHO ": $name " >> "$nlist"' -+ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 -+ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" -+ } -+ ;; -+ esac - done - - $opt_dry_run || { -@@ -2661,26 +3435,9 @@ typedef struct { - const char *name; - void *address; - } lt_dlsymlist; --" -- case $host in -- *cygwin* | *mingw* | *cegcc* ) -- echo >> "$output_objdir/$my_dlsyms" "\ --/* DATA imports from DLLs on WIN32 con't be const, because -- runtime relocations are performed -- see ld's documentation -- on pseudo-relocs. */" -- lt_dlsym_const= ;; -- *osf5*) -- echo >> "$output_objdir/$my_dlsyms" "\ --/* This system does not cope well with relocations in const data */" -- lt_dlsym_const= ;; -- *) -- lt_dlsym_const=const ;; -- esac -- -- echo >> "$output_objdir/$my_dlsyms" "\ --extern $lt_dlsym_const lt_dlsymlist -+extern LT_DLSYM_CONST lt_dlsymlist - lt_${my_prefix}_LTX_preloaded_symbols[]; --$lt_dlsym_const lt_dlsymlist -+LT_DLSYM_CONST lt_dlsymlist - lt_${my_prefix}_LTX_preloaded_symbols[] = - {\ - { \"$my_originator\", (void *) 0 }," -@@ -2736,7 +3493,7 @@ static const void *lt_preloaded_setup() { - for arg in $LTCFLAGS; do - case $arg in - -pie | -fpie | -fPIE) ;; -- *) symtab_cflags="$symtab_cflags $arg" ;; -+ *) func_append symtab_cflags " $arg" ;; - esac - done - -@@ -2796,9 +3553,11 @@ func_win32_libid () - win32_libid_type="x86 archive import" - ;; - *ar\ archive*) # could be an import, or static -- if $OBJDUMP -f "$1" | $SED -e '10q' 2>/dev/null | -- $EGREP 'file format (pe-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then -- win32_nmres=`$NM -f posix -A "$1" | -+ # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. 
-+ if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | -+ $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then -+ func_to_tool_file "$1" func_convert_file_msys_to_w32 -+ win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | - $SED -n -e ' - 1,100{ - / I /{ -@@ -2827,6 +3586,131 @@ func_win32_libid () - $ECHO "$win32_libid_type" - } - -+# func_cygming_dll_for_implib ARG -+# -+# Platform-specific function to extract the -+# name of the DLL associated with the specified -+# import library ARG. -+# Invoked by eval'ing the libtool variable -+# $sharedlib_from_linklib_cmd -+# Result is available in the variable -+# $sharedlib_from_linklib_result -+func_cygming_dll_for_implib () -+{ -+ $opt_debug -+ sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` -+} -+ -+# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs -+# -+# The is the core of a fallback implementation of a -+# platform-specific function to extract the name of the -+# DLL associated with the specified import library LIBNAME. -+# -+# SECTION_NAME is either .idata$6 or .idata$7, depending -+# on the platform and compiler that created the implib. -+# -+# Echos the name of the DLL associated with the -+# specified import library. -+func_cygming_dll_for_implib_fallback_core () -+{ -+ $opt_debug -+ match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` -+ $OBJDUMP -s --section "$1" "$2" 2>/dev/null | -+ $SED '/^Contents of section '"$match_literal"':/{ -+ # Place marker at beginning of archive member dllname section -+ s/.*/====MARK====/ -+ p -+ d -+ } -+ # These lines can sometimes be longer than 43 characters, but -+ # are always uninteresting -+ /:[ ]*file format pe[i]\{,1\}-/d -+ /^In archive [^:]*:/d -+ # Ensure marker is printed -+ /^====MARK====/p -+ # Remove all lines with less than 43 characters -+ /^.\{43\}/!d -+ # From remaining lines, remove first 43 characters -+ s/^.\{43\}//' | -+ $SED -n ' -+ # Join marker and all lines until next marker into a single line -+ /^====MARK====/ b para -+ H -+ $ b para -+ b -+ :para -+ x -+ s/\n//g -+ # Remove the marker -+ s/^====MARK====// -+ # Remove trailing dots and whitespace -+ s/[\. \t]*$// -+ # Print -+ /./p' | -+ # we now have a list, one entry per line, of the stringified -+ # contents of the appropriate section of all members of the -+ # archive which possess that section. Heuristic: eliminate -+ # all those which have a first or second character that is -+ # a '.' (that is, objdump's representation of an unprintable -+ # character.) This should work for all archives with less than -+ # 0x302f exports -- but will fail for DLLs whose name actually -+ # begins with a literal '.' or a single character followed by -+ # a '.'. -+ # -+ # Of those that remain, print the first one. -+ $SED -e '/^\./d;/^.\./d;q' -+} -+ -+# func_cygming_gnu_implib_p ARG -+# This predicate returns with zero status (TRUE) if -+# ARG is a GNU/binutils-style import library. Returns -+# with nonzero status (FALSE) otherwise. -+func_cygming_gnu_implib_p () -+{ -+ $opt_debug -+ func_to_tool_file "$1" func_convert_file_msys_to_w32 -+ func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` -+ test -n "$func_cygming_gnu_implib_tmp" -+} -+ -+# func_cygming_ms_implib_p ARG -+# This predicate returns with zero status (TRUE) if -+# ARG is an MS-style import library. Returns -+# with nonzero status (FALSE) otherwise. 
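func_cygming_dll_for_implib above asks $DLLTOOL for the DLL behind a Win32 import library, and the fallback variants scrape the .idata$6/.idata$7 sections with objdump when --identify-strict is unavailable. For orientation, the primary path boils down to something like the following sketch (the import-library name is invented; binutils dlltool is assumed to be present):

    # Illustration only: ask dlltool which DLL an import library refers to.
    implib=libfoo.dll.a                      # hypothetical MinGW import library
    if dllname=`dlltool --identify "$implib" 2>/dev/null` && test -n "$dllname"
    then
      echo "$implib -> $dllname"             # e.g. libfoo-1.dll
    else
      echo "dlltool could not identify $implib; fall back to objdump" >&2
    fi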
-+func_cygming_ms_implib_p () -+{ -+ $opt_debug -+ func_to_tool_file "$1" func_convert_file_msys_to_w32 -+ func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` -+ test -n "$func_cygming_ms_implib_tmp" -+} -+ -+# func_cygming_dll_for_implib_fallback ARG -+# Platform-specific function to extract the -+# name of the DLL associated with the specified -+# import library ARG. -+# -+# This fallback implementation is for use when $DLLTOOL -+# does not support the --identify-strict option. -+# Invoked by eval'ing the libtool variable -+# $sharedlib_from_linklib_cmd -+# Result is available in the variable -+# $sharedlib_from_linklib_result -+func_cygming_dll_for_implib_fallback () -+{ -+ $opt_debug -+ if func_cygming_gnu_implib_p "$1" ; then -+ # binutils import library -+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` -+ elif func_cygming_ms_implib_p "$1" ; then -+ # ms-generated import library -+ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` -+ else -+ # unknown -+ sharedlib_from_linklib_result="" -+ fi -+} - - - # func_extract_an_archive dir oldlib -@@ -2917,7 +3801,7 @@ func_extract_archives () - darwin_file= - darwin_files= - for darwin_file in $darwin_filelist; do -- darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` -+ darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` - $LIPO -create -output "$darwin_file" $darwin_files - done # $darwin_filelist - $RM -rf unfat-$$ -@@ -2932,7 +3816,7 @@ func_extract_archives () - func_extract_an_archive "$my_xdir" "$my_xabs" - ;; - esac -- my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` -+ my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` - done - - func_extract_archives_result="$my_oldobjs" -@@ -3014,7 +3898,110 @@ func_fallback_echo () - _LTECHO_EOF' - } - ECHO=\"$qECHO\" -- fi\ -+ fi -+ -+# Very basic option parsing. These options are (a) specific to -+# the libtool wrapper, (b) are identical between the wrapper -+# /script/ and the wrapper /executable/ which is used only on -+# windows platforms, and (c) all begin with the string "--lt-" -+# (application programs are unlikely to have options which match -+# this pattern). -+# -+# There are only two supported options: --lt-debug and -+# --lt-dump-script. There is, deliberately, no --lt-help. -+# -+# The first argument to this parsing function should be the -+# script's $0 value, followed by "$@". -+lt_option_debug= -+func_parse_lt_options () -+{ -+ lt_script_arg0=\$0 -+ shift -+ for lt_opt -+ do -+ case \"\$lt_opt\" in -+ --lt-debug) lt_option_debug=1 ;; -+ --lt-dump-script) -+ lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` -+ test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. -+ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` -+ cat \"\$lt_dump_D/\$lt_dump_F\" -+ exit 0 -+ ;; -+ --lt-*) -+ \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 -+ exit 1 -+ ;; -+ esac -+ done -+ -+ # Print the debug banner immediately: -+ if test -n \"\$lt_option_debug\"; then -+ echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 -+ fi -+} -+ -+# Used when --lt-debug. 
Prints its arguments to stdout -+# (redirection is the responsibility of the caller) -+func_lt_dump_args () -+{ -+ lt_dump_args_N=1; -+ for lt_arg -+ do -+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" -+ lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` -+ done -+} -+ -+# Core function for launching the target application -+func_exec_program_core () -+{ -+" -+ case $host in -+ # Backslashes separate directories on plain windows -+ *-*-mingw | *-*-os2* | *-cegcc*) -+ $ECHO "\ -+ if test -n \"\$lt_option_debug\"; then -+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 -+ func_lt_dump_args \${1+\"\$@\"} 1>&2 -+ fi -+ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} -+" -+ ;; -+ -+ *) -+ $ECHO "\ -+ if test -n \"\$lt_option_debug\"; then -+ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 -+ func_lt_dump_args \${1+\"\$@\"} 1>&2 -+ fi -+ exec \"\$progdir/\$program\" \${1+\"\$@\"} -+" -+ ;; -+ esac -+ $ECHO "\ -+ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 -+ exit 1 -+} -+ -+# A function to encapsulate launching the target application -+# Strips options in the --lt-* namespace from \$@ and -+# launches target application with the remaining arguments. -+func_exec_program () -+{ -+ for lt_wr_arg -+ do -+ case \$lt_wr_arg in -+ --lt-*) ;; -+ *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; -+ esac -+ shift -+ done -+ func_exec_program_core \${1+\"\$@\"} -+} -+ -+ # Parse options -+ func_parse_lt_options \"\$0\" \${1+\"\$@\"} - - # Find the directory that this script lives in. - thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` -@@ -3078,7 +4065,7 @@ _LTECHO_EOF' - - # relink executable if necessary - if test -n \"\$relink_command\"; then -- if relink_command_output=\`eval \"\$relink_command\" 2>&1\`; then : -+ if relink_command_output=\`eval \$relink_command 2>&1\`; then : - else - $ECHO \"\$relink_command_output\" >&2 - $RM \"\$progdir/\$file\" -@@ -3102,6 +4089,18 @@ _LTECHO_EOF' - - if test -f \"\$progdir/\$program\"; then" - -+ # fixup the dll searchpath if we need to. -+ # -+ # Fix the DLL searchpath if we need to. Do this before prepending -+ # to shlibpath, because on Windows, both are PATH and uninstalled -+ # libraries must come first. -+ if test -n "$dllsearchpath"; then -+ $ECHO "\ -+ # Add the dll search path components to the executable PATH -+ PATH=$dllsearchpath:\$PATH -+" -+ fi -+ - # Export our shlibpath_var if we have one. - if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then - $ECHO "\ -@@ -3116,35 +4115,10 @@ _LTECHO_EOF' - " - fi - -- # fixup the dll searchpath if we need to. -- if test -n "$dllsearchpath"; then -- $ECHO "\ -- # Add the dll search path components to the executable PATH -- PATH=$dllsearchpath:\$PATH --" -- fi -- - $ECHO "\ - if test \"\$libtool_execute_magic\" != \"$magic\"; then - # Run the actual program with our arguments. --" -- case $host in -- # Backslashes separate directories on plain windows -- *-*-mingw | *-*-os2* | *-cegcc*) -- $ECHO "\ -- exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} --" -- ;; -- -- *) -- $ECHO "\ -- exec \"\$progdir/\$program\" \${1+\"\$@\"} --" -- ;; -- esac -- $ECHO "\ -- \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 -- exit 1 -+ func_exec_program \${1+\"\$@\"} - fi - else - # The program doesn't exist. -@@ -3158,166 +4132,6 @@ fi\ - } - - --# func_to_host_path arg --# --# Convert paths to host format when used with build tools. 
--# Intended for use with "native" mingw (where libtool itself --# is running under the msys shell), or in the following cross- --# build environments: --# $build $host --# mingw (msys) mingw [e.g. native] --# cygwin mingw --# *nix + wine mingw --# where wine is equipped with the `winepath' executable. --# In the native mingw case, the (msys) shell automatically --# converts paths for any non-msys applications it launches, --# but that facility isn't available from inside the cwrapper. --# Similar accommodations are necessary for $host mingw and --# $build cygwin. Calling this function does no harm for other --# $host/$build combinations not listed above. --# --# ARG is the path (on $build) that should be converted to --# the proper representation for $host. The result is stored --# in $func_to_host_path_result. --func_to_host_path () --{ -- func_to_host_path_result="$1" -- if test -n "$1"; then -- case $host in -- *mingw* ) -- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' -- case $build in -- *mingw* ) # actually, msys -- # awkward: cmd appends spaces to result -- func_to_host_path_result=`( cmd //c echo "$1" ) 2>/dev/null | -- $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` -- ;; -- *cygwin* ) -- func_to_host_path_result=`cygpath -w "$1" | -- $SED -e "$lt_sed_naive_backslashify"` -- ;; -- * ) -- # Unfortunately, winepath does not exit with a non-zero -- # error code, so we are forced to check the contents of -- # stdout. On the other hand, if the command is not -- # found, the shell will set an exit code of 127 and print -- # *an error message* to stdout. So we must check for both -- # error code of zero AND non-empty stdout, which explains -- # the odd construction: -- func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` -- if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then -- func_to_host_path_result=`$ECHO "$func_to_host_path_tmp1" | -- $SED -e "$lt_sed_naive_backslashify"` -- else -- # Allow warning below. -- func_to_host_path_result= -- fi -- ;; -- esac -- if test -z "$func_to_host_path_result" ; then -- func_error "Could not determine host path corresponding to" -- func_error " \`$1'" -- func_error "Continuing, but uninstalled executables may not work." -- # Fallback: -- func_to_host_path_result="$1" -- fi -- ;; -- esac -- fi --} --# end: func_to_host_path -- --# func_to_host_pathlist arg --# --# Convert pathlists to host format when used with build tools. --# See func_to_host_path(), above. This function supports the --# following $build/$host combinations (but does no harm for --# combinations not listed here): --# $build $host --# mingw (msys) mingw [e.g. native] --# cygwin mingw --# *nix + wine mingw --# --# Path separators are also converted from $build format to --# $host format. If ARG begins or ends with a path separator --# character, it is preserved (but converted to $host format) --# on output. --# --# ARG is a pathlist (on $build) that should be converted to --# the proper representation on $host. The result is stored --# in $func_to_host_pathlist_result. --func_to_host_pathlist () --{ -- func_to_host_pathlist_result="$1" -- if test -n "$1"; then -- case $host in -- *mingw* ) -- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' -- # Remove leading and trailing path separator characters from -- # ARG. msys behavior is inconsistent here, cygpath turns them -- # into '.;' and ';.', and winepath ignores them completely. 
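Both the retired func_to_host_pathlist here and its replacement earlier in the patch ultimately hand PATH-style lists to cygpath -p on a Cygwin build host, after stripping leading and trailing separators that the converters handle inconsistently. For reference, the core conversion looks roughly like this on Cygwin (exact output is environment-dependent):

    # Illustration only; needs Cygwin's cygpath.
    pathlist=/usr/bin:/usr/local/bin
    # -p converts a whole ':'-separated list; -m keeps forward slashes.
    cygpath -m -p "$pathlist"
    # e.g. C:/cygwin64/usr/bin;C:/cygwin64/usr/local/bin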
-- func_stripname : : "$1" -- func_to_host_pathlist_tmp1=$func_stripname_result -- case $build in -- *mingw* ) # Actually, msys. -- # Awkward: cmd appends spaces to result. -- func_to_host_pathlist_result=` -- ( cmd //c echo "$func_to_host_pathlist_tmp1" ) 2>/dev/null | -- $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` -- ;; -- *cygwin* ) -- func_to_host_pathlist_result=`cygpath -w -p "$func_to_host_pathlist_tmp1" | -- $SED -e "$lt_sed_naive_backslashify"` -- ;; -- * ) -- # unfortunately, winepath doesn't convert pathlists -- func_to_host_pathlist_result="" -- func_to_host_pathlist_oldIFS=$IFS -- IFS=: -- for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do -- IFS=$func_to_host_pathlist_oldIFS -- if test -n "$func_to_host_pathlist_f" ; then -- func_to_host_path "$func_to_host_pathlist_f" -- if test -n "$func_to_host_path_result" ; then -- if test -z "$func_to_host_pathlist_result" ; then -- func_to_host_pathlist_result="$func_to_host_path_result" -- else -- func_append func_to_host_pathlist_result ";$func_to_host_path_result" -- fi -- fi -- fi -- done -- IFS=$func_to_host_pathlist_oldIFS -- ;; -- esac -- if test -z "$func_to_host_pathlist_result"; then -- func_error "Could not determine the host path(s) corresponding to" -- func_error " \`$1'" -- func_error "Continuing, but uninstalled executables may not work." -- # Fallback. This may break if $1 contains DOS-style drive -- # specifications. The fix is not to complicate the expression -- # below, but for the user to provide a working wine installation -- # with winepath so that path translation in the cross-to-mingw -- # case works properly. -- lt_replace_pathsep_nix_to_dos="s|:|;|g" -- func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ -- $SED -e "$lt_replace_pathsep_nix_to_dos"` -- fi -- # Now, add the leading and trailing path separators back -- case "$1" in -- :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" -- ;; -- esac -- case "$1" in -- *: ) func_append func_to_host_pathlist_result ";" -- ;; -- esac -- ;; -- esac -- fi --} --# end: func_to_host_pathlist -- - # func_emit_cwrapperexe_src - # emit the source code for a wrapper executable on stdout - # Must ONLY be called from within func_mode_link because -@@ -3334,10 +4148,6 @@ func_emit_cwrapperexe_src () - - This wrapper executable should never be moved out of the build directory. - If it is, it will not operate correctly. -- -- Currently, it simply execs the wrapper *script* "$SHELL $output", -- but could eventually absorb all of the scripts functionality and -- exec $objdir/$outputname directly. - */ - EOF - cat <<"EOF" -@@ -3462,22 +4272,13 @@ int setenv (const char *, const char *, int); - if (stale) { free ((void *) stale); stale = 0; } \ - } while (0) - --#undef LTWRAPPER_DEBUGPRINTF --#if defined LT_DEBUGWRAPPER --# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args --static void --ltwrapper_debugprintf (const char *fmt, ...) 
--{ -- va_list args; -- va_start (args, fmt); -- (void) vfprintf (stderr, fmt, args); -- va_end (args); --} -+#if defined(LT_DEBUGWRAPPER) -+static int lt_debug = 1; - #else --# define LTWRAPPER_DEBUGPRINTF(args) -+static int lt_debug = 0; - #endif - --const char *program_name = NULL; -+const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ - - void *xmalloc (size_t num); - char *xstrdup (const char *string); -@@ -3487,7 +4288,10 @@ char *chase_symlinks (const char *pathspec); - int make_executable (const char *path); - int check_executable (const char *path); - char *strendzap (char *str, const char *pat); --void lt_fatal (const char *message, ...); -+void lt_debugprintf (const char *file, int line, const char *fmt, ...); -+void lt_fatal (const char *file, int line, const char *message, ...); -+static const char *nonnull (const char *s); -+static const char *nonempty (const char *s); - void lt_setenv (const char *name, const char *value); - char *lt_extend_str (const char *orig_value, const char *add, int to_end); - void lt_update_exe_path (const char *name, const char *value); -@@ -3497,14 +4301,14 @@ void lt_dump_script (FILE *f); - EOF - - cat <"))); -+ lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n", -+ nonnull (lt_argv_zero)); - for (i = 0; i < newargc; i++) - { -- LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); -+ lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n", -+ i, nonnull (newargz[i])); - } - - EOF -@@ -3706,7 +4529,9 @@ EOF - if (rval == -1) - { - /* failed to start process */ -- LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); -+ lt_debugprintf (__FILE__, __LINE__, -+ "(main) failed to launch target \"%s\": %s\n", -+ lt_argv_zero, nonnull (strerror (errno))); - return 127; - } - return rval; -@@ -3728,7 +4553,7 @@ xmalloc (size_t num) - { - void *p = (void *) malloc (num); - if (!p) -- lt_fatal ("Memory exhausted"); -+ lt_fatal (__FILE__, __LINE__, "memory exhausted"); - - return p; - } -@@ -3762,8 +4587,8 @@ check_executable (const char *path) - { - struct stat st; - -- LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", -- path ? (*path ? path : "EMPTY!") : "NULL!")); -+ lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n", -+ nonempty (path)); - if ((!path) || (!*path)) - return 0; - -@@ -3780,8 +4605,8 @@ make_executable (const char *path) - int rval = 0; - struct stat st; - -- LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", -- path ? (*path ? path : "EMPTY!") : "NULL!")); -+ lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", -+ nonempty (path)); - if ((!path) || (!*path)) - return 0; - -@@ -3807,8 +4632,8 @@ find_executable (const char *wrapper) - int tmp_len; - char *concat_name; - -- LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", -- wrapper ? (*wrapper ? 
wrapper : "EMPTY!") : "NULL!")); -+ lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", -+ nonempty (wrapper)); - - if ((wrapper == NULL) || (*wrapper == '\0')) - return NULL; -@@ -3861,7 +4686,8 @@ find_executable (const char *wrapper) - { - /* empty path: current directory */ - if (getcwd (tmp, LT_PATHMAX) == NULL) -- lt_fatal ("getcwd failed"); -+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", -+ nonnull (strerror (errno))); - tmp_len = strlen (tmp); - concat_name = - XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); -@@ -3886,7 +4712,8 @@ find_executable (const char *wrapper) - } - /* Relative path | not found in path: prepend cwd */ - if (getcwd (tmp, LT_PATHMAX) == NULL) -- lt_fatal ("getcwd failed"); -+ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", -+ nonnull (strerror (errno))); - tmp_len = strlen (tmp); - concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); - memcpy (concat_name, tmp, tmp_len); -@@ -3912,8 +4739,9 @@ chase_symlinks (const char *pathspec) - int has_symlinks = 0; - while (strlen (tmp_pathspec) && !has_symlinks) - { -- LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", -- tmp_pathspec)); -+ lt_debugprintf (__FILE__, __LINE__, -+ "checking path component for symlinks: %s\n", -+ tmp_pathspec); - if (lstat (tmp_pathspec, &s) == 0) - { - if (S_ISLNK (s.st_mode) != 0) -@@ -3935,8 +4763,9 @@ chase_symlinks (const char *pathspec) - } - else - { -- char *errstr = strerror (errno); -- lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); -+ lt_fatal (__FILE__, __LINE__, -+ "error accessing file \"%s\": %s", -+ tmp_pathspec, nonnull (strerror (errno))); - } - } - XFREE (tmp_pathspec); -@@ -3949,7 +4778,8 @@ chase_symlinks (const char *pathspec) - tmp_pathspec = realpath (pathspec, buf); - if (tmp_pathspec == 0) - { -- lt_fatal ("Could not follow symlinks for %s", pathspec); -+ lt_fatal (__FILE__, __LINE__, -+ "could not follow symlinks for %s", pathspec); - } - return xstrdup (tmp_pathspec); - #endif -@@ -3975,11 +4805,25 @@ strendzap (char *str, const char *pat) - return str; - } - -+void -+lt_debugprintf (const char *file, int line, const char *fmt, ...) -+{ -+ va_list args; -+ if (lt_debug) -+ { -+ (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); -+ va_start (args, fmt); -+ (void) vfprintf (stderr, fmt, args); -+ va_end (args); -+ } -+} -+ - static void --lt_error_core (int exit_status, const char *mode, -+lt_error_core (int exit_status, const char *file, -+ int line, const char *mode, - const char *message, va_list ap) - { -- fprintf (stderr, "%s: %s: ", program_name, mode); -+ fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); - vfprintf (stderr, message, ap); - fprintf (stderr, ".\n"); - -@@ -3988,20 +4832,32 @@ lt_error_core (int exit_status, const char *mode, - } - - void --lt_fatal (const char *message, ...) -+lt_fatal (const char *file, int line, const char *message, ...) - { - va_list ap; - va_start (ap, message); -- lt_error_core (EXIT_FAILURE, "FATAL", message, ap); -+ lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); - va_end (ap); - } - -+static const char * -+nonnull (const char *s) -+{ -+ return s ? s : "(null)"; -+} -+ -+static const char * -+nonempty (const char *s) -+{ -+ return (s && !*s) ? "(empty)" : nonnull (s); -+} -+ - void - lt_setenv (const char *name, const char *value) - { -- LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", -- (name ? name : ""), -- (value ? 
value : ""))); -+ lt_debugprintf (__FILE__, __LINE__, -+ "(lt_setenv) setting '%s' to '%s'\n", -+ nonnull (name), nonnull (value)); - { - #ifdef HAVE_SETENV - /* always make a copy, for consistency with !HAVE_SETENV */ -@@ -4049,9 +4905,9 @@ lt_extend_str (const char *orig_value, const char *add, int to_end) - void - lt_update_exe_path (const char *name, const char *value) - { -- LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", -- (name ? name : ""), -- (value ? value : ""))); -+ lt_debugprintf (__FILE__, __LINE__, -+ "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", -+ nonnull (name), nonnull (value)); - - if (name && *name && value && *value) - { -@@ -4070,9 +4926,9 @@ lt_update_exe_path (const char *name, const char *value) - void - lt_update_lib_path (const char *name, const char *value) - { -- LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", -- (name ? name : ""), -- (value ? value : ""))); -+ lt_debugprintf (__FILE__, __LINE__, -+ "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", -+ nonnull (name), nonnull (value)); - - if (name && *name && value && *value) - { -@@ -4222,7 +5078,7 @@ EOF - func_win32_import_lib_p () - { - $opt_debug -- case `eval "$file_magic_cmd \"\$1\" 2>/dev/null" | $SED -e 10q` in -+ case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in - *import*) : ;; - *) false ;; - esac -@@ -4401,9 +5257,9 @@ func_mode_link () - ;; - *) - if test "$prev" = dlfiles; then -- dlfiles="$dlfiles $arg" -+ func_append dlfiles " $arg" - else -- dlprefiles="$dlprefiles $arg" -+ func_append dlprefiles " $arg" - fi - prev= - continue -@@ -4427,7 +5283,7 @@ func_mode_link () - *-*-darwin*) - case "$deplibs " in - *" $qarg.ltframework "*) ;; -- *) deplibs="$deplibs $qarg.ltframework" # this is fixed later -+ *) func_append deplibs " $qarg.ltframework" # this is fixed later - ;; - esac - ;; -@@ -4446,7 +5302,7 @@ func_mode_link () - moreargs= - for fil in `cat "$save_arg"` - do --# moreargs="$moreargs $fil" -+# func_append moreargs " $fil" - arg=$fil - # A libtool-controlled object. - -@@ -4475,7 +5331,7 @@ func_mode_link () - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then -- dlfiles="$dlfiles $pic_object" -+ func_append dlfiles " $pic_object" - prev= - continue - else -@@ -4487,7 +5343,7 @@ func_mode_link () - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. 
-- dlprefiles="$dlprefiles $pic_object" -+ func_append dlprefiles " $pic_object" - prev= - fi - -@@ -4557,12 +5413,12 @@ func_mode_link () - if test "$prev" = rpath; then - case "$rpath " in - *" $arg "*) ;; -- *) rpath="$rpath $arg" ;; -+ *) func_append rpath " $arg" ;; - esac - else - case "$xrpath " in - *" $arg "*) ;; -- *) xrpath="$xrpath $arg" ;; -+ *) func_append xrpath " $arg" ;; - esac - fi - prev= -@@ -4574,28 +5430,28 @@ func_mode_link () - continue - ;; - weak) -- weak_libs="$weak_libs $arg" -+ func_append weak_libs " $arg" - prev= - continue - ;; - xcclinker) -- linker_flags="$linker_flags $qarg" -- compiler_flags="$compiler_flags $qarg" -+ func_append linker_flags " $qarg" -+ func_append compiler_flags " $qarg" - prev= - func_append compile_command " $qarg" - func_append finalize_command " $qarg" - continue - ;; - xcompiler) -- compiler_flags="$compiler_flags $qarg" -+ func_append compiler_flags " $qarg" - prev= - func_append compile_command " $qarg" - func_append finalize_command " $qarg" - continue - ;; - xlinker) -- linker_flags="$linker_flags $qarg" -- compiler_flags="$compiler_flags $wl$qarg" -+ func_append linker_flags " $qarg" -+ func_append compiler_flags " $wl$qarg" - prev= - func_append compile_command " $wl$qarg" - func_append finalize_command " $wl$qarg" -@@ -4686,15 +5542,16 @@ func_mode_link () - ;; - - -L*) -- func_stripname '-L' '' "$arg" -- dir=$func_stripname_result -- if test -z "$dir"; then -+ func_stripname "-L" '' "$arg" -+ if test -z "$func_stripname_result"; then - if test "$#" -gt 0; then - func_fatal_error "require no space between \`-L' and \`$1'" - else - func_fatal_error "need path for \`-L' option" - fi - fi -+ func_resolve_sysroot "$func_stripname_result" -+ dir=$func_resolve_sysroot_result - # We need an absolute path. 
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; -@@ -4706,10 +5563,16 @@ func_mode_link () - ;; - esac - case "$deplibs " in -- *" -L$dir "*) ;; -+ *" -L$dir "* | *" $arg "*) -+ # Will only happen for absolute or sysroot arguments -+ ;; - *) -- deplibs="$deplibs -L$dir" -- lib_search_path="$lib_search_path $dir" -+ # Preserve sysroot, but never include relative directories -+ case $dir in -+ [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; -+ *) func_append deplibs " -L$dir" ;; -+ esac -+ func_append lib_search_path " $dir" - ;; - esac - case $host in -@@ -4718,12 +5581,12 @@ func_mode_link () - case :$dllsearchpath: in - *":$dir:"*) ;; - ::) dllsearchpath=$dir;; -- *) dllsearchpath="$dllsearchpath:$dir";; -+ *) func_append dllsearchpath ":$dir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - ::) dllsearchpath=$testbindir;; -- *) dllsearchpath="$dllsearchpath:$testbindir";; -+ *) func_append dllsearchpath ":$testbindir";; - esac - ;; - esac -@@ -4747,7 +5610,7 @@ func_mode_link () - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C and math libraries are in the System framework -- deplibs="$deplibs System.ltframework" -+ func_append deplibs " System.ltframework" - continue - ;; - *-*-sco3.2v5* | *-*-sco5v6*) -@@ -4758,9 +5621,6 @@ func_mode_link () - # Compiler inserts libc in the correct place for threads to work - test "X$arg" = "X-lc" && continue - ;; -- *-*-linux*) -- test "X$arg" = "X-lc" && continue -- ;; - esac - elif test "X$arg" = "X-lc_r"; then - case $host in -@@ -4770,7 +5630,7 @@ func_mode_link () - ;; - esac - fi -- deplibs="$deplibs $arg" -+ func_append deplibs " $arg" - continue - ;; - -@@ -4782,8 +5642,8 @@ func_mode_link () - # Tru64 UNIX uses -model [arg] to determine the layout of C++ - # classes, name mangling, and exception handling. - # Darwin uses the -arch flag to determine output architecture. -- -model|-arch|-isysroot) -- compiler_flags="$compiler_flags $arg" -+ -model|-arch|-isysroot|--sysroot) -+ func_append compiler_flags " $arg" - func_append compile_command " $arg" - func_append finalize_command " $arg" - prev=xcompiler -@@ -4791,12 +5651,12 @@ func_mode_link () - ;; - - -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) -- compiler_flags="$compiler_flags $arg" -+ func_append compiler_flags " $arg" - func_append compile_command " $arg" - func_append finalize_command " $arg" - case "$new_inherited_linker_flags " in - *" $arg "*) ;; -- * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; -+ * ) func_append new_inherited_linker_flags " $arg" ;; - esac - continue - ;; -@@ -4863,13 +5723,17 @@ func_mode_link () - # We need an absolute path. 
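Several hunks in this region, like the dllsearchpath one above, grow a colon-separated list only when the entry is not already present, by wrapping both the list and the candidate in ':' before the case match. The idiom in isolation, with invented names:

    # Illustration only: duplicate-free growth of a ':'-separated list.
    add_searchdir () {
      case :$searchpath: in
        *":$1:"*) ;;                             # already present
        ::)       searchpath=$1 ;;               # list was empty
        *)        searchpath=$searchpath:$1 ;;
      esac
    }

    searchpath=
    add_searchdir /opt/app/bin
    add_searchdir /usr/bin
    add_searchdir /opt/app/bin                   # ignored as a duplicate
    echo "$searchpath"                           # -> /opt/app/bin:/usr/bin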
- case $dir in - [\\/]* | [A-Za-z]:[\\/]*) ;; -+ =*) -+ func_stripname '=' '' "$dir" -+ dir=$lt_sysroot$func_stripname_result -+ ;; - *) - func_fatal_error "only absolute run-paths are allowed" - ;; - esac - case "$xrpath " in - *" $dir "*) ;; -- *) xrpath="$xrpath $dir" ;; -+ *) func_append xrpath " $dir" ;; - esac - continue - ;; -@@ -4922,8 +5786,8 @@ func_mode_link () - for flag in $args; do - IFS="$save_ifs" - func_quote_for_eval "$flag" -- arg="$arg $func_quote_for_eval_result" -- compiler_flags="$compiler_flags $func_quote_for_eval_result" -+ func_append arg " $func_quote_for_eval_result" -+ func_append compiler_flags " $func_quote_for_eval_result" - done - IFS="$save_ifs" - func_stripname ' ' '' "$arg" -@@ -4938,9 +5802,9 @@ func_mode_link () - for flag in $args; do - IFS="$save_ifs" - func_quote_for_eval "$flag" -- arg="$arg $wl$func_quote_for_eval_result" -- compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" -- linker_flags="$linker_flags $func_quote_for_eval_result" -+ func_append arg " $wl$func_quote_for_eval_result" -+ func_append compiler_flags " $wl$func_quote_for_eval_result" -+ func_append linker_flags " $func_quote_for_eval_result" - done - IFS="$save_ifs" - func_stripname ' ' '' "$arg" -@@ -4968,24 +5832,27 @@ func_mode_link () - arg="$func_quote_for_eval_result" - ;; - -- # -64, -mips[0-9] enable 64-bit mode on the SGI compiler -- # -r[0-9][0-9]* specifies the processor on the SGI compiler -- # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler -- # +DA*, +DD* enable 64-bit mode on the HP compiler -- # -q* pass through compiler args for the IBM compiler -- # -m*, -t[45]*, -txscale* pass through architecture-specific -- # compiler args for GCC -- # -F/path gives path to uninstalled frameworks, gcc on darwin -- # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC -- # @file GCC response files -- # -tp=* Portland pgcc target processor selection -+ # Flags to be passed through unchanged, with rationale: -+ # -64, -mips[0-9] enable 64-bit mode for the SGI compiler -+ # -r[0-9][0-9]* specify processor for the SGI compiler -+ # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler -+ # +DA*, +DD* enable 64-bit mode for the HP compiler -+ # -q* compiler args for the IBM compiler -+ # -m*, -t[45]*, -txscale* architecture-specific flags for GCC -+ # -F/path path to uninstalled frameworks, gcc on darwin -+ # -p, -pg, --coverage, -fprofile-* profiling flags for GCC -+ # @file GCC response files -+ # -tp=* Portland pgcc target processor selection -+ # --sysroot=* for sysroot support -+ # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization - -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -- -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*) -+ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -+ -O*|-flto*|-fwhopr*|-fuse-linker-plugin) - func_quote_for_eval "$arg" - arg="$func_quote_for_eval_result" - func_append compile_command " $arg" - func_append finalize_command " $arg" -- compiler_flags="$compiler_flags $arg" -+ func_append compiler_flags " $arg" - continue - ;; - -@@ -4997,7 +5864,7 @@ func_mode_link () - - *.$objext) - # A standard object. 
-- objs="$objs $arg" -+ func_append objs " $arg" - ;; - - *.lo) -@@ -5028,7 +5895,7 @@ func_mode_link () - - if test "$prev" = dlfiles; then - if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then -- dlfiles="$dlfiles $pic_object" -+ func_append dlfiles " $pic_object" - prev= - continue - else -@@ -5040,7 +5907,7 @@ func_mode_link () - # CHECK ME: I think I busted this. -Ossama - if test "$prev" = dlprefiles; then - # Preload the old-style object. -- dlprefiles="$dlprefiles $pic_object" -+ func_append dlprefiles " $pic_object" - prev= - fi - -@@ -5085,24 +5952,25 @@ func_mode_link () - - *.$libext) - # An archive. -- deplibs="$deplibs $arg" -- old_deplibs="$old_deplibs $arg" -+ func_append deplibs " $arg" -+ func_append old_deplibs " $arg" - continue - ;; - - *.la) - # A libtool-controlled library. - -+ func_resolve_sysroot "$arg" - if test "$prev" = dlfiles; then - # This library was specified with -dlopen. -- dlfiles="$dlfiles $arg" -+ func_append dlfiles " $func_resolve_sysroot_result" - prev= - elif test "$prev" = dlprefiles; then - # The library was specified with -dlpreopen. -- dlprefiles="$dlprefiles $arg" -+ func_append dlprefiles " $func_resolve_sysroot_result" - prev= - else -- deplibs="$deplibs $arg" -+ func_append deplibs " $func_resolve_sysroot_result" - fi - continue - ;; -@@ -5127,7 +5995,7 @@ func_mode_link () - func_fatal_help "the \`$prevarg' option requires an argument" - - if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then -- eval "arg=\"$export_dynamic_flag_spec\"" -+ eval arg=\"$export_dynamic_flag_spec\" - func_append compile_command " $arg" - func_append finalize_command " $arg" - fi -@@ -5144,11 +6012,13 @@ func_mode_link () - else - shlib_search_path= - fi -- eval "sys_lib_search_path=\"$sys_lib_search_path_spec\"" -- eval "sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"" -+ eval sys_lib_search_path=\"$sys_lib_search_path_spec\" -+ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" - - func_dirname "$output" "/" "" - output_objdir="$func_dirname_result$objdir" -+ func_to_tool_file "$output_objdir/" -+ tool_output_objdir=$func_to_tool_file_result - # Create the object directory. - func_mkdir_p "$output_objdir" - -@@ -5169,12 +6039,12 @@ func_mode_link () - # Find all interdependent deplibs by searching for libraries - # that are linked more than once (e.g. 
-la -lb -la) - for deplib in $deplibs; do -- if $opt_duplicate_deps ; then -+ if $opt_preserve_dup_deps ; then - case "$libs " in -- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; -+ *" $deplib "*) func_append specialdeplibs " $deplib" ;; - esac - fi -- libs="$libs $deplib" -+ func_append libs " $deplib" - done - - if test "$linkmode" = lib; then -@@ -5187,9 +6057,9 @@ func_mode_link () - if $opt_duplicate_compiler_generated_deps; then - for pre_post_dep in $predeps $postdeps; do - case "$pre_post_deps " in -- *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; -+ *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; - esac -- pre_post_deps="$pre_post_deps $pre_post_dep" -+ func_append pre_post_deps " $pre_post_dep" - done - fi - pre_post_deps= -@@ -5256,8 +6126,9 @@ func_mode_link () - for lib in $dlprefiles; do - # Ignore non-libtool-libs - dependency_libs= -+ func_resolve_sysroot "$lib" - case $lib in -- *.la) func_source "$lib" ;; -+ *.la) func_source "$func_resolve_sysroot_result" ;; - esac - - # Collect preopened libtool deplibs, except any this library -@@ -5267,7 +6138,7 @@ func_mode_link () - deplib_base=$func_basename_result - case " $weak_libs " in - *" $deplib_base "*) ;; -- *) deplibs="$deplibs $deplib" ;; -+ *) func_append deplibs " $deplib" ;; - esac - done - done -@@ -5288,11 +6159,11 @@ func_mode_link () - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else -- compiler_flags="$compiler_flags $deplib" -+ func_append compiler_flags " $deplib" - if test "$linkmode" = lib ; then - case "$new_inherited_linker_flags " in - *" $deplib "*) ;; -- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; -+ * ) func_append new_inherited_linker_flags " $deplib" ;; - esac - fi - fi -@@ -5377,7 +6248,7 @@ func_mode_link () - if test "$linkmode" = lib ; then - case "$new_inherited_linker_flags " in - *" $deplib "*) ;; -- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; -+ * ) func_append new_inherited_linker_flags " $deplib" ;; - esac - fi - fi -@@ -5390,7 +6261,8 @@ func_mode_link () - test "$pass" = conv && continue - newdependency_libs="$deplib $newdependency_libs" - func_stripname '-L' '' "$deplib" -- newlib_search_path="$newlib_search_path $func_stripname_result" -+ func_resolve_sysroot "$func_stripname_result" -+ func_append newlib_search_path " $func_resolve_sysroot_result" - ;; - prog) - if test "$pass" = conv; then -@@ -5404,7 +6276,8 @@ func_mode_link () - finalize_deplibs="$deplib $finalize_deplibs" - fi - func_stripname '-L' '' "$deplib" -- newlib_search_path="$newlib_search_path $func_stripname_result" -+ func_resolve_sysroot "$func_stripname_result" -+ func_append newlib_search_path " $func_resolve_sysroot_result" - ;; - *) - func_warning "\`-L' is ignored for archives/objects" -@@ -5415,17 +6288,21 @@ func_mode_link () - -R*) - if test "$pass" = link; then - func_stripname '-R' '' "$deplib" -- dir=$func_stripname_result -+ func_resolve_sysroot "$func_stripname_result" -+ dir=$func_resolve_sysroot_result - # Make sure the xrpath contains only unique directories. 
- case "$xrpath " in - *" $dir "*) ;; -- *) xrpath="$xrpath $dir" ;; -+ *) func_append xrpath " $dir" ;; - esac - fi - deplibs="$deplib $deplibs" - continue - ;; -- *.la) lib="$deplib" ;; -+ *.la) -+ func_resolve_sysroot "$deplib" -+ lib=$func_resolve_sysroot_result -+ ;; - *.$libext) - if test "$pass" = conv; then - deplibs="$deplib $deplibs" -@@ -5488,11 +6365,11 @@ func_mode_link () - if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then - # If there is no dlopen support or we're linking statically, - # we need to preload. -- newdlprefiles="$newdlprefiles $deplib" -+ func_append newdlprefiles " $deplib" - compile_deplibs="$deplib $compile_deplibs" - finalize_deplibs="$deplib $finalize_deplibs" - else -- newdlfiles="$newdlfiles $deplib" -+ func_append newdlfiles " $deplib" - fi - fi - continue -@@ -5538,7 +6415,7 @@ func_mode_link () - for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do - case " $new_inherited_linker_flags " in - *" $tmp_inherited_linker_flag "*) ;; -- *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; -+ *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; - esac - done - fi -@@ -5546,8 +6423,8 @@ func_mode_link () - if test "$linkmode,$pass" = "lib,link" || - test "$linkmode,$pass" = "prog,scan" || - { test "$linkmode" != prog && test "$linkmode" != lib; }; then -- test -n "$dlopen" && dlfiles="$dlfiles $dlopen" -- test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" -+ test -n "$dlopen" && func_append dlfiles " $dlopen" -+ test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" - fi - - if test "$pass" = conv; then -@@ -5558,20 +6435,20 @@ func_mode_link () - func_fatal_error "cannot find name of link library for \`$lib'" - fi - # It is a libtool convenience library, so add in its objects. -- convenience="$convenience $ladir/$objdir/$old_library" -- old_convenience="$old_convenience $ladir/$objdir/$old_library" -+ func_append convenience " $ladir/$objdir/$old_library" -+ func_append old_convenience " $ladir/$objdir/$old_library" - elif test "$linkmode" != prog && test "$linkmode" != lib; then - func_fatal_error "\`$lib' is not a convenience library" - fi - tmp_libs= - for deplib in $dependency_libs; do - deplibs="$deplib $deplibs" -- if $opt_duplicate_deps ; then -+ if $opt_preserve_dup_deps ; then - case "$tmp_libs " in -- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; -+ *" $deplib "*) func_append specialdeplibs " $deplib" ;; - esac - fi -- tmp_libs="$tmp_libs $deplib" -+ func_append tmp_libs " $deplib" - done - continue - fi # $pass = conv -@@ -5579,9 +6456,15 @@ func_mode_link () - - # Get the name of the library we link against. - linklib= -- for l in $old_library $library_names; do -- linklib="$l" -- done -+ if test -n "$old_library" && -+ { test "$prefer_static_libs" = yes || -+ test "$prefer_static_libs,$installed" = "built,no"; }; then -+ linklib=$old_library -+ else -+ for l in $old_library $library_names; do -+ linklib="$l" -+ done -+ fi - if test -z "$linklib"; then - func_fatal_error "cannot find name of link library for \`$lib'" - fi -@@ -5598,9 +6481,9 @@ func_mode_link () - # statically, we need to preload. We also need to preload any - # dependent libraries so libltdl's deplib preloader doesn't - # bomb out in the load deplibs phase. 
-- dlprefiles="$dlprefiles $lib $dependency_libs" -+ func_append dlprefiles " $lib $dependency_libs" - else -- newdlfiles="$newdlfiles $lib" -+ func_append newdlfiles " $lib" - fi - continue - fi # $pass = dlopen -@@ -5622,14 +6505,14 @@ func_mode_link () - - # Find the relevant object directory and library name. - if test "X$installed" = Xyes; then -- if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then -+ if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then - func_warning "library \`$lib' was moved." - dir="$ladir" - absdir="$abs_ladir" - libdir="$abs_ladir" - else -- dir="$libdir" -- absdir="$libdir" -+ dir="$lt_sysroot$libdir" -+ absdir="$lt_sysroot$libdir" - fi - test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes - else -@@ -5637,12 +6520,12 @@ func_mode_link () - dir="$ladir" - absdir="$abs_ladir" - # Remove this search path later -- notinst_path="$notinst_path $abs_ladir" -+ func_append notinst_path " $abs_ladir" - else - dir="$ladir/$objdir" - absdir="$abs_ladir/$objdir" - # Remove this search path later -- notinst_path="$notinst_path $abs_ladir" -+ func_append notinst_path " $abs_ladir" - fi - fi # $installed = yes - func_stripname 'lib' '.la' "$laname" -@@ -5653,20 +6536,46 @@ func_mode_link () - if test -z "$libdir" && test "$linkmode" = prog; then - func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" - fi -- # Prefer using a static library (so that no silly _DYNAMIC symbols -- # are required to link). -- if test -n "$old_library"; then -- newdlprefiles="$newdlprefiles $dir/$old_library" -- # Keep a list of preopened convenience libraries to check -- # that they are being used correctly in the link pass. -- test -z "$libdir" && \ -- dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" -- # Otherwise, use the dlname, so that lt_dlopen finds it. -- elif test -n "$dlname"; then -- newdlprefiles="$newdlprefiles $dir/$dlname" -- else -- newdlprefiles="$newdlprefiles $dir/$linklib" -- fi -+ case "$host" in -+ # special handling for platforms with PE-DLLs. -+ *cygwin* | *mingw* | *cegcc* ) -+ # Linker will automatically link against shared library if both -+ # static and shared are present. Therefore, ensure we extract -+ # symbols from the import library if a shared library is present -+ # (otherwise, the dlopen module name will be incorrect). We do -+ # this by putting the import library name into $newdlprefiles. -+ # We recover the dlopen module name by 'saving' the la file -+ # name in a special purpose variable, and (later) extracting the -+ # dlname from the la file. -+ if test -n "$dlname"; then -+ func_tr_sh "$dir/$linklib" -+ eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" -+ func_append newdlprefiles " $dir/$linklib" -+ else -+ func_append newdlprefiles " $dir/$old_library" -+ # Keep a list of preopened convenience libraries to check -+ # that they are being used correctly in the link pass. -+ test -z "$libdir" && \ -+ func_append dlpreconveniencelibs " $dir/$old_library" -+ fi -+ ;; -+ * ) -+ # Prefer using a static library (so that no silly _DYNAMIC symbols -+ # are required to link). -+ if test -n "$old_library"; then -+ func_append newdlprefiles " $dir/$old_library" -+ # Keep a list of preopened convenience libraries to check -+ # that they are being used correctly in the link pass. -+ test -z "$libdir" && \ -+ func_append dlpreconveniencelibs " $dir/$old_library" -+ # Otherwise, use the dlname, so that lt_dlopen finds it. 
-+ elif test -n "$dlname"; then -+ func_append newdlprefiles " $dir/$dlname" -+ else -+ func_append newdlprefiles " $dir/$linklib" -+ fi -+ ;; -+ esac - fi # $pass = dlpreopen - - if test -z "$libdir"; then -@@ -5684,7 +6593,7 @@ func_mode_link () - - - if test "$linkmode" = prog && test "$pass" != link; then -- newlib_search_path="$newlib_search_path $ladir" -+ func_append newlib_search_path " $ladir" - deplibs="$lib $deplibs" - - linkalldeplibs=no -@@ -5697,7 +6606,8 @@ func_mode_link () - for deplib in $dependency_libs; do - case $deplib in - -L*) func_stripname '-L' '' "$deplib" -- newlib_search_path="$newlib_search_path $func_stripname_result" -+ func_resolve_sysroot "$func_stripname_result" -+ func_append newlib_search_path " $func_resolve_sysroot_result" - ;; - esac - # Need to link against all dependency_libs? -@@ -5708,12 +6618,12 @@ func_mode_link () - # or/and link against static libraries - newdependency_libs="$deplib $newdependency_libs" - fi -- if $opt_duplicate_deps ; then -+ if $opt_preserve_dup_deps ; then - case "$tmp_libs " in -- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; -+ *" $deplib "*) func_append specialdeplibs " $deplib" ;; - esac - fi -- tmp_libs="$tmp_libs $deplib" -+ func_append tmp_libs " $deplib" - done # for deplib - continue - fi # $linkmode = prog... -@@ -5728,7 +6638,7 @@ func_mode_link () - # Make sure the rpath contains only unique directories. - case "$temp_rpath:" in - *"$absdir:"*) ;; -- *) temp_rpath="$temp_rpath$absdir:" ;; -+ *) func_append temp_rpath "$absdir:" ;; - esac - fi - -@@ -5740,7 +6650,7 @@ func_mode_link () - *) - case "$compile_rpath " in - *" $absdir "*) ;; -- *) compile_rpath="$compile_rpath $absdir" -+ *) func_append compile_rpath " $absdir" ;; - esac - ;; - esac -@@ -5749,7 +6659,7 @@ func_mode_link () - *) - case "$finalize_rpath " in - *" $libdir "*) ;; -- *) finalize_rpath="$finalize_rpath $libdir" -+ *) func_append finalize_rpath " $libdir" ;; - esac - ;; - esac -@@ -5774,12 +6684,12 @@ func_mode_link () - case $host in - *cygwin* | *mingw* | *cegcc*) - # No point in relinking DLLs because paths are not encoded -- notinst_deplibs="$notinst_deplibs $lib" -+ func_append notinst_deplibs " $lib" - need_relink=no - ;; - *) - if test "$installed" = no; then -- notinst_deplibs="$notinst_deplibs $lib" -+ func_append notinst_deplibs " $lib" - need_relink=yes - fi - ;; -@@ -5814,7 +6724,7 @@ func_mode_link () - *) - case "$compile_rpath " in - *" $absdir "*) ;; -- *) compile_rpath="$compile_rpath $absdir" -+ *) func_append compile_rpath " $absdir" ;; - esac - ;; - esac -@@ -5823,7 +6733,7 @@ func_mode_link () - *) - case "$finalize_rpath " in - *" $libdir "*) ;; -- *) finalize_rpath="$finalize_rpath $libdir" -+ *) func_append finalize_rpath " $libdir" ;; - esac - ;; - esac -@@ -5835,7 +6745,7 @@ func_mode_link () - shift - realname="$1" - shift -- eval "libname=\"$libname_spec\"" -+ libname=`eval "\\$ECHO \"$libname_spec\""` - # use dlname if we got it. it's perfectly good, no? 
- if test -n "$dlname"; then - soname="$dlname" -@@ -5848,7 +6758,7 @@ func_mode_link () - versuffix="-$major" - ;; - esac -- eval "soname=\"$soname_spec\"" -+ eval soname=\"$soname_spec\" - else - soname="$realname" - fi -@@ -5877,7 +6787,7 @@ func_mode_link () - linklib=$newlib - fi # test -n "$old_archive_from_expsyms_cmds" - -- if test "$linkmode" = prog || test "$mode" != relink; then -+ if test "$linkmode" = prog || test "$opt_mode" != relink; then - add_shlibpath= - add_dir= - add= -@@ -5933,7 +6843,7 @@ func_mode_link () - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) -- add_dir="$add_dir -L$inst_prefix_dir$libdir" -+ func_append add_dir " -L$inst_prefix_dir$libdir" - ;; - esac - fi -@@ -5955,7 +6865,7 @@ func_mode_link () - if test -n "$add_shlibpath"; then - case :$compile_shlibpath: in - *":$add_shlibpath:"*) ;; -- *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; -+ *) func_append compile_shlibpath "$add_shlibpath:" ;; - esac - fi - if test "$linkmode" = prog; then -@@ -5969,13 +6879,13 @@ func_mode_link () - test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; -- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; -+ *) func_append finalize_shlibpath "$libdir:" ;; - esac - fi - fi - fi - -- if test "$linkmode" = prog || test "$mode" = relink; then -+ if test "$linkmode" = prog || test "$opt_mode" = relink; then - add_shlibpath= - add_dir= - add= -@@ -5989,7 +6899,7 @@ func_mode_link () - elif test "$hardcode_shlibpath_var" = yes; then - case :$finalize_shlibpath: in - *":$libdir:"*) ;; -- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; -+ *) func_append finalize_shlibpath "$libdir:" ;; - esac - add="-l$name" - elif test "$hardcode_automatic" = yes; then -@@ -6001,12 +6911,12 @@ func_mode_link () - fi - else - # We cannot seem to hardcode it, guess we'll fake it. -- add_dir="-L$libdir" -+ add_dir="-L$lt_sysroot$libdir" - # Try looking first in the location we're being installed to. - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) -- add_dir="$add_dir -L$inst_prefix_dir$libdir" -+ func_append add_dir " -L$inst_prefix_dir$libdir" - ;; - esac - fi -@@ -6083,27 +6993,33 @@ func_mode_link () - temp_xrpath=$func_stripname_result - case " $xrpath " in - *" $temp_xrpath "*) ;; -- *) xrpath="$xrpath $temp_xrpath";; -+ *) func_append xrpath " $temp_xrpath";; - esac;; -- *) temp_deplibs="$temp_deplibs $libdir";; -+ *) func_append temp_deplibs " $libdir";; - esac - done - dependency_libs="$temp_deplibs" - fi - -- newlib_search_path="$newlib_search_path $absdir" -+ func_append newlib_search_path " $absdir" - # Link against this library - test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" - # ... 
and its dependency_libs - tmp_libs= - for deplib in $dependency_libs; do - newdependency_libs="$deplib $newdependency_libs" -- if $opt_duplicate_deps ; then -+ case $deplib in -+ -L*) func_stripname '-L' '' "$deplib" -+ func_resolve_sysroot "$func_stripname_result";; -+ *) func_resolve_sysroot "$deplib" ;; -+ esac -+ if $opt_preserve_dup_deps ; then - case "$tmp_libs " in -- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; -+ *" $func_resolve_sysroot_result "*) -+ func_append specialdeplibs " $func_resolve_sysroot_result" ;; - esac - fi -- tmp_libs="$tmp_libs $deplib" -+ func_append tmp_libs " $func_resolve_sysroot_result" - done - - if test "$link_all_deplibs" != no; then -@@ -6113,8 +7029,10 @@ func_mode_link () - case $deplib in - -L*) path="$deplib" ;; - *.la) -+ func_resolve_sysroot "$deplib" -+ deplib=$func_resolve_sysroot_result - func_dirname "$deplib" "" "." -- dir="$func_dirname_result" -+ dir=$func_dirname_result - # We need an absolute path. - case $dir in - [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; -@@ -6130,7 +7048,7 @@ func_mode_link () - case $host in - *-*-darwin*) - depdepl= -- deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` -+ eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` - if test -n "$deplibrary_names" ; then - for tmp in $deplibrary_names ; do - depdepl=$tmp -@@ -6141,8 +7059,8 @@ func_mode_link () - if test -z "$darwin_install_name"; then - darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` - fi -- compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" -- linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" -+ func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" -+ func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" - path= - fi - fi -@@ -6152,7 +7070,7 @@ func_mode_link () - ;; - esac - else -- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` -+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` - test -z "$libdir" && \ - func_fatal_error "\`$deplib' is not a valid libtool archive" - test "$absdir" != "$libdir" && \ -@@ -6192,7 +7110,7 @@ func_mode_link () - for dir in $newlib_search_path; do - case "$lib_search_path " in - *" $dir "*) ;; -- *) lib_search_path="$lib_search_path $dir" ;; -+ *) func_append lib_search_path " $dir" ;; - esac - done - newlib_search_path= -@@ -6205,7 +7123,7 @@ func_mode_link () - fi - for var in $vars dependency_libs; do - # Add libraries to $var in reverse order -- eval tmp_libs=\$$var -+ eval tmp_libs=\"\$$var\" - new_libs= - for deplib in $tmp_libs; do - # FIXME: Pedantically, this is the right thing to do, so -@@ -6250,13 +7168,13 @@ func_mode_link () - -L*) - case " $tmp_libs " in - *" $deplib "*) ;; -- *) tmp_libs="$tmp_libs $deplib" ;; -+ *) func_append tmp_libs " $deplib" ;; - esac - ;; -- *) tmp_libs="$tmp_libs $deplib" ;; -+ *) func_append tmp_libs " $deplib" ;; - esac - done -- eval $var=\$tmp_libs -+ eval $var=\"$tmp_libs\" - done # for var - fi - # Last step: remove runtime libs from dependency_libs -@@ -6269,7 +7187,7 @@ func_mode_link () - ;; - esac - if test -n "$i" ; then -- tmp_libs="$tmp_libs $i" -+ func_append tmp_libs " $i" - fi - done - dependency_libs=$tmp_libs -@@ -6310,7 +7228,7 @@ func_mode_link () - # Now set the variables for building old libraries. 
- build_libtool_libs=no - oldlibs="$output" -- objs="$objs$old_deplibs" -+ func_append objs "$old_deplibs" - ;; - - lib) -@@ -6319,8 +7237,8 @@ func_mode_link () - lib*) - func_stripname 'lib' '.la' "$outputname" - name=$func_stripname_result -- eval "shared_ext=\"$shrext_cmds\"" -- eval "libname=\"$libname_spec\"" -+ eval shared_ext=\"$shrext_cmds\" -+ eval libname=\"$libname_spec\" - ;; - *) - test "$module" = no && \ -@@ -6330,8 +7248,8 @@ func_mode_link () - # Add the "lib" prefix for modules if required - func_stripname '' '.la' "$outputname" - name=$func_stripname_result -- eval "shared_ext=\"$shrext_cmds\"" -- eval "libname=\"$libname_spec\"" -+ eval shared_ext=\"$shrext_cmds\" -+ eval libname=\"$libname_spec\" - else - func_stripname '' '.la' "$outputname" - libname=$func_stripname_result -@@ -6346,7 +7264,7 @@ func_mode_link () - echo - $ECHO "*** Warning: Linking the shared library $output against the non-libtool" - $ECHO "*** objects $objs is not portable!" -- libobjs="$libobjs $objs" -+ func_append libobjs " $objs" - fi - fi - -@@ -6544,7 +7462,7 @@ func_mode_link () - done - - # Make executables depend on our current version. -- verstring="$verstring:${current}.0" -+ func_append verstring ":${current}.0" - ;; - - qnx) -@@ -6612,10 +7530,10 @@ func_mode_link () - fi - - func_generate_dlsyms "$libname" "$libname" "yes" -- libobjs="$libobjs $symfileobj" -+ func_append libobjs " $symfileobj" - test "X$libobjs" = "X " && libobjs= - -- if test "$mode" != relink; then -+ if test "$opt_mode" != relink; then - # Remove our outputs, but don't remove object files since they - # may have been created when compiling PIC objects. - removelist= -@@ -6631,7 +7549,7 @@ func_mode_link () - continue - fi - fi -- removelist="$removelist $p" -+ func_append removelist " $p" - ;; - *) ;; - esac -@@ -6642,7 +7560,7 @@ func_mode_link () - - # Now set the variables for building old libraries. - if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then -- oldlibs="$oldlibs $output_objdir/$libname.$libext" -+ func_append oldlibs " $output_objdir/$libname.$libext" - - # Transform .lo files to .o files. - oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` -@@ -6659,10 +7577,11 @@ func_mode_link () - # If the user specified any rpath flags, then add them. - temp_xrpath= - for libdir in $xrpath; do -- temp_xrpath="$temp_xrpath -R$libdir" -+ func_replace_sysroot "$libdir" -+ func_append temp_xrpath " -R$func_replace_sysroot_result" - case "$finalize_rpath " in - *" $libdir "*) ;; -- *) finalize_rpath="$finalize_rpath $libdir" ;; -+ *) func_append finalize_rpath " $libdir" ;; - esac - done - if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then -@@ -6676,7 +7595,7 @@ func_mode_link () - for lib in $old_dlfiles; do - case " $dlprefiles $dlfiles " in - *" $lib "*) ;; -- *) dlfiles="$dlfiles $lib" ;; -+ *) func_append dlfiles " $lib" ;; - esac - done - -@@ -6686,7 +7605,7 @@ func_mode_link () - for lib in $old_dlprefiles; do - case "$dlprefiles " in - *" $lib "*) ;; -- *) dlprefiles="$dlprefiles $lib" ;; -+ *) func_append dlprefiles " $lib" ;; - esac - done - -@@ -6698,7 +7617,7 @@ func_mode_link () - ;; - *-*-rhapsody* | *-*-darwin1.[012]) - # Rhapsody C library is in the System framework -- deplibs="$deplibs System.ltframework" -+ func_append deplibs " System.ltframework" - ;; - *-*-netbsd*) - # Don't link with libc until the a.out ld.so is fixed. 
-@@ -6715,7 +7634,7 @@ func_mode_link () - *) - # Add libc to deplibs on all other systems if necessary. - if test "$build_libtool_need_lc" = "yes"; then -- deplibs="$deplibs -lc" -+ func_append deplibs " -lc" - fi - ;; - esac -@@ -6764,18 +7683,18 @@ EOF - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $i "*) -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - i="" - ;; - esac - fi - if test -n "$i" ; then -- eval "libname=\"$libname_spec\"" -- eval "deplib_matches=\"$library_names_spec\"" -+ libname=`eval "\\$ECHO \"$libname_spec\""` -+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""` - set dummy $deplib_matches; shift - deplib_match=$1 - if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - else - droppeddeps=yes - echo -@@ -6789,7 +7708,7 @@ EOF - fi - ;; - *) -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - ;; - esac - done -@@ -6807,18 +7726,18 @@ EOF - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $i "*) -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - i="" - ;; - esac - fi - if test -n "$i" ; then -- eval "libname=\"$libname_spec\"" -- eval "deplib_matches=\"$library_names_spec\"" -+ libname=`eval "\\$ECHO \"$libname_spec\""` -+ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""` - set dummy $deplib_matches; shift - deplib_match=$1 - if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - else - droppeddeps=yes - echo -@@ -6840,7 +7759,7 @@ EOF - fi - ;; - *) -- newdeplibs="$newdeplibs $i" -+ func_append newdeplibs " $i" - ;; - esac - done -@@ -6857,15 +7776,27 @@ EOF - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $a_deplib "*) -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - a_deplib="" - ;; - esac - fi - if test -n "$a_deplib" ; then -- eval "libname=\"$libname_spec\"" -+ libname=`eval "\\$ECHO \"$libname_spec\""` -+ if test -n "$file_magic_glob"; then -+ libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob` -+ else -+ libnameglob=$libname -+ fi -+ test "$want_nocaseglob" = yes && nocaseglob=`shopt -p nocaseglob` - for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do -- potential_libs=`ls $i/$libname[.-]* 2>/dev/null` -+ if test "$want_nocaseglob" = yes; then -+ shopt -s nocaseglob -+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` -+ $nocaseglob -+ else -+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` -+ fi - for potent_lib in $potential_libs; do - # Follow soft links. - if ls -lLd "$potent_lib" 2>/dev/null | -@@ -6885,10 +7816,10 @@ EOF - *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; - esac - done -- if eval "$file_magic_cmd \"\$potlib\"" 2>/dev/null | -+ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | - $SED -e 10q | - $EGREP "$file_magic_regex" > /dev/null; then -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - a_deplib="" - break 2 - fi -@@ -6913,7 +7844,7 @@ EOF - ;; - *) - # Add a -L argument. -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - ;; - esac - done # Gone through all deplibs. 
-@@ -6929,20 +7860,20 @@ EOF - if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then - case " $predeps $postdeps " in - *" $a_deplib "*) -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - a_deplib="" - ;; - esac - fi - if test -n "$a_deplib" ; then -- eval "libname=\"$libname_spec\"" -+ libname=`eval "\\$ECHO \"$libname_spec\""` - for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do - potential_libs=`ls $i/$libname[.-]* 2>/dev/null` - for potent_lib in $potential_libs; do - potlib="$potent_lib" # see symlink-check above in file_magic test - if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ - $EGREP "$match_pattern_regex" > /dev/null; then -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - a_deplib="" - break 2 - fi -@@ -6967,7 +7898,7 @@ EOF - ;; - *) - # Add a -L argument. -- newdeplibs="$newdeplibs $a_deplib" -+ func_append newdeplibs " $a_deplib" - ;; - esac - done # Gone through all deplibs. -@@ -7071,7 +8002,7 @@ EOF - *) - case " $deplibs " in - *" -L$path/$objdir "*) -- new_libs="$new_libs -L$path/$objdir" ;; -+ func_append new_libs " -L$path/$objdir" ;; - esac - ;; - esac -@@ -7081,10 +8012,10 @@ EOF - -L*) - case " $new_libs " in - *" $deplib "*) ;; -- *) new_libs="$new_libs $deplib" ;; -+ *) func_append new_libs " $deplib" ;; - esac - ;; -- *) new_libs="$new_libs $deplib" ;; -+ *) func_append new_libs " $deplib" ;; - esac - done - deplibs="$new_libs" -@@ -7101,10 +8032,12 @@ EOF - hardcode_libdirs= - dep_rpath= - rpath="$finalize_rpath" -- test "$mode" != relink && rpath="$compile_rpath$rpath" -+ test "$opt_mode" != relink && rpath="$compile_rpath$rpath" - for libdir in $rpath; do - if test -n "$hardcode_libdir_flag_spec"; then - if test -n "$hardcode_libdir_separator"; then -+ func_replace_sysroot "$libdir" -+ libdir=$func_replace_sysroot_result - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else -@@ -7113,18 +8046,18 @@ EOF - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) -- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" -+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" - ;; - esac - fi - else -- eval "flag=\"$hardcode_libdir_flag_spec\"" -- dep_rpath="$dep_rpath $flag" -+ eval flag=\"$hardcode_libdir_flag_spec\" -+ func_append dep_rpath " $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; -- *) perm_rpath="$perm_rpath $libdir" ;; -+ *) func_apped perm_rpath " $libdir" ;; - esac - fi - done -@@ -7133,40 +8066,38 @@ EOF - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" - if test -n "$hardcode_libdir_flag_spec_ld"; then -- eval "dep_rpath=\"$hardcode_libdir_flag_spec_ld\"" -+ eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" - else -- eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" -+ eval dep_rpath=\"$hardcode_libdir_flag_spec\" - fi - fi - if test -n "$runpath_var" && test -n "$perm_rpath"; then - # We should set the runpath_var. 
- rpath= - for dir in $perm_rpath; do -- rpath="$rpath$dir:" -+ func_append rpath "$dir:" - done -- eval $runpath_var=\$rpath\$$runpath_var -- export $runpath_var -+ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" - fi - test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" - fi - - shlibpath="$finalize_shlibpath" -- test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" -+ test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" - if test -n "$shlibpath"; then -- eval $shlibpath_var=\$shlibpath\$$shlibpath_var -- export $shlibpath_var -+ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" - fi - - # Get the real and link names of the library. -- eval "shared_ext=\"$shrext_cmds\"" -- eval "library_names=\"$library_names_spec\"" -+ eval shared_ext=\"$shrext_cmds\" -+ eval library_names=\"$library_names_spec\" - set dummy $library_names - shift - realname="$1" - shift - - if test -n "$soname_spec"; then -- eval "soname=\"$soname_spec\"" -+ eval soname=\"$soname_spec\" - else - soname="$realname" - fi -@@ -7178,7 +8109,7 @@ EOF - linknames= - for link - do -- linknames="$linknames $link" -+ func_append linknames " $link" - done - - # Use standard objects if they are pic -@@ -7189,7 +8120,7 @@ EOF - if test -n "$export_symbols" && test -n "$include_expsyms"; then - $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" - export_symbols="$output_objdir/$libname.uexp" -- delfiles="$delfiles $export_symbols" -+ func_append delfiles " $export_symbols" - fi - - orig_export_symbols= -@@ -7220,13 +8151,45 @@ EOF - $opt_dry_run || $RM $export_symbols - cmds=$export_symbols_cmds - save_ifs="$IFS"; IFS='~' -- for cmd in $cmds; do -+ for cmd1 in $cmds; do - IFS="$save_ifs" -- eval "cmd=\"$cmd\"" -- func_len " $cmd" -- len=$func_len_result -- if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then -+ # Take the normal branch if the nm_file_list_spec branch -+ # doesn't work or if tool conversion is not needed. -+ case $nm_file_list_spec~$to_tool_file_cmd in -+ *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) -+ try_normal_branch=yes -+ eval cmd=\"$cmd1\" -+ func_len " $cmd" -+ len=$func_len_result -+ ;; -+ *) -+ try_normal_branch=no -+ ;; -+ esac -+ if test "$try_normal_branch" = yes \ -+ && { test "$len" -lt "$max_cmd_len" \ -+ || test "$max_cmd_len" -le -1; } -+ then -+ func_show_eval "$cmd" 'exit $?' -+ skipped_export=false -+ elif test -n "$nm_file_list_spec"; then -+ func_basename "$output" -+ output_la=$func_basename_result -+ save_libobjs=$libobjs -+ save_output=$output -+ output=${output_objdir}/${output_la}.nm -+ func_to_tool_file "$output" -+ libobjs=$nm_file_list_spec$func_to_tool_file_result -+ func_append delfiles " $output" -+ func_verbose "creating $NM input file list: $output" -+ for obj in $save_libobjs; do -+ func_to_tool_file "$obj" -+ $ECHO "$func_to_tool_file_result" -+ done > "$output" -+ eval cmd=\"$cmd1\" - func_show_eval "$cmd" 'exit $?' -+ output=$save_output -+ libobjs=$save_libobjs - skipped_export=false - else - # The command line is too long to execute in one step. 
-@@ -7248,7 +8211,7 @@ EOF - if test -n "$export_symbols" && test -n "$include_expsyms"; then - tmp_export_symbols="$export_symbols" - test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" -- $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" -+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' - fi - - if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then -@@ -7260,7 +8223,7 @@ EOF - # global variables. join(1) would be nice here, but unfortunately - # isn't a blessed tool. - $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter -- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" -+ func_append delfiles " $export_symbols $output_objdir/$libname.filter" - export_symbols=$output_objdir/$libname.def - $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols - fi -@@ -7270,7 +8233,7 @@ EOF - case " $convenience " in - *" $test_deplib "*) ;; - *) -- tmp_deplibs="$tmp_deplibs $test_deplib" -+ func_append tmp_deplibs " $test_deplib" - ;; - esac - done -@@ -7286,43 +8249,43 @@ EOF - fi - if test -n "$whole_archive_flag_spec"; then - save_libobjs=$libobjs -- eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" -+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\" - test "X$libobjs" = "X " && libobjs= - else - gentop="$output_objdir/${outputname}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - - func_extract_archives $gentop $convenience -- libobjs="$libobjs $func_extract_archives_result" -+ func_append libobjs " $func_extract_archives_result" - test "X$libobjs" = "X " && libobjs= - fi - fi - - if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then -- eval "flag=\"$thread_safe_flag_spec\"" -- linker_flags="$linker_flags $flag" -+ eval flag=\"$thread_safe_flag_spec\" -+ func_append linker_flags " $flag" - fi - - # Make a backup of the uninstalled library when relinking -- if test "$mode" = relink; then -- $opt_dry_run || (cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U) || exit $? -+ if test "$opt_mode" = relink; then -+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? - fi - - # Do each of the archive commands. 
- if test "$module" = yes && test -n "$module_cmds" ; then - if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then -- eval "test_cmds=\"$module_expsym_cmds\"" -+ eval test_cmds=\"$module_expsym_cmds\" - cmds=$module_expsym_cmds - else -- eval "test_cmds=\"$module_cmds\"" -+ eval test_cmds=\"$module_cmds\" - cmds=$module_cmds - fi - else - if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then -- eval "test_cmds=\"$archive_expsym_cmds\"" -+ eval test_cmds=\"$archive_expsym_cmds\" - cmds=$archive_expsym_cmds - else -- eval "test_cmds=\"$archive_cmds\"" -+ eval test_cmds=\"$archive_cmds\" - cmds=$archive_cmds - fi - fi -@@ -7366,10 +8329,13 @@ EOF - echo 'INPUT (' > $output - for obj in $save_libobjs - do -- $ECHO "$obj" >> $output -+ func_to_tool_file "$obj" -+ $ECHO "$func_to_tool_file_result" >> $output - done - echo ')' >> $output -- delfiles="$delfiles $output" -+ func_append delfiles " $output" -+ func_to_tool_file "$output" -+ output=$func_to_tool_file_result - elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then - output=${output_objdir}/${output_la}.lnk - func_verbose "creating linker input file list: $output" -@@ -7383,15 +8349,17 @@ EOF - fi - for obj - do -- $ECHO "$obj" >> $output -+ func_to_tool_file "$obj" -+ $ECHO "$func_to_tool_file_result" >> $output - done -- delfiles="$delfiles $output" -- output=$firstobj\"$file_list_spec$output\" -+ func_append delfiles " $output" -+ func_to_tool_file "$output" -+ output=$firstobj\"$file_list_spec$func_to_tool_file_result\" - else - if test -n "$save_libobjs"; then - func_verbose "creating reloadable object files..." - output=$output_objdir/$output_la-${k}.$objext -- eval "test_cmds=\"$reload_cmds\"" -+ eval test_cmds=\"$reload_cmds\" - func_len " $test_cmds" - len0=$func_len_result - len=$len0 -@@ -7411,12 +8379,12 @@ EOF - if test "$k" -eq 1 ; then - # The first file doesn't have a previous command to add. - reload_objs=$objlist -- eval "concat_cmds=\"$reload_cmds\"" -+ eval concat_cmds=\"$reload_cmds\" - else - # All subsequent reloadable object files will link in - # the last one created. - reload_objs="$objlist $last_robj" -- eval "concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"" -+ eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" - fi - last_robj=$output_objdir/$output_la-${k}.$objext - func_arith $k + 1 -@@ -7433,11 +8401,11 @@ EOF - # files will link in the last one created. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ - reload_objs="$objlist $last_robj" -- eval "concat_cmds=\"\${concat_cmds}$reload_cmds\"" -+ eval concat_cmds=\"\${concat_cmds}$reload_cmds\" - if test -n "$last_robj"; then -- eval "concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"" -+ eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" - fi -- delfiles="$delfiles $output" -+ func_append delfiles " $output" - - else - output= -@@ -7450,9 +8418,9 @@ EOF - libobjs=$output - # Append the command to create the export file. - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ -- eval "concat_cmds=\"\$concat_cmds$export_symbols_cmds\"" -+ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" - if test -n "$last_robj"; then -- eval "concat_cmds=\"\$concat_cmds~\$RM $last_robj\"" -+ eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" - fi - fi - -@@ -7471,7 +8439,7 @@ EOF - lt_exit=$? 
- - # Restore the uninstalled library and exit -- if test "$mode" = relink; then -+ if test "$opt_mode" = relink; then - ( cd "$output_objdir" && \ - $RM "${realname}T" && \ - $MV "${realname}U" "$realname" ) -@@ -7492,7 +8460,7 @@ EOF - if test -n "$export_symbols" && test -n "$include_expsyms"; then - tmp_export_symbols="$export_symbols" - test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" -- $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" -+ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' - fi - - if test -n "$orig_export_symbols"; then -@@ -7504,7 +8472,7 @@ EOF - # global variables. join(1) would be nice here, but unfortunately - # isn't a blessed tool. - $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter -- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" -+ func_append delfiles " $export_symbols $output_objdir/$libname.filter" - export_symbols=$output_objdir/$libname.def - $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols - fi -@@ -7515,7 +8483,7 @@ EOF - output=$save_output - - if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then -- eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" -+ eval libobjs=\"\$libobjs $whole_archive_flag_spec\" - test "X$libobjs" = "X " && libobjs= - fi - # Expand the library linking commands again to reset the -@@ -7539,23 +8507,23 @@ EOF - - if test -n "$delfiles"; then - # Append the command to remove temporary files to $cmds. -- eval "cmds=\"\$cmds~\$RM $delfiles\"" -+ eval cmds=\"\$cmds~\$RM $delfiles\" - fi - - # Add any objects from preloaded convenience libraries - if test -n "$dlprefiles"; then - gentop="$output_objdir/${outputname}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - - func_extract_archives $gentop $dlprefiles -- libobjs="$libobjs $func_extract_archives_result" -+ func_append libobjs " $func_extract_archives_result" - test "X$libobjs" = "X " && libobjs= - fi - - save_ifs="$IFS"; IFS='~' - for cmd in $cmds; do - IFS="$save_ifs" -- eval "cmd=\"$cmd\"" -+ eval cmd=\"$cmd\" - $opt_silent || { - func_quote_for_expand "$cmd" - eval "func_echo $func_quote_for_expand_result" -@@ -7564,7 +8532,7 @@ EOF - lt_exit=$? - - # Restore the uninstalled library and exit -- if test "$mode" = relink; then -+ if test "$opt_mode" = relink; then - ( cd "$output_objdir" && \ - $RM "${realname}T" && \ - $MV "${realname}U" "$realname" ) -@@ -7576,8 +8544,8 @@ EOF - IFS="$save_ifs" - - # Restore the uninstalled library and exit -- if test "$mode" = relink; then -- $opt_dry_run || (cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname) || exit $? -+ if test "$opt_mode" = relink; then -+ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
- - if test -n "$convenience"; then - if test -z "$whole_archive_flag_spec"; then -@@ -7656,17 +8624,20 @@ EOF - - if test -n "$convenience"; then - if test -n "$whole_archive_flag_spec"; then -- eval "tmp_whole_archive_flags=\"$whole_archive_flag_spec\"" -+ eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" - reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` - else - gentop="$output_objdir/${obj}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - - func_extract_archives $gentop $convenience - reload_conv_objs="$reload_objs $func_extract_archives_result" - fi - fi - -+ # If we're not building shared, we need to use non_pic_objs -+ test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" -+ - # Create the old-style object. - reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test - -@@ -7690,7 +8661,7 @@ EOF - # Create an invalid libtool object if no PIC, so that we don't - # accidentally link it into a program. - # $show "echo timestamp > $libobj" -- # $opt_dry_run || echo timestamp > $libobj || exit $? -+ # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? - exit $EXIT_SUCCESS - fi - -@@ -7740,8 +8711,8 @@ EOF - if test "$tagname" = CXX ; then - case ${MACOSX_DEPLOYMENT_TARGET-10.0} in - 10.[0123]) -- compile_command="$compile_command ${wl}-bind_at_load" -- finalize_command="$finalize_command ${wl}-bind_at_load" -+ func_append compile_command " ${wl}-bind_at_load" -+ func_append finalize_command " ${wl}-bind_at_load" - ;; - esac - fi -@@ -7761,7 +8732,7 @@ EOF - *) - case " $compile_deplibs " in - *" -L$path/$objdir "*) -- new_libs="$new_libs -L$path/$objdir" ;; -+ func_append new_libs " -L$path/$objdir" ;; - esac - ;; - esac -@@ -7771,17 +8742,17 @@ EOF - -L*) - case " $new_libs " in - *" $deplib "*) ;; -- *) new_libs="$new_libs $deplib" ;; -+ *) func_append new_libs " $deplib" ;; - esac - ;; -- *) new_libs="$new_libs $deplib" ;; -+ *) func_append new_libs " $deplib" ;; - esac - done - compile_deplibs="$new_libs" - - -- compile_command="$compile_command $compile_deplibs" -- finalize_command="$finalize_command $finalize_deplibs" -+ func_append compile_command " $compile_deplibs" -+ func_append finalize_command " $finalize_deplibs" - - if test -n "$rpath$xrpath"; then - # If the user specified any rpath flags, then add them. -@@ -7789,7 +8760,7 @@ EOF - # This is the magic to use -rpath. 
- case "$finalize_rpath " in - *" $libdir "*) ;; -- *) finalize_rpath="$finalize_rpath $libdir" ;; -+ *) func_append finalize_rpath " $libdir" ;; - esac - done - fi -@@ -7808,18 +8779,18 @@ EOF - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) -- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" -+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" - ;; - esac - fi - else -- eval "flag=\"$hardcode_libdir_flag_spec\"" -- rpath="$rpath $flag" -+ eval flag=\"$hardcode_libdir_flag_spec\" -+ func_append rpath " $flag" - fi - elif test -n "$runpath_var"; then - case "$perm_rpath " in - *" $libdir "*) ;; -- *) perm_rpath="$perm_rpath $libdir" ;; -+ *) func_append perm_rpath " $libdir" ;; - esac - fi - case $host in -@@ -7828,12 +8799,12 @@ EOF - case :$dllsearchpath: in - *":$libdir:"*) ;; - ::) dllsearchpath=$libdir;; -- *) dllsearchpath="$dllsearchpath:$libdir";; -+ *) func_append dllsearchpath ":$libdir";; - esac - case :$dllsearchpath: in - *":$testbindir:"*) ;; - ::) dllsearchpath=$testbindir;; -- *) dllsearchpath="$dllsearchpath:$testbindir";; -+ *) func_append dllsearchpath ":$testbindir";; - esac - ;; - esac -@@ -7842,7 +8813,7 @@ EOF - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" -- eval "rpath=\" $hardcode_libdir_flag_spec\"" -+ eval rpath=\" $hardcode_libdir_flag_spec\" - fi - compile_rpath="$rpath" - -@@ -7859,18 +8830,18 @@ EOF - *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) - ;; - *) -- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" -+ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" - ;; - esac - fi - else -- eval "flag=\"$hardcode_libdir_flag_spec\"" -- rpath="$rpath $flag" -+ eval flag=\"$hardcode_libdir_flag_spec\" -+ func_append rpath " $flag" - fi - elif test -n "$runpath_var"; then - case "$finalize_perm_rpath " in - *" $libdir "*) ;; -- *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; -+ *) func_append finalize_perm_rpath " $libdir" ;; - esac - fi - done -@@ -7878,7 +8849,7 @@ EOF - if test -n "$hardcode_libdir_separator" && - test -n "$hardcode_libdirs"; then - libdir="$hardcode_libdirs" -- eval "rpath=\" $hardcode_libdir_flag_spec\"" -+ eval rpath=\" $hardcode_libdir_flag_spec\" - fi - finalize_rpath="$rpath" - -@@ -7921,6 +8892,12 @@ EOF - exit_status=0 - func_show_eval "$link_command" 'exit_status=$?' - -+ if test -n "$postlink_cmds"; then -+ func_to_tool_file "$output" -+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` -+ func_execute_cmds "$postlink_cmds" 'exit $?' -+ fi -+ - # Delete the generated files. - if test -f "$output_objdir/${outputname}S.${objext}"; then - func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' -@@ -7943,7 +8920,7 @@ EOF - # We should set the runpath_var. - rpath= - for dir in $perm_rpath; do -- rpath="$rpath$dir:" -+ func_append rpath "$dir:" - done - compile_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi -@@ -7951,7 +8928,7 @@ EOF - # We should set the runpath_var. - rpath= - for dir in $finalize_perm_rpath; do -- rpath="$rpath$dir:" -+ func_append rpath "$dir:" - done - finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " - fi -@@ -7966,6 +8943,13 @@ EOF - $opt_dry_run || $RM $output - # Link the executable and exit - func_show_eval "$link_command" 'exit $?' 
-+ -+ if test -n "$postlink_cmds"; then -+ func_to_tool_file "$output" -+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` -+ func_execute_cmds "$postlink_cmds" 'exit $?' -+ fi -+ - exit $EXIT_SUCCESS - fi - -@@ -7999,6 +8983,12 @@ EOF - - func_show_eval "$link_command" 'exit $?' - -+ if test -n "$postlink_cmds"; then -+ func_to_tool_file "$output_objdir/$outputname" -+ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` -+ func_execute_cmds "$postlink_cmds" 'exit $?' -+ fi -+ - # Now create the wrapper script. - func_verbose "creating $output" - -@@ -8096,7 +9086,7 @@ EOF - else - oldobjs="$old_deplibs $non_pic_objects" - if test "$preload" = yes && test -f "$symfileobj"; then -- oldobjs="$oldobjs $symfileobj" -+ func_append oldobjs " $symfileobj" - fi - fi - addlibs="$old_convenience" -@@ -8104,10 +9094,10 @@ EOF - - if test -n "$addlibs"; then - gentop="$output_objdir/${outputname}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - - func_extract_archives $gentop $addlibs -- oldobjs="$oldobjs $func_extract_archives_result" -+ func_append oldobjs " $func_extract_archives_result" - fi - - # Do each command in the archive commands. -@@ -8118,10 +9108,10 @@ EOF - # Add any objects from preloaded convenience libraries - if test -n "$dlprefiles"; then - gentop="$output_objdir/${outputname}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - - func_extract_archives $gentop $dlprefiles -- oldobjs="$oldobjs $func_extract_archives_result" -+ func_append oldobjs " $func_extract_archives_result" - fi - - # POSIX demands no paths to be encoded in archives. We have -@@ -8139,7 +9129,7 @@ EOF - else - echo "copying selected object files to avoid basename conflicts..." - gentop="$output_objdir/${outputname}x" -- generated="$generated $gentop" -+ func_append generated " $gentop" - func_mkdir_p "$gentop" - save_oldobjs=$oldobjs - oldobjs= -@@ -8163,18 +9153,28 @@ EOF - esac - done - func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" -- oldobjs="$oldobjs $gentop/$newobj" -+ func_append oldobjs " $gentop/$newobj" - ;; -- *) oldobjs="$oldobjs $obj" ;; -+ *) func_append oldobjs " $obj" ;; - esac - done - fi -- eval "cmds=\"$old_archive_cmds\"" -+ eval cmds=\"$old_archive_cmds\" - - func_len " $cmds" - len=$func_len_result - if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then - cmds=$old_archive_cmds -+ elif test -n "$archiver_list_spec"; then -+ func_verbose "using command file archive linking..." -+ for obj in $oldobjs -+ do -+ func_to_tool_file "$obj" -+ $ECHO "$func_to_tool_file_result" -+ done > $output_objdir/$libname.libcmd -+ func_to_tool_file "$output_objdir/$libname.libcmd" -+ oldobjs=" $archiver_list_spec$func_to_tool_file_result" -+ cmds=$old_archive_cmds - else - # the command line is too long to link in one step, link in parts - func_verbose "using piecewise archive linking..." 
-@@ -8189,7 +9189,7 @@ EOF - do - last_oldobj=$obj - done -- eval "test_cmds=\"$old_archive_cmds\"" -+ eval test_cmds=\"$old_archive_cmds\" - func_len " $test_cmds" - len0=$func_len_result - len=$len0 -@@ -8208,7 +9208,7 @@ EOF - RANLIB=$save_RANLIB - fi - test -z "$concat_cmds" || concat_cmds=$concat_cmds~ -- eval "concat_cmds=\"\${concat_cmds}$old_archive_cmds\"" -+ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" - objlist= - len=$len0 - fi -@@ -8216,9 +9216,9 @@ EOF - RANLIB=$save_RANLIB - oldobjs=$objlist - if test "X$oldobjs" = "X" ; then -- eval "cmds=\"\$concat_cmds\"" -+ eval cmds=\"\$concat_cmds\" - else -- eval "cmds=\"\$concat_cmds~\$old_archive_cmds\"" -+ eval cmds=\"\$concat_cmds~\$old_archive_cmds\" - fi - fi - fi -@@ -8268,12 +9268,23 @@ EOF - *.la) - func_basename "$deplib" - name="$func_basename_result" -- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` -+ func_resolve_sysroot "$deplib" -+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` - test -z "$libdir" && \ - func_fatal_error "\`$deplib' is not a valid libtool archive" -- newdependency_libs="$newdependency_libs $libdir/$name" -+ func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" -+ ;; -+ -L*) -+ func_stripname -L '' "$deplib" -+ func_replace_sysroot "$func_stripname_result" -+ func_append newdependency_libs " -L$func_replace_sysroot_result" - ;; -- *) newdependency_libs="$newdependency_libs $deplib" ;; -+ -R*) -+ func_stripname -R '' "$deplib" -+ func_replace_sysroot "$func_stripname_result" -+ func_append newdependency_libs " -R$func_replace_sysroot_result" -+ ;; -+ *) func_append newdependency_libs " $deplib" ;; - esac - done - dependency_libs="$newdependency_libs" -@@ -8284,12 +9295,14 @@ EOF - *.la) - func_basename "$lib" - name="$func_basename_result" -- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` -+ func_resolve_sysroot "$lib" -+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` -+ - test -z "$libdir" && \ - func_fatal_error "\`$lib' is not a valid libtool archive" -- newdlfiles="$newdlfiles $libdir/$name" -+ func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" - ;; -- *) newdlfiles="$newdlfiles $lib" ;; -+ *) func_append newdlfiles " $lib" ;; - esac - done - dlfiles="$newdlfiles" -@@ -8303,10 +9316,11 @@ EOF - # the library: - func_basename "$lib" - name="$func_basename_result" -- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` -+ func_resolve_sysroot "$lib" -+ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` - test -z "$libdir" && \ - func_fatal_error "\`$lib' is not a valid libtool archive" -- newdlprefiles="$newdlprefiles $libdir/$name" -+ func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" - ;; - esac - done -@@ -8318,7 +9332,7 @@ EOF - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac -- newdlfiles="$newdlfiles $abs" -+ func_append newdlfiles " $abs" - done - dlfiles="$newdlfiles" - newdlprefiles= -@@ -8327,7 +9341,7 @@ EOF - [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; - *) abs=`pwd`"/$lib" ;; - esac -- newdlprefiles="$newdlprefiles $abs" -+ func_append newdlprefiles " $abs" - done - dlprefiles="$newdlprefiles" - fi -@@ -8412,7 +9426,7 @@ relink_command=\"$relink_command\"" - exit $EXIT_SUCCESS - } - --{ test "$mode" = link || test "$mode" = relink; } && -+{ test "$opt_mode" = link || test "$opt_mode" = relink; } && - func_mode_link ${1+"$@"} - - -@@ -8432,9 +9446,9 @@ func_mode_uninstall () - for arg - do - case $arg in -- -f) RM="$RM $arg"; 
rmforce=yes ;; -- -*) RM="$RM $arg" ;; -- *) files="$files $arg" ;; -+ -f) func_append RM " $arg"; rmforce=yes ;; -+ -*) func_append RM " $arg" ;; -+ *) func_append files " $arg" ;; - esac - done - -@@ -8443,24 +9457,23 @@ func_mode_uninstall () - - rmdirs= - -- origobjdir="$objdir" - for file in $files; do - func_dirname "$file" "" "." - dir="$func_dirname_result" - if test "X$dir" = X.; then -- objdir="$origobjdir" -+ odir="$objdir" - else -- objdir="$dir/$origobjdir" -+ odir="$dir/$objdir" - fi - func_basename "$file" - name="$func_basename_result" -- test "$mode" = uninstall && objdir="$dir" -+ test "$opt_mode" = uninstall && odir="$dir" - -- # Remember objdir for removal later, being careful to avoid duplicates -- if test "$mode" = clean; then -+ # Remember odir for removal later, being careful to avoid duplicates -+ if test "$opt_mode" = clean; then - case " $rmdirs " in -- *" $objdir "*) ;; -- *) rmdirs="$rmdirs $objdir" ;; -+ *" $odir "*) ;; -+ *) func_append rmdirs " $odir" ;; - esac - fi - -@@ -8486,18 +9499,17 @@ func_mode_uninstall () - - # Delete the libtool libraries and symlinks. - for n in $library_names; do -- rmfiles="$rmfiles $objdir/$n" -+ func_append rmfiles " $odir/$n" - done -- test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" -+ test -n "$old_library" && func_append rmfiles " $odir/$old_library" - -- case "$mode" in -+ case "$opt_mode" in - clean) -- case " $library_names " in -- # " " in the beginning catches empty $dlname -+ case " $library_names " in - *" $dlname "*) ;; -- *) rmfiles="$rmfiles $objdir/$dlname" ;; -+ *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; - esac -- test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" -+ test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" - ;; - uninstall) - if test -n "$library_names"; then -@@ -8525,19 +9537,19 @@ func_mode_uninstall () - # Add PIC object to the list of files to remove. - if test -n "$pic_object" && - test "$pic_object" != none; then -- rmfiles="$rmfiles $dir/$pic_object" -+ func_append rmfiles " $dir/$pic_object" - fi - - # Add non-PIC object to the list of files to remove. - if test -n "$non_pic_object" && - test "$non_pic_object" != none; then -- rmfiles="$rmfiles $dir/$non_pic_object" -+ func_append rmfiles " $dir/$non_pic_object" - fi - fi - ;; - - *) -- if test "$mode" = clean ; then -+ if test "$opt_mode" = clean ; then - noexename=$name - case $file in - *.exe) -@@ -8547,7 +9559,7 @@ func_mode_uninstall () - noexename=$func_stripname_result - # $file with .exe has already been added to rmfiles, - # add $file without .exe -- rmfiles="$rmfiles $file" -+ func_append rmfiles " $file" - ;; - esac - # Do a test to see if this is a libtool program. 
-@@ -8556,7 +9568,7 @@ func_mode_uninstall () - func_ltwrapper_scriptname "$file" - relink_command= - func_source $func_ltwrapper_scriptname_result -- rmfiles="$rmfiles $func_ltwrapper_scriptname_result" -+ func_append rmfiles " $func_ltwrapper_scriptname_result" - else - relink_command= - func_source $dir/$noexename -@@ -8564,12 +9576,12 @@ func_mode_uninstall () - - # note $name still contains .exe if it was in $file originally - # as does the version of $file that was added into $rmfiles -- rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" -+ func_append rmfiles " $odir/$name $odir/${name}S.${objext}" - if test "$fast_install" = yes && test -n "$relink_command"; then -- rmfiles="$rmfiles $objdir/lt-$name" -+ func_append rmfiles " $odir/lt-$name" - fi - if test "X$noexename" != "X$name" ; then -- rmfiles="$rmfiles $objdir/lt-${noexename}.c" -+ func_append rmfiles " $odir/lt-${noexename}.c" - fi - fi - fi -@@ -8577,7 +9589,6 @@ func_mode_uninstall () - esac - func_show_eval "$RM $rmfiles" 'exit_status=1' - done -- objdir="$origobjdir" - - # Try to remove the ${objdir}s in the directories where we deleted files - for dir in $rmdirs; do -@@ -8589,16 +9600,16 @@ func_mode_uninstall () - exit $exit_status - } - --{ test "$mode" = uninstall || test "$mode" = clean; } && -+{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && - func_mode_uninstall ${1+"$@"} - --test -z "$mode" && { -+test -z "$opt_mode" && { - help="$generic_help" - func_fatal_help "you must specify a MODE" - } - - test -z "$exec_cmd" && \ -- func_fatal_help "invalid operation mode \`$mode'" -+ func_fatal_help "invalid operation mode \`$opt_mode'" - - if test -n "$exec_cmd"; then - eval exec "$exec_cmd" -diff --git a/ltoptions.m4 b/ltoptions.m4 -index 5ef12ced2a8..17cfd51c0b3 100644 ---- a/ltoptions.m4 -+++ b/ltoptions.m4 -@@ -8,7 +8,7 @@ - # unlimited permission to copy and/or distribute it, with or without - # modifications, as long as this notice is preserved. - --# serial 6 ltoptions.m4 -+# serial 7 ltoptions.m4 - - # This is to help aclocal find these macros, as it can't see m4_define. - AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) -diff --git a/ltversion.m4 b/ltversion.m4 -index bf87f77132d..9c7b5d41185 100644 ---- a/ltversion.m4 -+++ b/ltversion.m4 -@@ -7,17 +7,17 @@ - # unlimited permission to copy and/or distribute it, with or without - # modifications, as long as this notice is preserved. - --# Generated from ltversion.in. -+# @configure_input@ - --# serial 3134 ltversion.m4 -+# serial 3293 ltversion.m4 - # This file is part of GNU Libtool - --m4_define([LT_PACKAGE_VERSION], [2.2.7a]) --m4_define([LT_PACKAGE_REVISION], [1.3134]) -+m4_define([LT_PACKAGE_VERSION], [2.4]) -+m4_define([LT_PACKAGE_REVISION], [1.3293]) - - AC_DEFUN([LTVERSION_VERSION], --[macro_version='2.2.7a' --macro_revision='1.3134' -+[macro_version='2.4' -+macro_revision='1.3293' - _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) - _LT_DECL(, macro_revision, 0) - ]) -diff --git a/lt~obsolete.m4 b/lt~obsolete.m4 -index bf92b5e0790..c573da90c5c 100644 ---- a/lt~obsolete.m4 -+++ b/lt~obsolete.m4 -@@ -7,7 +7,7 @@ - # unlimited permission to copy and/or distribute it, with or without - # modifications, as long as this notice is preserved. - --# serial 4 lt~obsolete.m4 -+# serial 5 lt~obsolete.m4 - - # These exist entirely to fool aclocal when bootstrapping libtool. 
- # -diff --git a/opcodes/configure b/opcodes/configure -index 4723dcdc1e8..006ccdaa458 100755 ---- a/opcodes/configure -+++ b/opcodes/configure -@@ -680,6 +680,9 @@ OTOOL - LIPO - NMEDIT - DSYMUTIL -+MANIFEST_TOOL -+ac_ct_AR -+DLLTOOL - OBJDUMP - LN_S - NM -@@ -798,6 +801,7 @@ enable_static - with_pic - enable_fast_install - with_gnu_ld -+with_libtool_sysroot - enable_libtool_lock - enable_targets - enable_werror -@@ -1462,6 +1466,8 @@ Optional Packages: - --with-pic try to use only PIC/non-PIC objects [default=use - both] - --with-gnu-ld assume the C compiler uses GNU ld [default=no] -+ --with-libtool-sysroot=DIR Search for dependent libraries within DIR -+ (or the compiler's sysroot if not specified). - - Some influential environment variables: - CC C compiler command -@@ -5403,8 +5409,8 @@ esac - - - --macro_version='2.2.7a' --macro_revision='1.3134' -+macro_version='2.4' -+macro_revision='1.3293' - - - -@@ -5444,7 +5450,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 - $as_echo_n "checking how to print strings... " >&6; } - # Test print first, because it will be a builtin if present. --if test "X`print -r -- -n 2>/dev/null`" = X-n && \ -+if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' - elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then -@@ -6130,8 +6136,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; - # Try some XSI features - xsi_shell=no - ( _lt_dummy="a/b/c" -- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ -- = c,a/b,, \ -+ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ -+ = c,a/b,b/c, \ - && eval 'test $(( 1 + 1 )) -eq 2 \ - && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ - && xsi_shell=yes -@@ -6180,6 +6186,80 @@ esac - - - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -+$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -+if ${lt_cv_to_host_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 -+ ;; -+ esac -+ ;; -+ *-*-cygwin* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin -+ ;; -+ *-*-cygwin* ) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+ * ) # otherwise, assume *nix -+ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin -+ ;; -+ esac -+ ;; -+ * ) # unhandled hosts (and "normal" native builds) -+ lt_cv_to_host_file_cmd=func_convert_file_noop -+ ;; -+esac -+ -+fi -+ -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -+$as_echo "$lt_cv_to_host_file_cmd" >&6; } -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -+$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -+if ${lt_cv_to_tool_file_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ #assume ordinary cross tools, or native build. 
-+lt_cv_to_tool_file_cmd=func_convert_file_noop -+case $host in -+ *-*-mingw* ) -+ case $build in -+ *-*-mingw* ) # actually msys -+ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 -+ ;; -+ esac -+ ;; -+esac -+ -+fi -+ -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -+$as_echo "$lt_cv_to_tool_file_cmd" >&6; } -+ -+ -+ -+ -+ - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 - $as_echo_n "checking for $LD option to reload object files... " >&6; } - if ${lt_cv_ld_reload_flag+:} false; then : -@@ -6196,6 +6276,11 @@ case $reload_flag in - esac - reload_cmds='$LD$reload_flag -o $output$reload_objs' - case $host_os in -+ cygwin* | mingw* | pw32* | cegcc*) -+ if test "$GCC" != yes; then -+ reload_cmds=false -+ fi -+ ;; - darwin*) - if test "$GCC" = yes; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' -@@ -6364,7 +6449,8 @@ mingw* | pw32*) - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else -- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' -+ # Keep this pattern in sync with the one in func_win32_libid. -+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; -@@ -6518,6 +6604,21 @@ esac - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 - $as_echo "$lt_cv_deplibs_check_method" >&6; } -+ -+file_magic_glob= -+want_nocaseglob=no -+if test "$build" = "$host"; then -+ case $host_os in -+ mingw* | pw32*) -+ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then -+ want_nocaseglob=yes -+ else -+ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` -+ fi -+ ;; -+ esac -+fi -+ - file_magic_cmd=$lt_cv_file_magic_cmd - deplibs_check_method=$lt_cv_deplibs_check_method - test -z "$deplibs_check_method" && deplibs_check_method=unknown -@@ -6531,11 +6632,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ - - - if test -n "$ac_tool_prefix"; then -- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. --set dummy ${ac_tool_prefix}ar; ac_word=$2 -+ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -+set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$DLLTOOL"; then -+ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. 
-+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+DLLTOOL=$ac_cv_prog_DLLTOOL -+if test -n "$DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -+$as_echo "$DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_DLLTOOL"; then -+ ac_ct_DLLTOOL=$DLLTOOL -+ # Extract the first word of "dlltool", so it can be a program name with args. -+set dummy dlltool; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_DLLTOOL"; then -+ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_DLLTOOL="dlltool" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -+if test -n "$ac_ct_DLLTOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -+$as_echo "$ac_ct_DLLTOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_DLLTOOL" = x; then -+ DLLTOOL="false" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ DLLTOOL=$ac_ct_DLLTOOL -+ fi -+else -+ DLLTOOL="$ac_cv_prog_DLLTOOL" -+fi -+ -+test -z "$DLLTOOL" && DLLTOOL=dlltool -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -+$as_echo_n "checking how to associate runtime and link libraries... 
" >&6; } -+if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_sharedlib_from_linklib_cmd='unknown' -+ -+case $host_os in -+cygwin* | mingw* | pw32* | cegcc*) -+ # two different shell functions defined in ltmain.sh -+ # decide which to use based on capabilities of $DLLTOOL -+ case `$DLLTOOL --help 2>&1` in -+ *--identify-strict*) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib -+ ;; -+ *) -+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback -+ ;; -+ esac -+ ;; -+*) -+ # fallback: assume linklib IS sharedlib -+ lt_cv_sharedlib_from_linklib_cmd="$ECHO" -+ ;; -+esac -+ -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -+$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO -+ -+ -+ -+ -+ -+ -+ -+if test -n "$ac_tool_prefix"; then -+ for ac_prog in ar -+ do -+ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_AR+:} false; then : -@@ -6551,7 +6805,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_AR="${ac_tool_prefix}ar" -+ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6571,11 +6825,15 @@ $as_echo "no" >&6; } - fi - - -+ test -n "$AR" && break -+ done - fi --if test -z "$ac_cv_prog_AR"; then -+if test -z "$AR"; then - ac_ct_AR=$AR -- # Extract the first word of "ar", so it can be a program name with args. --set dummy ar; ac_word=$2 -+ for ac_prog in ar -+do -+ # Extract the first word of "$ac_prog", so it can be a program name with args. -+set dummy $ac_prog; ac_word=$2 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 - $as_echo_n "checking for $ac_word... " >&6; } - if ${ac_cv_prog_ac_ct_AR+:} false; then : -@@ -6591,7 +6849,7 @@ do - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -- ac_cv_prog_ac_ct_AR="ar" -+ ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -@@ -6610,6 +6868,10 @@ else - $as_echo "no" >&6; } - fi - -+ -+ test -n "$ac_ct_AR" && break -+done -+ - if test "x$ac_ct_AR" = x; then - AR="false" - else -@@ -6621,16 +6883,72 @@ ac_tool_warned=yes ;; - esac - AR=$ac_ct_AR - fi --else -- AR="$ac_cv_prog_AR" - fi - --test -z "$AR" && AR=ar --test -z "$AR_FLAGS" && AR_FLAGS=cru -+: ${AR=ar} -+: ${AR_FLAGS=cru} -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -+$as_echo_n "checking for archiver @FILE support... " >&6; } -+if ${lt_cv_ar_at_file+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_ar_at_file=no -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+/* end confdefs.h. 
*/ -+ -+int -+main () -+{ - -+ ; -+ return 0; -+} -+_ACEOF -+if ac_fn_c_try_compile "$LINENO"; then : -+ echo conftest.$ac_objext > conftest.lst -+ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -eq 0; then -+ # Ensure the archiver fails upon bogus file names. -+ rm -f conftest.$ac_objext libconftest.a -+ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 -+ (eval $lt_ar_try) 2>&5 -+ ac_status=$? -+ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 -+ test $ac_status = 0; } -+ if test "$ac_status" -ne 0; then -+ lt_cv_ar_at_file=@ -+ fi -+ fi -+ rm -f conftest.* libconftest.a - -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -+$as_echo "$lt_cv_ar_at_file" >&6; } - -+if test "x$lt_cv_ar_at_file" = xno; then -+ archiver_list_spec= -+else -+ archiver_list_spec=$lt_cv_ar_at_file -+fi - - - -@@ -6972,8 +7290,8 @@ esac - lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" - - # Transform an extracted symbol line into symbol name and symbol address --lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" --lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" -+lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" - - # Handle CRLF in mingw tool chain - opt_cr= -@@ -7009,6 +7327,7 @@ for ac_symprfx in "" "_"; do - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi -+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no -@@ -7050,6 +7369,18 @@ _LT_EOF - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) -+/* DATA imports from DLLs on WIN32 con't be const, because runtime -+ relocations are performed -- see ld's documentation on pseudo-relocs. */ -+# define LT_DLSYM_CONST -+#elif defined(__osf__) -+/* This system does not cope well with relocations in const data. */ -+# define LT_DLSYM_CONST -+#else -+# define LT_DLSYM_CONST const -+#endif -+ - #ifdef __cplusplus - extern "C" { - #endif -@@ -7061,7 +7392,7 @@ _LT_EOF - cat <<_LT_EOF >> conftest.$ac_ext - - /* The mapping between symbol names and symbols. 
*/ --const struct { -+LT_DLSYM_CONST struct { - const char *name; - void *address; - } -@@ -7087,8 +7418,8 @@ static const void *lt_preloaded_setup() { - _LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext -- lt_save_LIBS="$LIBS" -- lt_save_CFLAGS="$CFLAGS" -+ lt_globsym_save_LIBS=$LIBS -+ lt_globsym_save_CFLAGS=$CFLAGS - LIBS="conftstm.$ac_objext" - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 -@@ -7098,8 +7429,8 @@ _LT_EOF - test $ac_status = 0; } && test -s conftest${ac_exeext}; then - pipe_works=yes - fi -- LIBS="$lt_save_LIBS" -- CFLAGS="$lt_save_CFLAGS" -+ LIBS=$lt_globsym_save_LIBS -+ CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi -@@ -7136,6 +7467,16 @@ else - $as_echo "ok" >&6; } - fi - -+# Response file support. -+if test "$lt_cv_nm_interface" = "MS dumpbin"; then -+ nm_file_list_spec='@' -+elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then -+ nm_file_list_spec='@' -+fi -+ -+ -+ -+ - - - -@@ -7152,6 +7493,45 @@ fi - - - -+ -+ -+ -+ -+ -+ -+ -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -+$as_echo_n "checking for sysroot... " >&6; } -+ -+# Check whether --with-libtool-sysroot was given. -+if test "${with_libtool_sysroot+set}" = set; then : -+ withval=$with_libtool_sysroot; -+else -+ with_libtool_sysroot=no -+fi -+ -+ -+lt_sysroot= -+case ${with_libtool_sysroot} in #( -+ yes) -+ if test "$GCC" = yes; then -+ lt_sysroot=`$CC --print-sysroot 2>/dev/null` -+ fi -+ ;; #( -+ /*) -+ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` -+ ;; #( -+ no|'') -+ ;; #( -+ *) -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 -+$as_echo "${with_libtool_sysroot}" >&6; } -+ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 -+ ;; -+esac -+ -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -+$as_echo "${lt_sysroot:-no}" >&6; } - - - -@@ -7363,6 +7743,123 @@ esac - - need_locks="$enable_libtool_lock" - -+if test -n "$ac_tool_prefix"; then -+ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -+set dummy ${ac_tool_prefix}mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$MANIFEST_TOOL"; then -+ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -+if test -n "$MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -+$as_echo "$MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ -+fi -+if test -z "$ac_cv_prog_MANIFEST_TOOL"; then -+ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL -+ # Extract the first word of "mt", so it can be a program name with args. 
-+set dummy mt; ac_word=$2 -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -+$as_echo_n "checking for $ac_word... " >&6; } -+if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ if test -n "$ac_ct_MANIFEST_TOOL"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -+else -+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -+for as_dir in $PATH -+do -+ IFS=$as_save_IFS -+ test -z "$as_dir" && as_dir=. -+ for ac_exec_ext in '' $ac_executable_extensions; do -+ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then -+ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" -+ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 -+ break 2 -+ fi -+done -+ done -+IFS=$as_save_IFS -+ -+fi -+fi -+ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -+if test -n "$ac_ct_MANIFEST_TOOL"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -+$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -+else -+ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -+$as_echo "no" >&6; } -+fi -+ -+ if test "x$ac_ct_MANIFEST_TOOL" = x; then -+ MANIFEST_TOOL=":" -+ else -+ case $cross_compiling:$ac_tool_warned in -+yes:) -+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -+ac_tool_warned=yes ;; -+esac -+ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL -+ fi -+else -+ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -+fi -+ -+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -+$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -+if ${lt_cv_path_mainfest_tool+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_path_mainfest_tool=no -+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 -+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out -+ cat conftest.err >&5 -+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then -+ lt_cv_path_mainfest_tool=yes -+ fi -+ rm -f conftest* -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -+$as_echo "$lt_cv_path_mainfest_tool" >&6; } -+if test "x$lt_cv_path_mainfest_tool" != xyes; then -+ MANIFEST_TOOL=: -+fi -+ -+ -+ -+ -+ - - case $host_os in - rhapsody* | darwin*) -@@ -7926,6 +8423,8 @@ _LT_EOF - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cru libconftest.a conftest.o" >&5 - $AR cru libconftest.a conftest.o 2>&5 -+ echo "$RANLIB libconftest.a" >&5 -+ $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF - int main() { return 0;} - _LT_EOF -@@ -8091,7 +8590,8 @@ fi - LIBTOOL_DEPS="$ltmain" - - # Always use our own libtool. --LIBTOOL='$(SHELL) $(top_builddir)/libtool' -+LIBTOOL='$(SHELL) $(top_builddir)' -+LIBTOOL="$LIBTOOL/${host_alias}-libtool" - - - -@@ -8180,7 +8680,7 @@ aix3*) - esac - - # Global variables: --ofile=libtool -+ofile=${host_alias}-libtool - can_build_shared=yes - - # All known linkers require a `.a' archive for static linking (except MSVC, -@@ -8478,8 +8978,6 @@ fi - lt_prog_compiler_pic= - lt_prog_compiler_static= - --{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 --$as_echo_n "checking for $compiler option to produce PIC... " >&6; } - - if test "$GCC" = yes; then - lt_prog_compiler_wl='-Wl,' -@@ -8645,6 +9143,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; -+ nagfor*) -+ # NAG Fortran compiler -+ lt_prog_compiler_wl='-Wl,-Wl,,' -+ lt_prog_compiler_pic='-PIC' -+ lt_prog_compiler_static='-Bstatic' -+ ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) -@@ -8707,7 +9211,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in -- f77* | f90* | f95*) -+ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; -@@ -8764,13 +9268,17 @@ case $host_os in - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; - esac --{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 --$as_echo "$lt_prog_compiler_pic" >&6; } -- -- -- -- - -+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -+$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -+if ${lt_cv_prog_compiler_pic+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -+$as_echo "$lt_cv_prog_compiler_pic" >&6; } -+lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - - # - # Check to make sure the PIC flag actually works. -@@ -8831,6 +9339,11 @@ fi - - - -+ -+ -+ -+ -+ - # - # Check to make sure the static flag actually works. - # -@@ -9181,7 +9694,8 @@ _LT_EOF - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes -- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' -+ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' -@@ -9280,12 +9794,12 @@ _LT_EOF - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - hardcode_libdir_flag_spec= - hardcode_libdir_flag_spec_ld='-rpath $libdir' -- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' -+ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test "x$supports_anon_versioning" = xyes; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ -- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' -+ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac -@@ -9299,8 +9813,8 @@ _LT_EOF - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else -- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - -@@ -9318,8 +9832,8 @@ _LT_EOF - - _LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9365,8 +9879,8 @@ _LT_EOF - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi -@@ -9496,7 +10010,13 @@ _LT_EOF - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9509,22 +10029,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. 
-+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" -@@ -9536,7 +10063,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - else - # Determine the default libpath from the value encoded in an - # empty executable. -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ if test "${lt_cv_aix_libpath+set}" = set; then -+ aix_libpath=$lt_cv_aix_libpath -+else -+ if ${lt_cv_aix_libpath_+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ - - int -@@ -9549,22 +10082,29 @@ main () - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : - --lt_aix_libpath_sed=' -- /Import File Strings/,/^$/ { -- /^0/ { -- s/^0 *\(.*\)$/\1/ -- p -- } -- }' --aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --# Check for a 64-bit object if we didn't find anything. --if test -z "$aix_libpath"; then -- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` --fi -+ lt_aix_libpath_sed=' -+ /Import File Strings/,/^$/ { -+ /^0/ { -+ s/^0 *\([^ ]*\) *$/\1/ -+ p -+ } -+ }' -+ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ # Check for a 64-bit object if we didn't find anything. -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` -+ fi - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext --if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi -+ if test -z "$lt_cv_aix_libpath_"; then -+ lt_cv_aix_libpath_="/usr/lib:/lib" -+ fi -+ -+fi -+ -+ aix_libpath=$lt_cv_aix_libpath_ -+fi - - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, -@@ -9609,20 +10149,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. -- hardcode_libdir_flag_spec=' ' -- allow_undefined_flag=unsupported -- # Tell ltmain to make .lib files, not .a files. -- libext=lib -- # Tell ltmain to make .dll files, not .so files. -- shrext_cmds=".dll" -- # FIXME: Setting linknames here is a bad hack. -- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -- # The linker will automatically build a .lib file if we build a DLL. -- old_archive_from_new_cmds='true' -- # FIXME: Should let the user specify the lib program. 
-- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -- fix_srcfile_path='`cygpath -w "$srcfile"`' -- enable_shared_with_static_runtimes=yes -+ case $cc_basename in -+ cl*) -+ # Native MSVC -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ always_export_symbols=yes -+ file_list_spec='@' -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' -+ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then -+ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; -+ else -+ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; -+ fi~ -+ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ -+ linknames=' -+ # The linker will not automatically build a static lib if we build a DLL. -+ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' -+ enable_shared_with_static_runtimes=yes -+ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' -+ # Don't use ranlib -+ old_postinstall_cmds='chmod 644 $oldlib' -+ postlink_cmds='lt_outputfile="@OUTPUT@"~ -+ lt_tool_outputfile="@TOOL_OUTPUT@"~ -+ case $lt_outputfile in -+ *.exe|*.EXE) ;; -+ *) -+ lt_outputfile="$lt_outputfile.exe" -+ lt_tool_outputfile="$lt_tool_outputfile.exe" -+ ;; -+ esac~ -+ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then -+ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; -+ $RM "$lt_outputfile.manifest"; -+ fi' -+ ;; -+ *) -+ # Assume MSVC wrapper -+ hardcode_libdir_flag_spec=' ' -+ allow_undefined_flag=unsupported -+ # Tell ltmain to make .lib files, not .a files. -+ libext=lib -+ # Tell ltmain to make .dll files, not .so files. -+ shrext_cmds=".dll" -+ # FIXME: Setting linknames here is a bad hack. -+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' -+ # The linker will automatically build a .lib file if we build a DLL. -+ old_archive_from_new_cmds='true' -+ # FIXME: Should let the user specify the lib program. -+ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' -+ enable_shared_with_static_runtimes=yes -+ ;; -+ esac - ;; - - darwin* | rhapsody*) -@@ -9683,7 +10266,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
- freebsd* | dragonfly*) -- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no -@@ -9691,7 +10274,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux9*) - if test "$GCC" = yes; then -- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' -+ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' - fi -@@ -9707,7 +10290,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - - hpux10*) - if test "$GCC" = yes && test "$with_gnu_ld" = no; then -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi -@@ -9731,10 +10314,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi - archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) -- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else -@@ -9813,23 +10396,36 @@ fi - - irix5* | irix6* | nonstopux*) - if test "$GCC" = yes; then -- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. -- save_LDFLAGS="$LDFLAGS" -- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -- cat confdefs.h - <<_ACEOF >conftest.$ac_ext -+ # This should be the same for all languages, so no per-tag cache variable. -+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -+$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } -+if ${lt_cv_irix_exported_symbol+:} false; then : -+ $as_echo_n "(cached) " >&6 -+else -+ save_LDFLAGS="$LDFLAGS" -+ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" -+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext - /* end confdefs.h. */ --int foo(void) {} -+int foo (void) { return 0; } - _ACEOF - if ac_fn_c_try_link "$LINENO"; then : -- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -- -+ lt_cv_irix_exported_symbol=yes -+else -+ lt_cv_irix_exported_symbol=no - fi - rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -- LDFLAGS="$save_LDFLAGS" -+ LDFLAGS="$save_LDFLAGS" -+fi -+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -+$as_echo "$lt_cv_irix_exported_symbol" >&6; } -+ if test "$lt_cv_irix_exported_symbol" = yes; then -+ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' -+ fi - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' -@@ -9914,7 +10510,7 @@ rm -f core conftest.err conftest.$ac_objext \ - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test "$GCC" = yes; then - allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' -- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' -+ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' -@@ -9933,9 +10529,9 @@ rm -f core conftest.err conftest.$ac_objext \ - no_undefined_flag=' -z defs' - if test "$GCC" = yes; then - wlarc='${wl}' -- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' -+ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ -- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' -+ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) -@@ -10511,8 +11107,9 
@@ cygwin* | mingw* | pw32* | cegcc*) - need_version=no - need_lib_prefix=no - -- case $GCC,$host_os in -- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) -+ case $GCC,$cc_basename in -+ yes,*) -+ # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \${file}`~ -@@ -10545,13 +11142,71 @@ cygwin* | mingw* | pw32* | cegcc*) - library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' - ;; - esac -+ dynamic_linker='Win32 ld.exe' -+ ;; -+ -+ *,cl*) -+ # Native MSVC -+ libname_spec='$name' -+ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' -+ library_names_spec='${libname}.dll.lib' -+ -+ case $build_os in -+ mingw*) -+ sys_lib_search_path_spec= -+ lt_save_ifs=$IFS -+ IFS=';' -+ for lt_path in $LIB -+ do -+ IFS=$lt_save_ifs -+ # Let DOS variable expansion print the short 8.3 style file name. -+ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` -+ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" -+ done -+ IFS=$lt_save_ifs -+ # Convert to MSYS style. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` -+ ;; -+ cygwin*) -+ # Convert to unix form, then to dos form, then back to unix form -+ # but this time dos style (no spaces!) so that the unix form looks -+ # like /cygdrive/c/PROGRA~1:/cygdr... -+ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` -+ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` -+ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ ;; -+ *) -+ sys_lib_search_path_spec="$LIB" -+ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then -+ # It is most probably a Windows format PATH. -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` -+ else -+ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` -+ fi -+ # FIXME: find the short name or the path components, as spaces are -+ # common. (e.g. "Program Files" -> "PROGRA~1") -+ ;; -+ esac -+ -+ # DLL is installed to $(libdir)/../bin by postinstall_cmds -+ postinstall_cmds='base_file=`basename \${file}`~ -+ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ -+ dldir=$destdir/`dirname \$dlpath`~ -+ test -d \$dldir || mkdir -p \$dldir~ -+ $install_prog $dir/$dlname \$dldir/$dlname' -+ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ -+ dlpath=$dir/\$dldll~ -+ $RM \$dlpath' -+ shlibpath_overrides_runpath=yes -+ dynamic_linker='Win32 link.exe' - ;; - - *) -+ # Assume MSVC wrapper - library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' -+ dynamic_linker='Win32 ld.exe' - ;; - esac -- dynamic_linker='Win32 ld.exe' - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; -@@ -10643,7 +11298,7 @@ haiku*) - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=yes -- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' -+ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -@@ -11439,7 +12094,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11442 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11483,10 +12138,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -11545,7 +12200,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11548 "configure" -+#line $LINENO "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -11589,10 +12244,10 @@ else - /* When -fvisbility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ - #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) --void fnord () __attribute__((visibility("default"))); -+int fnord () __attribute__((visibility("default"))); - #endif - --void fnord () { int i=42; } -+int fnord () { return 42; } - int main () - { - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); -@@ -13756,13 +14411,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' - lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' - lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' - lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -+lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' - reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' - reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' - OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' - deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' - file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -+file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -+want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -+DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -+sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' - AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' - AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -+archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' - STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' - RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' - old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -@@ 
-13777,14 +14439,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de - lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -+nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -+lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' - objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' - MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' --lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -+lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' - lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' - lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' - need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -+MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' - DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' - NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' - LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -@@ -13817,12 +14482,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q - hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' - inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' - link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' --fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' - always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' - export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' - exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' - include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' - prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -+postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' - file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' - variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' - need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -@@ -13877,8 +14542,13 @@ reload_flag \ - OBJDUMP \ - deplibs_check_method \ - file_magic_cmd \ -+file_magic_glob \ -+want_nocaseglob \ -+DLLTOOL \ -+sharedlib_from_linklib_cmd \ - AR \ - AR_FLAGS \ -+archiver_list_spec \ - STRIP \ - RANLIB \ - CC \ -@@ -13888,12 +14558,14 @@ lt_cv_sys_global_symbol_pipe \ - lt_cv_sys_global_symbol_to_cdecl \ - lt_cv_sys_global_symbol_to_c_name_address \ - lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -+nm_file_list_spec \ - lt_prog_compiler_no_builtin_flag \ --lt_prog_compiler_wl \ - lt_prog_compiler_pic \ -+lt_prog_compiler_wl \ - lt_prog_compiler_static \ - 
lt_cv_prog_compiler_c_o \ - need_locks \ -+MANIFEST_TOOL \ - DSYMUTIL \ - NMEDIT \ - LIPO \ -@@ -13909,7 +14581,6 @@ no_undefined_flag \ - hardcode_libdir_flag_spec \ - hardcode_libdir_flag_spec_ld \ - hardcode_libdir_separator \ --fix_srcfile_path \ - exclude_expsyms \ - include_expsyms \ - file_list_spec \ -@@ -13945,6 +14616,7 @@ module_cmds \ - module_expsym_cmds \ - export_symbols_cmds \ - prelink_cmds \ -+postlink_cmds \ - postinstall_cmds \ - postuninstall_cmds \ - finish_cmds \ -@@ -14710,7 +15382,8 @@ $as_echo X"$file" | - # NOTE: Changes made to this file will be lost: look at ltmain.sh. - # - # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, --# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, -+# Inc. - # Written by Gordon Matzigkeit, 1996 - # - # This file is part of GNU Libtool. -@@ -14813,19 +15486,42 @@ SP2NL=$lt_lt_SP2NL - # turn newlines into spaces. - NL2SP=$lt_lt_NL2SP - -+# convert \$build file names to \$host format. -+to_host_file_cmd=$lt_cv_to_host_file_cmd -+ -+# convert \$build files to toolchain format. -+to_tool_file_cmd=$lt_cv_to_tool_file_cmd -+ - # An object symbol dumper. - OBJDUMP=$lt_OBJDUMP - - # Method to check whether dependent libraries are shared objects. - deplibs_check_method=$lt_deplibs_check_method - --# Command to use when deplibs_check_method == "file_magic". -+# Command to use when deplibs_check_method = "file_magic". - file_magic_cmd=$lt_file_magic_cmd - -+# How to find potential files when deplibs_check_method = "file_magic". -+file_magic_glob=$lt_file_magic_glob -+ -+# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -+want_nocaseglob=$lt_want_nocaseglob -+ -+# DLL creation program. -+DLLTOOL=$lt_DLLTOOL -+ -+# Command to associate shared and link libraries. -+sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd -+ - # The archiver. - AR=$lt_AR -+ -+# Flags to create an archive. - AR_FLAGS=$lt_AR_FLAGS - -+# How to feed a file listing to the archiver. -+archiver_list_spec=$lt_archiver_list_spec -+ - # A symbol stripping program. - STRIP=$lt_STRIP - -@@ -14855,6 +15551,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - # Transform the output of nm in a C name address pair when lib prefix is needed. - global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -+# Specify filename containing input files for \$NM. -+nm_file_list_spec=$lt_nm_file_list_spec -+ -+# The root where to search for dependent libraries,and in which our libraries should be installed. -+lt_sysroot=$lt_sysroot -+ - # The name of the directory that contains temporary libtool files. - objdir=$objdir - -@@ -14864,6 +15566,9 @@ MAGIC_CMD=$MAGIC_CMD - # Must we lock files when doing compilation? - need_locks=$lt_need_locks - -+# Manifest tool. -+MANIFEST_TOOL=$lt_MANIFEST_TOOL -+ - # Tool to manipulate archived DWARF debug symbol files on Mac OS X. - DSYMUTIL=$lt_DSYMUTIL - -@@ -14978,12 +15683,12 @@ with_gcc=$GCC - # Compiler flag to turn off builtin functions. - no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - --# How to pass a linker flag through the compiler. --wl=$lt_lt_prog_compiler_wl -- - # Additional compiler flags for building library objects. - pic_flag=$lt_lt_prog_compiler_pic - -+# How to pass a linker flag through the compiler. -+wl=$lt_lt_prog_compiler_wl -+ - # Compiler flag to prevent dynamic linking. 
- link_static_flag=$lt_lt_prog_compiler_static - -@@ -15070,9 +15775,6 @@ inherit_rpath=$inherit_rpath - # Whether libtool must link a program against all its dependency libraries. - link_all_deplibs=$link_all_deplibs - --# Fix the shell variable \$srcfile for the compiler. --fix_srcfile_path=$lt_fix_srcfile_path -- - # Set to "yes" if exported symbols are required. - always_export_symbols=$always_export_symbols - -@@ -15088,6 +15790,9 @@ include_expsyms=$lt_include_expsyms - # Commands necessary for linking programs (against libraries) with templates. - prelink_cmds=$lt_prelink_cmds - -+# Commands necessary for finishing linking programs. -+postlink_cmds=$lt_postlink_cmds -+ - # Specify filename containing input files. - file_list_spec=$lt_file_list_spec - -@@ -15120,210 +15825,169 @@ ltmain="$ac_aux_dir/ltmain.sh" - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? -- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- case $xsi_shell in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac --} -- --# func_basename file --func_basename () --{ -- func_basename_result="${1##*/}" --} -- --# func_dirname_and_basename file append nondir_replacement --# perform func_basename and func_dirname in a single function --# call: --# dirname: Compute the dirname of FILE. If nonempty, --# add APPEND to the result, otherwise set result --# to NONDIR_REPLACEMENT. --# value returned in "$func_dirname_result" --# basename: Compute filename of FILE. --# value retuned in "$func_basename_result" --# Implementation must be kept synchronized with func_dirname --# and func_basename. For efficiency, we do not delegate to --# those functions but instead duplicate the functionality here. --func_dirname_and_basename () --{ -- case ${1} in -- */*) func_dirname_result="${1%/*}${2}" ;; -- * ) func_dirname_result="${3}" ;; -- esac -- func_basename_result="${1##*/}" --} -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --func_stripname () --{ -- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are -- # positional parameters, so assign one to ordinary parameter first. -- func_stripname_result=${3} -- func_stripname_result=${func_stripname_result#"${1}"} -- func_stripname_result=${func_stripname_result%"${2}"} --} -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=${1%%=*} -- func_opt_split_arg=${1#*=} --} -- --# func_lo2o object --func_lo2o () --{ -- case ${1} in -- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; -- *) func_lo2o_result=${1} ;; -- esac --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=${1%.*}.lo --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=$(( $* )) --} -- --# func_len string --# STRING may not start with a hyphen. 
--func_len () --{ -- func_len_result=${#1} --} -- --_LT_EOF -- ;; -- *) # Bourne compatible functions. -- cat << \_LT_EOF >> "$cfgfile" -- --# func_dirname file append nondir_replacement --# Compute the dirname of FILE. If nonempty, add APPEND to the result, --# otherwise set result to NONDIR_REPLACEMENT. --func_dirname () --{ -- # Extract subdirectory from the argument. -- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` -- if test "X$func_dirname_result" = "X${1}"; then -- func_dirname_result="${3}" -- else -- func_dirname_result="$func_dirname_result${2}" -- fi --} -- --# func_basename file --func_basename () --{ -- func_basename_result=`$ECHO "${1}" | $SED "$basename"` --} -- -- --# func_stripname prefix suffix name --# strip PREFIX and SUFFIX off of NAME. --# PREFIX and SUFFIX must not contain globbing or regex special --# characters, hashes, percent signs, but SUFFIX may contain a leading --# dot (in which case that matches only a dot). --# func_strip_suffix prefix name --func_stripname () --{ -- case ${2} in -- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; -- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; -- esac --} -- --# sed scripts: --my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' --my_sed_long_arg='1s/^-[^=]*=//' -- --# func_opt_split --func_opt_split () --{ -- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` -- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` --} -- --# func_lo2o object --func_lo2o () --{ -- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` --} -- --# func_xform libobj-or-source --func_xform () --{ -- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` --} -- --# func_arith arithmetic-term... --func_arith () --{ -- func_arith_result=`expr "$@"` --} -- --# func_len string --# STRING may not start with a hyphen. --func_len () --{ -- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` --} -- --_LT_EOF --esac -- --case $lt_shell_append in -- yes) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1+=\$2" --} --_LT_EOF -- ;; -- *) -- cat << \_LT_EOF >> "$cfgfile" -- --# func_append var value --# Append VALUE to the end of shell variable VAR. --func_append () --{ -- eval "$1=\$$1\$2" --} -- --_LT_EOF -- ;; -- esac -- -- -- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ -- || (rm -f "$cfgfile"; exit 1) -- -- mv -f "$cfgfile" "$ofile" || -+ sed '$q' "$ltmain" >> "$cfgfile" \ -+ || (rm -f "$cfgfile"; exit 1) -+ -+ if test x"$xsi_shell" = xyes; then -+ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ -+func_dirname ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_basename ()$/,/^} # func_basename /c\ -+func_basename ()\ -+{\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ -+func_dirname_and_basename ()\ -+{\ -+\ case ${1} in\ -+\ */*) func_dirname_result="${1%/*}${2}" ;;\ -+\ * ) func_dirname_result="${3}" ;;\ -+\ esac\ -+\ func_basename_result="${1##*/}"\ -+} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ -+func_stripname ()\ -+{\ -+\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ -+\ # positional parameters, so assign one to ordinary parameter first.\ -+\ func_stripname_result=${3}\ -+\ func_stripname_result=${func_stripname_result#"${1}"}\ -+\ func_stripname_result=${func_stripname_result%"${2}"}\ -+} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ -+func_split_long_opt ()\ -+{\ -+\ func_split_long_opt_name=${1%%=*}\ -+\ func_split_long_opt_arg=${1#*=}\ -+} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ -+func_split_short_opt ()\ -+{\ -+\ func_split_short_opt_arg=${1#??}\ -+\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ -+} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ -+func_lo2o ()\ -+{\ -+\ case ${1} in\ -+\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ -+\ *) func_lo2o_result=${1} ;;\ -+\ esac\ -+} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_xform ()$/,/^} # func_xform /c\ -+func_xform ()\ -+{\ -+ func_xform_result=${1%.*}.lo\ -+} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_arith ()$/,/^} # func_arith /c\ -+func_arith ()\ -+{\ -+ func_arith_result=$(( $* ))\ -+} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_len ()$/,/^} # func_len /c\ -+func_len ()\ -+{\ -+ func_len_result=${#1}\ -+} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? 
|| _lt_function_replace_fail=: -+ -+fi -+ -+if test x"$lt_shell_append" = xyes; then -+ sed -e '/^func_append ()$/,/^} # func_append /c\ -+func_append ()\ -+{\ -+ eval "${1}+=\\${2}"\ -+} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ -+func_append_quoted ()\ -+{\ -+\ func_quote_for_eval "${2}"\ -+\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ -+} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+test 0 -eq $? || _lt_function_replace_fail=: -+ -+ -+ # Save a `func_append' function call where possible by direct use of '+=' -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+else -+ # Save a `func_append' function call even when '+=' is not available -+ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ -+ && mv -f "$cfgfile.tmp" "$cfgfile" \ -+ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") -+ test 0 -eq $? || _lt_function_replace_fail=: -+fi -+ -+if test x"$_lt_function_replace_fail" = x":"; then -+ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 -+$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} -+fi -+ -+ -+ mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - --- -2.26.0 - diff --git a/poky/meta/recipes-devtools/binutils/binutils/0007-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch b/poky/meta/recipes-devtools/binutils/binutils/0007-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch new file mode 100644 index 000000000..7f8fc9a5c --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0007-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch @@ -0,0 +1,35 @@ +From 2b81508b9af76db292cd756432b03035cb8157e0 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 2 Mar 2015 01:39:01 +0000 +Subject: [PATCH 07/17] don't let the distro compiler point to the wrong + installation location + +Thanks to RP for helping find the source code causing the issue. + +2010/08/13 +Nitin A Kamble + +Upstream-Status: Inappropriate [embedded specific] + +Signed-off-by: Khem Raj +--- + libiberty/Makefile.in | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/libiberty/Makefile.in b/libiberty/Makefile.in +index 895f701bcd0..97a53f15bb6 100644 +--- a/libiberty/Makefile.in ++++ b/libiberty/Makefile.in +@@ -367,7 +367,8 @@ install-strip: install + # multilib-specific flags, it's overridden by FLAGS_TO_PASS from the + # default multilib, so we have to take CFLAGS into account as well, + # since it will be passed the multilib flags. 
+-MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory` ++#MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory` ++MULTIOSDIR = "" + install_to_libdir: all + if test -n "${target_header_dir}"; then \ + ${mkinstalldirs} $(DESTDIR)$(libdir)/$(MULTIOSDIR); \ +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0008-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch b/poky/meta/recipes-devtools/binutils/binutils/0008-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch deleted file mode 100644 index b0e94ab1c..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0008-don-t-let-the-distro-compiler-point-to-the-wrong-ins.patch +++ /dev/null @@ -1,32 +0,0 @@ -From e36a4e05f900bbe6a8d744a93f3a407bc55c96f7 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 2 Mar 2015 01:39:01 +0000 -Subject: [PATCH] don't let the distro compiler point to the wrong installation - location - -Thanks to RP for helping find the source code causing the issue. - -2010/08/13 -Nitin A Kamble - -Upstream-Status: Inappropriate [embedded specific] - -Signed-off-by: Khem Raj ---- - libiberty/Makefile.in | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/libiberty/Makefile.in b/libiberty/Makefile.in -index fe738d0db4..27d818f253 100644 ---- a/libiberty/Makefile.in -+++ b/libiberty/Makefile.in -@@ -365,7 +365,8 @@ install-strip: install - # multilib-specific flags, it's overridden by FLAGS_TO_PASS from the - # default multilib, so we have to take CFLAGS into account as well, - # since it will be passed the multilib flags. --MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory` -+#MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory` -+MULTIOSDIR = "" - install_to_libdir: all - if test -n "${target_header_dir}"; then \ - ${mkinstalldirs} $(DESTDIR)$(libdir)/$(MULTIOSDIR); \ diff --git a/poky/meta/recipes-devtools/binutils/binutils/0008-warn-for-uses-of-system-directories-when-cross-linki.patch b/poky/meta/recipes-devtools/binutils/binutils/0008-warn-for-uses-of-system-directories-when-cross-linki.patch new file mode 100644 index 000000000..2356c6a60 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0008-warn-for-uses-of-system-directories-when-cross-linki.patch @@ -0,0 +1,287 @@ +From 413075afbdb16e7cc05511682ca9e3c880acb5a7 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 15 Jan 2016 06:31:09 +0000 +Subject: [PATCH 08/17] warn for uses of system directories when cross linking + +2008-07-02 Joseph Myers + + ld/ + * ld.h (args_type): Add error_poison_system_directories. + * ld.texinfo (--error-poison-system-directories): Document. + * ldfile.c (ldfile_add_library_path): Check + command_line.error_poison_system_directories. + * ldmain.c (main): Initialize + command_line.error_poison_system_directories. + * lexsup.c (enum option_values): Add + OPTION_ERROR_POISON_SYSTEM_DIRECTORIES. + (ld_options): Add --error-poison-system-directories. + (parse_args): Handle new option. + +2007-06-13 Joseph Myers + + ld/ + * config.in: Regenerate. + * ld.h (args_type): Add poison_system_directories. + * ld.texinfo (--no-poison-system-directories): Document. + * ldfile.c (ldfile_add_library_path): Check + command_line.poison_system_directories. + * ldmain.c (main): Initialize + command_line.poison_system_directories. + * lexsup.c (enum option_values): Add + OPTION_NO_POISON_SYSTEM_DIRECTORIES. + (ld_options): Add --no-poison-system-directories. + (parse_args): Handle new option. 
+ +2007-04-20 Joseph Myers + + Merge from Sourcery G++ binutils 2.17: + + 2007-03-20 Joseph Myers + Based on patch by Mark Hatle . + ld/ + * configure.in (--enable-poison-system-directories): New option. + * configure, config.in: Regenerate. + * ldfile.c (ldfile_add_library_path): If + ENABLE_POISON_SYSTEM_DIRECTORIES defined, warn for use of /lib, + /usr/lib, /usr/local/lib or /usr/X11R6/lib. + +Upstream-Status: Pending + +Signed-off-by: Mark Hatle +Signed-off-by: Scott Garman +Signed-off-by: Khem Raj +--- + ld/config.in | 3 +++ + ld/configure | 16 ++++++++++++++++ + ld/configure.ac | 10 ++++++++++ + ld/ld.h | 8 ++++++++ + ld/ld.texi | 12 ++++++++++++ + ld/ldfile.c | 17 +++++++++++++++++ + ld/ldlex.h | 5 +++++ + ld/ldmain.c | 2 ++ + ld/lexsup.c | 27 +++++++++++++++++++++++++++ + 9 files changed, 100 insertions(+) + +diff --git a/ld/config.in b/ld/config.in +index f1712107367..308e0173c16 100644 +--- a/ld/config.in ++++ b/ld/config.in +@@ -40,6 +40,9 @@ + language is requested. */ + #undef ENABLE_NLS + ++/* Define to warn for use of native system library directories */ ++#undef ENABLE_POISON_SYSTEM_DIRECTORIES ++ + /* Additional extension a shared object might have. */ + #undef EXTRA_SHLIB_EXTENSION + +diff --git a/ld/configure b/ld/configure +index f08ce9969ea..1c872c0db5f 100755 +--- a/ld/configure ++++ b/ld/configure +@@ -826,6 +826,7 @@ with_lib_path + enable_targets + enable_64_bit_bfd + with_sysroot ++enable_poison_system_directories + enable_gold + enable_got + enable_compressed_debug_sections +@@ -1493,6 +1494,8 @@ Optional Features: + --disable-largefile omit support for large files + --enable-targets alternative target configurations + --enable-64-bit-bfd 64-bit support (on hosts with narrower word sizes) ++ --enable-poison-system-directories ++ warn for use of native system library directories + --enable-gold[=ARG] build gold [ARG={default,yes,no}] + --enable-got= GOT handling scheme (target, single, negative, + multigot) +@@ -15793,6 +15796,19 @@ fi + + + ++# Check whether --enable-poison-system-directories was given. ++if test "${enable_poison_system_directories+set}" = set; then : ++ enableval=$enable_poison_system_directories; ++else ++ enable_poison_system_directories=no ++fi ++ ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ ++$as_echo "#define ENABLE_POISON_SYSTEM_DIRECTORIES 1" >>confdefs.h ++ ++fi ++ + # Check whether --enable-gold was given. + if test "${enable_gold+set}" = set; then : + enableval=$enable_gold; case "${enableval}" in +diff --git a/ld/configure.ac b/ld/configure.ac +index 5a4938afdb0..dbbbde74b04 100644 +--- a/ld/configure.ac ++++ b/ld/configure.ac +@@ -94,6 +94,16 @@ AC_SUBST(use_sysroot) + AC_SUBST(TARGET_SYSTEM_ROOT) + AC_SUBST(TARGET_SYSTEM_ROOT_DEFINE) + ++AC_ARG_ENABLE([poison-system-directories], ++ AS_HELP_STRING([--enable-poison-system-directories], ++ [warn for use of native system library directories]),, ++ [enable_poison_system_directories=no]) ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES], ++ [1], ++ [Define to warn for use of native system library directories]) ++fi ++ + dnl Use --enable-gold to decide if this linker should be the default. + dnl "install_as_default" is set to false if gold is the default linker. + dnl "installed_linker" is the installed BFD linker name. +diff --git a/ld/ld.h b/ld/ld.h +index 1790dc81a66..73f832eb169 100644 +--- a/ld/ld.h ++++ b/ld/ld.h +@@ -166,6 +166,14 @@ typedef struct + in the linker script. 
*/ + bfd_boolean force_group_allocation; + ++ /* If TRUE (the default) warn for uses of system directories when ++ cross linking. */ ++ bfd_boolean poison_system_directories; ++ ++ /* If TRUE (default FALSE) give an error for uses of system ++ directories when cross linking instead of a warning. */ ++ bfd_boolean error_poison_system_directories; ++ + /* Big or little endian as set on command line. */ + enum endian_enum endian; + +diff --git a/ld/ld.texi b/ld/ld.texi +index 2a93e9456ac..3eeb70607fd 100644 +--- a/ld/ld.texi ++++ b/ld/ld.texi +@@ -2655,6 +2655,18 @@ string identifying the original linked file does not change. + + Passing @code{none} for @var{style} disables the setting from any + @code{--build-id} options earlier on the command line. ++ ++@kindex --no-poison-system-directories ++@item --no-poison-system-directories ++Do not warn for @option{-L} options using system directories such as ++@file{/usr/lib} when cross linking. This option is intended for use ++in chroot environments when such directories contain the correct ++libraries for the target system rather than the host. ++ ++@kindex --error-poison-system-directories ++@item --error-poison-system-directories ++Give an error instead of a warning for @option{-L} options using ++system directories when cross linking. + @end table + + @c man end +diff --git a/ld/ldfile.c b/ld/ldfile.c +index e39170b5d94..fadc248a140 100644 +--- a/ld/ldfile.c ++++ b/ld/ldfile.c +@@ -117,6 +117,23 @@ ldfile_add_library_path (const char *name, bfd_boolean cmdline) + new_dirs->name = concat (ld_sysroot, name + strlen ("$SYSROOT"), (const char *) NULL); + else + new_dirs->name = xstrdup (name); ++ ++#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES ++ if (command_line.poison_system_directories ++ && ((!strncmp (name, "/lib", 4)) ++ || (!strncmp (name, "/usr/lib", 8)) ++ || (!strncmp (name, "/usr/local/lib", 14)) ++ || (!strncmp (name, "/usr/X11R6/lib", 14)))) ++ { ++ if (command_line.error_poison_system_directories) ++ einfo (_("%X%P: error: library search path \"%s\" is unsafe for " ++ "cross-compilation\n"), name); ++ else ++ einfo (_("%P: warning: library search path \"%s\" is unsafe for " ++ "cross-compilation\n"), name); ++ } ++#endif ++ + } + + /* Try to open a BFD for a lang_input_statement. */ +diff --git a/ld/ldlex.h b/ld/ldlex.h +index 5ea083ebeb3..941dc5f3dc8 100644 +--- a/ld/ldlex.h ++++ b/ld/ldlex.h +@@ -155,6 +155,11 @@ enum option_values + OPTION_NON_CONTIGUOUS_REGIONS, + OPTION_NON_CONTIGUOUS_REGIONS_WARNINGS, + OPTION_DEPENDENCY_FILE, ++ OPTION_CTF_VARIABLES, ++ OPTION_NO_CTF_VARIABLES, ++ OPTION_CTF_SHARE_TYPES, ++ OPTION_NO_POISON_SYSTEM_DIRECTORIES, ++ OPTION_ERROR_POISON_SYSTEM_DIRECTORIES, + }; + + /* The initial parser states. */ +diff --git a/ld/ldmain.c b/ld/ldmain.c +index f5c5a336320..516ba0360e2 100644 +--- a/ld/ldmain.c ++++ b/ld/ldmain.c +@@ -322,6 +322,8 @@ main (int argc, char **argv) + command_line.warn_mismatch = TRUE; + command_line.warn_search_mismatch = TRUE; + command_line.check_section_addresses = -1; ++ command_line.poison_system_directories = TRUE; ++ command_line.error_poison_system_directories = FALSE; + + /* We initialize DEMANGLING based on the environment variable + COLLECT_NO_DEMANGLE. 
The gcc collect2 program will demangle the +diff --git a/ld/lexsup.c b/ld/lexsup.c +index 58c6c078325..879e7bb7658 100644 +--- a/ld/lexsup.c ++++ b/ld/lexsup.c +@@ -572,6 +572,26 @@ static const struct ld_option ld_options[] = + { {"no-print-map-discarded", no_argument, NULL, OPTION_NO_PRINT_MAP_DISCARDED}, + '\0', NULL, N_("Do not show discarded sections in map file output"), + TWO_DASHES }, ++ { {"ctf-variables", no_argument, NULL, OPTION_CTF_VARIABLES}, ++ '\0', NULL, N_("Emit names and types of static variables in CTF"), ++ TWO_DASHES }, ++ { {"no-ctf-variables", no_argument, NULL, OPTION_NO_CTF_VARIABLES}, ++ '\0', NULL, N_("Do not emit names and types of static variables in CTF"), ++ TWO_DASHES }, ++ { {"ctf-share-types=", required_argument, NULL, ++ OPTION_CTF_SHARE_TYPES}, ++ '\0', NULL, N_("How to share CTF types between translation units.\n" ++ " is: share-unconflicted (default),\n" ++ " share-duplicated"), ++ TWO_DASHES }, ++ { {"no-poison-system-directories", no_argument, NULL, ++ OPTION_NO_POISON_SYSTEM_DIRECTORIES}, ++ '\0', NULL, N_("Do not warn for -L options using system directories"), ++ TWO_DASHES }, ++ { {"error-poison-system-directories", no_argument, NULL, ++ + OPTION_ERROR_POISON_SYSTEM_DIRECTORIES}, ++ '\0', NULL, N_("Give an error for -L options using system directories"), ++ TWO_DASHES }, + }; + + #define OPTION_COUNT ARRAY_SIZE (ld_options) +@@ -1632,6 +1652,13 @@ parse_args (unsigned argc, char **argv) + + case OPTION_PRINT_MAP_DISCARDED: + config.print_map_discarded = TRUE; ++ ++ case OPTION_NO_POISON_SYSTEM_DIRECTORIES: ++ command_line.poison_system_directories = FALSE; ++ break; ++ ++ case OPTION_ERROR_POISON_SYSTEM_DIRECTORIES: ++ command_line.error_poison_system_directories = TRUE; + break; + + case OPTION_DEPENDENCY_FILE: +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0009-Change-default-emulation-for-mips64-linux.patch b/poky/meta/recipes-devtools/binutils/binutils/0009-Change-default-emulation-for-mips64-linux.patch new file mode 100644 index 000000000..fb5276e18 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0009-Change-default-emulation-for-mips64-linux.patch @@ -0,0 +1,60 @@ +From 13a67e9040c01abd284fe506471e0eab668ee3dc Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 2 Mar 2015 01:44:14 +0000 +Subject: [PATCH 09/17] Change default emulation for mips64*-*-linux + +we change the default emulations to be N64 instead of N32 + +Upstream-Status: Inappropriate [ OE configuration Specific] + +Signed-off-by: Khem Raj +--- + bfd/config.bfd | 8 ++++---- + ld/configure.tgt | 8 ++++---- + 2 files changed, 8 insertions(+), 8 deletions(-) + +diff --git a/bfd/config.bfd b/bfd/config.bfd +index 14523caf0c5..e5233cd1f7e 100644 +--- a/bfd/config.bfd ++++ b/bfd/config.bfd +@@ -894,12 +894,12 @@ case "${targ}" in + targ_selvecs="mips_elf32_le_vec mips_elf64_be_vec mips_elf64_le_vec mips_ecoff_be_vec mips_ecoff_le_vec" + ;; + mips64*el-*-linux*) +- targ_defvec=mips_elf32_ntrad_le_vec +- targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_trad_le_vec mips_elf32_trad_be_vec mips_elf64_trad_le_vec mips_elf64_trad_be_vec" ++ targ_defvec=mips_elf64_trad_le_vec ++ targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_ntrad_le_vec mips_elf32_trad_le_vec mips_elf32_trad_be_vec mips_elf64_trad_be_vec" + ;; + mips64*-*-linux*) +- targ_defvec=mips_elf32_ntrad_be_vec +- targ_selvecs="mips_elf32_ntrad_le_vec mips_elf32_trad_be_vec mips_elf32_trad_le_vec mips_elf64_trad_be_vec mips_elf64_trad_le_vec" ++ targ_defvec=mips_elf64_trad_be_vec ++ 
targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_ntrad_be_vec mips_elf32_trad_be_vec mips_elf32_trad_le_vec mips_elf64_trad_le_vec" + ;; + mips*el-*-linux*) + targ_defvec=mips_elf32_trad_le_vec +diff --git a/ld/configure.tgt b/ld/configure.tgt +index 87c7d9a4cad..9b4bf2ca964 100644 +--- a/ld/configure.tgt ++++ b/ld/configure.tgt +@@ -531,12 +531,12 @@ mips*-*-vxworks*) targ_emul=elf32ebmipvxworks + ;; + mips*-*-windiss) targ_emul=elf32mipswindiss + ;; +-mips64*el-*-linux-*) targ_emul=elf32ltsmipn32 +- targ_extra_emuls="elf32btsmipn32 elf32ltsmip elf32btsmip elf64ltsmip elf64btsmip" ++mips64*el-*-linux-*) targ_emul=elf64ltsmip ++ targ_extra_emuls="elf32btsmipn32 elf32ltsmipn32 elf32ltsmip elf32btsmip elf64btsmip" + targ_extra_libpath=$targ_extra_emuls + ;; +-mips64*-*-linux-*) targ_emul=elf32btsmipn32 +- targ_extra_emuls="elf32ltsmipn32 elf32btsmip elf32ltsmip elf64btsmip elf64ltsmip" ++mips64*-*-linux-*) targ_emul=elf64btsmip ++ targ_extra_emuls="elf32btsmipn32 elf32ltsmipn32 elf32btsmip elf32ltsmip elf64ltsmip" + targ_extra_libpath=$targ_extra_emuls + ;; + mips*el-*-linux-*) targ_emul=elf32ltsmip +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch b/poky/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch deleted file mode 100644 index 11a8110d4..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0009-warn-for-uses-of-system-directories-when-cross-linki.patch +++ /dev/null @@ -1,269 +0,0 @@ -From 7b24f81e04c9d00d96de7dbd250beade6d2c6e44 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 15 Jan 2016 06:31:09 +0000 -Subject: [PATCH] warn for uses of system directories when cross linking - -2008-07-02 Joseph Myers - - ld/ - * ld.h (args_type): Add error_poison_system_directories. - * ld.texinfo (--error-poison-system-directories): Document. - * ldfile.c (ldfile_add_library_path): Check - command_line.error_poison_system_directories. - * ldmain.c (main): Initialize - command_line.error_poison_system_directories. - * lexsup.c (enum option_values): Add - OPTION_ERROR_POISON_SYSTEM_DIRECTORIES. - (ld_options): Add --error-poison-system-directories. - (parse_args): Handle new option. - -2007-06-13 Joseph Myers - - ld/ - * config.in: Regenerate. - * ld.h (args_type): Add poison_system_directories. - * ld.texinfo (--no-poison-system-directories): Document. - * ldfile.c (ldfile_add_library_path): Check - command_line.poison_system_directories. - * ldmain.c (main): Initialize - command_line.poison_system_directories. - * lexsup.c (enum option_values): Add - OPTION_NO_POISON_SYSTEM_DIRECTORIES. - (ld_options): Add --no-poison-system-directories. - (parse_args): Handle new option. - -2007-04-20 Joseph Myers - - Merge from Sourcery G++ binutils 2.17: - - 2007-03-20 Joseph Myers - Based on patch by Mark Hatle . - ld/ - * configure.in (--enable-poison-system-directories): New option. - * configure, config.in: Regenerate. - * ldfile.c (ldfile_add_library_path): If - ENABLE_POISON_SYSTEM_DIRECTORIES defined, warn for use of /lib, - /usr/lib, /usr/local/lib or /usr/X11R6/lib. 
- -Upstream-Status: Pending - -Signed-off-by: Mark Hatle -Signed-off-by: Scott Garman -Signed-off-by: Khem Raj ---- - ld/config.in | 3 +++ - ld/configure | 16 ++++++++++++++++ - ld/configure.ac | 10 ++++++++++ - ld/ld.h | 8 ++++++++ - ld/ld.texi | 12 ++++++++++++ - ld/ldfile.c | 17 +++++++++++++++++ - ld/ldlex.h | 2 ++ - ld/ldmain.c | 2 ++ - ld/lexsup.c | 15 +++++++++++++++ - 9 files changed, 85 insertions(+) - -diff --git a/ld/config.in b/ld/config.in -index d93c9b0830..5da2742bea 100644 ---- a/ld/config.in -+++ b/ld/config.in -@@ -31,6 +31,9 @@ - language is requested. */ - #undef ENABLE_NLS - -+/* Define to warn for use of native system library directories */ -+#undef ENABLE_POISON_SYSTEM_DIRECTORIES -+ - /* Additional extension a shared object might have. */ - #undef EXTRA_SHLIB_EXTENSION - -diff --git a/ld/configure b/ld/configure -index 811134a503..f8c17c19ae 100755 ---- a/ld/configure -+++ b/ld/configure -@@ -826,6 +826,7 @@ with_lib_path - enable_targets - enable_64_bit_bfd - with_sysroot -+enable_poison_system_directories - enable_gold - enable_got - enable_compressed_debug_sections -@@ -1491,6 +1492,8 @@ Optional Features: - --disable-largefile omit support for large files - --enable-targets alternative target configurations - --enable-64-bit-bfd 64-bit support (on hosts with narrower word sizes) -+ --enable-poison-system-directories -+ warn for use of native system library directories - --enable-gold[=ARG] build gold [ARG={default,yes,no}] - --enable-got= GOT handling scheme (target, single, negative, - multigot) -@@ -15788,6 +15791,19 @@ fi - - - -+# Check whether --enable-poison-system-directories was given. -+if test "${enable_poison_system_directories+set}" = set; then : -+ enableval=$enable_poison_system_directories; -+else -+ enable_poison_system_directories=no -+fi -+ -+if test "x${enable_poison_system_directories}" = "xyes"; then -+ -+$as_echo "#define ENABLE_POISON_SYSTEM_DIRECTORIES 1" >>confdefs.h -+ -+fi -+ - # Check whether --enable-gold was given. - if test "${enable_gold+set}" = set; then : - enableval=$enable_gold; case "${enableval}" in -diff --git a/ld/configure.ac b/ld/configure.ac -index b5e849d84a..22e022ec03 100644 ---- a/ld/configure.ac -+++ b/ld/configure.ac -@@ -94,6 +94,16 @@ AC_SUBST(use_sysroot) - AC_SUBST(TARGET_SYSTEM_ROOT) - AC_SUBST(TARGET_SYSTEM_ROOT_DEFINE) - -+AC_ARG_ENABLE([poison-system-directories], -+ AS_HELP_STRING([--enable-poison-system-directories], -+ [warn for use of native system library directories]),, -+ [enable_poison_system_directories=no]) -+if test "x${enable_poison_system_directories}" = "xyes"; then -+ AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES], -+ [1], -+ [Define to warn for use of native system library directories]) -+fi -+ - dnl Use --enable-gold to decide if this linker should be the default. - dnl "install_as_default" is set to false if gold is the default linker. - dnl "installed_linker" is the installed BFD linker name. -diff --git a/ld/ld.h b/ld/ld.h -index 71fd781267..5c7843100b 100644 ---- a/ld/ld.h -+++ b/ld/ld.h -@@ -166,6 +166,14 @@ typedef struct - in the linker script. */ - bfd_boolean force_group_allocation; - -+ /* If TRUE (the default) warn for uses of system directories when -+ cross linking. */ -+ bfd_boolean poison_system_directories; -+ -+ /* If TRUE (default FALSE) give an error for uses of system -+ directories when cross linking instead of a warning. */ -+ bfd_boolean error_poison_system_directories; -+ - /* Big or little endian as set on command line. 
*/ - enum endian_enum endian; - -diff --git a/ld/ld.texi b/ld/ld.texi -index eb7bcb9933..3c73d445a0 100644 ---- a/ld/ld.texi -+++ b/ld/ld.texi -@@ -2551,6 +2551,18 @@ string identifying the original linked file does not change. - - Passing @code{none} for @var{style} disables the setting from any - @code{--build-id} options earlier on the command line. -+ -+@kindex --no-poison-system-directories -+@item --no-poison-system-directories -+Do not warn for @option{-L} options using system directories such as -+@file{/usr/lib} when cross linking. This option is intended for use -+in chroot environments when such directories contain the correct -+libraries for the target system rather than the host. -+ -+@kindex --error-poison-system-directories -+@item --error-poison-system-directories -+Give an error instead of a warning for @option{-L} options using -+system directories when cross linking. - @end table - - @c man end -diff --git a/ld/ldfile.c b/ld/ldfile.c -index 411f7ddf97..17db16c2cc 100644 ---- a/ld/ldfile.c -+++ b/ld/ldfile.c -@@ -117,6 +117,23 @@ ldfile_add_library_path (const char *name, bfd_boolean cmdline) - new_dirs->name = concat (ld_sysroot, name + strlen ("$SYSROOT"), (const char *) NULL); - else - new_dirs->name = xstrdup (name); -+ -+#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES -+ if (command_line.poison_system_directories -+ && ((!strncmp (name, "/lib", 4)) -+ || (!strncmp (name, "/usr/lib", 8)) -+ || (!strncmp (name, "/usr/local/lib", 14)) -+ || (!strncmp (name, "/usr/X11R6/lib", 14)))) -+ { -+ if (command_line.error_poison_system_directories) -+ einfo (_("%X%P: error: library search path \"%s\" is unsafe for " -+ "cross-compilation\n"), name); -+ else -+ einfo (_("%P: warning: library search path \"%s\" is unsafe for " -+ "cross-compilation\n"), name); -+ } -+#endif -+ - } - - /* Try to open a BFD for a lang_input_statement. */ -diff --git a/ld/ldlex.h b/ld/ldlex.h -index 5287f19a7f..55096e4fc9 100644 ---- a/ld/ldlex.h -+++ b/ld/ldlex.h -@@ -150,6 +150,8 @@ enum option_values - OPTION_FORCE_GROUP_ALLOCATION, - OPTION_PRINT_MAP_DISCARDED, - OPTION_NO_PRINT_MAP_DISCARDED, -+ OPTION_NO_POISON_SYSTEM_DIRECTORIES, -+ OPTION_ERROR_POISON_SYSTEM_DIRECTORIES, - }; - - /* The initial parser states. */ -diff --git a/ld/ldmain.c b/ld/ldmain.c -index da1ad17763..12d0b07d8a 100644 ---- a/ld/ldmain.c -+++ b/ld/ldmain.c -@@ -274,6 +274,8 @@ main (int argc, char **argv) - command_line.warn_mismatch = TRUE; - command_line.warn_search_mismatch = TRUE; - command_line.check_section_addresses = -1; -+ command_line.poison_system_directories = TRUE; -+ command_line.error_poison_system_directories = FALSE; - - /* We initialize DEMANGLING based on the environment variable - COLLECT_NO_DEMANGLE. 
The gcc collect2 program will demangle the -diff --git a/ld/lexsup.c b/ld/lexsup.c -index 3d15cc491d..0e8b4f2b7a 100644 ---- a/ld/lexsup.c -+++ b/ld/lexsup.c -@@ -550,6 +550,14 @@ static const struct ld_option ld_options[] = - { {"no-print-map-discarded", no_argument, NULL, OPTION_NO_PRINT_MAP_DISCARDED}, - '\0', NULL, N_("Do not show discarded sections in map file output"), - TWO_DASHES }, -+ { {"no-poison-system-directories", no_argument, NULL, -+ OPTION_NO_POISON_SYSTEM_DIRECTORIES}, -+ '\0', NULL, N_("Do not warn for -L options using system directories"), -+ TWO_DASHES }, -+ { {"error-poison-system-directories", no_argument, NULL, -+ + OPTION_ERROR_POISON_SYSTEM_DIRECTORIES}, -+ '\0', NULL, N_("Give an error for -L options using system directories"), -+ TWO_DASHES }, - }; - - #define OPTION_COUNT ARRAY_SIZE (ld_options) -@@ -1603,6 +1611,13 @@ parse_args (unsigned argc, char **argv) - - case OPTION_PRINT_MAP_DISCARDED: - config.print_map_discarded = TRUE; -+ -+ case OPTION_NO_POISON_SYSTEM_DIRECTORIES: -+ command_line.poison_system_directories = FALSE; -+ break; -+ -+ case OPTION_ERROR_POISON_SYSTEM_DIRECTORIES: -+ command_line.error_poison_system_directories = TRUE; - break; - } - } diff --git a/poky/meta/recipes-devtools/binutils/binutils/0010-Add-support-for-Netlogic-XLP.patch b/poky/meta/recipes-devtools/binutils/binutils/0010-Add-support-for-Netlogic-XLP.patch new file mode 100644 index 000000000..277db4ec5 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0010-Add-support-for-Netlogic-XLP.patch @@ -0,0 +1,409 @@ +From 69ab45c16f80f18fa78121f6e774750b9e9a200b Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Sun, 14 Feb 2016 17:06:19 +0000 +Subject: [PATCH 10/17] Add support for Netlogic XLP + +Patch From: Nebu Philips + +Using the mipsisa64r2nlm target, add support for XLP from +Netlogic. Also, update vendor name to NLM wherever applicable. + +Use 0x00000080 for INSN_XLP, the value 0x00000040 has already been +assigned to INSN_OCTEON3 + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +Signed-off-by: Baoshan Pang +Signed-off-by: Mark Hatle +--- + bfd/aoutx.h | 1 + + bfd/archures.c | 1 + + bfd/bfd-in2.h | 1 + + bfd/config.bfd | 5 +++++ + bfd/cpu-mips.c | 6 ++++-- + bfd/elfxx-mips.c | 8 ++++++++ + binutils/readelf.c | 1 + + gas/config/tc-mips.c | 4 +++- + gas/configure | 3 +++ + gas/configure.ac | 3 +++ + include/elf/mips.h | 1 + + include/opcode/mips.h | 6 ++++++ + ld/configure.tgt | 3 +++ + opcodes/mips-dis.c | 12 +++++------- + opcodes/mips-opc.c | 31 ++++++++++++++++++++----------- + 15 files changed, 65 insertions(+), 21 deletions(-) + +diff --git a/bfd/aoutx.h b/bfd/aoutx.h +index d352a1a3e44..2e2c3c1af86 100644 +--- a/bfd/aoutx.h ++++ b/bfd/aoutx.h +@@ -810,6 +810,7 @@ NAME (aout, machine_type) (enum bfd_architecture arch, + case bfd_mach_mipsisa64r6: + case bfd_mach_mips_sb1: + case bfd_mach_mips_xlr: ++ case bfd_mach_mips_xlp: + /* FIXME: These should be MIPS3, MIPS4, MIPS16, MIPS32, etc. */ + arch_flags = M_MIPS2; + break; +diff --git a/bfd/archures.c b/bfd/archures.c +index 551ec8732f0..2665b378623 100644 +--- a/bfd/archures.c ++++ b/bfd/archures.c +@@ -185,6 +185,7 @@ DESCRIPTION + .#define bfd_mach_mips_octeon3 6503 + .#define bfd_mach_mips_xlr 887682 {* decimal 'XLR'. *} + .#define bfd_mach_mips_interaptiv_mr2 736550 {* decimal 'IA2'. *} ++.#define bfd_mach_mips_xlp 887680 {* decimal 'XLP'. 
*} + .#define bfd_mach_mipsisa32 32 + .#define bfd_mach_mipsisa32r2 33 + .#define bfd_mach_mipsisa32r3 34 +diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h +index 35ef4d755bb..48db00af80b 100644 +--- a/bfd/bfd-in2.h ++++ b/bfd/bfd-in2.h +@@ -1585,6 +1585,7 @@ enum bfd_architecture + #define bfd_mach_mips_octeon3 6503 + #define bfd_mach_mips_xlr 887682 /* decimal 'XLR'. */ + #define bfd_mach_mips_interaptiv_mr2 736550 /* decimal 'IA2'. */ ++#define bfd_mach_mips_xlp 887680 /* decimal 'XLP'. */ + #define bfd_mach_mipsisa32 32 + #define bfd_mach_mipsisa32r2 33 + #define bfd_mach_mipsisa32r3 34 +diff --git a/bfd/config.bfd b/bfd/config.bfd +index e5233cd1f7e..2e4ae6bbdff 100644 +--- a/bfd/config.bfd ++++ b/bfd/config.bfd +@@ -877,6 +877,11 @@ case "${targ}" in + targ_defvec=mips_elf32_le_vec + targ_selvecs="mips_elf32_be_vec mips_elf64_be_vec mips_elf64_le_vec" + ;; ++ mipsisa64*-*-elf*) ++ targ_defvec=mips_elf32_trad_be_vec ++ targ_selvecs="mips_elf32_trad_le_vec mips_elf64_trad_be_vec mips_elf64_trad_le_vec" ++ want64=true ++ ;; + mips*-*-elf* | mips*-*-rtems* | mips*-*-windiss | mips*-*-none) + targ_defvec=mips_elf32_be_vec + targ_selvecs="mips_elf32_le_vec mips_elf64_be_vec mips_elf64_le_vec" +diff --git a/bfd/cpu-mips.c b/bfd/cpu-mips.c +index 802acb45f1e..fd9ec4c0ad4 100644 +--- a/bfd/cpu-mips.c ++++ b/bfd/cpu-mips.c +@@ -108,7 +108,8 @@ enum + I_mipsocteon3, + I_xlr, + I_interaptiv_mr2, +- I_micromips ++ I_micromips, ++ I_xlp + }; + + #define NN(index) (&arch_info_struct[(index) + 1]) +@@ -163,7 +164,8 @@ static const bfd_arch_info_type arch_info_struct[] = + N (64, 64, bfd_mach_mips_xlr, "mips:xlr", FALSE, NN(I_xlr)), + N (32, 32, bfd_mach_mips_interaptiv_mr2, "mips:interaptiv-mr2", FALSE, + NN(I_interaptiv_mr2)), +- N (64, 64, bfd_mach_mips_micromips, "mips:micromips", FALSE, NULL) ++ N (64, 64, bfd_mach_mips_micromips, "mips:micromips", FALSE, NN(I_micromips)), ++ N (64, 64, bfd_mach_mips_xlp, "mips:xlp", FALSE, NULL) + }; + + /* The default architecture is mips:3000, but with a machine number of +diff --git a/bfd/elfxx-mips.c b/bfd/elfxx-mips.c +index 160febec94c..4c9499cc2cf 100644 +--- a/bfd/elfxx-mips.c ++++ b/bfd/elfxx-mips.c +@@ -6982,6 +6982,9 @@ _bfd_elf_mips_mach (flagword flags) + case E_MIPS_MACH_IAMR2: + return bfd_mach_mips_interaptiv_mr2; + ++ case E_MIPS_MACH_XLP: ++ return bfd_mach_mips_xlp; ++ + default: + switch (flags & EF_MIPS_ARCH) + { +@@ -12356,6 +12359,10 @@ mips_set_isa_flags (bfd *abfd) + val = E_MIPS_ARCH_64R2 | E_MIPS_MACH_OCTEON2; + break; + ++ case bfd_mach_mips_xlp: ++ val = E_MIPS_ARCH_64R2 | E_MIPS_MACH_XLP; ++ break; ++ + case bfd_mach_mipsisa32: + val = E_MIPS_ARCH_32; + break; +@@ -14373,6 +14380,7 @@ static const struct mips_mach_extension mips_mach_extensions[] = + { bfd_mach_mips_gs264e, bfd_mach_mips_gs464e }, + { bfd_mach_mips_gs464e, bfd_mach_mips_gs464 }, + { bfd_mach_mips_gs464, bfd_mach_mipsisa64r2 }, ++ { bfd_mach_mips_xlp, bfd_mach_mipsisa64r2 }, + + /* MIPS64 extensions. 
*/ + { bfd_mach_mipsisa64r2, bfd_mach_mipsisa64 }, +diff --git a/binutils/readelf.c b/binutils/readelf.c +index 6057515a89b..ea119b0b254 100644 +--- a/binutils/readelf.c ++++ b/binutils/readelf.c +@@ -3483,6 +3483,7 @@ get_machine_flags (Filedata * filedata, unsigned e_flags, unsigned e_machine) + case E_MIPS_MACH_OCTEON3: strcat (buf, ", octeon3"); break; + case E_MIPS_MACH_XLR: strcat (buf, ", xlr"); break; + case E_MIPS_MACH_IAMR2: strcat (buf, ", interaptiv-mr2"); break; ++ case E_MIPS_MACH_XLP: strcat (buf, ", xlp"); break; + case 0: + /* We simply ignore the field in this case to avoid confusion: + MIPS ELF does not specify EF_MIPS_MACH, it is a GNU +diff --git a/gas/config/tc-mips.c b/gas/config/tc-mips.c +index 31acb77d78a..0ead168d51e 100644 +--- a/gas/config/tc-mips.c ++++ b/gas/config/tc-mips.c +@@ -568,6 +568,7 @@ static int mips_32bitmode = 0; + || mips_opts.arch == CPU_RM7000 \ + || mips_opts.arch == CPU_VR5500 \ + || mips_opts.micromips \ ++ || mips_opts.arch == CPU_XLP \ + ) + + /* Whether the processor uses hardware interlocks to protect reads +@@ -597,6 +598,7 @@ static int mips_32bitmode = 0; + && mips_opts.isa != ISA_MIPS3) \ + || mips_opts.arch == CPU_R4300 \ + || mips_opts.micromips \ ++ || mips_opts.arch == CPU_XLP \ + ) + + /* Whether the processor uses hardware interlocks to protect reads +@@ -20174,7 +20176,7 @@ static const struct mips_cpu_info mips_cpu_info_table[] = + /* Broadcom XLP. + XLP is mostly like XLR, with the prominent exception that it is + MIPS64R2 rather than MIPS64. */ +- { "xlp", 0, 0, ISA_MIPS64R2, CPU_XLR }, ++ { "xlp", 0, 0, ISA_MIPS64R2, CPU_XLP }, + + /* MIPS 64 Release 6. */ + { "i6400", 0, ASE_VIRT | ASE_MSA, ISA_MIPS64R6, CPU_MIPS64R6}, +diff --git a/gas/configure b/gas/configure +index 5bccfd9d1b7..d4b13e6fc8b 100755 +--- a/gas/configure ++++ b/gas/configure +@@ -12722,6 +12722,9 @@ _ACEOF + mipsisa64r6 | mipsisa64r6el) + mips_cpu=mips64r6 + ;; ++ mipsisa64r2nlm | mipsisa64r2nlmel) ++ mips_cpu=xlp ++ ;; + mipstx39 | mipstx39el) + mips_cpu=r3900 + ;; +diff --git a/gas/configure.ac b/gas/configure.ac +index b65108fecb2..ce1f2dad9bc 100644 +--- a/gas/configure.ac ++++ b/gas/configure.ac +@@ -325,6 +325,9 @@ changequote([,])dnl + mipsisa64r6 | mipsisa64r6el) + mips_cpu=mips64r6 + ;; ++ mipsisa64r2nlm | mipsisa64r2nlmel) ++ mips_cpu=xlp ++ ;; + mipstx39 | mipstx39el) + mips_cpu=r3900 + ;; +diff --git a/include/elf/mips.h b/include/elf/mips.h +index cc08ebd4318..bb518575ce1 100644 +--- a/include/elf/mips.h ++++ b/include/elf/mips.h +@@ -290,6 +290,7 @@ END_RELOC_NUMBERS (R_MIPS_maxext) + #define E_MIPS_MACH_SB1 0x008a0000 + #define E_MIPS_MACH_OCTEON 0x008b0000 + #define E_MIPS_MACH_XLR 0x008c0000 ++#define E_MIPS_MACH_XLP 0x008f0000 + #define E_MIPS_MACH_OCTEON2 0x008d0000 + #define E_MIPS_MACH_OCTEON3 0x008e0000 + #define E_MIPS_MACH_5400 0x00910000 +diff --git a/include/opcode/mips.h b/include/opcode/mips.h +index fd031f37588..a96a44df840 100644 +--- a/include/opcode/mips.h ++++ b/include/opcode/mips.h +@@ -1260,6 +1260,8 @@ static const unsigned int mips_isa_table[] = { + #define INSN_XLR 0x00000020 + /* Imagination interAptiv MR2. 
*/ + #define INSN_INTERAPTIV_MR2 0x04000000 ++/* Netlogic XlP instruction */ ++#define INSN_XLP 0x00000080 + + /* DSP ASE */ + #define ASE_DSP 0x00000001 +@@ -1384,6 +1386,7 @@ static const unsigned int mips_isa_table[] = { + #define CPU_OCTEON3 6503 + #define CPU_XLR 887682 /* decimal 'XLR' */ + #define CPU_INTERAPTIV_MR2 736550 /* decimal 'IA2' */ ++#define CPU_XLP 887680 /* decimal 'XLP' */ + + /* Return true if the given CPU is included in INSN_* mask MASK. */ + +@@ -1461,6 +1464,9 @@ cpu_is_member (int cpu, unsigned int mask) + return ((mask & INSN_ISA_MASK) == INSN_ISA32R6) + || ((mask & INSN_ISA_MASK) == INSN_ISA64R6); + ++ case CPU_XLP: ++ return (mask & INSN_XLP) != 0; ++ + default: + return FALSE; + } +diff --git a/ld/configure.tgt b/ld/configure.tgt +index 9b4bf2ca964..f6d7171dff7 100644 +--- a/ld/configure.tgt ++++ b/ld/configure.tgt +@@ -510,6 +510,9 @@ mips*-sde-elf* | mips*-mti-elf* | mips*-img-elf*) + targ_emul=elf32btsmip + targ_extra_emuls="elf32ltsmip elf32btsmipn32 elf64btsmip elf32ltsmipn32 elf64ltsmip" + ;; ++mipsisa64*-*-elf*) targ_emul=elf32btsmip ++ targ_extra_emuls="elf32ltsmip elf64btsmip elf64ltsmip" ++ ;; + mips64*el-ps2-elf*) targ_emul=elf32lr5900n32 + targ_extra_emuls="elf32lr5900" + targ_extra_libpath=$targ_extra_emuls +diff --git a/opcodes/mips-dis.c b/opcodes/mips-dis.c +index 755bbe294bd..ce22ef683a6 100644 +--- a/opcodes/mips-dis.c ++++ b/opcodes/mips-dis.c +@@ -674,13 +674,11 @@ const struct mips_arch_choice mips_arch_choices[] = + mips_cp0sel_names_xlr, ARRAY_SIZE (mips_cp0sel_names_xlr), + mips_cp1_names_mips3264, mips_hwr_names_numeric }, + +- /* XLP is mostly like XLR, with the prominent exception it is being +- MIPS64R2. */ +- { "xlp", 1, bfd_mach_mips_xlr, CPU_XLR, +- ISA_MIPS64R2 | INSN_XLR, 0, +- mips_cp0_names_xlr, +- mips_cp0sel_names_xlr, ARRAY_SIZE (mips_cp0sel_names_xlr), +- mips_cp1_names_mips3264, mips_hwr_names_numeric }, ++ { "xlp", 1, bfd_mach_mips_xlp, CPU_XLP, ++ ISA_MIPS64R2 | INSN_XLP, 0, ++ mips_cp0_names_mips3264r2, ++ mips_cp0sel_names_mips3264r2, ARRAY_SIZE (mips_cp0sel_names_mips3264r2), ++ mips_cp1_names_mips3264, mips_hwr_names_mips3264r2 }, + + /* This entry, mips16, is here only for ISA/processor selection; do + not print its name. */ +diff --git a/opcodes/mips-opc.c b/opcodes/mips-opc.c +index 5270aeefa80..d17dc78cd71 100644 +--- a/opcodes/mips-opc.c ++++ b/opcodes/mips-opc.c +@@ -328,6 +328,7 @@ decode_mips_operand (const char *p) + #define IOCT3 INSN_OCTEON3 + #define XLR INSN_XLR + #define IAMR2 INSN_INTERAPTIV_MR2 ++#define XLP INSN_XLP + #define IVIRT ASE_VIRT + #define IVIRT64 ASE_VIRT64 + +@@ -990,6 +991,7 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"clo", "U,s", 0x70000021, 0xfc0007ff, WR_1|RD_2, 0, I32|N55, 0, I37 }, + {"clz", "d,s", 0x00000050, 0xfc1f07ff, WR_1|RD_2, 0, I37, 0, 0 }, + {"clz", "U,s", 0x70000020, 0xfc0007ff, WR_1|RD_2, 0, I32|N55, 0, I37 }, ++{"crc", "d,s,t", 0x7000001c, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, + /* ctc0 is at the bottom of the table. 
*/ + {"ctc1", "t,G", 0x44c00000, 0xffe007ff, RD_1|WR_CC|CM, 0, I1, 0, 0 }, + {"ctc1", "t,S", 0x44c00000, 0xffe007ff, RD_1|WR_CC|CM, 0, I1, 0, 0 }, +@@ -1022,12 +1024,13 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"daddiu", "t,r,j", 0x64000000, 0xfc000000, WR_1|RD_2, 0, I3, 0, 0 }, + {"daddu", "d,v,t", 0x0000002d, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I3, 0, 0 }, + {"daddu", "t,r,I", 0, (int) M_DADDU_I, INSN_MACRO, 0, I3, 0, 0 }, +-{"daddwc", "d,s,t", 0x70000038, 0xfc0007ff, WR_1|RD_2|RD_3|WR_C0|RD_C0, 0, XLR, 0, 0 }, ++{"daddwc", "d,s,t", 0x70000038, 0xfc0007ff, WR_1|RD_2|RD_3|WR_C0|RD_C0, 0, XLR|XLP, 0, 0 }, + {"dbreak", "", 0x7000003f, 0xffffffff, 0, 0, N5, 0, 0 }, + {"dclo", "d,s", 0x00000053, 0xfc1f07ff, WR_1|RD_2, 0, I69, 0, 0 }, + {"dclo", "U,s", 0x70000025, 0xfc0007ff, WR_1|RD_2, 0, I64|N55, 0, I69 }, + {"dclz", "d,s", 0x00000052, 0xfc1f07ff, WR_1|RD_2, 0, I69, 0, 0 }, + {"dclz", "U,s", 0x70000024, 0xfc0007ff, WR_1|RD_2, 0, I64|N55, 0, I69 }, ++{"dcrc", "d,s,t", 0x7000001d, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, + /* dctr and dctw are used on the r5000. */ + {"dctr", "o(b)", 0xbc050000, 0xfc1f0000, RD_2, 0, I3, 0, 0 }, + {"dctw", "o(b)", 0xbc090000, 0xfc1f0000, RD_2, 0, I3, 0, 0 }, +@@ -1099,6 +1102,7 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"dmfc0", "t,G,H", 0x40200000, 0xffe007f8, WR_1|RD_C0|LC, 0, I64, 0, 0 }, + {"dmfgc0", "t,G", 0x40600100, 0xffe007ff, WR_1|RD_C0|LC, 0, 0, IVIRT64, 0 }, + {"dmfgc0", "t,G,H", 0x40600100, 0xffe007f8, WR_1|RD_C0|LC, 0, 0, IVIRT64, 0 }, ++{"dmfur", "t,d", 0x7000001e, 0xffe007ff, WR_1, 0, XLP, 0, 0 }, + {"dmt", "", 0x41600bc1, 0xffffffff, TRAP, 0, 0, MT32, 0 }, + {"dmt", "t", 0x41600bc1, 0xffe0ffff, WR_1|TRAP, 0, 0, MT32, 0 }, + {"dmtc0", "t,G", 0x40a00000, 0xffe007ff, RD_1|WR_C0|WR_CC|CM, 0, I3, 0, EE }, +@@ -1114,6 +1118,8 @@ const struct mips_opcode mips_builtin_opcodes[] = + /* dmfc3 is at the bottom of the table. */ + /* dmtc3 is at the bottom of the table. 
*/ + {"dmuh", "d,s,t", 0x000000dc, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I69, 0, 0 }, ++{"dmtur", "t,d", 0x7000001f, 0xffe007ff, RD_1, 0, XLP, 0, 0 }, ++{"dmul", "d,s,t", 0x70000006, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, + {"dmul", "d,s,t", 0x0000009c, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I69, 0, 0 }, + {"dmul", "d,v,t", 0x70000003, 0xfc0007ff, WR_1|RD_2|RD_3|WR_HILO, 0, IOCT, 0, 0 }, + {"dmul", "d,v,t", 0, (int) M_DMUL, INSN_MACRO, 0, I3, 0, M32|I69 }, +@@ -1267,9 +1273,9 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"ld", "s,-b(+R)", 0xec180000, 0xfc1c0000, WR_1, RD_pc, I69, 0, 0 }, + {"ld", "t,A(b)", 0, (int) M_LD_AB, INSN_MACRO, 0, I1, 0, 0 }, + {"ld", "t,o(b)", 0xdc000000, 0xfc000000, WR_1|RD_3|LM, 0, I3, 0, 0 }, +-{"ldaddw", "t,b", 0x70000010, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, +-{"ldaddwu", "t,b", 0x70000011, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, +-{"ldaddd", "t,b", 0x70000012, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, ++{"ldaddw", "t,b", 0x70000010, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, ++{"ldaddwu", "t,b", 0x70000011, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, ++{"ldaddd", "t,b", 0x70000012, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, + {"ldc1", "T,o(b)", 0xd4000000, 0xfc000000, WR_1|RD_3|CLD|FP_D, 0, I2, 0, SF }, + {"ldc1", "E,o(b)", 0xd4000000, 0xfc000000, WR_1|RD_3|CLD|FP_D, 0, I2, 0, SF }, + {"ldc1", "T,A(b)", 0, (int) M_LDC1_AB, INSN_MACRO, INSN2_M_FP_D, I2, 0, SF }, +@@ -1438,7 +1444,7 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"mflo", "d,9", 0x00000012, 0xff9f07ff, WR_1|RD_LO, 0, 0, D32, 0 }, + {"mflo1", "d", 0x70000012, 0xffff07ff, WR_1|RD_LO, 0, EE, 0, 0 }, + {"mflhxu", "d", 0x00000052, 0xffff07ff, WR_1|MOD_HILO, 0, 0, SMT, 0 }, +-{"mfcr", "t,s", 0x70000018, 0xfc00ffff, WR_1|RD_2, 0, XLR, 0, 0 }, ++{"mfcr", "t,s", 0x70000018, 0xfc00ffff, WR_1, 0, XLR|XLP, 0, 0 }, + {"mfsa", "d", 0x00000028, 0xffff07ff, WR_1, 0, EE, 0, 0 }, + {"min.ob", "X,Y,Q", 0x78000006, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, SB1, MX, 0 }, + {"min.ob", "D,S,Q", 0x48000006, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, N54, 0, 0 }, +@@ -1483,10 +1489,13 @@ const struct mips_opcode mips_builtin_opcodes[] = + /* move is at the top of the table. 
*/ + {"msgn.qh", "X,Y,Q", 0x78200000, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, 0, MX, 0 }, + {"msgsnd", "t", 0, (int) M_MSGSND, INSN_MACRO, 0, XLR, 0, 0 }, ++{"msgsnds", "d,t", 0x4a000001, 0xffe007ff, WR_1|RD_2|RD_C0|WR_C0, 0, XLP, 0, 0 }, + {"msgld", "", 0, (int) M_MSGLD, INSN_MACRO, 0, XLR, 0, 0 }, + {"msgld", "t", 0, (int) M_MSGLD_T, INSN_MACRO, 0, XLR, 0, 0 }, +-{"msgwait", "", 0, (int) M_MSGWAIT, INSN_MACRO, 0, XLR, 0, 0 }, +-{"msgwait", "t", 0, (int) M_MSGWAIT_T,INSN_MACRO, 0, XLR, 0, 0 }, ++{"msglds", "d,t", 0x4a000002, 0xffe007ff, WR_1|RD_2|RD_C0|WR_C0, 0, XLP, 0, 0 }, ++{"msgwait", "", 0, (int) M_MSGWAIT, INSN_MACRO, 0, XLR|XLP, 0, 0 }, ++{"msgwait", "t", 0, (int) M_MSGWAIT_T,INSN_MACRO, 0, XLR|XLP, 0, 0 }, ++{"msgsync", "", 0x4a000004, 0xffffffff,0, 0, XLP, 0, 0 }, + {"msub.d", "D,R,S,T", 0x4c000029, 0xfc00003f, WR_1|RD_2|RD_3|RD_4|FP_D, 0, I4_33, 0, I37 }, + {"msub.d", "D,S,T", 0x46200019, 0xffe0003f, WR_1|RD_2|RD_3|FP_D, 0, IL2E, 0, 0 }, + {"msub.d", "D,S,T", 0x72200019, 0xffe0003f, WR_1|RD_2|RD_3|FP_D, 0, IL2F, 0, 0 }, +@@ -1536,7 +1545,7 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"mtlo", "s,7", 0x00000013, 0xfc1fe7ff, RD_1|WR_LO, 0, 0, D32, 0 }, + {"mtlo1", "s", 0x70000013, 0xfc1fffff, RD_1|WR_LO, 0, EE, 0, 0 }, + {"mtlhx", "s", 0x00000053, 0xfc1fffff, RD_1|MOD_HILO, 0, 0, SMT, 0 }, +-{"mtcr", "t,s", 0x70000019, 0xfc00ffff, RD_1|RD_2, 0, XLR, 0, 0 }, ++{"mtcr", "t,s", 0x70000019, 0xfc00ffff, RD_1, 0, XLR|XLP, 0, 0 }, + {"mtm0", "s", 0x70000008, 0xfc1fffff, RD_1, 0, IOCT, 0, 0 }, + {"mtm0", "s,t", 0x70000008, 0xfc00ffff, RD_1|RD_2, 0, IOCT3, 0, 0 }, + {"mtm1", "s", 0x7000000c, 0xfc1fffff, RD_1, 0, IOCT, 0, 0 }, +@@ -1978,9 +1987,9 @@ const struct mips_opcode mips_builtin_opcodes[] = + {"suxc1", "S,t(b)", 0x4c00000d, 0xfc0007ff, RD_1|RD_2|RD_3|SM|FP_D, 0, I5_33|N55, 0, I37}, + {"sw", "t,o(b)", 0xac000000, 0xfc000000, RD_1|RD_3|SM, 0, I1, 0, 0 }, + {"sw", "t,A(b)", 0, (int) M_SW_AB, INSN_MACRO, 0, I1, 0, 0 }, +-{"swapw", "t,b", 0x70000014, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, +-{"swapwu", "t,b", 0x70000015, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, +-{"swapd", "t,b", 0x70000016, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, ++{"swapw", "t,b", 0x70000014, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, ++{"swapwu", "t,b", 0x70000015, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, ++{"swapd", "t,b", 0x70000016, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, + {"swc0", "E,o(b)", 0xe0000000, 0xfc000000, RD_3|RD_C0|SM, 0, I1, 0, IOCT|IOCTP|IOCT2|I37 }, + {"swc0", "E,A(b)", 0, (int) M_SWC0_AB, INSN_MACRO, 0, I1, 0, IOCT|IOCTP|IOCT2|I37 }, + {"swc1", "T,o(b)", 0xe4000000, 0xfc000000, RD_1|RD_3|SM|FP_S, 0, I1, 0, 0 }, +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0010-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch b/poky/meta/recipes-devtools/binutils/binutils/0010-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch deleted file mode 100644 index f4f1a068c..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0010-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 70f3f2d9e912ea777fa113e02cdbc3465a66e40d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 2 Mar 2015 01:42:38 +0000 -Subject: [PATCH] Fix rpath in libtool when sysroot is enabled - -Enabling sysroot support in libtool exposed a bug where the final -library had an RPATH encoded into it which still pointed to the -sysroot. This works around the issue until it gets sorted out -upstream. 
- -Fix suggested by Richard Purdie - -Upstream-Status: Inappropriate [embedded specific] - -Signed-off-by: Scott Garman -Signed-off-by: Khem Raj ---- - ltmain.sh | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/ltmain.sh b/ltmain.sh -index 70e856e065..11ee684ccc 100644 ---- a/ltmain.sh -+++ b/ltmain.sh -@@ -8035,9 +8035,11 @@ EOF - test "$opt_mode" != relink && rpath="$compile_rpath$rpath" - for libdir in $rpath; do - if test -n "$hardcode_libdir_flag_spec"; then -+ func_replace_sysroot "$libdir" -+ libdir=$func_replace_sysroot_result -+ func_stripname '=' '' "$libdir" -+ libdir=$func_stripname_result - if test -n "$hardcode_libdir_separator"; then -- func_replace_sysroot "$libdir" -- libdir=$func_replace_sysroot_result - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" - else -@@ -8770,6 +8772,10 @@ EOF - hardcode_libdirs= - for libdir in $compile_rpath $finalize_rpath; do - if test -n "$hardcode_libdir_flag_spec"; then -+ func_replace_sysroot "$libdir" -+ libdir=$func_replace_sysroot_result -+ func_stripname '=' '' "$libdir" -+ libdir=$func_stripname_result - if test -n "$hardcode_libdir_separator"; then - if test -z "$hardcode_libdirs"; then - hardcode_libdirs="$libdir" diff --git a/poky/meta/recipes-devtools/binutils/binutils/0011-Change-default-emulation-for-mips64-linux.patch b/poky/meta/recipes-devtools/binutils/binutils/0011-Change-default-emulation-for-mips64-linux.patch deleted file mode 100644 index c49b8e863..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0011-Change-default-emulation-for-mips64-linux.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 2182791a453f34bee6968b3e72848608cf7d7abe Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 2 Mar 2015 01:44:14 +0000 -Subject: [PATCH] Change default emulation for mips64*-*-linux - -we change the default emulations to be N64 instead of N32 - -Upstream-Status: Inappropriate [ OE configuration Specific] - -Signed-off-by: Khem Raj ---- - bfd/config.bfd | 8 ++++---- - ld/configure.tgt | 8 ++++---- - 2 files changed, 8 insertions(+), 8 deletions(-) - -diff --git a/bfd/config.bfd b/bfd/config.bfd -index b96931f52e..847f9f0ba9 100644 ---- a/bfd/config.bfd -+++ b/bfd/config.bfd -@@ -911,12 +911,12 @@ case "${targ}" in - targ_selvecs="mips_elf32_le_vec mips_elf64_be_vec mips_elf64_le_vec mips_ecoff_be_vec mips_ecoff_le_vec" - ;; - mips64*el-*-linux*) -- targ_defvec=mips_elf32_ntrad_le_vec -- targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_trad_le_vec mips_elf32_trad_be_vec mips_elf64_trad_le_vec mips_elf64_trad_be_vec" -+ targ_defvec=mips_elf64_trad_le_vec -+ targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_ntrad_le_vec mips_elf32_trad_le_vec mips_elf32_trad_be_vec mips_elf64_trad_be_vec" - ;; - mips64*-*-linux*) -- targ_defvec=mips_elf32_ntrad_be_vec -- targ_selvecs="mips_elf32_ntrad_le_vec mips_elf32_trad_be_vec mips_elf32_trad_le_vec mips_elf64_trad_be_vec mips_elf64_trad_le_vec" -+ targ_defvec=mips_elf64_trad_be_vec -+ targ_selvecs="mips_elf32_ntrad_be_vec mips_elf32_ntrad_be_vec mips_elf32_trad_be_vec mips_elf32_trad_le_vec mips_elf64_trad_le_vec" - ;; - mips*el-*-linux*) - targ_defvec=mips_elf32_trad_le_vec -diff --git a/ld/configure.tgt b/ld/configure.tgt -index 23194e357f..f4f0eaf9b2 100644 ---- a/ld/configure.tgt -+++ b/ld/configure.tgt -@@ -541,12 +541,12 @@ mips*-*-vxworks*) targ_emul=elf32ebmipvxworks - ;; - mips*-*-windiss) targ_emul=elf32mipswindiss - ;; --mips64*el-*-linux-*) targ_emul=elf32ltsmipn32 -- targ_extra_emuls="elf32btsmipn32 elf32ltsmip elf32btsmip 
elf64ltsmip elf64btsmip" -+mips64*el-*-linux-*) targ_emul=elf64ltsmip -+ targ_extra_emuls="elf32btsmipn32 elf32ltsmipn32 elf32ltsmip elf32btsmip elf64btsmip" - targ_extra_libpath=$targ_extra_emuls - ;; --mips64*-*-linux-*) targ_emul=elf32btsmipn32 -- targ_extra_emuls="elf32ltsmipn32 elf32btsmip elf32ltsmip elf64btsmip elf64ltsmip" -+mips64*-*-linux-*) targ_emul=elf64btsmip -+ targ_extra_emuls="elf32btsmipn32 elf32ltsmipn32 elf32btsmip elf32ltsmip elf64ltsmip" - targ_extra_libpath=$targ_extra_emuls - ;; - mips*el-*-linux-*) targ_emul=elf32ltsmip diff --git a/poky/meta/recipes-devtools/binutils/binutils/0011-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch b/poky/meta/recipes-devtools/binutils/binutils/0011-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch new file mode 100644 index 000000000..10c3d8086 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0011-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch @@ -0,0 +1,37 @@ +From 7836f8aa56ef0f18c8658dc7e4952a9d097ba7e8 Mon Sep 17 00:00:00 2001 +From: Zhenhua Luo +Date: Sat, 11 Jun 2016 22:08:29 -0500 +Subject: [PATCH 11/17] fix the incorrect assembling for ppc wait mnemonic + +Signed-off-by: Zhenhua Luo + +Upstream-Status: Pending +--- + opcodes/ppc-opc.c | 4 +--- + 1 file changed, 1 insertion(+), 3 deletions(-) + +diff --git a/opcodes/ppc-opc.c b/opcodes/ppc-opc.c +index 5e20d617664..4c9656ecf08 100644 +--- a/opcodes/ppc-opc.c ++++ b/opcodes/ppc-opc.c +@@ -6265,8 +6265,6 @@ const struct powerpc_opcode powerpc_opcodes[] = { + {"waitasec", X(31,30), XRTRARB_MASK, POWER8, POWER9, {0}}, + {"waitrsv", XWCPL(31,30,1,0),0xffffffff, POWER10, 0, {0}}, + {"pause_short", XWCPL(31,30,2,0),0xffffffff, POWER10, 0, {0}}, +-{"wait", X(31,30), XWCPL_MASK, POWER10, 0, {WC, PL}}, +-{"wait", X(31,30), XWC_MASK, POWER9, POWER10, {WC}}, + + {"lwepx", X(31,31), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, + +@@ -6326,7 +6324,7 @@ const struct powerpc_opcode powerpc_opcodes[] = { + + {"waitrsv", X(31,62)|(1<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, + {"waitimpl", X(31,62)|(2<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, +-{"wait", X(31,62), XWC_MASK, E500MC|PPCA2, 0, {WC}}, ++{"wait", X(31,62), XWC_MASK, E500MC|PPCA2|POWER9|POWER10, 0, {WC}}, + + {"dcbstep", XRT(31,63,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, RB}}, + +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0012-Add-support-for-Netlogic-XLP.patch b/poky/meta/recipes-devtools/binutils/binutils/0012-Add-support-for-Netlogic-XLP.patch deleted file mode 100644 index c32867a23..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0012-Add-support-for-Netlogic-XLP.patch +++ /dev/null @@ -1,406 +0,0 @@ -From a0237ec2d1e58bd35c236df39dd5a06504c6d2ed Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Sun, 14 Feb 2016 17:06:19 +0000 -Subject: [PATCH] Add support for Netlogic XLP - -Patch From: Nebu Philips - -Using the mipsisa64r2nlm target, add support for XLP from -Netlogic. Also, update vendor name to NLM wherever applicable. 
- -Use 0x00000080 for INSN_XLP, the value 0x00000040 has already been -assigned to INSN_OCTEON3 - -Upstream-Status: Pending - -Signed-off-by: Khem Raj -Signed-off-by: Baoshan Pang -Signed-off-by: Mark Hatle ---- - bfd/aoutx.h | 1 + - bfd/archures.c | 1 + - bfd/bfd-in2.h | 1 + - bfd/config.bfd | 5 +++++ - bfd/cpu-mips.c | 6 ++++-- - bfd/elfxx-mips.c | 8 ++++++++ - binutils/readelf.c | 1 + - gas/config/tc-mips.c | 4 +++- - gas/configure | 3 +++ - gas/configure.ac | 3 +++ - include/elf/mips.h | 1 + - include/opcode/mips.h | 6 ++++++ - ld/configure.tgt | 3 +++ - opcodes/mips-dis.c | 12 +++++------- - opcodes/mips-opc.c | 31 ++++++++++++++++++++----------- - 15 files changed, 65 insertions(+), 21 deletions(-) - -diff --git a/bfd/aoutx.h b/bfd/aoutx.h -index fa3a9746b6..5078024fd5 100644 ---- a/bfd/aoutx.h -+++ b/bfd/aoutx.h -@@ -799,6 +799,7 @@ NAME (aout, machine_type) (enum bfd_architecture arch, - case bfd_mach_mipsisa64r6: - case bfd_mach_mips_sb1: - case bfd_mach_mips_xlr: -+ case bfd_mach_mips_xlp: - /* FIXME: These should be MIPS3, MIPS4, MIPS16, MIPS32, etc. */ - arch_flags = M_MIPS2; - break; -diff --git a/bfd/archures.c b/bfd/archures.c -index 232103817c..b2b3b8c124 100644 ---- a/bfd/archures.c -+++ b/bfd/archures.c -@@ -185,6 +185,7 @@ DESCRIPTION - .#define bfd_mach_mips_octeon3 6503 - .#define bfd_mach_mips_xlr 887682 {* decimal 'XLR'. *} - .#define bfd_mach_mips_interaptiv_mr2 736550 {* decimal 'IA2'. *} -+.#define bfd_mach_mips_xlp 887680 {* decimal 'XLP'. *} - .#define bfd_mach_mipsisa32 32 - .#define bfd_mach_mipsisa32r2 33 - .#define bfd_mach_mipsisa32r3 34 -diff --git a/bfd/bfd-in2.h b/bfd/bfd-in2.h -index 7c13bc8c91..2e453c50c1 100644 ---- a/bfd/bfd-in2.h -+++ b/bfd/bfd-in2.h -@@ -1568,6 +1568,7 @@ enum bfd_architecture - #define bfd_mach_mips_octeon3 6503 - #define bfd_mach_mips_xlr 887682 /* decimal 'XLR'. */ - #define bfd_mach_mips_interaptiv_mr2 736550 /* decimal 'IA2'. */ -+#define bfd_mach_mips_xlp 887680 /* decimal 'XLP'. 
*/ - #define bfd_mach_mipsisa32 32 - #define bfd_mach_mipsisa32r2 33 - #define bfd_mach_mipsisa32r3 34 -diff --git a/bfd/config.bfd b/bfd/config.bfd -index 847f9f0ba9..a12707f827 100644 ---- a/bfd/config.bfd -+++ b/bfd/config.bfd -@@ -894,6 +894,11 @@ case "${targ}" in - targ_defvec=mips_elf32_le_vec - targ_selvecs="mips_elf32_be_vec mips_elf64_be_vec mips_elf64_le_vec" - ;; -+ mipsisa64*-*-elf*) -+ targ_defvec=mips_elf32_trad_be_vec -+ targ_selvecs="mips_elf32_trad_le_vec mips_elf64_trad_be_vec mips_elf64_trad_le_vec" -+ want64=true -+ ;; - mips*-*-elf* | mips*-*-rtems* | mips*-*-windiss | mips*-*-none) - targ_defvec=mips_elf32_be_vec - targ_selvecs="mips_elf32_le_vec mips_elf64_be_vec mips_elf64_le_vec" -diff --git a/bfd/cpu-mips.c b/bfd/cpu-mips.c -index 802acb45f1..fd9ec4c0ad 100644 ---- a/bfd/cpu-mips.c -+++ b/bfd/cpu-mips.c -@@ -108,7 +108,8 @@ enum - I_mipsocteon3, - I_xlr, - I_interaptiv_mr2, -- I_micromips -+ I_micromips, -+ I_xlp - }; - - #define NN(index) (&arch_info_struct[(index) + 1]) -@@ -163,7 +164,8 @@ static const bfd_arch_info_type arch_info_struct[] = - N (64, 64, bfd_mach_mips_xlr, "mips:xlr", FALSE, NN(I_xlr)), - N (32, 32, bfd_mach_mips_interaptiv_mr2, "mips:interaptiv-mr2", FALSE, - NN(I_interaptiv_mr2)), -- N (64, 64, bfd_mach_mips_micromips, "mips:micromips", FALSE, NULL) -+ N (64, 64, bfd_mach_mips_micromips, "mips:micromips", FALSE, NN(I_micromips)), -+ N (64, 64, bfd_mach_mips_xlp, "mips:xlp", FALSE, NULL) - }; - - /* The default architecture is mips:3000, but with a machine number of -diff --git a/bfd/elfxx-mips.c b/bfd/elfxx-mips.c -index d7e3aed3b6..7baeee9ee3 100644 ---- a/bfd/elfxx-mips.c -+++ b/bfd/elfxx-mips.c -@@ -6999,6 +6999,9 @@ _bfd_elf_mips_mach (flagword flags) - case E_MIPS_MACH_IAMR2: - return bfd_mach_mips_interaptiv_mr2; - -+ case E_MIPS_MACH_XLP: -+ return bfd_mach_mips_xlp; -+ - default: - switch (flags & EF_MIPS_ARCH) - { -@@ -12355,6 +12358,10 @@ mips_set_isa_flags (bfd *abfd) - val = E_MIPS_ARCH_64R2 | E_MIPS_MACH_OCTEON2; - break; - -+ case bfd_mach_mips_xlp: -+ val = E_MIPS_ARCH_64R2 | E_MIPS_MACH_XLP; -+ break; -+ - case bfd_mach_mipsisa32: - val = E_MIPS_ARCH_32; - break; -@@ -14379,6 +14386,7 @@ static const struct mips_mach_extension mips_mach_extensions[] = - { bfd_mach_mips_gs264e, bfd_mach_mips_gs464e }, - { bfd_mach_mips_gs464e, bfd_mach_mips_gs464 }, - { bfd_mach_mips_gs464, bfd_mach_mipsisa64r2 }, -+ { bfd_mach_mips_xlp, bfd_mach_mipsisa64r2 }, - - /* MIPS64 extensions. 
*/ - { bfd_mach_mipsisa64r2, bfd_mach_mipsisa64 }, -diff --git a/binutils/readelf.c b/binutils/readelf.c -index 6b5bebe743..d15a7828db 100644 ---- a/binutils/readelf.c -+++ b/binutils/readelf.c -@@ -3440,6 +3440,7 @@ get_machine_flags (Filedata * filedata, unsigned e_flags, unsigned e_machine) - case E_MIPS_MACH_OCTEON3: strcat (buf, ", octeon3"); break; - case E_MIPS_MACH_XLR: strcat (buf, ", xlr"); break; - case E_MIPS_MACH_IAMR2: strcat (buf, ", interaptiv-mr2"); break; -+ case E_MIPS_MACH_XLP: strcat (buf, ", xlp"); break; - case 0: - /* We simply ignore the field in this case to avoid confusion: - MIPS ELF does not specify EF_MIPS_MACH, it is a GNU -diff --git a/gas/config/tc-mips.c b/gas/config/tc-mips.c -index fc6898834e..2c7151ccdb 100644 ---- a/gas/config/tc-mips.c -+++ b/gas/config/tc-mips.c -@@ -568,6 +568,7 @@ static int mips_32bitmode = 0; - || mips_opts.arch == CPU_RM7000 \ - || mips_opts.arch == CPU_VR5500 \ - || mips_opts.micromips \ -+ || mips_opts.arch == CPU_XLP \ - ) - - /* Whether the processor uses hardware interlocks to protect reads -@@ -597,6 +598,7 @@ static int mips_32bitmode = 0; - && mips_opts.isa != ISA_MIPS3) \ - || mips_opts.arch == CPU_R4300 \ - || mips_opts.micromips \ -+ || mips_opts.arch == CPU_XLP \ - ) - - /* Whether the processor uses hardware interlocks to protect reads -@@ -20135,7 +20137,7 @@ static const struct mips_cpu_info mips_cpu_info_table[] = - /* Broadcom XLP. - XLP is mostly like XLR, with the prominent exception that it is - MIPS64R2 rather than MIPS64. */ -- { "xlp", 0, 0, ISA_MIPS64R2, CPU_XLR }, -+ { "xlp", 0, 0, ISA_MIPS64R2, CPU_XLP }, - - /* MIPS 64 Release 6. */ - { "i6400", 0, ASE_VIRT | ASE_MSA, ISA_MIPS64R6, CPU_MIPS64R6}, -diff --git a/gas/configure b/gas/configure -index 60c1a055ae..59d6d11215 100755 ---- a/gas/configure -+++ b/gas/configure -@@ -12722,6 +12722,9 @@ _ACEOF - mipsisa64r6 | mipsisa64r6el) - mips_cpu=mips64r6 - ;; -+ mipsisa64r2nlm | mipsisa64r2nlmel) -+ mips_cpu=xlp -+ ;; - mipstx39 | mipstx39el) - mips_cpu=r3900 - ;; -diff --git a/gas/configure.ac b/gas/configure.ac -index 6f32e55a1a..11c2e0d273 100644 ---- a/gas/configure.ac -+++ b/gas/configure.ac -@@ -325,6 +325,9 @@ changequote([,])dnl - mipsisa64r6 | mipsisa64r6el) - mips_cpu=mips64r6 - ;; -+ mipsisa64r2nlm | mipsisa64r2nlmel) -+ mips_cpu=xlp -+ ;; - mipstx39 | mipstx39el) - mips_cpu=r3900 - ;; -diff --git a/include/elf/mips.h b/include/elf/mips.h -index d116b036b6..dceeb3f156 100644 ---- a/include/elf/mips.h -+++ b/include/elf/mips.h -@@ -290,6 +290,7 @@ END_RELOC_NUMBERS (R_MIPS_maxext) - #define E_MIPS_MACH_SB1 0x008a0000 - #define E_MIPS_MACH_OCTEON 0x008b0000 - #define E_MIPS_MACH_XLR 0x008c0000 -+#define E_MIPS_MACH_XLP 0x008f0000 - #define E_MIPS_MACH_OCTEON2 0x008d0000 - #define E_MIPS_MACH_OCTEON3 0x008e0000 - #define E_MIPS_MACH_5400 0x00910000 -diff --git a/include/opcode/mips.h b/include/opcode/mips.h -index fd031f3758..a96a44df84 100644 ---- a/include/opcode/mips.h -+++ b/include/opcode/mips.h -@@ -1260,6 +1260,8 @@ static const unsigned int mips_isa_table[] = { - #define INSN_XLR 0x00000020 - /* Imagination interAptiv MR2. 
*/ - #define INSN_INTERAPTIV_MR2 0x04000000 -+/* Netlogic XlP instruction */ -+#define INSN_XLP 0x00000080 - - /* DSP ASE */ - #define ASE_DSP 0x00000001 -@@ -1384,6 +1386,7 @@ static const unsigned int mips_isa_table[] = { - #define CPU_OCTEON3 6503 - #define CPU_XLR 887682 /* decimal 'XLR' */ - #define CPU_INTERAPTIV_MR2 736550 /* decimal 'IA2' */ -+#define CPU_XLP 887680 /* decimal 'XLP' */ - - /* Return true if the given CPU is included in INSN_* mask MASK. */ - -@@ -1461,6 +1464,9 @@ cpu_is_member (int cpu, unsigned int mask) - return ((mask & INSN_ISA_MASK) == INSN_ISA32R6) - || ((mask & INSN_ISA_MASK) == INSN_ISA64R6); - -+ case CPU_XLP: -+ return (mask & INSN_XLP) != 0; -+ - default: - return FALSE; - } -diff --git a/ld/configure.tgt b/ld/configure.tgt -index f4f0eaf9b2..0da3eca19c 100644 ---- a/ld/configure.tgt -+++ b/ld/configure.tgt -@@ -520,6 +520,9 @@ mips*-sde-elf* | mips*-mti-elf* | mips*-img-elf*) - targ_emul=elf32btsmip - targ_extra_emuls="elf32ltsmip elf32btsmipn32 elf64btsmip elf32ltsmipn32 elf64ltsmip" - ;; -+mipsisa64*-*-elf*) targ_emul=elf32btsmip -+ targ_extra_emuls="elf32ltsmip elf64btsmip elf64ltsmip" -+ ;; - mips64*el-ps2-elf*) targ_emul=elf32lr5900n32 - targ_extra_emuls="elf32lr5900" - targ_extra_libpath=$targ_extra_emuls -diff --git a/opcodes/mips-dis.c b/opcodes/mips-dis.c -index 755bbe294b..ce22ef683a 100644 ---- a/opcodes/mips-dis.c -+++ b/opcodes/mips-dis.c -@@ -674,13 +674,11 @@ const struct mips_arch_choice mips_arch_choices[] = - mips_cp0sel_names_xlr, ARRAY_SIZE (mips_cp0sel_names_xlr), - mips_cp1_names_mips3264, mips_hwr_names_numeric }, - -- /* XLP is mostly like XLR, with the prominent exception it is being -- MIPS64R2. */ -- { "xlp", 1, bfd_mach_mips_xlr, CPU_XLR, -- ISA_MIPS64R2 | INSN_XLR, 0, -- mips_cp0_names_xlr, -- mips_cp0sel_names_xlr, ARRAY_SIZE (mips_cp0sel_names_xlr), -- mips_cp1_names_mips3264, mips_hwr_names_numeric }, -+ { "xlp", 1, bfd_mach_mips_xlp, CPU_XLP, -+ ISA_MIPS64R2 | INSN_XLP, 0, -+ mips_cp0_names_mips3264r2, -+ mips_cp0sel_names_mips3264r2, ARRAY_SIZE (mips_cp0sel_names_mips3264r2), -+ mips_cp1_names_mips3264, mips_hwr_names_mips3264r2 }, - - /* This entry, mips16, is here only for ISA/processor selection; do - not print its name. */ -diff --git a/opcodes/mips-opc.c b/opcodes/mips-opc.c -index 5270aeefa8..d17dc78cd7 100644 ---- a/opcodes/mips-opc.c -+++ b/opcodes/mips-opc.c -@@ -328,6 +328,7 @@ decode_mips_operand (const char *p) - #define IOCT3 INSN_OCTEON3 - #define XLR INSN_XLR - #define IAMR2 INSN_INTERAPTIV_MR2 -+#define XLP INSN_XLP - #define IVIRT ASE_VIRT - #define IVIRT64 ASE_VIRT64 - -@@ -990,6 +991,7 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"clo", "U,s", 0x70000021, 0xfc0007ff, WR_1|RD_2, 0, I32|N55, 0, I37 }, - {"clz", "d,s", 0x00000050, 0xfc1f07ff, WR_1|RD_2, 0, I37, 0, 0 }, - {"clz", "U,s", 0x70000020, 0xfc0007ff, WR_1|RD_2, 0, I32|N55, 0, I37 }, -+{"crc", "d,s,t", 0x7000001c, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, - /* ctc0 is at the bottom of the table. 
*/ - {"ctc1", "t,G", 0x44c00000, 0xffe007ff, RD_1|WR_CC|CM, 0, I1, 0, 0 }, - {"ctc1", "t,S", 0x44c00000, 0xffe007ff, RD_1|WR_CC|CM, 0, I1, 0, 0 }, -@@ -1022,12 +1024,13 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"daddiu", "t,r,j", 0x64000000, 0xfc000000, WR_1|RD_2, 0, I3, 0, 0 }, - {"daddu", "d,v,t", 0x0000002d, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I3, 0, 0 }, - {"daddu", "t,r,I", 0, (int) M_DADDU_I, INSN_MACRO, 0, I3, 0, 0 }, --{"daddwc", "d,s,t", 0x70000038, 0xfc0007ff, WR_1|RD_2|RD_3|WR_C0|RD_C0, 0, XLR, 0, 0 }, -+{"daddwc", "d,s,t", 0x70000038, 0xfc0007ff, WR_1|RD_2|RD_3|WR_C0|RD_C0, 0, XLR|XLP, 0, 0 }, - {"dbreak", "", 0x7000003f, 0xffffffff, 0, 0, N5, 0, 0 }, - {"dclo", "d,s", 0x00000053, 0xfc1f07ff, WR_1|RD_2, 0, I69, 0, 0 }, - {"dclo", "U,s", 0x70000025, 0xfc0007ff, WR_1|RD_2, 0, I64|N55, 0, I69 }, - {"dclz", "d,s", 0x00000052, 0xfc1f07ff, WR_1|RD_2, 0, I69, 0, 0 }, - {"dclz", "U,s", 0x70000024, 0xfc0007ff, WR_1|RD_2, 0, I64|N55, 0, I69 }, -+{"dcrc", "d,s,t", 0x7000001d, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, - /* dctr and dctw are used on the r5000. */ - {"dctr", "o(b)", 0xbc050000, 0xfc1f0000, RD_2, 0, I3, 0, 0 }, - {"dctw", "o(b)", 0xbc090000, 0xfc1f0000, RD_2, 0, I3, 0, 0 }, -@@ -1099,6 +1102,7 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"dmfc0", "t,G,H", 0x40200000, 0xffe007f8, WR_1|RD_C0|LC, 0, I64, 0, 0 }, - {"dmfgc0", "t,G", 0x40600100, 0xffe007ff, WR_1|RD_C0|LC, 0, 0, IVIRT64, 0 }, - {"dmfgc0", "t,G,H", 0x40600100, 0xffe007f8, WR_1|RD_C0|LC, 0, 0, IVIRT64, 0 }, -+{"dmfur", "t,d", 0x7000001e, 0xffe007ff, WR_1, 0, XLP, 0, 0 }, - {"dmt", "", 0x41600bc1, 0xffffffff, TRAP, 0, 0, MT32, 0 }, - {"dmt", "t", 0x41600bc1, 0xffe0ffff, WR_1|TRAP, 0, 0, MT32, 0 }, - {"dmtc0", "t,G", 0x40a00000, 0xffe007ff, RD_1|WR_C0|WR_CC|CM, 0, I3, 0, EE }, -@@ -1114,6 +1118,8 @@ const struct mips_opcode mips_builtin_opcodes[] = - /* dmfc3 is at the bottom of the table. */ - /* dmtc3 is at the bottom of the table. 
*/ - {"dmuh", "d,s,t", 0x000000dc, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I69, 0, 0 }, -+{"dmtur", "t,d", 0x7000001f, 0xffe007ff, RD_1, 0, XLP, 0, 0 }, -+{"dmul", "d,s,t", 0x70000006, 0xfc0007ff, WR_1|RD_2|RD_3, 0, XLP, 0, 0 }, - {"dmul", "d,s,t", 0x0000009c, 0xfc0007ff, WR_1|RD_2|RD_3, 0, I69, 0, 0 }, - {"dmul", "d,v,t", 0x70000003, 0xfc0007ff, WR_1|RD_2|RD_3|WR_HILO, 0, IOCT, 0, 0 }, - {"dmul", "d,v,t", 0, (int) M_DMUL, INSN_MACRO, 0, I3, 0, M32|I69 }, -@@ -1267,9 +1273,9 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"ld", "s,-b(+R)", 0xec180000, 0xfc1c0000, WR_1, RD_pc, I69, 0, 0 }, - {"ld", "t,A(b)", 0, (int) M_LD_AB, INSN_MACRO, 0, I1, 0, 0 }, - {"ld", "t,o(b)", 0xdc000000, 0xfc000000, WR_1|RD_3|LM, 0, I3, 0, 0 }, --{"ldaddw", "t,b", 0x70000010, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, --{"ldaddwu", "t,b", 0x70000011, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, --{"ldaddd", "t,b", 0x70000012, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, -+{"ldaddw", "t,b", 0x70000010, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, -+{"ldaddwu", "t,b", 0x70000011, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, -+{"ldaddd", "t,b", 0x70000012, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, - {"ldc1", "T,o(b)", 0xd4000000, 0xfc000000, WR_1|RD_3|CLD|FP_D, 0, I2, 0, SF }, - {"ldc1", "E,o(b)", 0xd4000000, 0xfc000000, WR_1|RD_3|CLD|FP_D, 0, I2, 0, SF }, - {"ldc1", "T,A(b)", 0, (int) M_LDC1_AB, INSN_MACRO, INSN2_M_FP_D, I2, 0, SF }, -@@ -1438,7 +1444,7 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"mflo", "d,9", 0x00000012, 0xff9f07ff, WR_1|RD_LO, 0, 0, D32, 0 }, - {"mflo1", "d", 0x70000012, 0xffff07ff, WR_1|RD_LO, 0, EE, 0, 0 }, - {"mflhxu", "d", 0x00000052, 0xffff07ff, WR_1|MOD_HILO, 0, 0, SMT, 0 }, --{"mfcr", "t,s", 0x70000018, 0xfc00ffff, WR_1|RD_2, 0, XLR, 0, 0 }, -+{"mfcr", "t,s", 0x70000018, 0xfc00ffff, WR_1, 0, XLR|XLP, 0, 0 }, - {"mfsa", "d", 0x00000028, 0xffff07ff, WR_1, 0, EE, 0, 0 }, - {"min.ob", "X,Y,Q", 0x78000006, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, SB1, MX, 0 }, - {"min.ob", "D,S,Q", 0x48000006, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, N54, 0, 0 }, -@@ -1483,10 +1489,13 @@ const struct mips_opcode mips_builtin_opcodes[] = - /* move is at the top of the table. 
*/ - {"msgn.qh", "X,Y,Q", 0x78200000, 0xfc20003f, WR_1|RD_2|RD_3|FP_D, 0, 0, MX, 0 }, - {"msgsnd", "t", 0, (int) M_MSGSND, INSN_MACRO, 0, XLR, 0, 0 }, -+{"msgsnds", "d,t", 0x4a000001, 0xffe007ff, WR_1|RD_2|RD_C0|WR_C0, 0, XLP, 0, 0 }, - {"msgld", "", 0, (int) M_MSGLD, INSN_MACRO, 0, XLR, 0, 0 }, - {"msgld", "t", 0, (int) M_MSGLD_T, INSN_MACRO, 0, XLR, 0, 0 }, --{"msgwait", "", 0, (int) M_MSGWAIT, INSN_MACRO, 0, XLR, 0, 0 }, --{"msgwait", "t", 0, (int) M_MSGWAIT_T,INSN_MACRO, 0, XLR, 0, 0 }, -+{"msglds", "d,t", 0x4a000002, 0xffe007ff, WR_1|RD_2|RD_C0|WR_C0, 0, XLP, 0, 0 }, -+{"msgwait", "", 0, (int) M_MSGWAIT, INSN_MACRO, 0, XLR|XLP, 0, 0 }, -+{"msgwait", "t", 0, (int) M_MSGWAIT_T,INSN_MACRO, 0, XLR|XLP, 0, 0 }, -+{"msgsync", "", 0x4a000004, 0xffffffff,0, 0, XLP, 0, 0 }, - {"msub.d", "D,R,S,T", 0x4c000029, 0xfc00003f, WR_1|RD_2|RD_3|RD_4|FP_D, 0, I4_33, 0, I37 }, - {"msub.d", "D,S,T", 0x46200019, 0xffe0003f, WR_1|RD_2|RD_3|FP_D, 0, IL2E, 0, 0 }, - {"msub.d", "D,S,T", 0x72200019, 0xffe0003f, WR_1|RD_2|RD_3|FP_D, 0, IL2F, 0, 0 }, -@@ -1536,7 +1545,7 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"mtlo", "s,7", 0x00000013, 0xfc1fe7ff, RD_1|WR_LO, 0, 0, D32, 0 }, - {"mtlo1", "s", 0x70000013, 0xfc1fffff, RD_1|WR_LO, 0, EE, 0, 0 }, - {"mtlhx", "s", 0x00000053, 0xfc1fffff, RD_1|MOD_HILO, 0, 0, SMT, 0 }, --{"mtcr", "t,s", 0x70000019, 0xfc00ffff, RD_1|RD_2, 0, XLR, 0, 0 }, -+{"mtcr", "t,s", 0x70000019, 0xfc00ffff, RD_1, 0, XLR|XLP, 0, 0 }, - {"mtm0", "s", 0x70000008, 0xfc1fffff, RD_1, 0, IOCT, 0, 0 }, - {"mtm0", "s,t", 0x70000008, 0xfc00ffff, RD_1|RD_2, 0, IOCT3, 0, 0 }, - {"mtm1", "s", 0x7000000c, 0xfc1fffff, RD_1, 0, IOCT, 0, 0 }, -@@ -1978,9 +1987,9 @@ const struct mips_opcode mips_builtin_opcodes[] = - {"suxc1", "S,t(b)", 0x4c00000d, 0xfc0007ff, RD_1|RD_2|RD_3|SM|FP_D, 0, I5_33|N55, 0, I37}, - {"sw", "t,o(b)", 0xac000000, 0xfc000000, RD_1|RD_3|SM, 0, I1, 0, 0 }, - {"sw", "t,A(b)", 0, (int) M_SW_AB, INSN_MACRO, 0, I1, 0, 0 }, --{"swapw", "t,b", 0x70000014, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, --{"swapwu", "t,b", 0x70000015, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, --{"swapd", "t,b", 0x70000016, 0xfc00ffff, MOD_1|RD_2|LM|SM, 0, XLR, 0, 0 }, -+{"swapw", "t,b", 0x70000014, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, -+{"swapwu", "t,b", 0x70000015, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, -+{"swapd", "t,b", 0x70000016, 0xfc00ffff, MOD_1|RD_2|SM, 0, XLR|XLP, 0, 0 }, - {"swc0", "E,o(b)", 0xe0000000, 0xfc000000, RD_3|RD_C0|SM, 0, I1, 0, IOCT|IOCTP|IOCT2|I37 }, - {"swc0", "E,A(b)", 0, (int) M_SWC0_AB, INSN_MACRO, 0, I1, 0, IOCT|IOCTP|IOCT2|I37 }, - {"swc1", "T,o(b)", 0xe4000000, 0xfc000000, RD_1|RD_3|SM|FP_S, 0, I1, 0, 0 }, diff --git a/poky/meta/recipes-devtools/binutils/binutils/0012-Detect-64-bit-MIPS-targets.patch b/poky/meta/recipes-devtools/binutils/binutils/0012-Detect-64-bit-MIPS-targets.patch new file mode 100644 index 000000000..ef8b253b6 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0012-Detect-64-bit-MIPS-targets.patch @@ -0,0 +1,50 @@ +From dbff6bdf2123f5495b8be930304d9aa5e88006a7 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 31 Mar 2017 11:42:03 -0700 +Subject: [PATCH 12/17] Detect 64-bit MIPS targets + +Add mips64 target triplets and default to N64 + +Upstream-Status: Submitted +https://sourceware.org/ml/binutils/2016-08/msg00048.html + +Signed-off-by: Khem Raj +--- + gold/configure.tgt | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/gold/configure.tgt b/gold/configure.tgt +index aa7ec552aec..470515062e4 
100644 +--- a/gold/configure.tgt ++++ b/gold/configure.tgt +@@ -153,6 +153,13 @@ aarch64*-*) + targ_big_endian=false + targ_extra_big_endian=true + ;; ++mips*64*el*-*-*|mips*64*le*-*-*) ++ targ_obj=mips ++ targ_machine=EM_MIPS_RS3_LE ++ targ_size=64 ++ targ_big_endian=false ++ targ_extra_big_endian=true ++ ;; + mips*el*-*-*|mips*le*-*-*) + targ_obj=mips + targ_machine=EM_MIPS_RS3_LE +@@ -160,6 +167,13 @@ mips*el*-*-*|mips*le*-*-*) + targ_big_endian=false + targ_extra_big_endian=true + ;; ++mips*64*-*-*) ++ targ_obj=mips ++ targ_machine=EM_MIPS ++ targ_size=64 ++ targ_big_endian=true ++ targ_extra_big_endian=false ++ ;; + mips*-*-*) + targ_obj=mips + targ_machine=EM_MIPS +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0013-Use-libtool-2.4.patch b/poky/meta/recipes-devtools/binutils/binutils/0013-Use-libtool-2.4.patch new file mode 100644 index 000000000..987a96f4b --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0013-Use-libtool-2.4.patch @@ -0,0 +1,23153 @@ +From 7e2ddee8704c9c3b73760dd9464b18fade563d85 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Sun, 14 Feb 2016 17:04:07 +0000 +Subject: [PATCH 13/17] Use libtool 2.4 + +get libtool sysroot support + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + bfd/configure | 1322 +++++++++++++++----- + bfd/configure.ac | 2 +- + binutils/configure | 1320 +++++++++++++++----- + gas/configure | 1320 +++++++++++++++----- + gprof/configure | 1320 +++++++++++++++----- + ld/configure | 1695 ++++++++++++++++++------- + libctf/configure | 1319 +++++++++++++++----- + libtool.m4 | 1080 +++++++++++----- + ltmain.sh | 2925 +++++++++++++++++++++++++++++--------------- + ltoptions.m4 | 2 +- + ltversion.m4 | 12 +- + lt~obsolete.m4 | 2 +- + opcodes/configure | 1320 +++++++++++++++----- + zlib/configure | 1320 +++++++++++++++----- + 14 files changed, 10931 insertions(+), 4028 deletions(-) + +diff --git a/bfd/configure b/bfd/configure +index 0340ed541b5..2012656b3b7 100755 +--- a/bfd/configure ++++ b/bfd/configure +@@ -704,6 +704,9 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL ++ac_ct_AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -822,6 +825,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_plugins + enable_largefile +@@ -1504,6 +1508,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + --with-mmap try using mmap for BFD input files if available + --with-separate-debug-dir=DIR + Look for global separate debug info in DIR +@@ -5693,8 +5699,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5734,7 +5740,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6420,8 +6426,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6470,6 +6476,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6486,6 +6566,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6654,7 +6739,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6808,6 +6894,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6821,11 +6922,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6841,7 +7095,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6861,11 +7115,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... 
" >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6881,7 +7139,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6900,6 +7158,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6911,16 +7173,72 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ + ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a + ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } + ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi + + + +@@ -7262,8 +7580,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -7299,6 +7617,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -7340,6 +7659,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -7351,7 +7682,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -7377,8 +7708,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -7388,8 +7719,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7426,6 +7757,16 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ + + + +@@ -7442,6 +7783,45 @@ fi + + + ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7653,6 +8033,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -8216,6 +8713,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -8380,7 +8879,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8469,7 +8969,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8767,8 +9267,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8934,6 +9432,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8996,7 +9500,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -9053,13 +9557,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -9120,6 +9628,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9470,7 +9983,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9569,12 +10083,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9588,8 +10102,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -9607,8 +10121,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9654,8 +10168,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9785,7 +10299,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9798,22 +10318,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9825,7 +10352,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9838,22 +10371,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9898,20 +10438,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9972,7 +10555,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9980,7 +10563,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9996,7 +10579,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -10020,10 +10603,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -10102,23 +10685,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -10203,7 +10799,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -10222,9 +10818,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10800,8 
+11396,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10834,13 +11431,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10932,7 +11587,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11728,7 +12383,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11731 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11772,10 +12427,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11834,7 +12489,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11837 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11878,10 +12533,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -14578,7 +15233,7 @@ SHARED_LDFLAGS= + if test "$enable_shared" = "yes"; then + x=`sed -n -e 's/^[ ]*PICFLAG[ ]*=[ ]*//p' < ../libiberty/Makefile | sed -n '$p'` + if test -n "$x"; then +- SHARED_LIBADD="-L`pwd`/../libiberty/pic -liberty" ++ SHARED_LIBADD="`pwd`/../libiberty/pic/libiberty.a" + fi + + case "${host}" in +@@ -17197,13 +17852,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED 
"$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ -17218,14 +17880,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -17258,12 +17923,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -17318,8 +17983,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -17329,12 +17999,14 @@ 
lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -17350,7 +18022,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -17386,6 +18057,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -18174,7 +18846,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -18277,19 +18950,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -18319,6 +19015,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -18328,6 +19030,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -18442,12 +19147,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. 
+ no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. + link_static_flag=$lt_lt_prog_compiler_static + +@@ -18534,9 +19239,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -18552,6 +19254,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -18584,210 +19289,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. 
+- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. 
+-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? 
|| _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/bfd/configure.ac b/bfd/configure.ac +index 8e86f8399ce..e8700c9d4d2 100644 +--- a/bfd/configure.ac ++++ b/bfd/configure.ac +@@ -314,7 +314,7 @@ changequote(,)dnl + x=`sed -n -e 's/^[ ]*PICFLAG[ ]*=[ ]*//p' < ../libiberty/Makefile | sed -n '$p'` + changequote([,])dnl + if test -n "$x"; then +- SHARED_LIBADD="-L`pwd`/../libiberty/pic -liberty" ++ SHARED_LIBADD="`pwd`/../libiberty/pic/libiberty.a" + fi + + case "${host}" in +diff --git a/binutils/configure b/binutils/configure +index 89c99abfeba..d3289a5efcd 100755 +--- a/binutils/configure ++++ b/binutils/configure +@@ -692,8 +692,11 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL + RANLIB ++ac_ct_AR + AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -810,6 +813,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_plugins + enable_largefile +@@ -1490,6 +1494,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + --with-debuginfod Enable debuginfo lookups with debuginfod + (auto/yes/no) + --with-system-zlib use installed libz +@@ -5467,8 +5473,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5508,7 +5514,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6194,8 +6200,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6244,6 +6250,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... 
" >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6260,6 +6340,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6428,7 +6513,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6582,6 +6668,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6597,9 +6698,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6615,7 +6869,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6635,11 +6889,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6655,7 +6913,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6674,6 +6932,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6685,12 +6947,10 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} + + + +@@ -6702,6 +6962,64 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } ++ ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
+ set dummy ${ac_tool_prefix}strip; ac_word=$2 +@@ -7036,8 +7354,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -7073,6 +7391,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -7114,6 +7433,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -7125,7 +7456,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -7151,8 +7482,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -7162,8 +7493,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7200,6 +7531,21 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. 
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + +@@ -7216,6 +7562,40 @@ fi + + + ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7427,6 +7807,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -7990,6 +8487,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -8185,7 +8684,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8274,7 +8774,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8572,8 +9072,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8739,6 +9237,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8801,7 +9305,7 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -8858,13 +9362,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8925,6 +9433,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9275,7 +9788,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9374,12 +9888,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9393,8 +9907,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ 
-9412,8 +9926,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9459,8 +9973,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9590,7 +10104,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9603,22 +10123,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9630,7 +10157,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. 
+- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9643,22 +10176,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9703,20 +10243,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. +- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. 
++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9777,7 +10360,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9785,7 +10368,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9801,7 +10384,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -9825,10 +10408,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname 
${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -9907,23 +10490,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -10008,7 +10604,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -10027,9 +10623,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10605,8 +11201,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by 
postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10639,13 +11236,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10737,7 +11392,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11533,7 +12188,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11536 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11577,10 +12232,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11639,7 +12294,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11642 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11683,10 +12338,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -16076,13 +16731,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-16097,14 +16759,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -16137,12 +16802,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -16197,8 +16862,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -16208,12 +16878,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -16229,7 +16901,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -16265,6 +16936,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -17031,7 +17703,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -17134,19 +17807,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -17176,6 +17872,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -17185,6 +17887,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -17299,12 +18004,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -17391,9 +18096,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -17409,6 +18111,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -17441,210 +18146,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/gas/configure b/gas/configure +index d4b13e6fc8b..dd5224c5c81 100755 +--- a/gas/configure ++++ b/gas/configure +@@ -681,8 +681,11 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL + RANLIB ++ac_ct_AR + AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -799,6 +802,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_plugins + enable_largefile +@@ -1490,6 +1494,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + --with-cpu=CPU default cpu variant is CPU (currently only supported + on ARC) + --with-system-zlib use installed libz +@@ -5277,8 +5283,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5318,7 +5324,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6004,8 +6010,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6054,6 +6060,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6070,6 +6150,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6238,7 +6323,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6392,6 +6478,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6407,9 +6508,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6425,7 +6679,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6445,11 +6699,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6465,7 +6723,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6484,6 +6742,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6495,12 +6757,10 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} + + + +@@ -6512,6 +6772,64 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } ++ ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. 
+ set dummy ${ac_tool_prefix}strip; ac_word=$2 +@@ -6846,8 +7164,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -6883,6 +7201,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -6924,6 +7243,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -6935,7 +7266,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -6961,8 +7292,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -6972,8 +7303,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7010,6 +7341,21 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. 
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + +@@ -7026,6 +7372,40 @@ fi + + + ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7237,6 +7617,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -7800,6 +8297,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -7995,7 +8494,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8084,7 +8584,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8382,8 +8882,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8549,6 +9047,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8611,7 +9115,7 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -8668,13 +9172,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8735,6 +9243,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9085,7 +9598,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9184,12 +9698,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9203,8 +9717,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ 
-9222,8 +9736,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9269,8 +9783,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9400,7 +9914,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9413,22 +9933,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9440,7 +9967,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. 
+- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9453,22 +9986,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9513,20 +10053,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. +- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. 
++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9587,7 +10170,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9595,7 +10178,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9611,7 +10194,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -9635,10 +10218,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname 
${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -9717,23 +10300,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -9818,7 +10414,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -9837,9 +10433,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10415,8 +11011,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by 
postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10449,13 +11046,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10547,7 +11202,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11343,7 +11998,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11346 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11387,10 +12042,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11449,7 +12104,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11452 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11493,10 +12148,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -15981,13 +16636,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-16002,14 +16664,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -16042,12 +16707,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -16102,8 +16767,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -16113,12 +16783,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -16134,7 +16806,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -16170,6 +16841,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -16943,7 +17615,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -17046,19 +17719,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -17088,6 +17784,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -17097,6 +17799,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -17211,12 +17916,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -17303,9 +18008,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -17321,6 +18023,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -17353,210 +18058,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/gprof/configure b/gprof/configure +index 3cf41b79116..e5d57f52912 100755 +--- a/gprof/configure ++++ b/gprof/configure +@@ -662,8 +662,11 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL + RANLIB ++ac_ct_AR + AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -780,6 +783,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_plugins + enable_largefile +@@ -1442,6 +1446,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + + Some influential environment variables: + CC C compiler command +@@ -5124,8 +5130,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5165,7 +5171,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -5851,8 +5857,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -5901,6 +5907,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -5917,6 +5997,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6085,7 +6170,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6239,6 +6325,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6252,11 +6353,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6272,7 +6526,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6292,11 +6546,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... 
" >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6312,7 +6570,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6331,6 +6589,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6342,16 +6604,72 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ + ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a + ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } + ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi + + + +@@ -6693,8 +7011,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -6730,6 +7048,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -6771,6 +7090,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -6782,7 +7113,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -6808,8 +7139,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -6819,8 +7150,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -6857,6 +7188,20 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ ++ + + + +@@ -6873,6 +7218,41 @@ fi + + + ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7084,6 +7464,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -7647,6 +8144,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -7842,7 +8341,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -7931,7 +8431,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8229,8 +8729,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8396,6 +8894,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8458,7 +8962,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -8515,13 +9019,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8582,6 +9090,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -8932,7 +9445,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9031,12 +9545,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9050,8 +9564,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -9069,8 +9583,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9116,8 +9630,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9247,7 +9761,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9260,22 +9780,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9287,7 +9814,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9300,22 +9833,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9360,20 +9900,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9434,7 +10017,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9442,7 +10025,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9458,7 +10041,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -9482,10 +10065,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -9564,23 +10147,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -9665,7 +10261,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -9684,9 +10280,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10262,8 +10858,9 
@@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10296,13 +10893,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10394,7 +11049,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11190,7 +11845,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11193 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11234,10 +11889,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11296,7 +11951,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11299 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11340,10 +11995,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -13309,13 +13964,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-13330,14 +13992,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -13370,12 +14035,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -13430,8 +14095,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -13441,12 +14111,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -13462,7 +14134,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -13498,6 +14169,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -14263,7 +14935,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -14366,19 +15039,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -14408,6 +15104,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -14417,6 +15119,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -14531,12 +15236,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -14623,9 +15328,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -14641,6 +15343,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -14673,210 +15378,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/ld/configure b/ld/configure +index 1c872c0db5f..fe938e6c99f 100755 +--- a/ld/configure ++++ b/ld/configure +@@ -691,8 +691,11 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL + RANLIB ++ac_ct_AR + AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -819,6 +822,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_plugins + enable_largefile +@@ -1520,6 +1524,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + --with-lib-path=dir1:dir2... set default LIB_PATH + --with-sysroot=DIR Search for usr/lib et al within DIR. + --with-system-zlib use installed libz +@@ -5973,8 +5979,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -6014,7 +6020,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6700,8 +6706,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6750,6 +6756,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6766,6 +6846,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6934,7 +7019,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -7088,6 +7174,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -7103,9 +7204,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -7121,7 +7375,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -7141,11 +7395,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -7161,7 +7419,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -7180,6 +7438,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -7191,12 +7453,12 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ + + + +@@ -7206,6 +7468,62 @@ test -z "$AR_FLAGS" && AR_FLAGS=cru + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ ++ ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } ++ ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++ ++ ++ ++ ++ + + + if test -n "$ac_tool_prefix"; then +@@ -7542,8 +7860,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -7579,6 +7897,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -7620,6 +7939,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -7631,7 +7962,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -7657,8 +7988,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -7668,8 +7999,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7706,6 +8037,19 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ + + + +@@ -7722,6 +8066,42 @@ fi + + + ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7933,6 +8313,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -8496,6 +8993,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -8564,6 +9063,16 @@ done + + + ++func_stripname_cnf () ++{ ++ case ${2} in ++ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; ++ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; ++ esac ++} # func_stripname_cnf ++ ++ ++ + + + # Set options +@@ -8692,7 +9201,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8781,7 +9291,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -9079,8 +9589,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -9246,6 +9754,12 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -9308,7 +9822,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -9365,13 +9879,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -9432,6 +9950,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9782,7 +10305,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9881,12 +10405,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9900,8 +10424,8 @@ 
_LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -9919,8 +10443,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9966,8 +10490,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -10097,7 +10621,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -10110,22 +10640,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -10137,7 +10674,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -10150,22 +10693,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -10209,21 +10759,64 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is +- # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ # no search path for DLLs. ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -10284,7 +10877,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -10292,7 +10885,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -10308,7 +10901,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -10332,10 +10925,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -10414,23 +11007,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -10515,7 +11121,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -10534,9 +11140,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -11112,8 
+11718,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -11146,13 +11753,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -11244,7 +11909,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -12040,7 +12705,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 12040 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -12084,10 +12749,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -12146,7 +12811,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 12146 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -12190,10 +12855,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -12585,6 +13250,7 @@ $RM -r conftest* + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC ++ lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX +@@ -12602,6 +13268,7 @@ $RM -r conftest* + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} ++ CFLAGS=$CXXFLAGS + compiler=$CC + compiler_CXX=$CC + for cc_temp in $compiler""; do +@@ -12884,7 +13551,13 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath__CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -12897,22 +13570,29 @@ main () + _ACEOF + if ac_fn_cxx_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. 
+-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath__CXX ++fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + +@@ -12925,7 +13605,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath__CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -12938,22 +13624,29 @@ main () + _ACEOF + if ac_fn_cxx_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath__CXX"; then ++ lt_cv_aix_libpath__CXX="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath__CXX ++fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -12996,29 +13689,75 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) +- # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, +- # as there is no search path for DLLs. 
+- hardcode_libdir_flag_spec_CXX='-L$libdir' +- export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' +- allow_undefined_flag_CXX=unsupported +- always_export_symbols_CXX=no +- enable_shared_with_static_runtimes_CXX=yes +- +- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file (1st line +- # is EXPORTS), use it as is; otherwise, prepend... +- archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- else +- ld_shlibs_CXX=no +- fi +- ;; ++ case $GXX,$cc_basename in ++ ,cl* | no,cl*) ++ # Native MSVC ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. ++ hardcode_libdir_flag_spec_CXX=' ' ++ allow_undefined_flag_CXX=unsupported ++ always_export_symbols_CXX=yes ++ file_list_spec_CXX='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' ++ enable_shared_with_static_runtimes_CXX=yes ++ # Don't use ranlib ++ old_postinstall_cmds_CXX='chmod 644 $oldlib' ++ postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ func_to_tool_file "$lt_outputfile"~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # g++ ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, ++ # as there is no search path for DLLs. 
++ hardcode_libdir_flag_spec_CXX='-L$libdir' ++ export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' ++ allow_undefined_flag_CXX=unsupported ++ always_export_symbols_CXX=no ++ enable_shared_with_static_runtimes_CXX=yes ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file (1st line ++ # is EXPORTS), use it as is; otherwise, prepend... ++ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ ld_shlibs_CXX=no ++ fi ++ ;; ++ esac ++ ;; + darwin* | rhapsody*) + + +@@ -13124,7 +13863,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + ;; + *) + if test "$GXX" = yes; then +- archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no +@@ -13195,10 +13934,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) +- archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) +- archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi +@@ -13239,9 +13978,9 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then +- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry 
${wl}${output_objdir}/so_locations -o $lib' + else +- archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes +@@ -13311,20 +14050,20 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ +- compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' ++ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ +- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ ++ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' +@@ -13519,7 +14258,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) +- archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry 
${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + +@@ -13565,7 +14304,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' +@@ -13606,9 +14345,9 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then +- archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' ++ archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when +@@ -13743,6 +14482,13 @@ private: + }; + _LT_EOF + ++ ++_lt_libdeps_save_CFLAGS=$CFLAGS ++case "$CC $CFLAGS " in #( ++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; ++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; ++esac ++ + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? +@@ -13756,7 +14502,7 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do +- case $p in ++ case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. +@@ -13765,13 +14511,22 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + test $p = "-R"; then + prev=$p + continue +- else +- prev= + fi + ++ # Expand the sysroot to ease extracting the directories later. ++ if test -z "$prev"; then ++ case $p in ++ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; ++ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; ++ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; ++ esac ++ fi ++ case $p in ++ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; ++ esac + if test "$pre_test_object_deps_done" = no; then +- case $p in +- -L* | -R*) ++ case ${prev} in ++ -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. +@@ -13791,8 +14546,10 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + postdeps_CXX="${postdeps_CXX} ${prev}${p}" + fi + fi ++ prev= + ;; + ++ *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. 
+@@ -13828,6 +14585,7 @@ else + fi + + $RM -f confest.$objext ++CFLAGS=$_lt_libdeps_save_CFLAGS + + # PORTME: override above test on systems where it is broken + case $host_os in +@@ -13863,7 +14621,7 @@ linux*) + + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as +@@ -13928,8 +14686,6 @@ fi + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then +@@ -14034,6 +14790,11 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + ;; + esac + ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). ++ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ++ ;; + dgux*) + case $cc_basename in + ec++*) +@@ -14186,7 +14947,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + ;; + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' +@@ -14251,10 +15012,17 @@ case $host_os in + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic_CXX" >&5 +-$as_echo "$lt_prog_compiler_pic_CXX" >&6; } +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic_CXX+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 ++$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } ++lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + + # + # Check to make sure the PIC flag actually works. +@@ -14312,6 +15080,8 @@ fi + + + ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -14489,6 +15259,7 @@ fi + $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. 
+@@ -14503,15 +15274,20 @@ $as_echo_n "checking whether the $compiler linker ($LD) supports shared librarie + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" +- ;; ++ ;; + cygwin* | mingw* | cegcc*) +- export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' +- ;; ++ case $cc_basename in ++ cl*) ;; ++ *) ++ export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ++ ;; ++ esac ++ ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- ;; ++ ;; + esac +- exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 + $as_echo "$ld_shlibs_CXX" >&6; } +@@ -14774,8 +15550,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -14807,13 +15584,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. 
"Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -14904,7 +15739,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -15363,6 +16198,7 @@ fi + fi # test -n "$compiler" + + CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC +@@ -18554,13 +19390,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ -18575,14 +19418,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' 
++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -18615,12 +19461,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -18659,8 +19505,8 @@ old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote + compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' + GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' + archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +@@ -18687,12 +19533,12 @@ hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_ + hardcode_automatic_CXX='`$ECHO 
"$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' + inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' + link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path_CXX='`$ECHO "$fix_srcfile_path_CXX" | $SED "$delay_single_quote_subst"`' + always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' + exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' + include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' + prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' ++postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' + file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' + hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' + compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +@@ -18730,8 +19576,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -18741,12 +19592,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -18762,7 +19615,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -18784,8 +19636,8 @@ LD_CXX \ + reload_flag_CXX \ + compiler_CXX \ + lt_prog_compiler_no_builtin_flag_CXX \ +-lt_prog_compiler_wl_CXX \ + lt_prog_compiler_pic_CXX \ ++lt_prog_compiler_wl_CXX \ + lt_prog_compiler_static_CXX \ + lt_cv_prog_compiler_c_o_CXX \ + export_dynamic_flag_spec_CXX \ +@@ -18797,7 +19649,6 @@ no_undefined_flag_CXX \ + hardcode_libdir_flag_spec_CXX \ + hardcode_libdir_flag_spec_ld_CXX \ + hardcode_libdir_separator_CXX \ +-fix_srcfile_path_CXX \ + exclude_expsyms_CXX \ + include_expsyms_CXX \ + file_list_spec_CXX \ +@@ -18831,6 +19682,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -18845,7 +19697,8 @@ archive_expsym_cmds_CXX \ + module_cmds_CXX \ + module_expsym_cmds_CXX \ + export_symbols_cmds_CXX \ +-prelink_cmds_CXX; do ++prelink_cmds_CXX \ ++postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" +@@ -19610,7 +20463,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. 
++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -19713,19 +20567,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -19755,6 +20632,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -19764,6 +20647,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -19878,12 +20764,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. + link_static_flag=$lt_lt_prog_compiler_static + +@@ -19970,9 +20856,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -19988,6 +20871,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. 
+ file_list_spec=$lt_file_list_spec + +@@ -20034,210 +20920,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. 
+-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +@@ -20265,12 +21110,12 @@ with_gcc=$GCC_CXX + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl_CXX +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic_CXX + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl_CXX ++ + # Compiler flag to prevent dynamic linking. + link_static_flag=$lt_lt_prog_compiler_static_CXX + +@@ -20357,9 +21202,6 @@ inherit_rpath=$inherit_rpath_CXX + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs_CXX + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path_CXX +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols_CXX + +@@ -20375,6 +21217,9 @@ include_expsyms=$lt_include_expsyms_CXX + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds_CXX + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds_CXX ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec_CXX + +diff --git a/libctf/configure b/libctf/configure +index 1dc1b65fac3..c5c2f36bbc0 100755 +--- a/libctf/configure ++++ b/libctf/configure +@@ -663,6 +663,8 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -790,6 +792,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_largefile + enable_werror_always +@@ -1448,6 +1451,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). 
+ --with-system-zlib use installed libz + + Some influential environment variables: +@@ -5406,8 +5411,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5518,7 +5523,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6204,8 +6209,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6254,6 +6259,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... 
" >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6270,6 +6349,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6438,7 +6522,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. ++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6592,6 +6677,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6607,9 +6707,162 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6625,7 +6878,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6645,11 +6898,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. 
++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6665,7 +6922,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6684,6 +6941,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6695,16 +6956,72 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ + ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a + ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } + ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi + + + +@@ -7046,8 +7363,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -7083,6 +7400,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -7124,6 +7442,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -7135,7 +7465,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -7161,8 +7491,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -7172,8 +7502,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7210,6 +7540,16 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ + + + +@@ -7226,6 +7566,45 @@ fi + + + ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7437,6 +7816,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -8000,6 +8496,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -8165,7 +8663,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8254,7 +8753,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8552,8 +9051,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8719,6 +9216,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8781,7 +9284,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -8838,13 +9341,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8905,6 +9412,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9255,7 +9767,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9354,12 +9867,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9373,8 +9886,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -9392,8 +9905,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9439,8 +9952,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9570,7 +10083,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9583,22 +10102,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9610,7 +10136,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9623,22 +10155,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9683,20 +10222,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9757,7 +10339,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9765,7 +10347,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9781,7 +10363,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -9805,10 +10387,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -9887,23 +10469,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -9988,7 +10583,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -10007,9 +10602,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10585,8 
+11180,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10619,13 +11215,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10717,7 +11371,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11513,7 +12167,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11516 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11557,10 +12211,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11619,7 +12273,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11622 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11663,10 +12317,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -14110,13 +14764,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-14131,14 +14792,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -14171,12 +14835,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -14231,8 +14895,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -14242,12 +14911,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -14263,7 +14934,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -14299,6 +14969,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -15055,7 +15726,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -15158,19 +15830,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -15200,6 +15895,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -15209,6 +15910,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -15323,12 +16027,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -15415,9 +16119,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -15433,6 +16134,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -15465,210 +16169,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/libtool.m4 b/libtool.m4 +index 434530059fa..e45fdc6998c 100644 +--- a/libtool.m4 ++++ b/libtool.m4 +@@ -1,7 +1,8 @@ + # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is free software; the Free Software Foundation gives +@@ -10,7 +11,8 @@ + + m4_define([_LT_COPYING], [dnl + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -37,7 +39,7 @@ m4_define([_LT_COPYING], [dnl + # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + ]) + +-# serial 56 LT_INIT ++# serial 57 LT_INIT + + + # LT_PREREQ(VERSION) +@@ -92,7 +94,8 @@ _LT_SET_OPTIONS([$0], [$1]) + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. 
+-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + AC_SUBST(LIBTOOL)dnl + + _LT_SETUP +@@ -166,10 +169,13 @@ _LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl + dnl + m4_require([_LT_FILEUTILS_DEFAULTS])dnl + m4_require([_LT_CHECK_SHELL_FEATURES])dnl ++m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl + m4_require([_LT_CMD_RELOAD])dnl + m4_require([_LT_CHECK_MAGIC_METHOD])dnl ++m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl + m4_require([_LT_CMD_OLD_ARCHIVE])dnl + m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl ++m4_require([_LT_WITH_SYSROOT])dnl + + _LT_CONFIG_LIBTOOL_INIT([ + # See if we are running on zsh, and set the options which allow our +@@ -199,7 +205,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -632,7 +638,7 @@ m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl + m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) + configured by $[0], generated by m4_PACKAGE_STRING. + +-Copyright (C) 2009 Free Software Foundation, Inc. ++Copyright (C) 2010 Free Software Foundation, Inc. + This config.lt script is free software; the Free Software Foundation + gives unlimited permision to copy, distribute and modify it." + +@@ -746,15 +752,12 @@ _LT_EOF + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) + +- _LT_PROG_XSI_SHELLFNS ++ _LT_PROG_REPLACE_SHELLFNS + +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + ], +@@ -980,6 +983,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD ++ echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD ++ $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -1069,30 +1074,41 @@ m4_defun([_LT_DARWIN_LINKER_FEATURES], + fi + ]) + +-# _LT_SYS_MODULE_PATH_AIX +-# ----------------------- ++# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) ++# ---------------------------------- + # Links a minimal program and checks the executable + # for the system default hardcoded library path. In most cases, + # this is /usr/lib:/lib, but when the MPI compilers are used + # the location of the communication and MPI libs are included too. + # If we don't find anything, use the default library path according + # to the aix ld manual. ++# Store the results from the different compilers for each TAGNAME. ++# Allow to override them for all tags through lt_cv_aix_libpath. 
+ m4_defun([_LT_SYS_MODULE_PATH_AIX], + [m4_require([_LT_DECL_SED])dnl +-AC_LINK_IFELSE([AC_LANG_SOURCE([AC_LANG_PROGRAM])],[ +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi],[]) +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], ++ [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ ++ lt_aix_libpath_sed='[ ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }]' ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi],[]) ++ if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then ++ _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" ++ fi ++ ]) ++ aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) ++fi + ])# _LT_SYS_MODULE_PATH_AIX + + +@@ -1117,7 +1133,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + + AC_MSG_CHECKING([how to print strings]) + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -1161,6 +1177,39 @@ _LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) + ])# _LT_PROG_ECHO_BACKSLASH + + ++# _LT_WITH_SYSROOT ++# ---------------- ++AC_DEFUN([_LT_WITH_SYSROOT], ++[AC_MSG_CHECKING([for sysroot]) ++AC_ARG_WITH([libtool-sysroot], ++[ --with-libtool-sysroot[=DIR] Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified).], ++[], [with_libtool_sysroot=no]) ++ ++dnl lt_sysroot will always be passed unquoted. We quote it here ++dnl in case the user passed a directory name. 
++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ AC_MSG_RESULT([${with_libtool_sysroot}]) ++ AC_MSG_ERROR([The sysroot must be an absolute path.]) ++ ;; ++esac ++ ++ AC_MSG_RESULT([${lt_sysroot:-no}]) ++_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl ++[dependent libraries, and in which our libraries should be installed.])]) ++ + # _LT_ENABLE_LOCK + # --------------- + m4_defun([_LT_ENABLE_LOCK], +@@ -1320,14 +1369,47 @@ need_locks="$enable_libtool_lock" + ])# _LT_ENABLE_LOCK + + ++# _LT_PROG_AR ++# ----------- ++m4_defun([_LT_PROG_AR], ++[AC_CHECK_TOOLS(AR, [ar], false) ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++_LT_DECL([], [AR], [1], [The archiver]) ++_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) ++ ++AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], ++ [lt_cv_ar_at_file=no ++ AC_COMPILE_IFELSE([AC_LANG_PROGRAM], ++ [echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' ++ AC_TRY_EVAL([lt_ar_try]) ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ AC_TRY_EVAL([lt_ar_try]) ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a ++ ]) ++ ]) ++ ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi ++_LT_DECL([], [archiver_list_spec], [1], ++ [How to feed a file listing to the archiver]) ++])# _LT_PROG_AR ++ ++ + # _LT_CMD_OLD_ARCHIVE + # ------------------- + m4_defun([_LT_CMD_OLD_ARCHIVE], +-[AC_CHECK_TOOL(AR, ar, false) +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru +-_LT_DECL([], [AR], [1], [The archiver]) +-_LT_DECL([], [AR_FLAGS], [1]) ++[_LT_PROG_AR + + AC_CHECK_TOOL(STRIP, strip, :) + test -z "$STRIP" && STRIP=: +@@ -1623,7 +1705,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-[#line __oline__ "configure" ++[#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -1667,10 +1749,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. 
*/ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -2210,8 +2292,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -2244,13 +2327,71 @@ m4_if([$1], [],[ + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -2342,7 +2483,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -2950,6 +3091,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -3016,7 +3162,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. ++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -3167,6 +3314,21 @@ tpf*) + ;; + esac + ]) ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -3174,7 +3336,11 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + _LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) + _LT_DECL([], [file_magic_cmd], [1], +- [Command to use when deplibs_check_method == "file_magic"]) ++ [Command to use when deplibs_check_method = "file_magic"]) ++_LT_DECL([], [file_magic_glob], [1], ++ [How to find potential files when deplibs_check_method = "file_magic"]) ++_LT_DECL([], [want_nocaseglob], [1], ++ [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) + ])# _LT_CHECK_MAGIC_METHOD + + +@@ -3277,6 +3443,67 @@ dnl aclocal-1.4 backwards compatibility: + dnl AC_DEFUN([AM_PROG_NM], []) + dnl AC_DEFUN([AC_PROG_NM], []) + ++# _LT_CHECK_SHAREDLIB_FROM_LINKLIB ++# -------------------------------- ++# how to determine the name of the shared library ++# associated with a specific link library. 
++# -- PORTME fill in with the dynamic library characteristics ++m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], ++[m4_require([_LT_DECL_EGREP]) ++m4_require([_LT_DECL_OBJDUMP]) ++m4_require([_LT_DECL_DLLTOOL]) ++AC_CACHE_CHECK([how to associate runtime and link libraries], ++lt_cv_sharedlib_from_linklib_cmd, ++[lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++]) ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++_LT_DECL([], [sharedlib_from_linklib_cmd], [1], ++ [Command to associate shared and link libraries]) ++])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB ++ ++ ++# _LT_PATH_MANIFEST_TOOL ++# ---------------------- ++# locate the manifest tool ++m4_defun([_LT_PATH_MANIFEST_TOOL], ++[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], ++ [lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&AS_MESSAGE_LOG_FD ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest*]) ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl ++])# _LT_PATH_MANIFEST_TOOL ++ + + # LT_LIB_M + # -------- +@@ -3403,8 +3630,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -3440,6 +3667,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. 
+ pipe_works=no +@@ -3473,6 +3701,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT@&t@_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT@&t@_DLSYM_CONST ++#else ++# define LT@&t@_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -3484,7 +3724,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT@&t@_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -3510,15 +3750,15 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD + fi +@@ -3551,6 +3791,13 @@ else + AC_MSG_RESULT(ok) + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ + _LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) + _LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], +@@ -3561,6 +3808,8 @@ _LT_DECL([global_symbol_to_c_name_address], + _LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) ++_LT_DECL([], [nm_file_list_spec], [1], ++ [Specify filename containing input files for $NM]) + ]) # _LT_CMD_GLOBAL_SYMBOLS + + +@@ -3572,7 +3821,6 @@ _LT_TAGVAR(lt_prog_compiler_wl, $1)= + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)= + +-AC_MSG_CHECKING([for $compiler option to produce PIC]) + m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then +@@ -3678,6 +3926,12 @@ m4_if([$1], [CXX], [ + ;; + esac + ;; ++ mingw* | cygwin* | os2* | pw32* | cegcc*) ++ # This hack is so that the source file can tell whether it is being ++ # built for inclusion in a dll (and should export symbols for example). 
++ m4_if([$1], [GCJ], [], ++ [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) ++ ;; + dgux*) + case $cc_basename in + ec++*) +@@ -3830,7 +4084,7 @@ m4_if([$1], [CXX], [ + ;; + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' +@@ -4053,6 +4307,12 @@ m4_if([$1], [CXX], [ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' ++ _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' ++ _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -4115,7 +4375,7 @@ m4_if([$1], [CXX], [ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; +@@ -4172,9 +4432,11 @@ case $host_os in + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; + esac +-AC_MSG_RESULT([$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +-_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], +- [How to pass a linker flag through the compiler]) ++ ++AC_CACHE_CHECK([for $compiler option to produce PIC], ++ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], ++ [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) ++_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + + # + # Check to make sure the PIC flag actually works. +@@ -4193,6 +4455,8 @@ fi + _LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + ++_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], ++ [How to pass a linker flag through the compiler]) + # + # Check to make sure the static flag actually works. + # +@@ -4213,6 +4477,7 @@ _LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + m4_defun([_LT_LINKER_SHLIBS], + [AC_REQUIRE([LT_PATH_LD])dnl + AC_REQUIRE([LT_PATH_NM])dnl ++m4_require([_LT_PATH_MANIFEST_TOOL])dnl + m4_require([_LT_FILEUTILS_DEFAULTS])dnl + m4_require([_LT_DECL_EGREP])dnl + m4_require([_LT_DECL_SED])dnl +@@ -4221,6 +4486,7 @@ m4_require([_LT_TAG_COMPILER])dnl + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. 
+@@ -4235,15 +4501,20 @@ m4_if([$1], [CXX], [ + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" +- ;; ++ ;; + cygwin* | mingw* | cegcc*) +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;/^.*[[ ]]__nm__/s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' +- ;; ++ case $cc_basename in ++ cl*) ;; ++ *) ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] ++ ;; ++ esac ++ ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' +- ;; ++ ;; + esac +- _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + ], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= +@@ -4411,7 +4682,8 @@ _LT_EOF + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' ++ _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -4510,12 +4782,12 @@ _LT_EOF + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_flag_spec_ld, $1)='-rpath $libdir' +- _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -4529,8 +4801,8 @@ _LT_EOF + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs 
$deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -4548,8 +4820,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +@@ -4595,8 +4867,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +@@ -4726,7 +4998,7 @@ _LT_EOF + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- _LT_SYS_MODULE_PATH_AIX ++ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else +@@ -4737,7 +5009,7 @@ _LT_EOF + else + # Determine the default libpath from the value encoded in an + # empty executable. +- _LT_SYS_MODULE_PATH_AIX ++ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. +@@ -4781,20 +5053,63 @@ _LT_EOF + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. 
+- _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' +- # FIXME: Should let the user specify the lib program. +- _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' +- _LT_TAGVAR(fix_srcfile_path, $1)='`cygpath -w "$srcfile"`' +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' ++ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ # FIXME: Should let the user specify the lib program. ++ _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -4828,7 +5143,7 @@ _LT_EOF + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no +@@ -4836,7 +5151,7 @@ _LT_EOF + + hpux9*) + if test "$GCC" = yes; then +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -4852,7 +5167,7 @@ _LT_EOF + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -4876,10 +5191,10 @@ _LT_EOF + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -4926,16 +5241,31 @@ _LT_EOF + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. 
+- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- AC_LINK_IFELSE([AC_LANG_SOURCE([int foo(void) {}])], +- _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ) +- LDFLAGS="$save_LDFLAGS" ++ # This should be the same for all languages, so no per-tag cache variable. ++ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], ++ [lt_cv_irix_exported_symbol], ++ [save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ AC_LINK_IFELSE( ++ [AC_LANG_SOURCE( ++ [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], ++ [C++], [[int foo (void) { return 0; }]], ++ [Fortran 77], [[ ++ subroutine foo ++ end]], ++ [Fortran], [[ ++ subroutine foo ++ end]])])], ++ [lt_cv_irix_exported_symbol=yes], ++ [lt_cv_irix_exported_symbol=no]) ++ LDFLAGS="$save_LDFLAGS"]) ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -5020,7 +5350,7 @@ _LT_EOF + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' +@@ -5039,9 +5369,9 @@ _LT_EOF + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs 
$compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -5313,8 +5643,6 @@ _LT_TAGDECL([], [inherit_rpath], [0], + to runtime path list]) + _LT_TAGDECL([], [link_all_deplibs], [0], + [Whether libtool must link a program against all its dependency libraries]) +-_LT_TAGDECL([], [fix_srcfile_path], [1], +- [Fix the shell variable $srcfile for the compiler]) + _LT_TAGDECL([], [always_export_symbols], [0], + [Set to "yes" if exported symbols are required]) + _LT_TAGDECL([], [export_symbols_cmds], [2], +@@ -5325,6 +5653,8 @@ _LT_TAGDECL([], [include_expsyms], [1], + [Symbols that must always be exported]) + _LT_TAGDECL([], [prelink_cmds], [2], + [Commands necessary for linking programs (against libraries) with templates]) ++_LT_TAGDECL([], [postlink_cmds], [2], ++ [Commands necessary for finishing linking programs]) + _LT_TAGDECL([], [file_list_spec], [1], + [Specify filename containing input files]) + dnl FIXME: Not yet implemented +@@ -5426,6 +5756,7 @@ CC="$lt_save_CC" + m4_defun([_LT_LANG_CXX_CONFIG], + [m4_require([_LT_FILEUTILS_DEFAULTS])dnl + m4_require([_LT_DECL_EGREP])dnl ++m4_require([_LT_PATH_MANIFEST_TOOL])dnl + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then +@@ -5487,6 +5818,7 @@ if test "$_lt_caught_CXX_error" != yes; then + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC ++ lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX +@@ -5504,6 +5836,7 @@ if test "$_lt_caught_CXX_error" != yes; then + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} ++ CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) +@@ -5667,7 +6000,7 @@ if test "$_lt_caught_CXX_error" != yes; then + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. +- _LT_SYS_MODULE_PATH_AIX ++ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -5679,7 +6012,7 @@ if test "$_lt_caught_CXX_error" != yes; then + else + # Determine the default libpath from the value encoded in an + # empty executable. +- _LT_SYS_MODULE_PATH_AIX ++ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. +@@ -5721,29 +6054,75 @@ if test "$_lt_caught_CXX_error" != yes; then + ;; + + cygwin* | mingw* | pw32* | cegcc*) +- # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, +- # as there is no search path for DLLs. 
+- _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' +- _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' +- _LT_TAGVAR(allow_undefined_flag, $1)=unsupported +- _LT_TAGVAR(always_export_symbols, $1)=no +- _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes +- +- if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- # If the export-symbols file already is a .def file (1st line +- # is EXPORTS), use it as is; otherwise, prepend... +- _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then +- cp $export_symbols $output_objdir/$soname.def; +- else +- echo EXPORTS > $output_objdir/$soname.def; +- cat $export_symbols >> $output_objdir/$soname.def; +- fi~ +- $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +- else +- _LT_TAGVAR(ld_shlibs, $1)=no +- fi +- ;; ++ case $GXX,$cc_basename in ++ ,cl* | no,cl*) ++ # Native MSVC ++ # hardcode_libdir_flag_spec is actually meaningless, as there is ++ # no search path for DLLs. ++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=yes ++ _LT_TAGVAR(file_list_spec, $1)='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ # Don't use ranlib ++ _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' ++ _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ func_to_tool_file "$lt_outputfile"~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # g++ ++ # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, ++ # as there is no search path for DLLs. 
++ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' ++ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' ++ _LT_TAGVAR(allow_undefined_flag, $1)=unsupported ++ _LT_TAGVAR(always_export_symbols, $1)=no ++ _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes ++ ++ if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ # If the export-symbols file already is a .def file (1st line ++ # is EXPORTS), use it as is; otherwise, prepend... ++ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ cp $export_symbols $output_objdir/$soname.def; ++ else ++ echo EXPORTS > $output_objdir/$soname.def; ++ cat $export_symbols >> $output_objdir/$soname.def; ++ fi~ ++ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' ++ else ++ _LT_TAGVAR(ld_shlibs, $1)=no ++ fi ++ ;; ++ esac ++ ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; +@@ -5818,7 +6197,7 @@ if test "$_lt_caught_CXX_error" != yes; then + ;; + *) + if test "$GXX" = yes; then +- _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no +@@ -5889,10 +6268,10 @@ if test "$_lt_caught_CXX_error" != yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi +@@ -5933,9 +6312,9 @@ if test "$_lt_caught_CXX_error" != yes; then + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects 
$libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes +@@ -6005,20 +6384,20 @@ if test "$_lt_caught_CXX_error" != yes; then + _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ +- compile_command="$compile_command `find $tpldir -name \*.o | $NL2SP`"' ++ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ +- $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | $NL2SP`~ ++ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ +- $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' ++ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' +@@ -6213,7 +6592,7 @@ if test "$_lt_caught_CXX_error" != yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ 
_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + +@@ -6259,7 +6638,7 @@ if test "$_lt_caught_CXX_error" != yes; then + + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' +@@ -6300,9 +6679,9 @@ if test "$_lt_caught_CXX_error" != yes; then + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then +- _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' ++ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when +@@ -6431,6 +6810,7 @@ if test "$_lt_caught_CXX_error" != yes; then + fi # test -n "$compiler" + + CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC +@@ -6445,6 +6825,29 @@ AC_LANG_POP + ])# _LT_LANG_CXX_CONFIG + + ++# _LT_FUNC_STRIPNAME_CNF ++# ---------------------- ++# func_stripname_cnf prefix suffix name ++# strip PREFIX and SUFFIX off of NAME. ++# PREFIX and SUFFIX must not contain globbing or regex special ++# characters, hashes, percent signs, but SUFFIX may contain a leading ++# dot (in which case that matches only a dot). ++# ++# This function is identical to the (non-XSI) version of func_stripname, ++# except this one can be used by m4 code that may be executed by configure, ++# rather than the libtool script. ++m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl ++AC_REQUIRE([_LT_DECL_SED]) ++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) ++func_stripname_cnf () ++{ ++ case ${2} in ++ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; ++ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; ++ esac ++} # func_stripname_cnf ++])# _LT_FUNC_STRIPNAME_CNF ++ + # _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) + # --------------------------------- + # Figure out "hidden" library dependencies from verbose +@@ -6453,6 +6856,7 @@ AC_LANG_POP + # objects, libraries and library flags. 
+ m4_defun([_LT_SYS_HIDDEN_LIBDEPS], + [m4_require([_LT_FILEUTILS_DEFAULTS])dnl ++AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl + # Dependencies to place before and after the object being linked: + _LT_TAGVAR(predep_objects, $1)= + _LT_TAGVAR(postdep_objects, $1)= +@@ -6503,6 +6907,13 @@ public class foo { + }; + _LT_EOF + ]) ++ ++_lt_libdeps_save_CFLAGS=$CFLAGS ++case "$CC $CFLAGS " in #( ++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; ++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; ++esac ++ + dnl Parse the compiler output and extract the necessary + dnl objects, libraries and library flags. + if AC_TRY_EVAL(ac_compile); then +@@ -6514,7 +6925,7 @@ if AC_TRY_EVAL(ac_compile); then + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do +- case $p in ++ case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. +@@ -6523,13 +6934,22 @@ if AC_TRY_EVAL(ac_compile); then + test $p = "-R"; then + prev=$p + continue +- else +- prev= + fi + ++ # Expand the sysroot to ease extracting the directories later. ++ if test -z "$prev"; then ++ case $p in ++ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; ++ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; ++ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; ++ esac ++ fi ++ case $p in ++ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; ++ esac + if test "$pre_test_object_deps_done" = no; then +- case $p in +- -L* | -R*) ++ case ${prev} in ++ -L | -R) + # Internal compiler library paths should come after those + # provided the user. The postdeps already come after the + # user supplied libs so there is no need to process them. +@@ -6549,8 +6969,10 @@ if AC_TRY_EVAL(ac_compile); then + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi ++ prev= + ;; + ++ *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. +@@ -6586,6 +7008,7 @@ else + fi + + $RM -f confest.$objext ++CFLAGS=$_lt_libdeps_save_CFLAGS + + # PORTME: override above test on systems where it is broken + m4_if([$1], [CXX], +@@ -6622,7 +7045,7 @@ linux*) + + solaris*) + case $cc_basename in +- CC*) ++ CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as +@@ -6735,7 +7158,9 @@ if test "$_lt_disable_F77" != yes; then + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC ++ lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} ++ CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) +@@ -6789,6 +7214,7 @@ if test "$_lt_disable_F77" != yes; then + + GCC=$lt_save_GCC + CC="$lt_save_CC" ++ CFLAGS="$lt_save_CFLAGS" + fi # test "$_lt_disable_F77" != yes + + AC_LANG_POP +@@ -6865,7 +7291,9 @@ if test "$_lt_disable_FC" != yes; then + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC ++ lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} ++ CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + +@@ -6921,7 +7349,8 @@ if test "$_lt_disable_FC" != yes; then + fi # test -n "$compiler" + + GCC=$lt_save_GCC +- CC="$lt_save_CC" ++ CC=$lt_save_CC ++ CFLAGS=$lt_save_CFLAGS + fi # test "$_lt_disable_FC" != yes + + AC_LANG_POP +@@ -6958,10 +7387,12 @@ _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. +-lt_save_CC="$CC" ++lt_save_CC=$CC ++lt_save_CFLAGS=$CFLAGS + lt_save_GCC=$GCC + GCC=yes + CC=${GCJ-"gcj"} ++CFLAGS=$GCJFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_TAGVAR(LD, $1)="$LD" +@@ -6992,7 +7423,8 @@ fi + AC_LANG_RESTORE + + GCC=$lt_save_GCC +-CC="$lt_save_CC" ++CC=$lt_save_CC ++CFLAGS=$lt_save_CFLAGS + ])# _LT_LANG_GCJ_CONFIG + + +@@ -7027,9 +7459,11 @@ _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" ++lt_save_CFLAGS=$CFLAGS + lt_save_GCC=$GCC + GCC= + CC=${RC-"windres"} ++CFLAGS= + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) +@@ -7042,7 +7476,8 @@ fi + + GCC=$lt_save_GCC + AC_LANG_RESTORE +-CC="$lt_save_CC" ++CC=$lt_save_CC ++CFLAGS=$lt_save_CFLAGS + ])# _LT_LANG_RC_CONFIG + + +@@ -7101,6 +7536,15 @@ _LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) + AC_SUBST([OBJDUMP]) + ]) + ++# _LT_DECL_DLLTOOL ++# ---------------- ++# Ensure DLLTOOL variable is set. ++m4_defun([_LT_DECL_DLLTOOL], ++[AC_CHECK_TOOL(DLLTOOL, dlltool, false) ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) ++AC_SUBST([DLLTOOL]) ++]) + + # _LT_DECL_SED + # ------------ +@@ -7194,8 +7638,8 @@ m4_defun([_LT_CHECK_SHELL_FEATURES], + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -7234,206 +7678,162 @@ _LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl + ])# _LT_CHECK_SHELL_FEATURES + + +-# _LT_PROG_XSI_SHELLFNS +-# --------------------- +-# Bourne and XSI compatible variants of some useful shell functions. +-m4_defun([_LT_PROG_XSI_SHELLFNS], +-[case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. 
+-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $[*] )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=${#1} +-} ++# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) ++# ------------------------------------------------------ ++# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and ++# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. ++m4_defun([_LT_PROG_FUNCTION_REPLACE], ++[dnl { ++sed -e '/^$1 ()$/,/^} # $1 /c\ ++$1 ()\ ++{\ ++m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) ++} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++]) + +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" + +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} ++# _LT_PROG_REPLACE_SHELLFNS ++# ------------------------- ++# Replace existing portable implementations of several shell functions with ++# equivalent extended shell implementations where those features are available.. ++m4_defun([_LT_PROG_REPLACE_SHELLFNS], ++[if test x"$xsi_shell" = xyes; then ++ _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl ++ case ${1} in ++ */*) func_dirname_result="${1%/*}${2}" ;; ++ * ) func_dirname_result="${3}" ;; ++ esac]) ++ ++ _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl ++ func_basename_result="${1##*/}"]) ++ ++ _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl ++ case ${1} in ++ */*) func_dirname_result="${1%/*}${2}" ;; ++ * ) func_dirname_result="${3}" ;; ++ esac ++ func_basename_result="${1##*/}"]) + +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} ++ _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl ++ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are ++ # positional parameters, so assign one to ordinary parameter first. 
++ func_stripname_result=${3} ++ func_stripname_result=${func_stripname_result#"${1}"} ++ func_stripname_result=${func_stripname_result%"${2}"}]) + +-dnl func_dirname_and_basename +-dnl A portable version of this function is already defined in general.m4sh +-dnl so there is no need for it here. ++ _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl ++ func_split_long_opt_name=${1%%=*} ++ func_split_long_opt_arg=${1#*=}]) + +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} ++ _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl ++ func_split_short_opt_arg=${1#??} ++ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) + +-# sed scripts: +-my_sed_long_opt='1s/^\(-[[^=]]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[[^=]]*=//' ++ _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl ++ case ${1} in ++ *.lo) func_lo2o_result=${1%.lo}.${objext} ;; ++ *) func_lo2o_result=${1} ;; ++ esac]) + +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} ++ _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) + +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} ++ _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) + +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[[^.]]*$/.lo/'` +-} ++ _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) ++fi + +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$[@]"` +-} ++if test x"$lt_shell_append" = xyes; then ++ _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) + +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$[1]" : ".*" 2>/dev/null || echo $max_cmd_len` +-} ++ _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl ++ func_quote_for_eval "${2}" ++dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ ++ eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) + +-_LT_EOF +-esac ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi + +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" ++if test x"$_lt_function_replace_fail" = x":"; then ++ AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) ++fi ++]) + +-# func_append var value +-# Append VALUE to the end of shell variable VAR. 
+-func_append () +-{ +- eval "$[1]+=\$[2]" +-} +-_LT_EOF ++# _LT_PATH_CONVERSION_FUNCTIONS ++# ----------------------------- ++# Determine which file name conversion functions should be used by ++# func_to_host_file (and, implicitly, by func_to_host_path). These are needed ++# for certain cross-compile configurations and native mingw. ++m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], ++[AC_REQUIRE([AC_CANONICAL_HOST])dnl ++AC_REQUIRE([AC_CANONICAL_BUILD])dnl ++AC_MSG_CHECKING([how to convert $build file names to $host format]) ++AC_CACHE_VAL(lt_cv_to_host_file_cmd, ++[case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac + ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$[1]=\$$[1]\$[2]" +-} +- +-_LT_EOF ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac + ;; +- esac ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++]) ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) ++_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], ++ [0], [convert $build file names to $host format])dnl ++ ++AC_MSG_CHECKING([how to convert $build file names to toolchain format]) ++AC_CACHE_VAL(lt_cv_to_tool_file_cmd, ++[#assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac + ]) ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) ++_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], ++ [0], [convert $build files to toolchain format])dnl ++])# _LT_PATH_CONVERSION_FUNCTIONS +diff --git a/ltmain.sh b/ltmain.sh +index 9503ec85d70..70e856e0659 100644 +--- a/ltmain.sh ++++ b/ltmain.sh +@@ -1,10 +1,9 @@ +-# Generated from ltmain.m4sh. + +-# libtool (GNU libtool 1.3134 2009-11-29) 2.2.7a ++# libtool (GNU libtool) 2.4 + # Written by Gordon Matzigkeit , 1996 + + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, +-# 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2007, 2008, 2009, 2010 Free Software Foundation, Inc. + # This is free software; see the source for copying conditions. There is NO + # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +@@ -38,7 +37,6 @@ + # -n, --dry-run display commands without modifying any files + # --features display basic configuration information and exit + # --mode=MODE use operation mode MODE +-# --no-finish let install mode avoid finish commands + # --preserve-dup-deps don't remove duplicate dependency libraries + # --quiet, --silent don't print informational messages + # --no-quiet, --no-silent +@@ -71,17 +69,19 @@ + # compiler: $LTCC + # compiler flags: $LTCFLAGS + # linker: $LD (gnu? 
$with_gnu_ld) +-# $progname: (GNU libtool 1.3134 2009-11-29) 2.2.7a ++# $progname: (GNU libtool) 2.4 + # automake: $automake_version + # autoconf: $autoconf_version + # + # Report bugs to . ++# GNU libtool home page: . ++# General help using GNU software: . + + PROGRAM=libtool + PACKAGE=libtool +-VERSION=2.2.7a +-TIMESTAMP=" 1.3134 2009-11-29" +-package_revision=1.3134 ++VERSION=2.4 ++TIMESTAMP="" ++package_revision=1.3293 + + # Be Bourne compatible + if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then +@@ -106,9 +106,6 @@ _LTECHO_EOF' + } + + # NLS nuisances: We save the old values to restore during execute mode. +-# Only set LANG and LC_ALL to C if already set. +-# These must not be set unconditionally because not all systems understand +-# e.g. LANG=C (notably SCO). + lt_user_locale= + lt_safe_locale= + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +@@ -121,15 +118,13 @@ do + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" + done ++LC_ALL=C ++LANGUAGE=C ++export LANGUAGE LC_ALL + + $lt_unset CDPATH + + +- +- +- +- +- + # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh + # is ksh but when the shell is invoked as "sh" and the current value of + # the _XPG environment variable is not equal to 1 (one), the special +@@ -140,7 +135,7 @@ progpath="$0" + + + : ${CP="cp -f"} +-: ${ECHO=$as_echo} ++test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} + : ${EGREP="/bin/grep -E"} + : ${FGREP="/bin/grep -F"} + : ${GREP="/bin/grep"} +@@ -149,7 +144,7 @@ progpath="$0" + : ${MKDIR="mkdir"} + : ${MV="mv -f"} + : ${RM="rm -f"} +-: ${SED="/mount/endor/wildenhu/local-x86_64/bin/sed"} ++: ${SED="/bin/sed"} + : ${SHELL="${CONFIG_SHELL-/bin/sh}"} + : ${Xsed="$SED -e 1s/^X//"} + +@@ -169,6 +164,27 @@ IFS=" $lt_nl" + dirname="s,/[^/]*$,," + basename="s,^.*/,," + ++# func_dirname file append nondir_replacement ++# Compute the dirname of FILE. If nonempty, add APPEND to the result, ++# otherwise set result to NONDIR_REPLACEMENT. ++func_dirname () ++{ ++ func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` ++ if test "X$func_dirname_result" = "X${1}"; then ++ func_dirname_result="${3}" ++ else ++ func_dirname_result="$func_dirname_result${2}" ++ fi ++} # func_dirname may be replaced by extended shell implementation ++ ++ ++# func_basename file ++func_basename () ++{ ++ func_basename_result=`$ECHO "${1}" | $SED "$basename"` ++} # func_basename may be replaced by extended shell implementation ++ ++ + # func_dirname_and_basename file append nondir_replacement + # perform func_basename and func_dirname in a single function + # call: +@@ -183,17 +199,31 @@ basename="s,^.*/,," + # those functions but instead duplicate the functionality here. + func_dirname_and_basename () + { +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +- func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` +-} ++ # Extract subdirectory from the argument. 
++ func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` ++ if test "X$func_dirname_result" = "X${1}"; then ++ func_dirname_result="${3}" ++ else ++ func_dirname_result="$func_dirname_result${2}" ++ fi ++ func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` ++} # func_dirname_and_basename may be replaced by extended shell implementation ++ ++ ++# func_stripname prefix suffix name ++# strip PREFIX and SUFFIX off of NAME. ++# PREFIX and SUFFIX must not contain globbing or regex special ++# characters, hashes, percent signs, but SUFFIX may contain a leading ++# dot (in which case that matches only a dot). ++# func_strip_suffix prefix name ++func_stripname () ++{ ++ case ${2} in ++ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; ++ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; ++ esac ++} # func_stripname may be replaced by extended shell implementation + +-# Generated shell functions inserted here. + + # These SED scripts presuppose an absolute path with a trailing slash. + pathcar='s,^/\([^/]*\).*$,\1,' +@@ -376,6 +406,15 @@ sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + # Same as above, but do not quote variable references. + double_quote_subst='s/\(["`\\]\)/\\\1/g' + ++# Sed substitution that turns a string into a regex matching for the ++# string literally. ++sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' ++ ++# Sed substitution that converts a w32 file name or path ++# which contains forward slashes, into one that contains ++# (escaped) backslashes. A very naive implementation. ++lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' ++ + # Re-`\' parameter expansions in output of double_quote_subst that were + # `\'-ed in input to the same. If an odd number of `\' preceded a '$' + # in input to double_quote_subst, that '$' was protected from expansion. +@@ -404,7 +443,7 @@ opt_warning=: + # name if it has been set yet. + func_echo () + { +- $ECHO "$progname${mode+: }$mode: $*" ++ $ECHO "$progname: ${opt_mode+$opt_mode: }$*" + } + + # func_verbose arg... +@@ -430,14 +469,14 @@ func_echo_all () + # Echo program name prefixed message to standard error. + func_error () + { +- $ECHO "$progname${mode+: }$mode: "${1+"$@"} 1>&2 ++ $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 + } + + # func_warning arg... + # Echo program name prefixed warning message to standard error. + func_warning () + { +- $opt_warning && $ECHO "$progname${mode+: }$mode: warning: "${1+"$@"} 1>&2 ++ $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +@@ -656,19 +695,35 @@ func_show_eval_locale () + fi + } + +- +- ++# func_tr_sh ++# Turn $1 into a string suitable for a shell variable name. ++# Result is stored in $func_tr_sh_result. All characters ++# not in the set a-zA-Z0-9_ are replaced with '_'. Further, ++# if $1 begins with a digit, a '_' is prepended as well. ++func_tr_sh () ++{ ++ case $1 in ++ [0-9]* | *[!a-zA-Z0-9_]*) ++ func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` ++ ;; ++ * ) ++ func_tr_sh_result=$1 ++ ;; ++ esac ++} + + + # func_version + # Echo version message to standard output and exit. + func_version () + { ++ $opt_debug ++ + $SED -n '/(C)/!b go + :more + /\./!{ + N +- s/\n# // ++ s/\n# / / + b more + } + :go +@@ -685,7 +740,9 @@ func_version () + # Echo short help message to standard output and exit. 
+ func_usage () + { +- $SED -n '/^# Usage:/,/^# *-h/ { ++ $opt_debug ++ ++ $SED -n '/^# Usage:/,/^# *.*--help/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ +@@ -701,7 +758,10 @@ func_usage () + # unless 'noexit' is passed as argument. + func_help () + { ++ $opt_debug ++ + $SED -n '/^# Usage:/,/# Report bugs to/ { ++ :print + s/^# // + s/^# *$// + s*\$progname*'$progname'* +@@ -714,7 +774,11 @@ func_help () + s/\$automake_version/'"`(automake --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(autoconf --version) 2>/dev/null |$SED 1q`"'/ + p +- }' < "$progpath" ++ d ++ } ++ /^# .* home page:/b print ++ /^# General help using/b print ++ ' < "$progpath" + ret=$? + if test -z "$1"; then + exit $ret +@@ -726,12 +790,39 @@ func_help () + # exit_cmd. + func_missing_arg () + { +- func_error "missing argument for $1" ++ $opt_debug ++ ++ func_error "missing argument for $1." + exit_cmd=exit + } + +-exit_cmd=: + ++# func_split_short_opt shortopt ++# Set func_split_short_opt_name and func_split_short_opt_arg shell ++# variables after splitting SHORTOPT after the 2nd character. ++func_split_short_opt () ++{ ++ my_sed_short_opt='1s/^\(..\).*$/\1/;q' ++ my_sed_short_rest='1s/^..\(.*\)$/\1/;q' ++ ++ func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` ++ func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` ++} # func_split_short_opt may be replaced by extended shell implementation ++ ++ ++# func_split_long_opt longopt ++# Set func_split_long_opt_name and func_split_long_opt_arg shell ++# variables after splitting LONGOPT at the `=' sign. ++func_split_long_opt () ++{ ++ my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' ++ my_sed_long_arg='1s/^--[^=]*=//' ++ ++ func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` ++ func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` ++} # func_split_long_opt may be replaced by extended shell implementation ++ ++exit_cmd=: + + + +@@ -741,26 +832,64 @@ magic="%%%MAGIC variable%%%" + magic_exe="%%%MAGIC EXE variable%%%" + + # Global variables. +-# $mode is unset + nonopt= +-execute_dlfiles= + preserve_args= + lo2o="s/\\.lo\$/.${objext}/" + o2lo="s/\\.${objext}\$/.lo/" + extracted_archives= + extracted_serial=0 + +-opt_dry_run=false +-opt_finish=: +-opt_duplicate_deps=false +-opt_silent=false +-opt_debug=: +- + # If this variable is set in any of the actions, the command in it + # will be execed at the end. This prevents here-documents from being + # left over by shells. + exec_cmd= + ++# func_append var value ++# Append VALUE to the end of shell variable VAR. ++func_append () ++{ ++ eval "${1}=\$${1}\${2}" ++} # func_append may be replaced by extended shell implementation ++ ++# func_append_quoted var value ++# Quote VALUE and append to the end of shell variable VAR, separated ++# by a space. ++func_append_quoted () ++{ ++ func_quote_for_eval "${2}" ++ eval "${1}=\$${1}\\ \$func_quote_for_eval_result" ++} # func_append_quoted may be replaced by extended shell implementation ++ ++ ++# func_arith arithmetic-term... ++func_arith () ++{ ++ func_arith_result=`expr "${@}"` ++} # func_arith may be replaced by extended shell implementation ++ ++ ++# func_len string ++# STRING may not start with a hyphen. 
++func_len () ++{ ++ func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` ++} # func_len may be replaced by extended shell implementation ++ ++ ++# func_lo2o object ++func_lo2o () ++{ ++ func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` ++} # func_lo2o may be replaced by extended shell implementation ++ ++ ++# func_xform libobj-or-source ++func_xform () ++{ ++ func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` ++} # func_xform may be replaced by extended shell implementation ++ ++ + # func_fatal_configuration arg... + # Echo program name prefixed message to standard error, followed by + # a configuration failure hint, and exit. +@@ -850,130 +979,204 @@ func_enable_tag () + esac + } + +-# Parse options once, thoroughly. This comes as soon as possible in +-# the script to make things like `libtool --version' happen quickly. ++# func_check_version_match ++# Ensure that we are using m4 macros, and libtool script from the same ++# release of libtool. ++func_check_version_match () + { ++ if test "$package_revision" != "$macro_revision"; then ++ if test "$VERSION" != "$macro_version"; then ++ if test -z "$macro_version"; then ++ cat >&2 <<_LT_EOF ++$progname: Version mismatch error. This is $PACKAGE $VERSION, but the ++$progname: definition of this LT_INIT comes from an older release. ++$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION ++$progname: and run autoconf again. ++_LT_EOF ++ else ++ cat >&2 <<_LT_EOF ++$progname: Version mismatch error. This is $PACKAGE $VERSION, but the ++$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. ++$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION ++$progname: and run autoconf again. ++_LT_EOF ++ fi ++ else ++ cat >&2 <<_LT_EOF ++$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, ++$progname: but the definition of this LT_INIT comes from revision $macro_revision. ++$progname: You should recreate aclocal.m4 with macros from revision $package_revision ++$progname: of $PACKAGE $VERSION and run autoconf again. 
++_LT_EOF ++ fi + +- # Shorthand for --mode=foo, only valid as the first argument +- case $1 in +- clean|clea|cle|cl) +- shift; set dummy --mode clean ${1+"$@"}; shift +- ;; +- compile|compil|compi|comp|com|co|c) +- shift; set dummy --mode compile ${1+"$@"}; shift +- ;; +- execute|execut|execu|exec|exe|ex|e) +- shift; set dummy --mode execute ${1+"$@"}; shift +- ;; +- finish|finis|fini|fin|fi|f) +- shift; set dummy --mode finish ${1+"$@"}; shift +- ;; +- install|instal|insta|inst|ins|in|i) +- shift; set dummy --mode install ${1+"$@"}; shift +- ;; +- link|lin|li|l) +- shift; set dummy --mode link ${1+"$@"}; shift +- ;; +- uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) +- shift; set dummy --mode uninstall ${1+"$@"}; shift +- ;; +- esac ++ exit $EXIT_MISMATCH ++ fi ++} ++ ++ ++# Shorthand for --mode=foo, only valid as the first argument ++case $1 in ++clean|clea|cle|cl) ++ shift; set dummy --mode clean ${1+"$@"}; shift ++ ;; ++compile|compil|compi|comp|com|co|c) ++ shift; set dummy --mode compile ${1+"$@"}; shift ++ ;; ++execute|execut|execu|exec|exe|ex|e) ++ shift; set dummy --mode execute ${1+"$@"}; shift ++ ;; ++finish|finis|fini|fin|fi|f) ++ shift; set dummy --mode finish ${1+"$@"}; shift ++ ;; ++install|instal|insta|inst|ins|in|i) ++ shift; set dummy --mode install ${1+"$@"}; shift ++ ;; ++link|lin|li|l) ++ shift; set dummy --mode link ${1+"$@"}; shift ++ ;; ++uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) ++ shift; set dummy --mode uninstall ${1+"$@"}; shift ++ ;; ++esac + +- # Parse non-mode specific arguments: +- while test "$#" -gt 0; do ++ ++ ++# Option defaults: ++opt_debug=: ++opt_dry_run=false ++opt_config=false ++opt_preserve_dup_deps=false ++opt_features=false ++opt_finish=false ++opt_help=false ++opt_help_all=false ++opt_silent=: ++opt_verbose=: ++opt_silent=false ++opt_verbose=false ++ ++ ++# Parse options once, thoroughly. This comes as soon as possible in the ++# script to make things like `--version' happen as quickly as we can. 
++{ ++ # this just eases exit handling ++ while test $# -gt 0; do + opt="$1" + shift +- + case $opt in +- --config) func_config ;; +- +- --debug) preserve_args="$preserve_args $opt" ++ --debug|-x) opt_debug='set -x' + func_echo "enabling shell trace mode" +- opt_debug='set -x' + $opt_debug + ;; +- +- -dlopen) test "$#" -eq 0 && func_missing_arg "$opt" && break +- execute_dlfiles="$execute_dlfiles $1" +- shift ++ --dry-run|--dryrun|-n) ++ opt_dry_run=: + ;; +- +- --dry-run | -n) opt_dry_run=: ;; +- --features) func_features ;; +- --finish) mode="finish" ;; +- --no-finish) opt_finish=false ;; +- +- --mode) test "$#" -eq 0 && func_missing_arg "$opt" && break +- case $1 in +- # Valid mode arguments: +- clean) ;; +- compile) ;; +- execute) ;; +- finish) ;; +- install) ;; +- link) ;; +- relink) ;; +- uninstall) ;; +- +- # Catch anything else as an error +- *) func_error "invalid argument for $opt" +- exit_cmd=exit +- break +- ;; +- esac +- +- mode="$1" ++ --config) ++ opt_config=: ++func_config ++ ;; ++ --dlopen|-dlopen) ++ optarg="$1" ++ opt_dlopen="${opt_dlopen+$opt_dlopen ++}$optarg" + shift + ;; +- + --preserve-dup-deps) +- opt_duplicate_deps=: ;; +- +- --quiet|--silent) preserve_args="$preserve_args $opt" +- opt_silent=: +- opt_verbose=false ++ opt_preserve_dup_deps=: + ;; +- +- --no-quiet|--no-silent) +- preserve_args="$preserve_args $opt" +- opt_silent=false ++ --features) ++ opt_features=: ++func_features + ;; +- +- --verbose| -v) preserve_args="$preserve_args $opt" ++ --finish) ++ opt_finish=: ++set dummy --mode finish ${1+"$@"}; shift ++ ;; ++ --help) ++ opt_help=: ++ ;; ++ --help-all) ++ opt_help_all=: ++opt_help=': help-all' ++ ;; ++ --mode) ++ test $# = 0 && func_missing_arg $opt && break ++ optarg="$1" ++ opt_mode="$optarg" ++case $optarg in ++ # Valid mode arguments: ++ clean|compile|execute|finish|install|link|relink|uninstall) ;; ++ ++ # Catch anything else as an error ++ *) func_error "invalid argument for $opt" ++ exit_cmd=exit ++ break ++ ;; ++esac ++ shift ++ ;; ++ --no-silent|--no-quiet) + opt_silent=false +- opt_verbose=: ++func_append preserve_args " $opt" + ;; +- +- --no-verbose) preserve_args="$preserve_args $opt" ++ --no-verbose) + opt_verbose=false ++func_append preserve_args " $opt" + ;; +- +- --tag) test "$#" -eq 0 && func_missing_arg "$opt" && break +- preserve_args="$preserve_args $opt $1" +- func_enable_tag "$1" # tagname is set here ++ --silent|--quiet) ++ opt_silent=: ++func_append preserve_args " $opt" ++ opt_verbose=false ++ ;; ++ --verbose|-v) ++ opt_verbose=: ++func_append preserve_args " $opt" ++opt_silent=false ++ ;; ++ --tag) ++ test $# = 0 && func_missing_arg $opt && break ++ optarg="$1" ++ opt_tag="$optarg" ++func_append preserve_args " $opt $optarg" ++func_enable_tag "$optarg" + shift + ;; + ++ -\?|-h) func_usage ;; ++ --help) func_help ;; ++ --version) func_version ;; ++ + # Separate optargs to long options: +- -dlopen=*|--mode=*|--tag=*) +- func_opt_split "$opt" +- set dummy "$func_opt_split_opt" "$func_opt_split_arg" ${1+"$@"} ++ --*=*) ++ func_split_long_opt "$opt" ++ set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} + shift + ;; + +- -\?|-h) func_usage ;; +- --help) opt_help=: ;; +- --help-all) opt_help=': help-all' ;; +- --version) func_version ;; +- +- -*) func_fatal_help "unrecognized option \`$opt'" ;; +- +- *) nonopt="$opt" +- break ++ # Separate non-argument short options: ++ -\?*|-h*|-n*|-v*) ++ func_split_short_opt "$opt" ++ set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} ++ shift + 
;; ++ ++ --) break ;; ++ -*) func_fatal_help "unrecognized option \`$opt'" ;; ++ *) set dummy "$opt" ${1+"$@"}; shift; break ;; + esac + done + ++ # Validate options: ++ ++ # save first non-option argument ++ if test "$#" -gt 0; then ++ nonopt="$opt" ++ shift ++ fi ++ ++ # preserve --debug ++ test "$opt_debug" = : || func_append preserve_args " --debug" + + case $host in + *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* ) +@@ -981,82 +1184,44 @@ func_enable_tag () + opt_duplicate_compiler_generated_deps=: + ;; + *) +- opt_duplicate_compiler_generated_deps=$opt_duplicate_deps ++ opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + +- # Having warned about all mis-specified options, bail out if +- # anything was wrong. +- $exit_cmd $EXIT_FAILURE +-} ++ $opt_help || { ++ # Sanity checks first: ++ func_check_version_match + +-# func_check_version_match +-# Ensure that we are using m4 macros, and libtool script from the same +-# release of libtool. +-func_check_version_match () +-{ +- if test "$package_revision" != "$macro_revision"; then +- if test "$VERSION" != "$macro_version"; then +- if test -z "$macro_version"; then +- cat >&2 <<_LT_EOF +-$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +-$progname: definition of this LT_INIT comes from an older release. +-$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +-$progname: and run autoconf again. +-_LT_EOF +- else +- cat >&2 <<_LT_EOF +-$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +-$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +-$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +-$progname: and run autoconf again. +-_LT_EOF +- fi +- else +- cat >&2 <<_LT_EOF +-$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +-$progname: but the definition of this LT_INIT comes from revision $macro_revision. +-$progname: You should recreate aclocal.m4 with macros from revision $package_revision +-$progname: of $PACKAGE $VERSION and run autoconf again. +-_LT_EOF ++ if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then ++ func_fatal_configuration "not configured to build any kind of library" + fi + +- exit $EXIT_MISMATCH +- fi +-} +- ++ # Darwin sucks ++ eval std_shrext=\"$shrext_cmds\" + +-## ----------- ## +-## Main. ## +-## ----------- ## +- +-$opt_help || { +- # Sanity checks first: +- func_check_version_match +- +- if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then +- func_fatal_configuration "not configured to build any kind of library" +- fi ++ # Only execute mode is allowed to have -dlopen flags. ++ if test -n "$opt_dlopen" && test "$opt_mode" != execute; then ++ func_error "unrecognized option \`-dlopen'" ++ $ECHO "$help" 1>&2 ++ exit $EXIT_FAILURE ++ fi + +- test -z "$mode" && func_fatal_error "error: you must specify a MODE." ++ # Change the help message to a mode-specific one. ++ generic_help="$help" ++ help="Try \`$progname --help --mode=$opt_mode' for more information." ++ } + + +- # Darwin sucks +- eval "std_shrext=\"$shrext_cmds\"" ++ # Bail if the options were screwed ++ $exit_cmd $EXIT_FAILURE ++} + + +- # Only execute mode is allowed to have -dlopen flags. +- if test -n "$execute_dlfiles" && test "$mode" != execute; then +- func_error "unrecognized option \`-dlopen'" +- $ECHO "$help" 1>&2 +- exit $EXIT_FAILURE +- fi + +- # Change the help message to a mode-specific one. 
+- generic_help="$help" +- help="Try \`$progname --help --mode=$mode' for more information." +-} + ++## ----------- ## ++## Main. ## ++## ----------- ## + + # func_lalib_p file + # True iff FILE is a libtool `.la' library or `.lo' object file. +@@ -1121,12 +1286,9 @@ func_ltwrapper_executable_p () + # temporary ltwrapper_script. + func_ltwrapper_scriptname () + { +- func_ltwrapper_scriptname_result="" +- if func_ltwrapper_executable_p "$1"; then +- func_dirname_and_basename "$1" "" "." +- func_stripname '' '.exe' "$func_basename_result" +- func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" +- fi ++ func_dirname_and_basename "$1" "" "." ++ func_stripname '' '.exe' "$func_basename_result" ++ func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" + } + + # func_ltwrapper_p file +@@ -1149,7 +1311,7 @@ func_execute_cmds () + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs +- eval "cmd=\"$cmd\"" ++ eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +@@ -1172,6 +1334,37 @@ func_source () + } + + ++# func_resolve_sysroot PATH ++# Replace a leading = in PATH with a sysroot. Store the result into ++# func_resolve_sysroot_result ++func_resolve_sysroot () ++{ ++ func_resolve_sysroot_result=$1 ++ case $func_resolve_sysroot_result in ++ =*) ++ func_stripname '=' '' "$func_resolve_sysroot_result" ++ func_resolve_sysroot_result=$lt_sysroot$func_stripname_result ++ ;; ++ esac ++} ++ ++# func_replace_sysroot PATH ++# If PATH begins with the sysroot, replace it with = and ++# store the result into func_replace_sysroot_result. ++func_replace_sysroot () ++{ ++ case "$lt_sysroot:$1" in ++ ?*:"$lt_sysroot"*) ++ func_stripname "$lt_sysroot" '' "$1" ++ func_replace_sysroot_result="=$func_stripname_result" ++ ;; ++ *) ++ # Including no sysroot. ++ func_replace_sysroot_result=$1 ++ ;; ++ esac ++} ++ + # func_infer_tag arg + # Infer tagged configuration to use if any are available and + # if one wasn't chosen via the "--tag" command line option. +@@ -1184,8 +1377,7 @@ func_infer_tag () + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do +- func_quote_for_eval "$arg" +- CC_quoted="$CC_quoted $func_quote_for_eval_result" ++ func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` +@@ -1204,8 +1396,7 @@ func_infer_tag () + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. +- func_quote_for_eval "$arg" +- CC_quoted="$CC_quoted $func_quote_for_eval_result" ++ func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` +@@ -1274,6 +1465,486 @@ EOF + } + } + ++ ++################################################## ++# FILE NAME AND PATH CONVERSION HELPER FUNCTIONS # ++################################################## ++ ++# func_convert_core_file_wine_to_w32 ARG ++# Helper function used by file name conversion functions when $build is *nix, ++# and $host is mingw, cygwin, or some other w32 environment. Relies on a ++# correctly configured wine environment available, with the winepath program ++# in $build's $PATH. ++# ++# ARG is the $build file name to be converted to w32 format. 
++# Result is available in $func_convert_core_file_wine_to_w32_result, and will ++# be empty on error (or when ARG is empty) ++func_convert_core_file_wine_to_w32 () ++{ ++ $opt_debug ++ func_convert_core_file_wine_to_w32_result="$1" ++ if test -n "$1"; then ++ # Unfortunately, winepath does not exit with a non-zero error code, so we ++ # are forced to check the contents of stdout. On the other hand, if the ++ # command is not found, the shell will set an exit code of 127 and print ++ # *an error message* to stdout. So we must check for both error code of ++ # zero AND non-empty stdout, which explains the odd construction: ++ func_convert_core_file_wine_to_w32_tmp=`winepath -w "$1" 2>/dev/null` ++ if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then ++ func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | ++ $SED -e "$lt_sed_naive_backslashify"` ++ else ++ func_convert_core_file_wine_to_w32_result= ++ fi ++ fi ++} ++# end: func_convert_core_file_wine_to_w32 ++ ++ ++# func_convert_core_path_wine_to_w32 ARG ++# Helper function used by path conversion functions when $build is *nix, and ++# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly ++# configured wine environment available, with the winepath program in $build's ++# $PATH. Assumes ARG has no leading or trailing path separator characters. ++# ++# ARG is path to be converted from $build format to win32. ++# Result is available in $func_convert_core_path_wine_to_w32_result. ++# Unconvertible file (directory) names in ARG are skipped; if no directory names ++# are convertible, then the result may be empty. ++func_convert_core_path_wine_to_w32 () ++{ ++ $opt_debug ++ # unfortunately, winepath doesn't convert paths, only file names ++ func_convert_core_path_wine_to_w32_result="" ++ if test -n "$1"; then ++ oldIFS=$IFS ++ IFS=: ++ for func_convert_core_path_wine_to_w32_f in $1; do ++ IFS=$oldIFS ++ func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" ++ if test -n "$func_convert_core_file_wine_to_w32_result" ; then ++ if test -z "$func_convert_core_path_wine_to_w32_result"; then ++ func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" ++ else ++ func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" ++ fi ++ fi ++ done ++ IFS=$oldIFS ++ fi ++} ++# end: func_convert_core_path_wine_to_w32 ++ ++ ++# func_cygpath ARGS... ++# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when ++# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) ++# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or ++# (2), returns the Cygwin file name or path in func_cygpath_result (input ++# file name or path is assumed to be in w32 format, as previously converted ++# from $build's *nix or MSYS format). In case (3), returns the w32 file name ++# or path in func_cygpath_result (input file name or path is assumed to be in ++# Cygwin format). Returns an empty string on error. ++# ++# ARGS are passed to cygpath, with the last one being the file name or path to ++# be converted. ++# ++# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH ++# environment variable; do not put it in $PATH. ++func_cygpath () ++{ ++ $opt_debug ++ if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then ++ func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` ++ if test "$?" 
-ne 0; then ++ # on failure, ensure result is empty ++ func_cygpath_result= ++ fi ++ else ++ func_cygpath_result= ++ func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" ++ fi ++} ++#end: func_cygpath ++ ++ ++# func_convert_core_msys_to_w32 ARG ++# Convert file name or path ARG from MSYS format to w32 format. Return ++# result in func_convert_core_msys_to_w32_result. ++func_convert_core_msys_to_w32 () ++{ ++ $opt_debug ++ # awkward: cmd appends spaces to result ++ func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | ++ $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` ++} ++#end: func_convert_core_msys_to_w32 ++ ++ ++# func_convert_file_check ARG1 ARG2 ++# Verify that ARG1 (a file name in $build format) was converted to $host ++# format in ARG2. Otherwise, emit an error message, but continue (resetting ++# func_to_host_file_result to ARG1). ++func_convert_file_check () ++{ ++ $opt_debug ++ if test -z "$2" && test -n "$1" ; then ++ func_error "Could not determine host file name corresponding to" ++ func_error " \`$1'" ++ func_error "Continuing, but uninstalled executables may not work." ++ # Fallback: ++ func_to_host_file_result="$1" ++ fi ++} ++# end func_convert_file_check ++ ++ ++# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH ++# Verify that FROM_PATH (a path in $build format) was converted to $host ++# format in TO_PATH. Otherwise, emit an error message, but continue, resetting ++# func_to_host_file_result to a simplistic fallback value (see below). ++func_convert_path_check () ++{ ++ $opt_debug ++ if test -z "$4" && test -n "$3"; then ++ func_error "Could not determine the host path corresponding to" ++ func_error " \`$3'" ++ func_error "Continuing, but uninstalled executables may not work." ++ # Fallback. This is a deliberately simplistic "conversion" and ++ # should not be "improved". See libtool.info. ++ if test "x$1" != "x$2"; then ++ lt_replace_pathsep_chars="s|$1|$2|g" ++ func_to_host_path_result=`echo "$3" | ++ $SED -e "$lt_replace_pathsep_chars"` ++ else ++ func_to_host_path_result="$3" ++ fi ++ fi ++} ++# end func_convert_path_check ++ ++ ++# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG ++# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT ++# and appending REPL if ORIG matches BACKPAT. ++func_convert_path_front_back_pathsep () ++{ ++ $opt_debug ++ case $4 in ++ $1 ) func_to_host_path_result="$3$func_to_host_path_result" ++ ;; ++ esac ++ case $4 in ++ $2 ) func_append func_to_host_path_result "$3" ++ ;; ++ esac ++} ++# end func_convert_path_front_back_pathsep ++ ++ ++################################################## ++# $build to $host FILE NAME CONVERSION FUNCTIONS # ++################################################## ++# invoked via `$to_host_file_cmd ARG' ++# ++# In each case, ARG is the path to be converted from $build to $host format. ++# Result will be available in $func_to_host_file_result. ++ ++ ++# func_to_host_file ARG ++# Converts the file name ARG from $build format to $host format. Return result ++# in func_to_host_file_result. ++func_to_host_file () ++{ ++ $opt_debug ++ $to_host_file_cmd "$1" ++} ++# end func_to_host_file ++ ++ ++# func_to_tool_file ARG LAZY ++# converts the file name ARG from $build format to toolchain format. Return ++# result in func_to_tool_file_result. If the conversion in use is listed ++# in (the comma separated) LAZY, no conversion takes place. 
++func_to_tool_file () ++{ ++ $opt_debug ++ case ,$2, in ++ *,"$to_tool_file_cmd",*) ++ func_to_tool_file_result=$1 ++ ;; ++ *) ++ $to_tool_file_cmd "$1" ++ func_to_tool_file_result=$func_to_host_file_result ++ ;; ++ esac ++} ++# end func_to_tool_file ++ ++ ++# func_convert_file_noop ARG ++# Copy ARG to func_to_host_file_result. ++func_convert_file_noop () ++{ ++ func_to_host_file_result="$1" ++} ++# end func_convert_file_noop ++ ++ ++# func_convert_file_msys_to_w32 ARG ++# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic ++# conversion to w32 is not available inside the cwrapper. Returns result in ++# func_to_host_file_result. ++func_convert_file_msys_to_w32 () ++{ ++ $opt_debug ++ func_to_host_file_result="$1" ++ if test -n "$1"; then ++ func_convert_core_msys_to_w32 "$1" ++ func_to_host_file_result="$func_convert_core_msys_to_w32_result" ++ fi ++ func_convert_file_check "$1" "$func_to_host_file_result" ++} ++# end func_convert_file_msys_to_w32 ++ ++ ++# func_convert_file_cygwin_to_w32 ARG ++# Convert file name ARG from Cygwin to w32 format. Returns result in ++# func_to_host_file_result. ++func_convert_file_cygwin_to_w32 () ++{ ++ $opt_debug ++ func_to_host_file_result="$1" ++ if test -n "$1"; then ++ # because $build is cygwin, we call "the" cygpath in $PATH; no need to use ++ # LT_CYGPATH in this case. ++ func_to_host_file_result=`cygpath -m "$1"` ++ fi ++ func_convert_file_check "$1" "$func_to_host_file_result" ++} ++# end func_convert_file_cygwin_to_w32 ++ ++ ++# func_convert_file_nix_to_w32 ARG ++# Convert file name ARG from *nix to w32 format. Requires a wine environment ++# and a working winepath. Returns result in func_to_host_file_result. ++func_convert_file_nix_to_w32 () ++{ ++ $opt_debug ++ func_to_host_file_result="$1" ++ if test -n "$1"; then ++ func_convert_core_file_wine_to_w32 "$1" ++ func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" ++ fi ++ func_convert_file_check "$1" "$func_to_host_file_result" ++} ++# end func_convert_file_nix_to_w32 ++ ++ ++# func_convert_file_msys_to_cygwin ARG ++# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. ++# Returns result in func_to_host_file_result. ++func_convert_file_msys_to_cygwin () ++{ ++ $opt_debug ++ func_to_host_file_result="$1" ++ if test -n "$1"; then ++ func_convert_core_msys_to_w32 "$1" ++ func_cygpath -u "$func_convert_core_msys_to_w32_result" ++ func_to_host_file_result="$func_cygpath_result" ++ fi ++ func_convert_file_check "$1" "$func_to_host_file_result" ++} ++# end func_convert_file_msys_to_cygwin ++ ++ ++# func_convert_file_nix_to_cygwin ARG ++# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed ++# in a wine environment, working winepath, and LT_CYGPATH set. Returns result ++# in func_to_host_file_result. ++func_convert_file_nix_to_cygwin () ++{ ++ $opt_debug ++ func_to_host_file_result="$1" ++ if test -n "$1"; then ++ # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. ++ func_convert_core_file_wine_to_w32 "$1" ++ func_cygpath -u "$func_convert_core_file_wine_to_w32_result" ++ func_to_host_file_result="$func_cygpath_result" ++ fi ++ func_convert_file_check "$1" "$func_to_host_file_result" ++} ++# end func_convert_file_nix_to_cygwin ++ ++ ++############################################# ++# $build to $host PATH CONVERSION FUNCTIONS # ++############################################# ++# invoked via `$to_host_path_cmd ARG' ++# ++# In each case, ARG is the path to be converted from $build to $host format. 
++# The result will be available in $func_to_host_path_result. ++# ++# Path separators are also converted from $build format to $host format. If ++# ARG begins or ends with a path separator character, it is preserved (but ++# converted to $host format) on output. ++# ++# All path conversion functions are named using the following convention: ++# file name conversion function : func_convert_file_X_to_Y () ++# path conversion function : func_convert_path_X_to_Y () ++# where, for any given $build/$host combination the 'X_to_Y' value is the ++# same. If conversion functions are added for new $build/$host combinations, ++# the two new functions must follow this pattern, or func_init_to_host_path_cmd ++# will break. ++ ++ ++# func_init_to_host_path_cmd ++# Ensures that function "pointer" variable $to_host_path_cmd is set to the ++# appropriate value, based on the value of $to_host_file_cmd. ++to_host_path_cmd= ++func_init_to_host_path_cmd () ++{ ++ $opt_debug ++ if test -z "$to_host_path_cmd"; then ++ func_stripname 'func_convert_file_' '' "$to_host_file_cmd" ++ to_host_path_cmd="func_convert_path_${func_stripname_result}" ++ fi ++} ++ ++ ++# func_to_host_path ARG ++# Converts the path ARG from $build format to $host format. Return result ++# in func_to_host_path_result. ++func_to_host_path () ++{ ++ $opt_debug ++ func_init_to_host_path_cmd ++ $to_host_path_cmd "$1" ++} ++# end func_to_host_path ++ ++ ++# func_convert_path_noop ARG ++# Copy ARG to func_to_host_path_result. ++func_convert_path_noop () ++{ ++ func_to_host_path_result="$1" ++} ++# end func_convert_path_noop ++ ++ ++# func_convert_path_msys_to_w32 ARG ++# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic ++# conversion to w32 is not available inside the cwrapper. Returns result in ++# func_to_host_path_result. ++func_convert_path_msys_to_w32 () ++{ ++ $opt_debug ++ func_to_host_path_result="$1" ++ if test -n "$1"; then ++ # Remove leading and trailing path separator characters from ARG. MSYS ++ # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; ++ # and winepath ignores them completely. ++ func_stripname : : "$1" ++ func_to_host_path_tmp1=$func_stripname_result ++ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" ++ func_to_host_path_result="$func_convert_core_msys_to_w32_result" ++ func_convert_path_check : ";" \ ++ "$func_to_host_path_tmp1" "$func_to_host_path_result" ++ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" ++ fi ++} ++# end func_convert_path_msys_to_w32 ++ ++ ++# func_convert_path_cygwin_to_w32 ARG ++# Convert path ARG from Cygwin to w32 format. Returns result in ++# func_to_host_file_result. ++func_convert_path_cygwin_to_w32 () ++{ ++ $opt_debug ++ func_to_host_path_result="$1" ++ if test -n "$1"; then ++ # See func_convert_path_msys_to_w32: ++ func_stripname : : "$1" ++ func_to_host_path_tmp1=$func_stripname_result ++ func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` ++ func_convert_path_check : ";" \ ++ "$func_to_host_path_tmp1" "$func_to_host_path_result" ++ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" ++ fi ++} ++# end func_convert_path_cygwin_to_w32 ++ ++ ++# func_convert_path_nix_to_w32 ARG ++# Convert path ARG from *nix to w32 format. Requires a wine environment and ++# a working winepath. Returns result in func_to_host_file_result. 
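The conversion layer above selects a file converter once (via $to_host_file_cmd) and derives the matching path converter's name from it, which is why the X_to_Y naming convention matters. As an illustration only (not part of the patch), a minimal sketch of that dispatch using just the no-op converters, with POSIX prefix stripping standing in for func_stripname:

  to_host_file_cmd=func_convert_file_noop   # normally chosen per $build/$host (selection code not shown)
  to_host_path_cmd=

  func_convert_file_noop ()
  {
    func_to_host_file_result=$1
  }

  func_convert_path_noop ()
  {
    func_to_host_path_result=$1
  }

  func_init_to_host_path_cmd ()
  {
    # Derive the path converter from the file converter, e.g.
    # func_convert_file_msys_to_w32 -> func_convert_path_msys_to_w32.
    if test -z "$to_host_path_cmd"; then
      to_host_path_cmd="func_convert_path_${to_host_file_cmd#func_convert_file_}"
    fi
  }

  func_to_host_path ()
  {
    func_init_to_host_path_cmd
    $to_host_path_cmd "$1"
  }

  func_to_host_path "/usr/lib:/usr/local/lib"
  echo "$func_to_host_path_result"   # the noop pair returns the input unchanged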
++func_convert_path_nix_to_w32 () ++{ ++ $opt_debug ++ func_to_host_path_result="$1" ++ if test -n "$1"; then ++ # See func_convert_path_msys_to_w32: ++ func_stripname : : "$1" ++ func_to_host_path_tmp1=$func_stripname_result ++ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" ++ func_to_host_path_result="$func_convert_core_path_wine_to_w32_result" ++ func_convert_path_check : ";" \ ++ "$func_to_host_path_tmp1" "$func_to_host_path_result" ++ func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" ++ fi ++} ++# end func_convert_path_nix_to_w32 ++ ++ ++# func_convert_path_msys_to_cygwin ARG ++# Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. ++# Returns result in func_to_host_file_result. ++func_convert_path_msys_to_cygwin () ++{ ++ $opt_debug ++ func_to_host_path_result="$1" ++ if test -n "$1"; then ++ # See func_convert_path_msys_to_w32: ++ func_stripname : : "$1" ++ func_to_host_path_tmp1=$func_stripname_result ++ func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" ++ func_cygpath -u -p "$func_convert_core_msys_to_w32_result" ++ func_to_host_path_result="$func_cygpath_result" ++ func_convert_path_check : : \ ++ "$func_to_host_path_tmp1" "$func_to_host_path_result" ++ func_convert_path_front_back_pathsep ":*" "*:" : "$1" ++ fi ++} ++# end func_convert_path_msys_to_cygwin ++ ++ ++# func_convert_path_nix_to_cygwin ARG ++# Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a ++# a wine environment, working winepath, and LT_CYGPATH set. Returns result in ++# func_to_host_file_result. ++func_convert_path_nix_to_cygwin () ++{ ++ $opt_debug ++ func_to_host_path_result="$1" ++ if test -n "$1"; then ++ # Remove leading and trailing path separator characters from ++ # ARG. msys behavior is inconsistent here, cygpath turns them ++ # into '.;' and ';.', and winepath ignores them completely. ++ func_stripname : : "$1" ++ func_to_host_path_tmp1=$func_stripname_result ++ func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" ++ func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" ++ func_to_host_path_result="$func_cygpath_result" ++ func_convert_path_check : : \ ++ "$func_to_host_path_tmp1" "$func_to_host_path_result" ++ func_convert_path_front_back_pathsep ":*" "*:" : "$1" ++ fi ++} ++# end func_convert_path_nix_to_cygwin ++ ++ + # func_mode_compile arg... + func_mode_compile () + { +@@ -1314,12 +1985,12 @@ func_mode_compile () + ;; + + -pie | -fpie | -fPIE) +- pie_flag="$pie_flag $arg" ++ func_append pie_flag " $arg" + continue + ;; + + -shared | -static | -prefer-pic | -prefer-non-pic) +- later="$later $arg" ++ func_append later " $arg" + continue + ;; + +@@ -1340,15 +2011,14 @@ func_mode_compile () + save_ifs="$IFS"; IFS=',' + for arg in $args; do + IFS="$save_ifs" +- func_quote_for_eval "$arg" +- lastarg="$lastarg $func_quote_for_eval_result" ++ func_append_quoted lastarg "$arg" + done + IFS="$save_ifs" + func_stripname ' ' '' "$lastarg" + lastarg=$func_stripname_result + + # Add the arguments to base_compile. +- base_compile="$base_compile $lastarg" ++ func_append base_compile " $lastarg" + continue + ;; + +@@ -1364,8 +2034,7 @@ func_mode_compile () + esac # case $arg_mode + + # Aesthetically quote the previous argument. +- func_quote_for_eval "$lastarg" +- base_compile="$base_compile $func_quote_for_eval_result" ++ func_append_quoted base_compile "$lastarg" + done # for arg + + case $arg_mode in +@@ -1496,17 +2165,16 @@ compiler." 
+ $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi +- removelist="$removelist $output_obj" ++ func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist +- removelist="$removelist $lockfile" ++ func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + +- if test -n "$fix_srcfile_path"; then +- eval "srcfile=\"$fix_srcfile_path\"" +- fi ++ func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 ++ srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + +@@ -1526,7 +2194,7 @@ compiler." + + if test -z "$output_obj"; then + # Place PIC objects in $objdir +- command="$command -o $lobj" ++ func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ +@@ -1573,11 +2241,11 @@ compiler." + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then +- command="$command -o $obj" ++ func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. +- command="$command$suppress_output" ++ func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + +@@ -1622,13 +2290,13 @@ compiler." + } + + $opt_help || { +- test "$mode" = compile && func_mode_compile ${1+"$@"} ++ test "$opt_mode" = compile && func_mode_compile ${1+"$@"} + } + + func_mode_help () + { + # We need to display help for each of the modes. +- case $mode in ++ case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. +@@ -1659,8 +2327,8 @@ This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes +- -prefer-pic try to building PIC objects only +- -prefer-non-pic try to building non-PIC objects only ++ -prefer-pic try to build PIC objects only ++ -prefer-non-pic try to build non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler +@@ -1804,7 +2472,7 @@ Otherwise, only FILE itself is deleted using RM." + ;; + + *) +- func_fatal_help "invalid operation mode \`$mode'" ++ func_fatal_help "invalid operation mode \`$opt_mode'" + ;; + esac + +@@ -1819,13 +2487,13 @@ if $opt_help; then + else + { + func_help noexit +- for mode in compile link execute install finish uninstall clean; do ++ for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | sed -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit +- for mode in compile link execute install finish uninstall clean; do ++ for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done +@@ -1854,13 +2522,16 @@ func_mode_execute () + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. +- for file in $execute_dlfiles; do ++ for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) ++ func_resolve_sysroot "$file" ++ file=$func_resolve_sysroot_result ++ + # Check to see that this really is a libtool archive. 
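These hunks replace in-place string growth such as removelist="$removelist $lockfile" with func_append, and the quote-then-concatenate pairs with func_append_quoted. The real helpers are defined earlier in ltmain.sh, outside this excerpt, and may use the shell's += operator when the shell supports it; simplified stand-ins, for illustration only:

  func_append ()
  {
    # Append VALUE ($2) to the contents of the variable named by $1.
    eval "${1}=\$${1}\${2}"
  }

  func_quote_for_eval ()
  {
    # Rough stand-in: the real helper escapes shell metacharacters.
    func_quote_for_eval_result=$1
  }

  func_append_quoted ()
  {
    func_quote_for_eval "$2"
    func_append "$1" " $func_quote_for_eval_result"
  }

  removelist="a.o"
  func_append removelist " a.lo"
  func_append_quoted removelist "b c.lo"
  echo "$removelist"   # -> a.o a.lo b c.lo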
+ func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" +@@ -1882,7 +2553,7 @@ func_mode_execute () + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then +- dir="$dir/$objdir" ++ func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" +@@ -1907,10 +2578,10 @@ func_mode_execute () + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. +- if eval test -z \"\$$shlibpath_var\"; then +- eval $shlibpath_var=\$dir ++ if eval "test -z \"\$$shlibpath_var\""; then ++ eval "$shlibpath_var=\"\$dir\"" + else +- eval $shlibpath_var=\$dir:\$$shlibpath_var ++ eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + +@@ -1939,8 +2610,7 @@ func_mode_execute () + ;; + esac + # Quote arguments (to preserve shell metacharacters). +- func_quote_for_eval "$file" +- args="$args $func_quote_for_eval_result" ++ func_append_quoted args "$file" + done + + if test "X$opt_dry_run" = Xfalse; then +@@ -1972,22 +2642,59 @@ func_mode_execute () + fi + } + +-test "$mode" = execute && func_mode_execute ${1+"$@"} ++test "$opt_mode" = execute && func_mode_execute ${1+"$@"} + + + # func_mode_finish arg... + func_mode_finish () + { + $opt_debug +- libdirs="$nonopt" ++ libs= ++ libdirs= + admincmds= + +- if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then +- for dir +- do +- libdirs="$libdirs $dir" +- done ++ for opt in "$nonopt" ${1+"$@"} ++ do ++ if test -d "$opt"; then ++ func_append libdirs " $opt" + ++ elif test -f "$opt"; then ++ if func_lalib_unsafe_p "$opt"; then ++ func_append libs " $opt" ++ else ++ func_warning "\`$opt' is not a valid libtool archive" ++ fi ++ ++ else ++ func_fatal_error "invalid argument \`$opt'" ++ fi ++ done ++ ++ if test -n "$libs"; then ++ if test -n "$lt_sysroot"; then ++ sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` ++ sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" ++ else ++ sysroot_cmd= ++ fi ++ ++ # Remove sysroot references ++ if $opt_dry_run; then ++ for lib in $libs; do ++ echo "removing references to $lt_sysroot and \`=' prefixes from $lib" ++ done ++ else ++ tmpdir=`func_mktempdir` ++ for lib in $libs; do ++ sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ ++ > $tmpdir/tmp-la ++ mv -f $tmpdir/tmp-la $lib ++ done ++ ${RM}r "$tmpdir" ++ fi ++ fi ++ ++ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. +@@ -1997,7 +2704,7 @@ func_mode_finish () + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" +- $opt_dry_run || eval "$cmds" || admincmds="$admincmds ++ $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done +@@ -2006,53 +2713,55 @@ func_mode_finish () + # Exit here if they wanted silent mode. 
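The new finish-mode code above rewrites the named .la files to drop $lt_sysroot references and the '=' sysroot prefixes before the usual libdir advice is printed. Purely as an illustration, with a hypothetical sysroot and dependency_libs line, and with the escaping normally produced by $sed_make_literal_regex written out by hand, the sed expression behaves like this:

  lt_sysroot=/opt/sysroot
  sysroot_regex='\/opt\/sysroot'
  sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;"

  printf '%s\n' "dependency_libs=' -L=/usr/lib $lt_sysroot/usr/lib/libz.la -lm'" |
    sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g"
  # prints: dependency_libs=' -L/usr/lib /usr/lib/libz.la -lm'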
+ $opt_silent && exit $EXIT_SUCCESS + +- echo "----------------------------------------------------------------------" +- echo "Libraries have been installed in:" +- for libdir in $libdirs; do +- $ECHO " $libdir" +- done +- echo +- echo "If you ever happen to want to link against installed libraries" +- echo "in a given directory, LIBDIR, you must either use libtool, and" +- echo "specify the full pathname of the library, or use the \`-LLIBDIR'" +- echo "flag during linking and do at least one of the following:" +- if test -n "$shlibpath_var"; then +- echo " - add LIBDIR to the \`$shlibpath_var' environment variable" +- echo " during execution" +- fi +- if test -n "$runpath_var"; then +- echo " - add LIBDIR to the \`$runpath_var' environment variable" +- echo " during linking" +- fi +- if test -n "$hardcode_libdir_flag_spec"; then +- libdir=LIBDIR +- eval "flag=\"$hardcode_libdir_flag_spec\"" ++ if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then ++ echo "----------------------------------------------------------------------" ++ echo "Libraries have been installed in:" ++ for libdir in $libdirs; do ++ $ECHO " $libdir" ++ done ++ echo ++ echo "If you ever happen to want to link against installed libraries" ++ echo "in a given directory, LIBDIR, you must either use libtool, and" ++ echo "specify the full pathname of the library, or use the \`-LLIBDIR'" ++ echo "flag during linking and do at least one of the following:" ++ if test -n "$shlibpath_var"; then ++ echo " - add LIBDIR to the \`$shlibpath_var' environment variable" ++ echo " during execution" ++ fi ++ if test -n "$runpath_var"; then ++ echo " - add LIBDIR to the \`$runpath_var' environment variable" ++ echo " during linking" ++ fi ++ if test -n "$hardcode_libdir_flag_spec"; then ++ libdir=LIBDIR ++ eval flag=\"$hardcode_libdir_flag_spec\" + +- $ECHO " - use the \`$flag' linker flag" +- fi +- if test -n "$admincmds"; then +- $ECHO " - have your system administrator run these commands:$admincmds" +- fi +- if test -f /etc/ld.so.conf; then +- echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" +- fi +- echo ++ $ECHO " - use the \`$flag' linker flag" ++ fi ++ if test -n "$admincmds"; then ++ $ECHO " - have your system administrator run these commands:$admincmds" ++ fi ++ if test -f /etc/ld.so.conf; then ++ echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" ++ fi ++ echo + +- echo "See any operating system documentation about shared libraries for" +- case $host in +- solaris2.[6789]|solaris2.1[0-9]) +- echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" +- echo "pages." +- ;; +- *) +- echo "more information, such as the ld(1) and ld.so(8) manual pages." +- ;; +- esac +- echo "----------------------------------------------------------------------" ++ echo "See any operating system documentation about shared libraries for" ++ case $host in ++ solaris2.[6789]|solaris2.1[0-9]) ++ echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" ++ echo "pages." ++ ;; ++ *) ++ echo "more information, such as the ld(1) and ld.so(8) manual pages." ++ ;; ++ esac ++ echo "----------------------------------------------------------------------" ++ fi + exit $EXIT_SUCCESS + } + +-test "$mode" = finish && func_mode_finish ${1+"$@"} ++test "$opt_mode" = finish && func_mode_finish ${1+"$@"} + + + # func_mode_install arg... +@@ -2077,7 +2786,7 @@ func_mode_install () + # The real first argument should be the name of the installation program. + # Aesthetically quote it. 
+ func_quote_for_eval "$arg" +- install_prog="$install_prog$func_quote_for_eval_result" ++ func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; +@@ -2097,7 +2806,7 @@ func_mode_install () + do + arg2= + if test -n "$dest"; then +- files="$files $dest" ++ func_append files " $dest" + dest=$arg + continue + fi +@@ -2135,11 +2844,11 @@ func_mode_install () + + # Aesthetically quote the argument. + func_quote_for_eval "$arg" +- install_prog="$install_prog $func_quote_for_eval_result" ++ func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi +- install_shared_prog="$install_shared_prog $func_quote_for_eval_result" ++ func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ +@@ -2151,7 +2860,7 @@ func_mode_install () + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" +- install_shared_prog="$install_shared_prog -m $func_quote_for_eval_result" ++ func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + +@@ -2209,10 +2918,13 @@ func_mode_install () + case $file in + *.$libext) + # Do the static libraries later. +- staticlibs="$staticlibs $file" ++ func_append staticlibs " $file" + ;; + + *.la) ++ func_resolve_sysroot "$file" ++ file=$func_resolve_sysroot_result ++ + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" +@@ -2226,23 +2938,30 @@ func_mode_install () + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; +- *) current_libdirs="$current_libdirs $libdir" ;; ++ *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; +- *) future_libdirs="$future_libdirs $libdir" ;; ++ *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" +- dir="$dir$objdir" ++ func_append dir "$objdir" + + if test -n "$relink_command"; then ++ # Strip any trailing slash from the destination. ++ func_stripname '' '/' "$libdir" ++ destlibdir=$func_stripname_result ++ ++ func_stripname '' '/' "$destdir" ++ s_destdir=$func_stripname_result ++ + # Determine the prefix the user has applied to our future dir. +- inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` ++ inst_prefix_dir=`$ECHO "X$s_destdir" | $Xsed -e "s%$destlibdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that +@@ -2315,7 +3034,7 @@ func_mode_install () + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. +- test -n "$old_library" && staticlibs="$staticlibs $dir/$old_library" ++ test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) +@@ -2503,7 +3222,7 @@ func_mode_install () + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + +- if test -n "$current_libdirs" && $opt_finish; then ++ if test -n "$current_libdirs"; then + # Maybe just do a dry run. 
+ $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' +@@ -2512,7 +3231,7 @@ func_mode_install () + fi + } + +-test "$mode" = install && func_mode_install ${1+"$@"} ++test "$opt_mode" = install && func_mode_install ${1+"$@"} + + + # func_generate_dlsyms outputname originator pic_p +@@ -2559,6 +3278,18 @@ extern \"C\" { + #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" + #endif + ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + /* External symbol declarations for the compiler. */\ + " + +@@ -2570,21 +3301,22 @@ extern \"C\" { + # Add our own program objects to the symbol list. + progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do +- func_verbose "extracting global C symbols from \`$progfile'" +- $opt_dry_run || eval "$NM $progfile | $global_symbol_pipe >> '$nlist'" ++ func_to_tool_file "$progfile" func_convert_file_msys_to_w32 ++ func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" ++ $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { +- $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T +- $MV "$nlist"T "$nlist" ++ eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' ++ eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { +- $EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T +- $MV "$nlist"T "$nlist" ++ eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' ++ eval '$MV "$nlist"T "$nlist"' + } + fi + +@@ -2593,23 +3325,23 @@ extern \"C\" { + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols +- ${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' < "$nlist" > "$export_symbols" ++ eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) +- echo EXPORTS > "$output_objdir/$outputname.def" +- cat "$export_symbols" >> "$output_objdir/$outputname.def" ++ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' ++ eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { +- ${SED} -e 's/\([].[*^$]\)/\\\1/g' -e 's/^/ /' -e 's/$/$/' < "$export_symbols" > "$output_objdir/$outputname.exp" +- $GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T +- $MV "$nlist"T "$nlist" ++ eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' ++ eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' ++ eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) +- echo EXPORTS > "$output_objdir/$outputname.def" +- cat "$nlist" >> "$output_objdir/$outputname.def" ++ eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' ++ eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } +@@ -2620,10 +3352,52 @@ extern \"C\" { + func_verbose 
"extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" +- $opt_dry_run || { +- $ECHO ": $name " >> "$nlist" +- eval "$NM $dlprefile 2>/dev/null | $global_symbol_pipe >> '$nlist'" +- } ++ case $host in ++ *cygwin* | *mingw* | *cegcc* ) ++ # if an import library, we need to obtain dlname ++ if func_win32_import_lib_p "$dlprefile"; then ++ func_tr_sh "$dlprefile" ++ eval "curr_lafile=\$libfile_$func_tr_sh_result" ++ dlprefile_dlbasename="" ++ if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then ++ # Use subshell, to avoid clobbering current variable values ++ dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` ++ if test -n "$dlprefile_dlname" ; then ++ func_basename "$dlprefile_dlname" ++ dlprefile_dlbasename="$func_basename_result" ++ else ++ # no lafile. user explicitly requested -dlpreopen . ++ $sharedlib_from_linklib_cmd "$dlprefile" ++ dlprefile_dlbasename=$sharedlib_from_linklib_result ++ fi ++ fi ++ $opt_dry_run || { ++ if test -n "$dlprefile_dlbasename" ; then ++ eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' ++ else ++ func_warning "Could not compute DLL name from $name" ++ eval '$ECHO ": $name " >> "$nlist"' ++ fi ++ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 ++ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | ++ $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" ++ } ++ else # not an import lib ++ $opt_dry_run || { ++ eval '$ECHO ": $name " >> "$nlist"' ++ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 ++ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" ++ } ++ fi ++ ;; ++ *) ++ $opt_dry_run || { ++ eval '$ECHO ": $name " >> "$nlist"' ++ func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 ++ eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" ++ } ++ ;; ++ esac + done + + $opt_dry_run || { +@@ -2661,26 +3435,9 @@ typedef struct { + const char *name; + void *address; + } lt_dlsymlist; +-" +- case $host in +- *cygwin* | *mingw* | *cegcc* ) +- echo >> "$output_objdir/$my_dlsyms" "\ +-/* DATA imports from DLLs on WIN32 con't be const, because +- runtime relocations are performed -- see ld's documentation +- on pseudo-relocs. */" +- lt_dlsym_const= ;; +- *osf5*) +- echo >> "$output_objdir/$my_dlsyms" "\ +-/* This system does not cope well with relocations in const data */" +- lt_dlsym_const= ;; +- *) +- lt_dlsym_const=const ;; +- esac +- +- echo >> "$output_objdir/$my_dlsyms" "\ +-extern $lt_dlsym_const lt_dlsymlist ++extern LT_DLSYM_CONST lt_dlsymlist + lt_${my_prefix}_LTX_preloaded_symbols[]; +-$lt_dlsym_const lt_dlsymlist ++LT_DLSYM_CONST lt_dlsymlist + lt_${my_prefix}_LTX_preloaded_symbols[] = + {\ + { \"$my_originator\", (void *) 0 }," +@@ -2736,7 +3493,7 @@ static const void *lt_preloaded_setup() { + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; +- *) symtab_cflags="$symtab_cflags $arg" ;; ++ *) func_append symtab_cflags " $arg" ;; + esac + done + +@@ -2796,9 +3553,11 @@ func_win32_libid () + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static +- if $OBJDUMP -f "$1" | $SED -e '10q' 2>/dev/null | +- $EGREP 'file format (pe-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then +- win32_nmres=`$NM -f posix -A "$1" | ++ # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. 
++ if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | ++ $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then ++ func_to_tool_file "$1" func_convert_file_msys_to_w32 ++ win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ +@@ -2827,6 +3586,131 @@ func_win32_libid () + $ECHO "$win32_libid_type" + } + ++# func_cygming_dll_for_implib ARG ++# ++# Platform-specific function to extract the ++# name of the DLL associated with the specified ++# import library ARG. ++# Invoked by eval'ing the libtool variable ++# $sharedlib_from_linklib_cmd ++# Result is available in the variable ++# $sharedlib_from_linklib_result ++func_cygming_dll_for_implib () ++{ ++ $opt_debug ++ sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` ++} ++ ++# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs ++# ++# The is the core of a fallback implementation of a ++# platform-specific function to extract the name of the ++# DLL associated with the specified import library LIBNAME. ++# ++# SECTION_NAME is either .idata$6 or .idata$7, depending ++# on the platform and compiler that created the implib. ++# ++# Echos the name of the DLL associated with the ++# specified import library. ++func_cygming_dll_for_implib_fallback_core () ++{ ++ $opt_debug ++ match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` ++ $OBJDUMP -s --section "$1" "$2" 2>/dev/null | ++ $SED '/^Contents of section '"$match_literal"':/{ ++ # Place marker at beginning of archive member dllname section ++ s/.*/====MARK====/ ++ p ++ d ++ } ++ # These lines can sometimes be longer than 43 characters, but ++ # are always uninteresting ++ /:[ ]*file format pe[i]\{,1\}-/d ++ /^In archive [^:]*:/d ++ # Ensure marker is printed ++ /^====MARK====/p ++ # Remove all lines with less than 43 characters ++ /^.\{43\}/!d ++ # From remaining lines, remove first 43 characters ++ s/^.\{43\}//' | ++ $SED -n ' ++ # Join marker and all lines until next marker into a single line ++ /^====MARK====/ b para ++ H ++ $ b para ++ b ++ :para ++ x ++ s/\n//g ++ # Remove the marker ++ s/^====MARK====// ++ # Remove trailing dots and whitespace ++ s/[\. \t]*$// ++ # Print ++ /./p' | ++ # we now have a list, one entry per line, of the stringified ++ # contents of the appropriate section of all members of the ++ # archive which possess that section. Heuristic: eliminate ++ # all those which have a first or second character that is ++ # a '.' (that is, objdump's representation of an unprintable ++ # character.) This should work for all archives with less than ++ # 0x302f exports -- but will fail for DLLs whose name actually ++ # begins with a literal '.' or a single character followed by ++ # a '.'. ++ # ++ # Of those that remain, print the first one. ++ $SED -e '/^\./d;/^.\./d;q' ++} ++ ++# func_cygming_gnu_implib_p ARG ++# This predicate returns with zero status (TRUE) if ++# ARG is a GNU/binutils-style import library. Returns ++# with nonzero status (FALSE) otherwise. ++func_cygming_gnu_implib_p () ++{ ++ $opt_debug ++ func_to_tool_file "$1" func_convert_file_msys_to_w32 ++ func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` ++ test -n "$func_cygming_gnu_implib_tmp" ++} ++ ++# func_cygming_ms_implib_p ARG ++# This predicate returns with zero status (TRUE) if ++# ARG is an MS-style import library. Returns ++# with nonzero status (FALSE) otherwise. 
++func_cygming_ms_implib_p () ++{ ++ $opt_debug ++ func_to_tool_file "$1" func_convert_file_msys_to_w32 ++ func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` ++ test -n "$func_cygming_ms_implib_tmp" ++} ++ ++# func_cygming_dll_for_implib_fallback ARG ++# Platform-specific function to extract the ++# name of the DLL associated with the specified ++# import library ARG. ++# ++# This fallback implementation is for use when $DLLTOOL ++# does not support the --identify-strict option. ++# Invoked by eval'ing the libtool variable ++# $sharedlib_from_linklib_cmd ++# Result is available in the variable ++# $sharedlib_from_linklib_result ++func_cygming_dll_for_implib_fallback () ++{ ++ $opt_debug ++ if func_cygming_gnu_implib_p "$1" ; then ++ # binutils import library ++ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` ++ elif func_cygming_ms_implib_p "$1" ; then ++ # ms-generated import library ++ sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` ++ else ++ # unknown ++ sharedlib_from_linklib_result="" ++ fi ++} + + + # func_extract_an_archive dir oldlib +@@ -2917,7 +3801,7 @@ func_extract_archives () + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do +- darwin_files=`find unfat-$$ -name $darwin_file -print | $NL2SP` ++ darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ +@@ -2932,7 +3816,7 @@ func_extract_archives () + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac +- my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | $NL2SP` ++ my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +@@ -3014,7 +3898,110 @@ func_fallback_echo () + _LTECHO_EOF' + } + ECHO=\"$qECHO\" +- fi\ ++ fi ++ ++# Very basic option parsing. These options are (a) specific to ++# the libtool wrapper, (b) are identical between the wrapper ++# /script/ and the wrapper /executable/ which is used only on ++# windows platforms, and (c) all begin with the string "--lt-" ++# (application programs are unlikely to have options which match ++# this pattern). ++# ++# There are only two supported options: --lt-debug and ++# --lt-dump-script. There is, deliberately, no --lt-help. ++# ++# The first argument to this parsing function should be the ++# script's $0 value, followed by "$@". ++lt_option_debug= ++func_parse_lt_options () ++{ ++ lt_script_arg0=\$0 ++ shift ++ for lt_opt ++ do ++ case \"\$lt_opt\" in ++ --lt-debug) lt_option_debug=1 ;; ++ --lt-dump-script) ++ lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` ++ test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. ++ lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` ++ cat \"\$lt_dump_D/\$lt_dump_F\" ++ exit 0 ++ ;; ++ --lt-*) ++ \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 ++ exit 1 ++ ;; ++ esac ++ done ++ ++ # Print the debug banner immediately: ++ if test -n \"\$lt_option_debug\"; then ++ echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 ++ fi ++} ++ ++# Used when --lt-debug. 
Prints its arguments to stdout ++# (redirection is the responsibility of the caller) ++func_lt_dump_args () ++{ ++ lt_dump_args_N=1; ++ for lt_arg ++ do ++ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" ++ lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` ++ done ++} ++ ++# Core function for launching the target application ++func_exec_program_core () ++{ ++" ++ case $host in ++ # Backslashes separate directories on plain windows ++ *-*-mingw | *-*-os2* | *-cegcc*) ++ $ECHO "\ ++ if test -n \"\$lt_option_debug\"; then ++ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 ++ func_lt_dump_args \${1+\"\$@\"} 1>&2 ++ fi ++ exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} ++" ++ ;; ++ ++ *) ++ $ECHO "\ ++ if test -n \"\$lt_option_debug\"; then ++ \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 ++ func_lt_dump_args \${1+\"\$@\"} 1>&2 ++ fi ++ exec \"\$progdir/\$program\" \${1+\"\$@\"} ++" ++ ;; ++ esac ++ $ECHO "\ ++ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 ++ exit 1 ++} ++ ++# A function to encapsulate launching the target application ++# Strips options in the --lt-* namespace from \$@ and ++# launches target application with the remaining arguments. ++func_exec_program () ++{ ++ for lt_wr_arg ++ do ++ case \$lt_wr_arg in ++ --lt-*) ;; ++ *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; ++ esac ++ shift ++ done ++ func_exec_program_core \${1+\"\$@\"} ++} ++ ++ # Parse options ++ func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` +@@ -3078,7 +4065,7 @@ _LTECHO_EOF' + + # relink executable if necessary + if test -n \"\$relink_command\"; then +- if relink_command_output=\`eval \"\$relink_command\" 2>&1\`; then : ++ if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" +@@ -3102,6 +4089,18 @@ _LTECHO_EOF' + + if test -f \"\$progdir/\$program\"; then" + ++ # fixup the dll searchpath if we need to. ++ # ++ # Fix the DLL searchpath if we need to. Do this before prepending ++ # to shlibpath, because on Windows, both are PATH and uninstalled ++ # libraries must come first. ++ if test -n "$dllsearchpath"; then ++ $ECHO "\ ++ # Add the dll search path components to the executable PATH ++ PATH=$dllsearchpath:\$PATH ++" ++ fi ++ + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ +@@ -3116,35 +4115,10 @@ _LTECHO_EOF' + " + fi + +- # fixup the dll searchpath if we need to. +- if test -n "$dllsearchpath"; then +- $ECHO "\ +- # Add the dll search path components to the executable PATH +- PATH=$dllsearchpath:\$PATH +-" +- fi +- + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. +-" +- case $host in +- # Backslashes separate directories on plain windows +- *-*-mingw | *-*-os2* | *-cegcc*) +- $ECHO "\ +- exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +-" +- ;; +- +- *) +- $ECHO "\ +- exec \"\$progdir/\$program\" \${1+\"\$@\"} +-" +- ;; +- esac +- $ECHO "\ +- \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 +- exit 1 ++ func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. +@@ -3158,166 +4132,6 @@ fi\ + } + + +-# func_to_host_path arg +-# +-# Convert paths to host format when used with build tools. 
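func_exec_program in the generated wrapper strips the wrapper-only --lt-* options out of "$@" before handing the remaining arguments to func_exec_program_core. With the $-escaping needed inside $ECHO removed, the rotation idiom looks like this (filter_lt_args is a stand-in name, not part of the patch):

  filter_lt_args ()
  {
    for arg
    do
      case $arg in
      --lt-*) ;;                      # drop wrapper-only options
      *) set x "$@" "$arg"; shift ;;  # rotate kept arguments to the end of $@
      esac
      shift                           # drop the argument just examined
    done
    printf '%s\n' "$@"
  }

  filter_lt_args --lt-debug ./prog --verbose
  # prints:
  # ./prog
  # --verbose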
+-# Intended for use with "native" mingw (where libtool itself +-# is running under the msys shell), or in the following cross- +-# build environments: +-# $build $host +-# mingw (msys) mingw [e.g. native] +-# cygwin mingw +-# *nix + wine mingw +-# where wine is equipped with the `winepath' executable. +-# In the native mingw case, the (msys) shell automatically +-# converts paths for any non-msys applications it launches, +-# but that facility isn't available from inside the cwrapper. +-# Similar accommodations are necessary for $host mingw and +-# $build cygwin. Calling this function does no harm for other +-# $host/$build combinations not listed above. +-# +-# ARG is the path (on $build) that should be converted to +-# the proper representation for $host. The result is stored +-# in $func_to_host_path_result. +-func_to_host_path () +-{ +- func_to_host_path_result="$1" +- if test -n "$1"; then +- case $host in +- *mingw* ) +- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' +- case $build in +- *mingw* ) # actually, msys +- # awkward: cmd appends spaces to result +- func_to_host_path_result=`( cmd //c echo "$1" ) 2>/dev/null | +- $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +- ;; +- *cygwin* ) +- func_to_host_path_result=`cygpath -w "$1" | +- $SED -e "$lt_sed_naive_backslashify"` +- ;; +- * ) +- # Unfortunately, winepath does not exit with a non-zero +- # error code, so we are forced to check the contents of +- # stdout. On the other hand, if the command is not +- # found, the shell will set an exit code of 127 and print +- # *an error message* to stdout. So we must check for both +- # error code of zero AND non-empty stdout, which explains +- # the odd construction: +- func_to_host_path_tmp1=`winepath -w "$1" 2>/dev/null` +- if test "$?" -eq 0 && test -n "${func_to_host_path_tmp1}"; then +- func_to_host_path_result=`$ECHO "$func_to_host_path_tmp1" | +- $SED -e "$lt_sed_naive_backslashify"` +- else +- # Allow warning below. +- func_to_host_path_result= +- fi +- ;; +- esac +- if test -z "$func_to_host_path_result" ; then +- func_error "Could not determine host path corresponding to" +- func_error " \`$1'" +- func_error "Continuing, but uninstalled executables may not work." +- # Fallback: +- func_to_host_path_result="$1" +- fi +- ;; +- esac +- fi +-} +-# end: func_to_host_path +- +-# func_to_host_pathlist arg +-# +-# Convert pathlists to host format when used with build tools. +-# See func_to_host_path(), above. This function supports the +-# following $build/$host combinations (but does no harm for +-# combinations not listed here): +-# $build $host +-# mingw (msys) mingw [e.g. native] +-# cygwin mingw +-# *nix + wine mingw +-# +-# Path separators are also converted from $build format to +-# $host format. If ARG begins or ends with a path separator +-# character, it is preserved (but converted to $host format) +-# on output. +-# +-# ARG is a pathlist (on $build) that should be converted to +-# the proper representation on $host. The result is stored +-# in $func_to_host_pathlist_result. +-func_to_host_pathlist () +-{ +- func_to_host_pathlist_result="$1" +- if test -n "$1"; then +- case $host in +- *mingw* ) +- lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' +- # Remove leading and trailing path separator characters from +- # ARG. msys behavior is inconsistent here, cygpath turns them +- # into '.;' and ';.', and winepath ignores them completely. 
+- func_stripname : : "$1" +- func_to_host_pathlist_tmp1=$func_stripname_result +- case $build in +- *mingw* ) # Actually, msys. +- # Awkward: cmd appends spaces to result. +- func_to_host_pathlist_result=` +- ( cmd //c echo "$func_to_host_pathlist_tmp1" ) 2>/dev/null | +- $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +- ;; +- *cygwin* ) +- func_to_host_pathlist_result=`cygpath -w -p "$func_to_host_pathlist_tmp1" | +- $SED -e "$lt_sed_naive_backslashify"` +- ;; +- * ) +- # unfortunately, winepath doesn't convert pathlists +- func_to_host_pathlist_result="" +- func_to_host_pathlist_oldIFS=$IFS +- IFS=: +- for func_to_host_pathlist_f in $func_to_host_pathlist_tmp1 ; do +- IFS=$func_to_host_pathlist_oldIFS +- if test -n "$func_to_host_pathlist_f" ; then +- func_to_host_path "$func_to_host_pathlist_f" +- if test -n "$func_to_host_path_result" ; then +- if test -z "$func_to_host_pathlist_result" ; then +- func_to_host_pathlist_result="$func_to_host_path_result" +- else +- func_append func_to_host_pathlist_result ";$func_to_host_path_result" +- fi +- fi +- fi +- done +- IFS=$func_to_host_pathlist_oldIFS +- ;; +- esac +- if test -z "$func_to_host_pathlist_result"; then +- func_error "Could not determine the host path(s) corresponding to" +- func_error " \`$1'" +- func_error "Continuing, but uninstalled executables may not work." +- # Fallback. This may break if $1 contains DOS-style drive +- # specifications. The fix is not to complicate the expression +- # below, but for the user to provide a working wine installation +- # with winepath so that path translation in the cross-to-mingw +- # case works properly. +- lt_replace_pathsep_nix_to_dos="s|:|;|g" +- func_to_host_pathlist_result=`echo "$func_to_host_pathlist_tmp1" |\ +- $SED -e "$lt_replace_pathsep_nix_to_dos"` +- fi +- # Now, add the leading and trailing path separators back +- case "$1" in +- :* ) func_to_host_pathlist_result=";$func_to_host_pathlist_result" +- ;; +- esac +- case "$1" in +- *: ) func_append func_to_host_pathlist_result ";" +- ;; +- esac +- ;; +- esac +- fi +-} +-# end: func_to_host_pathlist +- + # func_emit_cwrapperexe_src + # emit the source code for a wrapper executable on stdout + # Must ONLY be called from within func_mode_link because +@@ -3334,10 +4148,6 @@ func_emit_cwrapperexe_src () + + This wrapper executable should never be moved out of the build directory. + If it is, it will not operate correctly. +- +- Currently, it simply execs the wrapper *script* "$SHELL $output", +- but could eventually absorb all of the scripts functionality and +- exec $objdir/$outputname directly. + */ + EOF + cat <<"EOF" +@@ -3462,22 +4272,13 @@ int setenv (const char *, const char *, int); + if (stale) { free ((void *) stale); stale = 0; } \ + } while (0) + +-#undef LTWRAPPER_DEBUGPRINTF +-#if defined LT_DEBUGWRAPPER +-# define LTWRAPPER_DEBUGPRINTF(args) ltwrapper_debugprintf args +-static void +-ltwrapper_debugprintf (const char *fmt, ...) 
+-{ +- va_list args; +- va_start (args, fmt); +- (void) vfprintf (stderr, fmt, args); +- va_end (args); +-} ++#if defined(LT_DEBUGWRAPPER) ++static int lt_debug = 1; + #else +-# define LTWRAPPER_DEBUGPRINTF(args) ++static int lt_debug = 0; + #endif + +-const char *program_name = NULL; ++const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + + void *xmalloc (size_t num); + char *xstrdup (const char *string); +@@ -3487,7 +4288,10 @@ char *chase_symlinks (const char *pathspec); + int make_executable (const char *path); + int check_executable (const char *path); + char *strendzap (char *str, const char *pat); +-void lt_fatal (const char *message, ...); ++void lt_debugprintf (const char *file, int line, const char *fmt, ...); ++void lt_fatal (const char *file, int line, const char *message, ...); ++static const char *nonnull (const char *s); ++static const char *nonempty (const char *s); + void lt_setenv (const char *name, const char *value); + char *lt_extend_str (const char *orig_value, const char *add, int to_end); + void lt_update_exe_path (const char *name, const char *value); +@@ -3497,14 +4301,14 @@ void lt_dump_script (FILE *f); + EOF + + cat <"))); ++ lt_debugprintf (__FILE__, __LINE__, "(main) lt_argv_zero: %s\n", ++ nonnull (lt_argv_zero)); + for (i = 0; i < newargc; i++) + { +- LTWRAPPER_DEBUGPRINTF (("(main) newargz[%d] : %s\n", i, (newargz[i] ? newargz[i] : ""))); ++ lt_debugprintf (__FILE__, __LINE__, "(main) newargz[%d]: %s\n", ++ i, nonnull (newargz[i])); + } + + EOF +@@ -3706,7 +4529,9 @@ EOF + if (rval == -1) + { + /* failed to start process */ +- LTWRAPPER_DEBUGPRINTF (("(main) failed to launch target \"%s\": errno = %d\n", lt_argv_zero, errno)); ++ lt_debugprintf (__FILE__, __LINE__, ++ "(main) failed to launch target \"%s\": %s\n", ++ lt_argv_zero, nonnull (strerror (errno))); + return 127; + } + return rval; +@@ -3728,7 +4553,7 @@ xmalloc (size_t num) + { + void *p = (void *) malloc (num); + if (!p) +- lt_fatal ("Memory exhausted"); ++ lt_fatal (__FILE__, __LINE__, "memory exhausted"); + + return p; + } +@@ -3762,8 +4587,8 @@ check_executable (const char *path) + { + struct stat st; + +- LTWRAPPER_DEBUGPRINTF (("(check_executable) : %s\n", +- path ? (*path ? path : "EMPTY!") : "NULL!")); ++ lt_debugprintf (__FILE__, __LINE__, "(check_executable): %s\n", ++ nonempty (path)); + if ((!path) || (!*path)) + return 0; + +@@ -3780,8 +4605,8 @@ make_executable (const char *path) + int rval = 0; + struct stat st; + +- LTWRAPPER_DEBUGPRINTF (("(make_executable) : %s\n", +- path ? (*path ? path : "EMPTY!") : "NULL!")); ++ lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", ++ nonempty (path)); + if ((!path) || (!*path)) + return 0; + +@@ -3807,8 +4632,8 @@ find_executable (const char *wrapper) + int tmp_len; + char *concat_name; + +- LTWRAPPER_DEBUGPRINTF (("(find_executable) : %s\n", +- wrapper ? (*wrapper ? 
wrapper : "EMPTY!") : "NULL!")); ++ lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", ++ nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; +@@ -3861,7 +4686,8 @@ find_executable (const char *wrapper) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) +- lt_fatal ("getcwd failed"); ++ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", ++ nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); +@@ -3886,7 +4712,8 @@ find_executable (const char *wrapper) + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) +- lt_fatal ("getcwd failed"); ++ lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", ++ nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); +@@ -3912,8 +4739,9 @@ chase_symlinks (const char *pathspec) + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { +- LTWRAPPER_DEBUGPRINTF (("checking path component for symlinks: %s\n", +- tmp_pathspec)); ++ lt_debugprintf (__FILE__, __LINE__, ++ "checking path component for symlinks: %s\n", ++ tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) +@@ -3935,8 +4763,9 @@ chase_symlinks (const char *pathspec) + } + else + { +- char *errstr = strerror (errno); +- lt_fatal ("Error accessing file %s (%s)", tmp_pathspec, errstr); ++ lt_fatal (__FILE__, __LINE__, ++ "error accessing file \"%s\": %s", ++ tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); +@@ -3949,7 +4778,8 @@ chase_symlinks (const char *pathspec) + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { +- lt_fatal ("Could not follow symlinks for %s", pathspec); ++ lt_fatal (__FILE__, __LINE__, ++ "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); + #endif +@@ -3975,11 +4805,25 @@ strendzap (char *str, const char *pat) + return str; + } + ++void ++lt_debugprintf (const char *file, int line, const char *fmt, ...) ++{ ++ va_list args; ++ if (lt_debug) ++ { ++ (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); ++ va_start (args, fmt); ++ (void) vfprintf (stderr, fmt, args); ++ va_end (args); ++ } ++} ++ + static void +-lt_error_core (int exit_status, const char *mode, ++lt_error_core (int exit_status, const char *file, ++ int line, const char *mode, + const char *message, va_list ap) + { +- fprintf (stderr, "%s: %s: ", program_name, mode); ++ fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + +@@ -3988,20 +4832,32 @@ lt_error_core (int exit_status, const char *mode, + } + + void +-lt_fatal (const char *message, ...) ++lt_fatal (const char *file, int line, const char *message, ...) + { + va_list ap; + va_start (ap, message); +- lt_error_core (EXIT_FAILURE, "FATAL", message, ap); ++ lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); + } + ++static const char * ++nonnull (const char *s) ++{ ++ return s ? s : "(null)"; ++} ++ ++static const char * ++nonempty (const char *s) ++{ ++ return (s && !*s) ? "(empty)" : nonnull (s); ++} ++ + void + lt_setenv (const char *name, const char *value) + { +- LTWRAPPER_DEBUGPRINTF (("(lt_setenv) setting '%s' to '%s'\n", +- (name ? name : ""), +- (value ? 
value : ""))); ++ lt_debugprintf (__FILE__, __LINE__, ++ "(lt_setenv) setting '%s' to '%s'\n", ++ nonnull (name), nonnull (value)); + { + #ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ +@@ -4049,9 +4905,9 @@ lt_extend_str (const char *orig_value, const char *add, int to_end) + void + lt_update_exe_path (const char *name, const char *value) + { +- LTWRAPPER_DEBUGPRINTF (("(lt_update_exe_path) modifying '%s' by prepending '%s'\n", +- (name ? name : ""), +- (value ? value : ""))); ++ lt_debugprintf (__FILE__, __LINE__, ++ "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", ++ nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { +@@ -4070,9 +4926,9 @@ lt_update_exe_path (const char *name, const char *value) + void + lt_update_lib_path (const char *name, const char *value) + { +- LTWRAPPER_DEBUGPRINTF (("(lt_update_lib_path) modifying '%s' by prepending '%s'\n", +- (name ? name : ""), +- (value ? value : ""))); ++ lt_debugprintf (__FILE__, __LINE__, ++ "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", ++ nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { +@@ -4222,7 +5078,7 @@ EOF + func_win32_import_lib_p () + { + $opt_debug +- case `eval "$file_magic_cmd \"\$1\" 2>/dev/null" | $SED -e 10q` in ++ case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +@@ -4401,9 +5257,9 @@ func_mode_link () + ;; + *) + if test "$prev" = dlfiles; then +- dlfiles="$dlfiles $arg" ++ func_append dlfiles " $arg" + else +- dlprefiles="$dlprefiles $arg" ++ func_append dlprefiles " $arg" + fi + prev= + continue +@@ -4427,7 +5283,7 @@ func_mode_link () + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; +- *) deplibs="$deplibs $qarg.ltframework" # this is fixed later ++ *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; +@@ -4446,7 +5302,7 @@ func_mode_link () + moreargs= + for fil in `cat "$save_arg"` + do +-# moreargs="$moreargs $fil" ++# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + +@@ -4475,7 +5331,7 @@ func_mode_link () + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then +- dlfiles="$dlfiles $pic_object" ++ func_append dlfiles " $pic_object" + prev= + continue + else +@@ -4487,7 +5343,7 @@ func_mode_link () + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. 
+- dlprefiles="$dlprefiles $pic_object" ++ func_append dlprefiles " $pic_object" + prev= + fi + +@@ -4557,12 +5413,12 @@ func_mode_link () + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; +- *) rpath="$rpath $arg" ;; ++ *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; +- *) xrpath="$xrpath $arg" ;; ++ *) func_append xrpath " $arg" ;; + esac + fi + prev= +@@ -4574,28 +5430,28 @@ func_mode_link () + continue + ;; + weak) +- weak_libs="$weak_libs $arg" ++ func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) +- linker_flags="$linker_flags $qarg" +- compiler_flags="$compiler_flags $qarg" ++ func_append linker_flags " $qarg" ++ func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) +- compiler_flags="$compiler_flags $qarg" ++ func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) +- linker_flags="$linker_flags $qarg" +- compiler_flags="$compiler_flags $wl$qarg" ++ func_append linker_flags " $qarg" ++ func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" +@@ -4686,15 +5542,16 @@ func_mode_link () + ;; + + -L*) +- func_stripname '-L' '' "$arg" +- dir=$func_stripname_result +- if test -z "$dir"; then ++ func_stripname "-L" '' "$arg" ++ if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi ++ func_resolve_sysroot "$func_stripname_result" ++ dir=$func_resolve_sysroot_result + # We need an absolute path. 
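The -L handling above (and the later hunks for -R and *.la arguments) now routes paths through func_resolve_sysroot, which is defined earlier in this patch, outside this hunk; it expands libtool's '=' sysroot shorthand. A simplified, behaviorally equivalent sketch with a hypothetical sysroot, for illustration only:

  lt_sysroot=/opt/sysroot

  func_resolve_sysroot ()
  {
    # A leading '=' means "relative to the configured sysroot".
    case $1 in
    =*) func_resolve_sysroot_result=$lt_sysroot${1#=} ;;
    *)  func_resolve_sysroot_result=$1 ;;
    esac
  }

  func_resolve_sysroot "=/usr/lib"
  echo "$func_resolve_sysroot_result"   # -> /opt/sysroot/usr/lib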
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; +@@ -4706,10 +5563,16 @@ func_mode_link () + ;; + esac + case "$deplibs " in +- *" -L$dir "*) ;; ++ *" -L$dir "* | *" $arg "*) ++ # Will only happen for absolute or sysroot arguments ++ ;; + *) +- deplibs="$deplibs -L$dir" +- lib_search_path="$lib_search_path $dir" ++ # Preserve sysroot, but never include relative directories ++ case $dir in ++ [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; ++ *) func_append deplibs " -L$dir" ;; ++ esac ++ func_append lib_search_path " $dir" + ;; + esac + case $host in +@@ -4718,12 +5581,12 @@ func_mode_link () + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; +- *) dllsearchpath="$dllsearchpath:$dir";; ++ *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; +- *) dllsearchpath="$dllsearchpath:$testbindir";; ++ *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac +@@ -4747,7 +5610,7 @@ func_mode_link () + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework +- deplibs="$deplibs System.ltframework" ++ func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) +@@ -4758,9 +5621,6 @@ func_mode_link () + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; +- *-*-linux*) +- test "X$arg" = "X-lc" && continue +- ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in +@@ -4770,7 +5630,7 @@ func_mode_link () + ;; + esac + fi +- deplibs="$deplibs $arg" ++ func_append deplibs " $arg" + continue + ;; + +@@ -4782,8 +5642,8 @@ func_mode_link () + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. +- -model|-arch|-isysroot) +- compiler_flags="$compiler_flags $arg" ++ -model|-arch|-isysroot|--sysroot) ++ func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler +@@ -4791,12 +5651,12 @@ func_mode_link () + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads) +- compiler_flags="$compiler_flags $arg" ++ func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; +- * ) new_inherited_linker_flags="$new_inherited_linker_flags $arg" ;; ++ * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; +@@ -4863,13 +5723,17 @@ func_mode_link () + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; ++ =*) ++ func_stripname '=' '' "$dir" ++ dir=$lt_sysroot$func_stripname_result ++ ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; +- *) xrpath="$xrpath $dir" ;; ++ *) func_append xrpath " $dir" ;; + esac + continue + ;; +@@ -4922,8 +5786,8 @@ func_mode_link () + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" +- arg="$arg $func_quote_for_eval_result" +- compiler_flags="$compiler_flags $func_quote_for_eval_result" ++ func_append arg " $func_quote_for_eval_result" ++ func_append compiler_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" +@@ -4938,9 +5802,9 @@ func_mode_link () + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" +- arg="$arg $wl$func_quote_for_eval_result" +- compiler_flags="$compiler_flags $wl$func_quote_for_eval_result" +- linker_flags="$linker_flags $func_quote_for_eval_result" ++ func_append arg " $wl$func_quote_for_eval_result" ++ func_append compiler_flags " $wl$func_quote_for_eval_result" ++ func_append linker_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" +@@ -4968,24 +5832,27 @@ func_mode_link () + arg="$func_quote_for_eval_result" + ;; + +- # -64, -mips[0-9] enable 64-bit mode on the SGI compiler +- # -r[0-9][0-9]* specifies the processor on the SGI compiler +- # -xarch=*, -xtarget=* enable 64-bit mode on the Sun compiler +- # +DA*, +DD* enable 64-bit mode on the HP compiler +- # -q* pass through compiler args for the IBM compiler +- # -m*, -t[45]*, -txscale* pass through architecture-specific +- # compiler args for GCC +- # -F/path gives path to uninstalled frameworks, gcc on darwin +- # -p, -pg, --coverage, -fprofile-* pass through profiling flag for GCC +- # @file GCC response files +- # -tp=* Portland pgcc target processor selection ++ # Flags to be passed through unchanged, with rationale: ++ # -64, -mips[0-9] enable 64-bit mode for the SGI compiler ++ # -r[0-9][0-9]* specify processor for the SGI compiler ++ # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler ++ # +DA*, +DD* enable 64-bit mode for the HP compiler ++ # -q* compiler args for the IBM compiler ++ # -m*, -t[45]*, -txscale* architecture-specific flags for GCC ++ # -F/path path to uninstalled frameworks, gcc on darwin ++ # -p, -pg, --coverage, -fprofile-* profiling flags for GCC ++ # @file GCC response files ++ # -tp=* Portland pgcc target processor selection ++ # --sysroot=* for sysroot support ++ # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ +- -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*) ++ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ ++ -O*|-flto*|-fwhopr*|-fuse-linker-plugin) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" +- compiler_flags="$compiler_flags $arg" ++ func_append compiler_flags " $arg" + continue + ;; + +@@ -4997,7 +5864,7 @@ func_mode_link () + + *.$objext) + # A standard object. 
+- objs="$objs $arg" ++ func_append objs " $arg" + ;; + + *.lo) +@@ -5028,7 +5895,7 @@ func_mode_link () + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then +- dlfiles="$dlfiles $pic_object" ++ func_append dlfiles " $pic_object" + prev= + continue + else +@@ -5040,7 +5907,7 @@ func_mode_link () + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. +- dlprefiles="$dlprefiles $pic_object" ++ func_append dlprefiles " $pic_object" + prev= + fi + +@@ -5085,24 +5952,25 @@ func_mode_link () + + *.$libext) + # An archive. +- deplibs="$deplibs $arg" +- old_deplibs="$old_deplibs $arg" ++ func_append deplibs " $arg" ++ func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + ++ func_resolve_sysroot "$arg" + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. +- dlfiles="$dlfiles $arg" ++ func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. +- dlprefiles="$dlprefiles $arg" ++ func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else +- deplibs="$deplibs $arg" ++ func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; +@@ -5127,7 +5995,7 @@ func_mode_link () + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then +- eval "arg=\"$export_dynamic_flag_spec\"" ++ eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi +@@ -5144,11 +6012,13 @@ func_mode_link () + else + shlib_search_path= + fi +- eval "sys_lib_search_path=\"$sys_lib_search_path_spec\"" +- eval "sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\"" ++ eval sys_lib_search_path=\"$sys_lib_search_path_spec\" ++ eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" ++ func_to_tool_file "$output_objdir/" ++ tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + +@@ -5169,12 +6039,12 @@ func_mode_link () + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. 
-la -lb -la) + for deplib in $deplibs; do +- if $opt_duplicate_deps ; then ++ if $opt_preserve_dup_deps ; then + case "$libs " in +- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; ++ *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi +- libs="$libs $deplib" ++ func_append libs " $deplib" + done + + if test "$linkmode" = lib; then +@@ -5187,9 +6057,9 @@ func_mode_link () + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in +- *" $pre_post_dep "*) specialdeplibs="$specialdeplibs $pre_post_deps" ;; ++ *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac +- pre_post_deps="$pre_post_deps $pre_post_dep" ++ func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= +@@ -5256,8 +6126,9 @@ func_mode_link () + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= ++ func_resolve_sysroot "$lib" + case $lib in +- *.la) func_source "$lib" ;; ++ *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library +@@ -5267,7 +6138,7 @@ func_mode_link () + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; +- *) deplibs="$deplibs $deplib" ;; ++ *) func_append deplibs " $deplib" ;; + esac + done + done +@@ -5288,11 +6159,11 @@ func_mode_link () + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else +- compiler_flags="$compiler_flags $deplib" ++ func_append compiler_flags " $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; +- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; ++ * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi +@@ -5377,7 +6248,7 @@ func_mode_link () + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; +- * ) new_inherited_linker_flags="$new_inherited_linker_flags $deplib" ;; ++ * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi +@@ -5390,7 +6261,8 @@ func_mode_link () + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" +- newlib_search_path="$newlib_search_path $func_stripname_result" ++ func_resolve_sysroot "$func_stripname_result" ++ func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test "$pass" = conv; then +@@ -5404,7 +6276,8 @@ func_mode_link () + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" +- newlib_search_path="$newlib_search_path $func_stripname_result" ++ func_resolve_sysroot "$func_stripname_result" ++ func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" +@@ -5415,17 +6288,21 @@ func_mode_link () + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" +- dir=$func_stripname_result ++ func_resolve_sysroot "$func_stripname_result" ++ dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. 
+ case "$xrpath " in + *" $dir "*) ;; +- *) xrpath="$xrpath $dir" ;; ++ *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; +- *.la) lib="$deplib" ;; ++ *.la) ++ func_resolve_sysroot "$deplib" ++ lib=$func_resolve_sysroot_result ++ ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" +@@ -5488,11 +6365,11 @@ func_mode_link () + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. +- newdlprefiles="$newdlprefiles $deplib" ++ func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else +- newdlfiles="$newdlfiles $deplib" ++ func_append newdlfiles " $deplib" + fi + fi + continue +@@ -5538,7 +6415,7 @@ func_mode_link () + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; +- *) new_inherited_linker_flags="$new_inherited_linker_flags $tmp_inherited_linker_flag";; ++ *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi +@@ -5546,8 +6423,8 @@ func_mode_link () + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then +- test -n "$dlopen" && dlfiles="$dlfiles $dlopen" +- test -n "$dlpreopen" && dlprefiles="$dlprefiles $dlpreopen" ++ test -n "$dlopen" && func_append dlfiles " $dlopen" ++ test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test "$pass" = conv; then +@@ -5558,20 +6435,20 @@ func_mode_link () + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. +- convenience="$convenience $ladir/$objdir/$old_library" +- old_convenience="$old_convenience $ladir/$objdir/$old_library" ++ func_append convenience " $ladir/$objdir/$old_library" ++ func_append old_convenience " $ladir/$objdir/$old_library" + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" +- if $opt_duplicate_deps ; then ++ if $opt_preserve_dup_deps ; then + case "$tmp_libs " in +- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; ++ *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi +- tmp_libs="$tmp_libs $deplib" ++ func_append tmp_libs " $deplib" + done + continue + fi # $pass = conv +@@ -5579,9 +6456,15 @@ func_mode_link () + + # Get the name of the library we link against. + linklib= +- for l in $old_library $library_names; do +- linklib="$l" +- done ++ if test -n "$old_library" && ++ { test "$prefer_static_libs" = yes || ++ test "$prefer_static_libs,$installed" = "built,no"; }; then ++ linklib=$old_library ++ else ++ for l in $old_library $library_names; do ++ linklib="$l" ++ done ++ fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi +@@ -5598,9 +6481,9 @@ func_mode_link () + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. 
+- dlprefiles="$dlprefiles $lib $dependency_libs" ++ func_append dlprefiles " $lib $dependency_libs" + else +- newdlfiles="$newdlfiles $lib" ++ func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen +@@ -5622,14 +6505,14 @@ func_mode_link () + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then +- if test ! -f "$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then ++ if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else +- dir="$libdir" +- absdir="$libdir" ++ dir="$lt_sysroot$libdir" ++ absdir="$lt_sysroot$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else +@@ -5637,12 +6520,12 @@ func_mode_link () + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later +- notinst_path="$notinst_path $abs_ladir" ++ func_append notinst_path " $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later +- notinst_path="$notinst_path $abs_ladir" ++ func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" +@@ -5653,20 +6536,46 @@ func_mode_link () + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi +- # Prefer using a static library (so that no silly _DYNAMIC symbols +- # are required to link). +- if test -n "$old_library"; then +- newdlprefiles="$newdlprefiles $dir/$old_library" +- # Keep a list of preopened convenience libraries to check +- # that they are being used correctly in the link pass. +- test -z "$libdir" && \ +- dlpreconveniencelibs="$dlpreconveniencelibs $dir/$old_library" +- # Otherwise, use the dlname, so that lt_dlopen finds it. +- elif test -n "$dlname"; then +- newdlprefiles="$newdlprefiles $dir/$dlname" +- else +- newdlprefiles="$newdlprefiles $dir/$linklib" +- fi ++ case "$host" in ++ # special handling for platforms with PE-DLLs. ++ *cygwin* | *mingw* | *cegcc* ) ++ # Linker will automatically link against shared library if both ++ # static and shared are present. Therefore, ensure we extract ++ # symbols from the import library if a shared library is present ++ # (otherwise, the dlopen module name will be incorrect). We do ++ # this by putting the import library name into $newdlprefiles. ++ # We recover the dlopen module name by 'saving' the la file ++ # name in a special purpose variable, and (later) extracting the ++ # dlname from the la file. ++ if test -n "$dlname"; then ++ func_tr_sh "$dir/$linklib" ++ eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" ++ func_append newdlprefiles " $dir/$linklib" ++ else ++ func_append newdlprefiles " $dir/$old_library" ++ # Keep a list of preopened convenience libraries to check ++ # that they are being used correctly in the link pass. ++ test -z "$libdir" && \ ++ func_append dlpreconveniencelibs " $dir/$old_library" ++ fi ++ ;; ++ * ) ++ # Prefer using a static library (so that no silly _DYNAMIC symbols ++ # are required to link). ++ if test -n "$old_library"; then ++ func_append newdlprefiles " $dir/$old_library" ++ # Keep a list of preopened convenience libraries to check ++ # that they are being used correctly in the link pass. ++ test -z "$libdir" && \ ++ func_append dlpreconveniencelibs " $dir/$old_library" ++ # Otherwise, use the dlname, so that lt_dlopen finds it. 
++ elif test -n "$dlname"; then ++ func_append newdlprefiles " $dir/$dlname" ++ else ++ func_append newdlprefiles " $dir/$linklib" ++ fi ++ ;; ++ esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then +@@ -5684,7 +6593,7 @@ func_mode_link () + + + if test "$linkmode" = prog && test "$pass" != link; then +- newlib_search_path="$newlib_search_path $ladir" ++ func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no +@@ -5697,7 +6606,8 @@ func_mode_link () + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" +- newlib_search_path="$newlib_search_path $func_stripname_result" ++ func_resolve_sysroot "$func_stripname_result" ++ func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? +@@ -5708,12 +6618,12 @@ func_mode_link () + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi +- if $opt_duplicate_deps ; then ++ if $opt_preserve_dup_deps ; then + case "$tmp_libs " in +- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; ++ *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi +- tmp_libs="$tmp_libs $deplib" ++ func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... +@@ -5728,7 +6638,7 @@ func_mode_link () + # Make sure the rpath contains only unique directories. + case "$temp_rpath:" in + *"$absdir:"*) ;; +- *) temp_rpath="$temp_rpath$absdir:" ;; ++ *) func_append temp_rpath "$absdir:" ;; + esac + fi + +@@ -5740,7 +6650,7 @@ func_mode_link () + *) + case "$compile_rpath " in + *" $absdir "*) ;; +- *) compile_rpath="$compile_rpath $absdir" ++ *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac +@@ -5749,7 +6659,7 @@ func_mode_link () + *) + case "$finalize_rpath " in + *" $libdir "*) ;; +- *) finalize_rpath="$finalize_rpath $libdir" ++ *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac +@@ -5774,12 +6684,12 @@ func_mode_link () + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded +- notinst_deplibs="$notinst_deplibs $lib" ++ func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then +- notinst_deplibs="$notinst_deplibs $lib" ++ func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; +@@ -5814,7 +6724,7 @@ func_mode_link () + *) + case "$compile_rpath " in + *" $absdir "*) ;; +- *) compile_rpath="$compile_rpath $absdir" ++ *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac +@@ -5823,7 +6733,7 @@ func_mode_link () + *) + case "$finalize_rpath " in + *" $libdir "*) ;; +- *) finalize_rpath="$finalize_rpath $libdir" ++ *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac +@@ -5835,7 +6745,7 @@ func_mode_link () + shift + realname="$1" + shift +- eval "libname=\"$libname_spec\"" ++ libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? 
+ if test -n "$dlname"; then + soname="$dlname" +@@ -5848,7 +6758,7 @@ func_mode_link () + versuffix="-$major" + ;; + esac +- eval "soname=\"$soname_spec\"" ++ eval soname=\"$soname_spec\" + else + soname="$realname" + fi +@@ -5877,7 +6787,7 @@ func_mode_link () + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + +- if test "$linkmode" = prog || test "$mode" != relink; then ++ if test "$linkmode" = prog || test "$opt_mode" != relink; then + add_shlibpath= + add_dir= + add= +@@ -5933,7 +6843,7 @@ func_mode_link () + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) +- add_dir="$add_dir -L$inst_prefix_dir$libdir" ++ func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi +@@ -5955,7 +6865,7 @@ func_mode_link () + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; +- *) compile_shlibpath="$compile_shlibpath$add_shlibpath:" ;; ++ *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then +@@ -5969,13 +6879,13 @@ func_mode_link () + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; +- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; ++ *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + +- if test "$linkmode" = prog || test "$mode" = relink; then ++ if test "$linkmode" = prog || test "$opt_mode" = relink; then + add_shlibpath= + add_dir= + add= +@@ -5989,7 +6899,7 @@ func_mode_link () + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; +- *) finalize_shlibpath="$finalize_shlibpath$libdir:" ;; ++ *) func_append finalize_shlibpath "$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then +@@ -6001,12 +6911,12 @@ func_mode_link () + fi + else + # We cannot seem to hardcode it, guess we'll fake it. +- add_dir="-L$libdir" ++ add_dir="-L$lt_sysroot$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) +- add_dir="$add_dir -L$inst_prefix_dir$libdir" ++ func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi +@@ -6083,27 +6993,33 @@ func_mode_link () + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; +- *) xrpath="$xrpath $temp_xrpath";; ++ *) func_append xrpath " $temp_xrpath";; + esac;; +- *) temp_deplibs="$temp_deplibs $libdir";; ++ *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + +- newlib_search_path="$newlib_search_path $absdir" ++ func_append newlib_search_path " $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... 
and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" +- if $opt_duplicate_deps ; then ++ case $deplib in ++ -L*) func_stripname '-L' '' "$deplib" ++ func_resolve_sysroot "$func_stripname_result";; ++ *) func_resolve_sysroot "$deplib" ;; ++ esac ++ if $opt_preserve_dup_deps ; then + case "$tmp_libs " in +- *" $deplib "*) specialdeplibs="$specialdeplibs $deplib" ;; ++ *" $func_resolve_sysroot_result "*) ++ func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi +- tmp_libs="$tmp_libs $deplib" ++ func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test "$link_all_deplibs" != no; then +@@ -6113,8 +7029,10 @@ func_mode_link () + case $deplib in + -L*) path="$deplib" ;; + *.la) ++ func_resolve_sysroot "$deplib" ++ deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." +- dir="$func_dirname_result" ++ dir=$func_dirname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; +@@ -6130,7 +7048,7 @@ func_mode_link () + case $host in + *-*-darwin*) + depdepl= +- deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` ++ eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp +@@ -6141,8 +7059,8 @@ func_mode_link () + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi +- compiler_flags="$compiler_flags ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" +- linker_flags="$linker_flags -dylib_file ${darwin_install_name}:${depdepl}" ++ func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" ++ func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi +@@ -6152,7 +7070,7 @@ func_mode_link () + ;; + esac + else +- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` ++ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ +@@ -6192,7 +7110,7 @@ func_mode_link () + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; +- *) lib_search_path="$lib_search_path $dir" ;; ++ *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= +@@ -6205,7 +7123,7 @@ func_mode_link () + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order +- eval tmp_libs=\$$var ++ eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so +@@ -6250,13 +7168,13 @@ func_mode_link () + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; +- *) tmp_libs="$tmp_libs $deplib" ;; ++ *) func_append tmp_libs " $deplib" ;; + esac + ;; +- *) tmp_libs="$tmp_libs $deplib" ;; ++ *) func_append tmp_libs " $deplib" ;; + esac + done +- eval $var=\$tmp_libs ++ eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs +@@ -6269,7 +7187,7 @@ func_mode_link () + ;; + esac + if test -n "$i" ; then +- tmp_libs="$tmp_libs $i" ++ func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs +@@ -6310,7 +7228,7 @@ func_mode_link () + # Now set the variables for building old libraries. 
+ build_libtool_libs=no + oldlibs="$output" +- objs="$objs$old_deplibs" ++ func_append objs "$old_deplibs" + ;; + + lib) +@@ -6319,8 +7237,8 @@ func_mode_link () + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result +- eval "shared_ext=\"$shrext_cmds\"" +- eval "libname=\"$libname_spec\"" ++ eval shared_ext=\"$shrext_cmds\" ++ eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ +@@ -6330,8 +7248,8 @@ func_mode_link () + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result +- eval "shared_ext=\"$shrext_cmds\"" +- eval "libname=\"$libname_spec\"" ++ eval shared_ext=\"$shrext_cmds\" ++ eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result +@@ -6346,7 +7264,7 @@ func_mode_link () + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" +- libobjs="$libobjs $objs" ++ func_append libobjs " $objs" + fi + fi + +@@ -6544,7 +7462,7 @@ func_mode_link () + done + + # Make executables depend on our current version. +- verstring="$verstring:${current}.0" ++ func_append verstring ":${current}.0" + ;; + + qnx) +@@ -6612,10 +7530,10 @@ func_mode_link () + fi + + func_generate_dlsyms "$libname" "$libname" "yes" +- libobjs="$libobjs $symfileobj" ++ func_append libobjs " $symfileobj" + test "X$libobjs" = "X " && libobjs= + +- if test "$mode" != relink; then ++ if test "$opt_mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= +@@ -6631,7 +7549,7 @@ func_mode_link () + continue + fi + fi +- removelist="$removelist $p" ++ func_append removelist " $p" + ;; + *) ;; + esac +@@ -6642,7 +7560,7 @@ func_mode_link () + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then +- oldlibs="$oldlibs $output_objdir/$libname.$libext" ++ func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` +@@ -6659,10 +7577,11 @@ func_mode_link () + # If the user specified any rpath flags, then add them. + temp_xrpath= + for libdir in $xrpath; do +- temp_xrpath="$temp_xrpath -R$libdir" ++ func_replace_sysroot "$libdir" ++ func_append temp_xrpath " -R$func_replace_sysroot_result" + case "$finalize_rpath " in + *" $libdir "*) ;; +- *) finalize_rpath="$finalize_rpath $libdir" ;; ++ *) func_append finalize_rpath " $libdir" ;; + esac + done + if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then +@@ -6676,7 +7595,7 @@ func_mode_link () + for lib in $old_dlfiles; do + case " $dlprefiles $dlfiles " in + *" $lib "*) ;; +- *) dlfiles="$dlfiles $lib" ;; ++ *) func_append dlfiles " $lib" ;; + esac + done + +@@ -6686,7 +7605,7 @@ func_mode_link () + for lib in $old_dlprefiles; do + case "$dlprefiles " in + *" $lib "*) ;; +- *) dlprefiles="$dlprefiles $lib" ;; ++ *) func_append dlprefiles " $lib" ;; + esac + done + +@@ -6698,7 +7617,7 @@ func_mode_link () + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C library is in the System framework +- deplibs="$deplibs System.ltframework" ++ func_append deplibs " System.ltframework" + ;; + *-*-netbsd*) + # Don't link with libc until the a.out ld.so is fixed. 
+@@ -6715,7 +7634,7 @@ func_mode_link () + *) + # Add libc to deplibs on all other systems if necessary. + if test "$build_libtool_need_lc" = "yes"; then +- deplibs="$deplibs -lc" ++ func_append deplibs " -lc" + fi + ;; + esac +@@ -6764,18 +7683,18 @@ EOF + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $i "*) +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + i="" + ;; + esac + fi + if test -n "$i" ; then +- eval "libname=\"$libname_spec\"" +- eval "deplib_matches=\"$library_names_spec\"" ++ libname=`eval "\\$ECHO \"$libname_spec\""` ++ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""` + set dummy $deplib_matches; shift + deplib_match=$1 + if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + else + droppeddeps=yes + echo +@@ -6789,7 +7708,7 @@ EOF + fi + ;; + *) +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + ;; + esac + done +@@ -6807,18 +7726,18 @@ EOF + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $i "*) +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + i="" + ;; + esac + fi + if test -n "$i" ; then +- eval "libname=\"$libname_spec\"" +- eval "deplib_matches=\"$library_names_spec\"" ++ libname=`eval "\\$ECHO \"$libname_spec\""` ++ deplib_matches=`eval "\\$ECHO \"$library_names_spec\""` + set dummy $deplib_matches; shift + deplib_match=$1 + if test `expr "$ldd_output" : ".*$deplib_match"` -ne 0 ; then +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + else + droppeddeps=yes + echo +@@ -6840,7 +7759,7 @@ EOF + fi + ;; + *) +- newdeplibs="$newdeplibs $i" ++ func_append newdeplibs " $i" + ;; + esac + done +@@ -6857,15 +7776,27 @@ EOF + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then +- eval "libname=\"$libname_spec\"" ++ libname=`eval "\\$ECHO \"$libname_spec\""` ++ if test -n "$file_magic_glob"; then ++ libnameglob=`func_echo_all "$libname" | $SED -e $file_magic_glob` ++ else ++ libnameglob=$libname ++ fi ++ test "$want_nocaseglob" = yes && nocaseglob=`shopt -p nocaseglob` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do +- potential_libs=`ls $i/$libname[.-]* 2>/dev/null` ++ if test "$want_nocaseglob" = yes; then ++ shopt -s nocaseglob ++ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` ++ $nocaseglob ++ else ++ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` ++ fi + for potent_lib in $potential_libs; do + # Follow soft links. + if ls -lLd "$potent_lib" 2>/dev/null | +@@ -6885,10 +7816,10 @@ EOF + *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; + esac + done +- if eval "$file_magic_cmd \"\$potlib\"" 2>/dev/null | ++ if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi +@@ -6913,7 +7844,7 @@ EOF + ;; + *) + # Add a -L argument. +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+@@ -6929,20 +7860,20 @@ EOF + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then +- eval "libname=\"$libname_spec\"" ++ libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi +@@ -6967,7 +7898,7 @@ EOF + ;; + *) + # Add a -L argument. +- newdeplibs="$newdeplibs $a_deplib" ++ func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. +@@ -7071,7 +8002,7 @@ EOF + *) + case " $deplibs " in + *" -L$path/$objdir "*) +- new_libs="$new_libs -L$path/$objdir" ;; ++ func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac +@@ -7081,10 +8012,10 @@ EOF + -L*) + case " $new_libs " in + *" $deplib "*) ;; +- *) new_libs="$new_libs $deplib" ;; ++ *) func_append new_libs " $deplib" ;; + esac + ;; +- *) new_libs="$new_libs $deplib" ;; ++ *) func_append new_libs " $deplib" ;; + esac + done + deplibs="$new_libs" +@@ -7101,10 +8032,12 @@ EOF + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" +- test "$mode" != relink && rpath="$compile_rpath$rpath" ++ test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then ++ func_replace_sysroot "$libdir" ++ libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else +@@ -7113,18 +8046,18 @@ EOF + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) +- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ++ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else +- eval "flag=\"$hardcode_libdir_flag_spec\"" +- dep_rpath="$dep_rpath $flag" ++ eval flag=\"$hardcode_libdir_flag_spec\" ++ func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; +- *) perm_rpath="$perm_rpath $libdir" ;; ++ *) func_append perm_rpath " $libdir" ;; + esac + fi + done +@@ -7133,40 +8066,38 @@ EOF + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + if test -n "$hardcode_libdir_flag_spec_ld"; then +- eval "dep_rpath=\"$hardcode_libdir_flag_spec_ld\"" ++ eval dep_rpath=\"$hardcode_libdir_flag_spec_ld\" + else +- eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" ++ eval dep_rpath=\"$hardcode_libdir_flag_spec\" + fi + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var.
+ rpath= + for dir in $perm_rpath; do +- rpath="$rpath$dir:" ++ func_append rpath "$dir:" + done +- eval $runpath_var=\$rpath\$$runpath_var +- export $runpath_var ++ eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" +- test "$mode" != relink && shlibpath="$compile_shlibpath$shlibpath" ++ test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then +- eval $shlibpath_var=\$shlibpath\$$shlibpath_var +- export $shlibpath_var ++ eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. +- eval "shared_ext=\"$shrext_cmds\"" +- eval "library_names=\"$library_names_spec\"" ++ eval shared_ext=\"$shrext_cmds\" ++ eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then +- eval "soname=\"$soname_spec\"" ++ eval soname=\"$soname_spec\" + else + soname="$realname" + fi +@@ -7178,7 +8109,7 @@ EOF + linknames= + for link + do +- linknames="$linknames $link" ++ func_append linknames " $link" + done + + # Use standard objects if they are pic +@@ -7189,7 +8120,7 @@ EOF + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" +- delfiles="$delfiles $export_symbols" ++ func_append delfiles " $export_symbols" + fi + + orig_export_symbols= +@@ -7220,13 +8151,45 @@ EOF + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' +- for cmd in $cmds; do ++ for cmd1 in $cmds; do + IFS="$save_ifs" +- eval "cmd=\"$cmd\"" +- func_len " $cmd" +- len=$func_len_result +- if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then ++ # Take the normal branch if the nm_file_list_spec branch ++ # doesn't work or if tool conversion is not needed. ++ case $nm_file_list_spec~$to_tool_file_cmd in ++ *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) ++ try_normal_branch=yes ++ eval cmd=\"$cmd1\" ++ func_len " $cmd" ++ len=$func_len_result ++ ;; ++ *) ++ try_normal_branch=no ++ ;; ++ esac ++ if test "$try_normal_branch" = yes \ ++ && { test "$len" -lt "$max_cmd_len" \ ++ || test "$max_cmd_len" -le -1; } ++ then ++ func_show_eval "$cmd" 'exit $?' ++ skipped_export=false ++ elif test -n "$nm_file_list_spec"; then ++ func_basename "$output" ++ output_la=$func_basename_result ++ save_libobjs=$libobjs ++ save_output=$output ++ output=${output_objdir}/${output_la}.nm ++ func_to_tool_file "$output" ++ libobjs=$nm_file_list_spec$func_to_tool_file_result ++ func_append delfiles " $output" ++ func_verbose "creating $NM input file list: $output" ++ for obj in $save_libobjs; do ++ func_to_tool_file "$obj" ++ $ECHO "$func_to_tool_file_result" ++ done > "$output" ++ eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' ++ output=$save_output ++ libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. 
+@@ -7248,7 +8211,7 @@ EOF + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" +- $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" ++ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then +@@ -7260,7 +8223,7 @@ EOF + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter +- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" ++ func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi +@@ -7270,7 +8233,7 @@ EOF + case " $convenience " in + *" $test_deplib "*) ;; + *) +- tmp_deplibs="$tmp_deplibs $test_deplib" ++ func_append tmp_deplibs " $test_deplib" + ;; + esac + done +@@ -7286,43 +8249,43 @@ EOF + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs +- eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" ++ eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + + func_extract_archives $gentop $convenience +- libobjs="$libobjs $func_extract_archives_result" ++ func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then +- eval "flag=\"$thread_safe_flag_spec\"" +- linker_flags="$linker_flags $flag" ++ eval flag=\"$thread_safe_flag_spec\" ++ func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking +- if test "$mode" = relink; then +- $opt_dry_run || (cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U) || exit $? ++ if test "$opt_mode" = relink; then ++ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. 
+ if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then +- eval "test_cmds=\"$module_expsym_cmds\"" ++ eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else +- eval "test_cmds=\"$module_cmds\"" ++ eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then +- eval "test_cmds=\"$archive_expsym_cmds\"" ++ eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else +- eval "test_cmds=\"$archive_cmds\"" ++ eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi +@@ -7366,10 +8329,13 @@ EOF + echo 'INPUT (' > $output + for obj in $save_libobjs + do +- $ECHO "$obj" >> $output ++ func_to_tool_file "$obj" ++ $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output +- delfiles="$delfiles $output" ++ func_append delfiles " $output" ++ func_to_tool_file "$output" ++ output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" +@@ -7383,15 +8349,17 @@ EOF + fi + for obj + do +- $ECHO "$obj" >> $output ++ func_to_tool_file "$obj" ++ $ECHO "$func_to_tool_file_result" >> $output + done +- delfiles="$delfiles $output" +- output=$firstobj\"$file_list_spec$output\" ++ func_append delfiles " $output" ++ func_to_tool_file "$output" ++ output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext +- eval "test_cmds=\"$reload_cmds\"" ++ eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 +@@ -7411,12 +8379,12 @@ EOF + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist +- eval "concat_cmds=\"$reload_cmds\"" ++ eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" +- eval "concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\"" ++ eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 +@@ -7433,11 +8401,11 @@ EOF + # files will link in the last one created. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" +- eval "concat_cmds=\"\${concat_cmds}$reload_cmds\"" ++ eval concat_cmds=\"\${concat_cmds}$reload_cmds\" + if test -n "$last_robj"; then +- eval "concat_cmds=\"\${concat_cmds}~\$RM $last_robj\"" ++ eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi +- delfiles="$delfiles $output" ++ func_append delfiles " $output" + + else + output= +@@ -7450,9 +8418,9 @@ EOF + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ +- eval "concat_cmds=\"\$concat_cmds$export_symbols_cmds\"" ++ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then +- eval "concat_cmds=\"\$concat_cmds~\$RM $last_robj\"" ++ eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + +@@ -7471,7 +8439,7 @@ EOF + lt_exit=$? 
+ + # Restore the uninstalled library and exit +- if test "$mode" = relink; then ++ if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) +@@ -7492,7 +8460,7 @@ EOF + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" +- $opt_dry_run || $ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols" ++ $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then +@@ -7504,7 +8472,7 @@ EOF + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter +- delfiles="$delfiles $export_symbols $output_objdir/$libname.filter" ++ func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi +@@ -7515,7 +8483,7 @@ EOF + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then +- eval "libobjs=\"\$libobjs $whole_archive_flag_spec\"" ++ eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the +@@ -7539,23 +8507,23 @@ EOF + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. +- eval "cmds=\"\$cmds~\$RM $delfiles\"" ++ eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles +- libobjs="$libobjs $func_extract_archives_result" ++ func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" +- eval "cmd=\"$cmd\"" ++ eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" +@@ -7564,7 +8532,7 @@ EOF + lt_exit=$? + + # Restore the uninstalled library and exit +- if test "$mode" = relink; then ++ if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) +@@ -7576,8 +8544,8 @@ EOF + IFS="$save_ifs" + + # Restore the uninstalled library and exit +- if test "$mode" = relink; then +- $opt_dry_run || (cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname) || exit $? ++ if test "$opt_mode" = relink; then ++ $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? 
+ + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then +@@ -7656,17 +8624,20 @@ EOF + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then +- eval "tmp_whole_archive_flags=\"$whole_archive_flag_spec\"" ++ eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + else + gentop="$output_objdir/${obj}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + ++ # If we're not building shared, we need to use non_pic_objs ++ test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" ++ + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + +@@ -7690,7 +8661,7 @@ EOF + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" +- # $opt_dry_run || echo timestamp > $libobj || exit $? ++ # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + +@@ -7740,8 +8711,8 @@ EOF + if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) +- compile_command="$compile_command ${wl}-bind_at_load" +- finalize_command="$finalize_command ${wl}-bind_at_load" ++ func_append compile_command " ${wl}-bind_at_load" ++ func_append finalize_command " ${wl}-bind_at_load" + ;; + esac + fi +@@ -7761,7 +8732,7 @@ EOF + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) +- new_libs="$new_libs -L$path/$objdir" ;; ++ func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac +@@ -7771,17 +8742,17 @@ EOF + -L*) + case " $new_libs " in + *" $deplib "*) ;; +- *) new_libs="$new_libs $deplib" ;; ++ *) func_append new_libs " $deplib" ;; + esac + ;; +- *) new_libs="$new_libs $deplib" ;; ++ *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + +- compile_command="$compile_command $compile_deplibs" +- finalize_command="$finalize_command $finalize_deplibs" ++ func_append compile_command " $compile_deplibs" ++ func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. +@@ -7789,7 +8760,7 @@ EOF + # This is the magic to use -rpath. 
+ case "$finalize_rpath " in + *" $libdir "*) ;; +- *) finalize_rpath="$finalize_rpath $libdir" ;; ++ *) func_append finalize_rpath " $libdir" ;; + esac + done + fi +@@ -7808,18 +8779,18 @@ EOF + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) +- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ++ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else +- eval "flag=\"$hardcode_libdir_flag_spec\"" +- rpath="$rpath $flag" ++ eval flag=\"$hardcode_libdir_flag_spec\" ++ func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; +- *) perm_rpath="$perm_rpath $libdir" ;; ++ *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in +@@ -7828,12 +8799,12 @@ EOF + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; +- *) dllsearchpath="$dllsearchpath:$libdir";; ++ *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; +- *) dllsearchpath="$dllsearchpath:$testbindir";; ++ *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac +@@ -7842,7 +8813,7 @@ EOF + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" +- eval "rpath=\" $hardcode_libdir_flag_spec\"" ++ eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + +@@ -7859,18 +8830,18 @@ EOF + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) +- hardcode_libdirs="$hardcode_libdirs$hardcode_libdir_separator$libdir" ++ func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else +- eval "flag=\"$hardcode_libdir_flag_spec\"" +- rpath="$rpath $flag" ++ eval flag=\"$hardcode_libdir_flag_spec\" ++ func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; +- *) finalize_perm_rpath="$finalize_perm_rpath $libdir" ;; ++ *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done +@@ -7878,7 +8849,7 @@ EOF + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" +- eval "rpath=\" $hardcode_libdir_flag_spec\"" ++ eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + +@@ -7921,6 +8892,12 @@ EOF + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + ++ if test -n "$postlink_cmds"; then ++ func_to_tool_file "$output" ++ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` ++ func_execute_cmds "$postlink_cmds" 'exit $?' ++ fi ++ + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' +@@ -7943,7 +8920,7 @@ EOF + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do +- rpath="$rpath$dir:" ++ func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi +@@ -7951,7 +8928,7 @@ EOF + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do +- rpath="$rpath$dir:" ++ func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi +@@ -7966,6 +8943,13 @@ EOF + $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' 
++ ++ if test -n "$postlink_cmds"; then ++ func_to_tool_file "$output" ++ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` ++ func_execute_cmds "$postlink_cmds" 'exit $?' ++ fi ++ + exit $EXIT_SUCCESS + fi + +@@ -7999,6 +8983,12 @@ EOF + + func_show_eval "$link_command" 'exit $?' + ++ if test -n "$postlink_cmds"; then ++ func_to_tool_file "$output_objdir/$outputname" ++ postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` ++ func_execute_cmds "$postlink_cmds" 'exit $?' ++ fi ++ + # Now create the wrapper script. + func_verbose "creating $output" + +@@ -8096,7 +9086,7 @@ EOF + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then +- oldobjs="$oldobjs $symfileobj" ++ func_append oldobjs " $symfileobj" + fi + fi + addlibs="$old_convenience" +@@ -8104,10 +9094,10 @@ EOF + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + + func_extract_archives $gentop $addlibs +- oldobjs="$oldobjs $func_extract_archives_result" ++ func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. +@@ -8118,10 +9108,10 @@ EOF + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles +- oldobjs="$oldobjs $func_extract_archives_result" ++ func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have +@@ -8139,7 +9129,7 @@ EOF + else + echo "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" +- generated="$generated $gentop" ++ func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= +@@ -8163,18 +9153,28 @@ EOF + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" +- oldobjs="$oldobjs $gentop/$newobj" ++ func_append oldobjs " $gentop/$newobj" + ;; +- *) oldobjs="$oldobjs $obj" ;; ++ *) func_append oldobjs " $obj" ;; + esac + done + fi +- eval "cmds=\"$old_archive_cmds\"" ++ eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds ++ elif test -n "$archiver_list_spec"; then ++ func_verbose "using command file archive linking..." ++ for obj in $oldobjs ++ do ++ func_to_tool_file "$obj" ++ $ECHO "$func_to_tool_file_result" ++ done > $output_objdir/$libname.libcmd ++ func_to_tool_file "$output_objdir/$libname.libcmd" ++ oldobjs=" $archiver_list_spec$func_to_tool_file_result" ++ cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." 
+@@ -8189,7 +9189,7 @@ EOF + do + last_oldobj=$obj + done +- eval "test_cmds=\"$old_archive_cmds\"" ++ eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 +@@ -8208,7 +9208,7 @@ EOF + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ +- eval "concat_cmds=\"\${concat_cmds}$old_archive_cmds\"" ++ eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi +@@ -8216,9 +9216,9 @@ EOF + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then +- eval "cmds=\"\$concat_cmds\"" ++ eval cmds=\"\$concat_cmds\" + else +- eval "cmds=\"\$concat_cmds~\$old_archive_cmds\"" ++ eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi +@@ -8268,12 +9268,23 @@ EOF + *.la) + func_basename "$deplib" + name="$func_basename_result" +- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` ++ func_resolve_sysroot "$deplib" ++ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" +- newdependency_libs="$newdependency_libs $libdir/$name" ++ func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" ++ ;; ++ -L*) ++ func_stripname -L '' "$deplib" ++ func_replace_sysroot "$func_stripname_result" ++ func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; +- *) newdependency_libs="$newdependency_libs $deplib" ;; ++ -R*) ++ func_stripname -R '' "$deplib" ++ func_replace_sysroot "$func_stripname_result" ++ func_append newdependency_libs " -R$func_replace_sysroot_result" ++ ;; ++ *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" +@@ -8284,12 +9295,14 @@ EOF + *.la) + func_basename "$lib" + name="$func_basename_result" +- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` ++ func_resolve_sysroot "$lib" ++ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` ++ + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" +- newdlfiles="$newdlfiles $libdir/$name" ++ func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; +- *) newdlfiles="$newdlfiles $lib" ;; ++ *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles="$newdlfiles" +@@ -8303,10 +9316,11 @@ EOF + # the library: + func_basename "$lib" + name="$func_basename_result" +- libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` ++ func_resolve_sysroot "$lib" ++ eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" +- newdlprefiles="$newdlprefiles $libdir/$name" ++ func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done +@@ -8318,7 +9332,7 @@ EOF + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac +- newdlfiles="$newdlfiles $abs" ++ func_append newdlfiles " $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= +@@ -8327,7 +9341,7 @@ EOF + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac +- newdlprefiles="$newdlprefiles $abs" ++ func_append newdlprefiles " $abs" + done + dlprefiles="$newdlprefiles" + fi +@@ -8412,7 +9426,7 @@ relink_command=\"$relink_command\"" + exit $EXIT_SUCCESS + } + +-{ test "$mode" = link || test "$mode" = relink; } && ++{ test "$opt_mode" = link || test "$opt_mode" = relink; } && + func_mode_link ${1+"$@"} + + +@@ -8432,9 +9446,9 @@ func_mode_uninstall () + for arg + do + case $arg in +- -f) RM="$RM $arg"; 
rmforce=yes ;; +- -*) RM="$RM $arg" ;; +- *) files="$files $arg" ;; ++ -f) func_append RM " $arg"; rmforce=yes ;; ++ -*) func_append RM " $arg" ;; ++ *) func_append files " $arg" ;; + esac + done + +@@ -8443,24 +9457,23 @@ func_mode_uninstall () + + rmdirs= + +- origobjdir="$objdir" + for file in $files; do + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then +- objdir="$origobjdir" ++ odir="$objdir" + else +- objdir="$dir/$origobjdir" ++ odir="$dir/$objdir" + fi + func_basename "$file" + name="$func_basename_result" +- test "$mode" = uninstall && objdir="$dir" ++ test "$opt_mode" = uninstall && odir="$dir" + +- # Remember objdir for removal later, being careful to avoid duplicates +- if test "$mode" = clean; then ++ # Remember odir for removal later, being careful to avoid duplicates ++ if test "$opt_mode" = clean; then + case " $rmdirs " in +- *" $objdir "*) ;; +- *) rmdirs="$rmdirs $objdir" ;; ++ *" $odir "*) ;; ++ *) func_append rmdirs " $odir" ;; + esac + fi + +@@ -8486,18 +9499,17 @@ func_mode_uninstall () + + # Delete the libtool libraries and symlinks. + for n in $library_names; do +- rmfiles="$rmfiles $objdir/$n" ++ func_append rmfiles " $odir/$n" + done +- test -n "$old_library" && rmfiles="$rmfiles $objdir/$old_library" ++ test -n "$old_library" && func_append rmfiles " $odir/$old_library" + +- case "$mode" in ++ case "$opt_mode" in + clean) +- case " $library_names " in +- # " " in the beginning catches empty $dlname ++ case " $library_names " in + *" $dlname "*) ;; +- *) rmfiles="$rmfiles $objdir/$dlname" ;; ++ *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac +- test -n "$libdir" && rmfiles="$rmfiles $objdir/$name $objdir/${name}i" ++ test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then +@@ -8525,19 +9537,19 @@ func_mode_uninstall () + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && + test "$pic_object" != none; then +- rmfiles="$rmfiles $dir/$pic_object" ++ func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then +- rmfiles="$rmfiles $dir/$non_pic_object" ++ func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) +- if test "$mode" = clean ; then ++ if test "$opt_mode" = clean ; then + noexename=$name + case $file in + *.exe) +@@ -8547,7 +9559,7 @@ func_mode_uninstall () + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe +- rmfiles="$rmfiles $file" ++ func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. 
+@@ -8556,7 +9568,7 @@ func_mode_uninstall () + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result +- rmfiles="$rmfiles $func_ltwrapper_scriptname_result" ++ func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename +@@ -8564,12 +9576,12 @@ func_mode_uninstall () + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles +- rmfiles="$rmfiles $objdir/$name $objdir/${name}S.${objext}" ++ func_append rmfiles " $odir/$name $odir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then +- rmfiles="$rmfiles $objdir/lt-$name" ++ func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then +- rmfiles="$rmfiles $objdir/lt-${noexename}.c" ++ func_append rmfiles " $odir/lt-${noexename}.c" + fi + fi + fi +@@ -8577,7 +9589,6 @@ func_mode_uninstall () + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done +- objdir="$origobjdir" + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do +@@ -8589,16 +9600,16 @@ func_mode_uninstall () + exit $exit_status + } + +-{ test "$mode" = uninstall || test "$mode" = clean; } && ++{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +-test -z "$mode" && { ++test -z "$opt_mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" + } + + test -z "$exec_cmd" && \ +- func_fatal_help "invalid operation mode \`$mode'" ++ func_fatal_help "invalid operation mode \`$opt_mode'" + + if test -n "$exec_cmd"; then + eval exec "$exec_cmd" +diff --git a/ltoptions.m4 b/ltoptions.m4 +index 5ef12ced2a8..17cfd51c0b3 100644 +--- a/ltoptions.m4 ++++ b/ltoptions.m4 +@@ -8,7 +8,7 @@ + # unlimited permission to copy and/or distribute it, with or without + # modifications, as long as this notice is preserved. + +-# serial 6 ltoptions.m4 ++# serial 7 ltoptions.m4 + + # This is to help aclocal find these macros, as it can't see m4_define. + AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) +diff --git a/ltversion.m4 b/ltversion.m4 +index bf87f77132d..9c7b5d41185 100644 +--- a/ltversion.m4 ++++ b/ltversion.m4 +@@ -7,17 +7,17 @@ + # unlimited permission to copy and/or distribute it, with or without + # modifications, as long as this notice is preserved. + +-# Generated from ltversion.in. ++# @configure_input@ + +-# serial 3134 ltversion.m4 ++# serial 3293 ltversion.m4 + # This file is part of GNU Libtool + +-m4_define([LT_PACKAGE_VERSION], [2.2.7a]) +-m4_define([LT_PACKAGE_REVISION], [1.3134]) ++m4_define([LT_PACKAGE_VERSION], [2.4]) ++m4_define([LT_PACKAGE_REVISION], [1.3293]) + + AC_DEFUN([LTVERSION_VERSION], +-[macro_version='2.2.7a' +-macro_revision='1.3134' ++[macro_version='2.4' ++macro_revision='1.3293' + _LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) + _LT_DECL(, macro_revision, 0) + ]) +diff --git a/lt~obsolete.m4 b/lt~obsolete.m4 +index bf92b5e0790..c573da90c5c 100644 +--- a/lt~obsolete.m4 ++++ b/lt~obsolete.m4 +@@ -7,7 +7,7 @@ + # unlimited permission to copy and/or distribute it, with or without + # modifications, as long as this notice is preserved. + +-# serial 4 lt~obsolete.m4 ++# serial 5 lt~obsolete.m4 + + # These exist entirely to fool aclocal when bootstrapping libtool. 
+ # +diff --git a/opcodes/configure b/opcodes/configure +index 8d1e561c942..a0291dfbfa7 100755 +--- a/opcodes/configure ++++ b/opcodes/configure +@@ -680,6 +680,9 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL ++ac_ct_AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -798,6 +801,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_targets + enable_werror +@@ -1462,6 +1466,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + + Some influential environment variables: + CC C compiler command +@@ -5403,8 +5409,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -5444,7 +5450,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -6130,8 +6136,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... " >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -6180,6 +6186,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. 
++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -6196,6 +6276,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -6364,7 +6449,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. ++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -6518,6 +6604,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -6531,11 +6632,164 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... 
" >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -6551,7 +6805,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6571,11 +6825,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -6591,7 +6849,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -6610,6 +6868,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -6621,16 +6883,72 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. 
*/ ++ ++int ++main () ++{ + ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a + ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } + ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi + + + +@@ -6972,8 +7290,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -7009,6 +7327,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -7050,6 +7369,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -7061,7 +7392,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. 
*/ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -7087,8 +7418,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -7098,8 +7429,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -7136,6 +7467,16 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ + + + +@@ -7152,6 +7493,45 @@ fi + + + ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -7363,6 +7743,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. 
++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -7926,6 +8423,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -8091,7 +8590,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -8180,7 +8680,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -8478,8 +8978,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -8645,6 +9143,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -8707,7 +9211,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -8764,13 +9268,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8831,6 +9339,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -9181,7 +9694,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -9280,12 +9794,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -9299,8 +9813,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -9318,8 +9832,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9365,8 +9879,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -9496,7 +10010,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9509,22 +10029,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -9536,7 +10063,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + + int +@@ -9549,22 +10082,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -9609,20 +10149,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -9683,7 +10266,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -9691,7 +10274,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -9707,7 +10290,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -9731,10 +10314,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -9813,23 +10396,36 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -9914,7 +10510,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -9933,9 +10529,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -10511,8 +11107,9 
@@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -10545,13 +11142,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -10643,7 +11298,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -11439,7 +12094,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11442 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11483,10 +12138,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -11545,7 +12200,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 11548 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -11589,10 +12244,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -13756,13 +14411,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-13777,14 +14439,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -13817,12 +14482,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -13877,8 +14542,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -13888,12 +14558,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -13909,7 +14581,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -13945,6 +14616,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -14710,7 +15382,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -14813,19 +15486,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -14855,6 +15551,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -14864,6 +15566,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -14978,12 +15683,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -15070,9 +15775,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -15088,6 +15790,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -15120,210 +15825,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +diff --git a/zlib/configure b/zlib/configure +index de6fa7e9960..cce448b6a53 100755 +--- a/zlib/configure ++++ b/zlib/configure +@@ -646,8 +646,11 @@ OTOOL + LIPO + NMEDIT + DSYMUTIL ++MANIFEST_TOOL + RANLIB ++ac_ct_AR + AR ++DLLTOOL + OBJDUMP + LN_S + NM +@@ -774,6 +777,7 @@ enable_static + with_pic + enable_fast_install + with_gnu_ld ++with_libtool_sysroot + enable_libtool_lock + enable_host_shared + ' +@@ -1428,6 +1432,8 @@ Optional Packages: + --with-pic try to use only PIC/non-PIC objects [default=use + both] + --with-gnu-ld assume the C compiler uses GNU ld [default=no] ++ --with-libtool-sysroot=DIR Search for dependent libraries within DIR ++ (or the compiler's sysroot if not specified). + + Some influential environment variables: + CC C compiler command +@@ -4147,8 +4153,8 @@ esac + + + +-macro_version='2.2.7a' +-macro_revision='1.3134' ++macro_version='2.4' ++macro_revision='1.3293' + + + +@@ -4188,7 +4194,7 @@ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 + $as_echo_n "checking how to print strings... " >&6; } + # Test print first, because it will be a builtin if present. +-if test "X`print -r -- -n 2>/dev/null`" = X-n && \ ++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' + elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then +@@ -5004,8 +5010,8 @@ $as_echo_n "checking whether the shell understands some XSI constructs... 
" >&6; + # Try some XSI features + xsi_shell=no + ( _lt_dummy="a/b/c" +- test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \ +- = c,a/b,, \ ++ test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ ++ = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +@@ -5054,6 +5060,80 @@ esac + + + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 ++$as_echo_n "checking how to convert $build file names to $host format... " >&6; } ++if ${lt_cv_to_host_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ++ ;; ++ esac ++ ;; ++ *-*-cygwin* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ++ ;; ++ *-*-cygwin* ) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++ * ) # otherwise, assume *nix ++ lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ++ ;; ++ esac ++ ;; ++ * ) # unhandled hosts (and "normal" native builds) ++ lt_cv_to_host_file_cmd=func_convert_file_noop ++ ;; ++esac ++ ++fi ++ ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 ++$as_echo "$lt_cv_to_host_file_cmd" >&6; } ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 ++$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } ++if ${lt_cv_to_tool_file_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ #assume ordinary cross tools, or native build. ++lt_cv_to_tool_file_cmd=func_convert_file_noop ++case $host in ++ *-*-mingw* ) ++ case $build in ++ *-*-mingw* ) # actually msys ++ lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ++ ;; ++ esac ++ ;; ++esac ++ ++fi ++ ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 ++$as_echo "$lt_cv_to_tool_file_cmd" >&6; } ++ ++ ++ ++ ++ + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 + $as_echo_n "checking for $LD option to reload object files... " >&6; } + if ${lt_cv_ld_reload_flag+:} false; then : +@@ -5070,6 +5150,11 @@ case $reload_flag in + esac + reload_cmds='$LD$reload_flag -o $output$reload_objs' + case $host_os in ++ cygwin* | mingw* | pw32* | cegcc*) ++ if test "$GCC" != yes; then ++ reload_cmds=false ++ fi ++ ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' +@@ -5238,7 +5323,8 @@ mingw* | pw32*) + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else +- lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?' ++ # Keep this pattern in sync with the one in func_win32_libid. 
++ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; +@@ -5392,6 +5478,21 @@ esac + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 + $as_echo "$lt_cv_deplibs_check_method" >&6; } ++ ++file_magic_glob= ++want_nocaseglob=no ++if test "$build" = "$host"; then ++ case $host_os in ++ mingw* | pw32*) ++ if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then ++ want_nocaseglob=yes ++ else ++ file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` ++ fi ++ ;; ++ esac ++fi ++ + file_magic_cmd=$lt_cv_file_magic_cmd + deplibs_check_method=$lt_cv_deplibs_check_method + test -z "$deplibs_check_method" && deplibs_check_method=unknown +@@ -5405,11 +5506,165 @@ test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ + + + if test -n "$ac_tool_prefix"; then +- # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args. +-set dummy ${ac_tool_prefix}ar; ac_word=$2 ++ # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. ++set dummy ${ac_tool_prefix}dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$DLLTOOL"; then ++ ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++DLLTOOL=$ac_cv_prog_DLLTOOL ++if test -n "$DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 ++$as_echo "$DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_DLLTOOL"; then ++ ac_ct_DLLTOOL=$DLLTOOL ++ # Extract the first word of "dlltool", so it can be a program name with args. ++set dummy dlltool; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_DLLTOOL"; then ++ ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. 
++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_DLLTOOL="dlltool" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL ++if test -n "$ac_ct_DLLTOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 ++$as_echo "$ac_ct_DLLTOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_DLLTOOL" = x; then ++ DLLTOOL="false" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ DLLTOOL=$ac_ct_DLLTOOL ++ fi ++else ++ DLLTOOL="$ac_cv_prog_DLLTOOL" ++fi ++ ++test -z "$DLLTOOL" && DLLTOOL=dlltool ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 ++$as_echo_n "checking how to associate runtime and link libraries... " >&6; } ++if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_sharedlib_from_linklib_cmd='unknown' ++ ++case $host_os in ++cygwin* | mingw* | pw32* | cegcc*) ++ # two different shell functions defined in ltmain.sh ++ # decide which to use based on capabilities of $DLLTOOL ++ case `$DLLTOOL --help 2>&1` in ++ *--identify-strict*) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ++ ;; ++ *) ++ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ++ ;; ++ esac ++ ;; ++*) ++ # fallback: assume linklib IS sharedlib ++ lt_cv_sharedlib_from_linklib_cmd="$ECHO" ++ ;; ++esac ++ ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 ++$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } ++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd ++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO ++ ++ ++ ++ ++ ++ ++ ++ ++if test -n "$ac_tool_prefix"; then ++ for ac_prog in ar ++ do ++ # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. ++set dummy $ac_tool_prefix$ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... " >&6; } + if ${ac_cv_prog_AR+:} false; then : +@@ -5425,7 +5680,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_AR="${ac_tool_prefix}ar" ++ ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -5445,11 +5700,15 @@ $as_echo "no" >&6; } + fi + + ++ test -n "$AR" && break ++ done + fi +-if test -z "$ac_cv_prog_AR"; then ++if test -z "$AR"; then + ac_ct_AR=$AR +- # Extract the first word of "ar", so it can be a program name with args. +-set dummy ar; ac_word=$2 ++ for ac_prog in ar ++do ++ # Extract the first word of "$ac_prog", so it can be a program name with args. ++set dummy $ac_prog; ac_word=$2 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 + $as_echo_n "checking for $ac_word... 
" >&6; } + if ${ac_cv_prog_ac_ct_AR+:} false; then : +@@ -5465,7 +5724,7 @@ do + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then +- ac_cv_prog_ac_ct_AR="ar" ++ ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +@@ -5484,6 +5743,10 @@ else + $as_echo "no" >&6; } + fi + ++ ++ test -n "$ac_ct_AR" && break ++done ++ + if test "x$ac_ct_AR" = x; then + AR="false" + else +@@ -5495,16 +5758,72 @@ ac_tool_warned=yes ;; + esac + AR=$ac_ct_AR + fi +-else +- AR="$ac_cv_prog_AR" + fi + +-test -z "$AR" && AR=ar +-test -z "$AR_FLAGS" && AR_FLAGS=cru ++: ${AR=ar} ++: ${AR_FLAGS=cru} ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 ++$as_echo_n "checking for archiver @FILE support... " >&6; } ++if ${lt_cv_ar_at_file+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_ar_at_file=no ++ cat confdefs.h - <<_ACEOF >conftest.$ac_ext ++/* end confdefs.h. */ ++ ++int ++main () ++{ + ++ ; ++ return 0; ++} ++_ACEOF ++if ac_fn_c_try_compile "$LINENO"; then : ++ echo conftest.$ac_objext > conftest.lst ++ lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -eq 0; then ++ # Ensure the archiver fails upon bogus file names. ++ rm -f conftest.$ac_objext libconftest.a ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 ++ (eval $lt_ar_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; } ++ if test "$ac_status" -ne 0; then ++ lt_cv_ar_at_file=@ ++ fi ++ fi ++ rm -f conftest.* libconftest.a + ++fi ++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 ++$as_echo "$lt_cv_ar_at_file" >&6; } + ++if test "x$lt_cv_ar_at_file" = xno; then ++ archiver_list_spec= ++else ++ archiver_list_spec=$lt_cv_ar_at_file ++fi + + + +@@ -5846,8 +6165,8 @@ esac + lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + + # Transform an extracted symbol line into symbol name and symbol address +-lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" ++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + + # Handle CRLF in mingw tool chain + opt_cr= +@@ -5883,6 +6202,7 @@ for ac_symprfx in "" "_"; do + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi ++ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no +@@ -5924,6 +6244,18 @@ _LT_EOF + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext ++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ ++#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) ++/* DATA imports from DLLs on WIN32 con't be const, because runtime ++ relocations are performed -- see ld's documentation on pseudo-relocs. */ ++# define LT_DLSYM_CONST ++#elif defined(__osf__) ++/* This system does not cope well with relocations in const data. */ ++# define LT_DLSYM_CONST ++#else ++# define LT_DLSYM_CONST const ++#endif ++ + #ifdef __cplusplus + extern "C" { + #endif +@@ -5935,7 +6267,7 @@ _LT_EOF + cat <<_LT_EOF >> conftest.$ac_ext + + /* The mapping between symbol names and symbols. */ +-const struct { ++LT_DLSYM_CONST struct { + const char *name; + void *address; + } +@@ -5961,8 +6293,8 @@ static const void *lt_preloaded_setup() { + _LT_EOF + # Now try linking the two files. 
+ mv conftest.$ac_objext conftstm.$ac_objext +- lt_save_LIBS="$LIBS" +- lt_save_CFLAGS="$CFLAGS" ++ lt_globsym_save_LIBS=$LIBS ++ lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 +@@ -5972,8 +6304,8 @@ _LT_EOF + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi +- LIBS="$lt_save_LIBS" +- CFLAGS="$lt_save_CFLAGS" ++ LIBS=$lt_globsym_save_LIBS ++ CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi +@@ -6010,6 +6342,19 @@ else + $as_echo "ok" >&6; } + fi + ++# Response file support. ++if test "$lt_cv_nm_interface" = "MS dumpbin"; then ++ nm_file_list_spec='@' ++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then ++ nm_file_list_spec='@' ++fi ++ ++ ++ ++ ++ ++ ++ + + + +@@ -6027,6 +6372,41 @@ fi + + + ++ ++ ++ ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 ++$as_echo_n "checking for sysroot... " >&6; } ++ ++# Check whether --with-libtool-sysroot was given. ++if test "${with_libtool_sysroot+set}" = set; then : ++ withval=$with_libtool_sysroot; ++else ++ with_libtool_sysroot=no ++fi ++ ++ ++lt_sysroot= ++case ${with_libtool_sysroot} in #( ++ yes) ++ if test "$GCC" = yes; then ++ lt_sysroot=`$CC --print-sysroot 2>/dev/null` ++ fi ++ ;; #( ++ /*) ++ lt_sysroot=`echo "$with_libtool_sysroot" | sed -e "$sed_quote_subst"` ++ ;; #( ++ no|'') ++ ;; #( ++ *) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_libtool_sysroot}" >&5 ++$as_echo "${with_libtool_sysroot}" >&6; } ++ as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ++ ;; ++esac ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 ++$as_echo "${lt_sysroot:-no}" >&6; } + + + +@@ -6241,6 +6621,123 @@ esac + + need_locks="$enable_libtool_lock" + ++if test -n "$ac_tool_prefix"; then ++ # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. ++set dummy ${ac_tool_prefix}mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... " >&6; } ++if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$MANIFEST_TOOL"; then ++ ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL ++if test -n "$MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 ++$as_echo "$MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ ++fi ++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ++ ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL ++ # Extract the first word of "mt", so it can be a program name with args. ++set dummy mt; ac_word=$2 ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 ++$as_echo_n "checking for $ac_word... 
" >&6; } ++if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test -n "$ac_ct_MANIFEST_TOOL"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. ++else ++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR ++for as_dir in $PATH ++do ++ IFS=$as_save_IFS ++ test -z "$as_dir" && as_dir=. ++ for ac_exec_ext in '' $ac_executable_extensions; do ++ if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ++ ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" ++ $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 ++ break 2 ++ fi ++done ++ done ++IFS=$as_save_IFS ++ ++fi ++fi ++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL ++if test -n "$ac_ct_MANIFEST_TOOL"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 ++$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } ++else ++ { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 ++$as_echo "no" >&6; } ++fi ++ ++ if test "x$ac_ct_MANIFEST_TOOL" = x; then ++ MANIFEST_TOOL=":" ++ else ++ case $cross_compiling:$ac_tool_warned in ++yes:) ++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 ++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ++ac_tool_warned=yes ;; ++esac ++ MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL ++ fi ++else ++ MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" ++fi ++ ++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 ++$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } ++if ${lt_cv_path_mainfest_tool+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_path_mainfest_tool=no ++ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 ++ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out ++ cat conftest.err >&5 ++ if $GREP 'Manifest Tool' conftest.out > /dev/null; then ++ lt_cv_path_mainfest_tool=yes ++ fi ++ rm -f conftest* ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 ++$as_echo "$lt_cv_path_mainfest_tool" >&6; } ++if test "x$lt_cv_path_mainfest_tool" != xyes; then ++ MANIFEST_TOOL=: ++fi ++ ++ ++ ++ ++ + + case $host_os in + rhapsody* | darwin*) +@@ -6807,6 +7304,8 @@ _LT_EOF + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 ++ echo "$RANLIB libconftest.a" >&5 ++ $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF + int main() { return 0;} + _LT_EOF +@@ -7269,7 +7768,8 @@ fi + LIBTOOL_DEPS="$ltmain" + + # Always use our own libtool. +-LIBTOOL='$(SHELL) $(top_builddir)/libtool' ++LIBTOOL='$(SHELL) $(top_builddir)' ++LIBTOOL="$LIBTOOL/${host_alias}-libtool" + + + +@@ -7358,7 +7858,7 @@ aix3*) + esac + + # Global variables: +-ofile=libtool ++ofile=${host_alias}-libtool + can_build_shared=yes + + # All known linkers require a `.a' archive for static linking (except MSVC, +@@ -7656,8 +8156,6 @@ fi + lt_prog_compiler_pic= + lt_prog_compiler_static= + +-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +-$as_echo_n "checking for $compiler option to produce PIC... " >&6; } + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' +@@ -7823,6 +8321,12 @@ $as_echo_n "checking for $compiler option to produce PIC... 
" >&6; } + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; ++ nagfor*) ++ # NAG Fortran compiler ++ lt_prog_compiler_wl='-Wl,-Wl,,' ++ lt_prog_compiler_pic='-PIC' ++ lt_prog_compiler_static='-Bstatic' ++ ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) +@@ -7885,7 +8389,7 @@ $as_echo_n "checking for $compiler option to produce PIC... " >&6; } + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in +- f77* | f90* | f95*) ++ f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; +@@ -7942,13 +8446,17 @@ case $host_os in + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; + esac +-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5 +-$as_echo "$lt_prog_compiler_pic" >&6; } +- +- +- +- + ++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 ++$as_echo_n "checking for $compiler option to produce PIC... " >&6; } ++if ${lt_cv_prog_compiler_pic+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ lt_cv_prog_compiler_pic=$lt_prog_compiler_pic ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 ++$as_echo "$lt_cv_prog_compiler_pic" >&6; } ++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + + # + # Check to make sure the PIC flag actually works. +@@ -8009,6 +8517,11 @@ fi + + + ++ ++ ++ ++ ++ + # + # Check to make sure the static flag actually works. + # +@@ -8359,7 +8872,8 @@ _LT_EOF + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes +- export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' ++ exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' +@@ -8458,12 +8972,12 @@ _LT_EOF + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec= + hardcode_libdir_flag_spec_ld='-rpath $libdir' +- archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib' ++ archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ +- $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' ++ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac +@@ -8477,8 +8991,8 @@ _LT_EOF + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else +- archive_cmds='$CC -shared $libobjs $deplibs 
$compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + +@@ -8496,8 +9010,8 @@ _LT_EOF + + _LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -8543,8 +9057,8 @@ _LT_EOF + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi +@@ -8674,7 +9188,13 @@ _LT_EOF + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. +- if test x$gcc_no_link = xyes; then ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test x$gcc_no_link = xyes; then + as_fn_error $? "Link tests are not allowed after GCC_NO_EXECUTABLES." "$LINENO" 5 + fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -8690,22 +9210,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. 
++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" +@@ -8717,7 +9244,13 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + else + # Determine the default libpath from the value encoded in an + # empty executable. +- if test x$gcc_no_link = xyes; then ++ if test "${lt_cv_aix_libpath+set}" = set; then ++ aix_libpath=$lt_cv_aix_libpath ++else ++ if ${lt_cv_aix_libpath_+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ if test x$gcc_no_link = xyes; then + as_fn_error $? "Link tests are not allowed after GCC_NO_EXECUTABLES." "$LINENO" 5 + fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +@@ -8733,22 +9266,29 @@ main () + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : + +-lt_aix_libpath_sed=' +- /Import File Strings/,/^$/ { +- /^0/ { +- s/^0 *\(.*\)$/\1/ +- p +- } +- }' +-aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-# Check for a 64-bit object if we didn't find anything. +-if test -z "$aix_libpath"; then +- aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` +-fi ++ lt_aix_libpath_sed=' ++ /Import File Strings/,/^$/ { ++ /^0/ { ++ s/^0 *\([^ ]*\) *$/\1/ ++ p ++ } ++ }' ++ lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ # Check for a 64-bit object if we didn't find anything. ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` ++ fi + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +-if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi ++ if test -z "$lt_cv_aix_libpath_"; then ++ lt_cv_aix_libpath_="/usr/lib:/lib" ++ fi ++ ++fi ++ ++ aix_libpath=$lt_cv_aix_libpath_ ++fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, +@@ -8793,20 +9333,63 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. +- hardcode_libdir_flag_spec=' ' +- allow_undefined_flag=unsupported +- # Tell ltmain to make .lib files, not .a files. +- libext=lib +- # Tell ltmain to make .dll files, not .so files. +- shrext_cmds=".dll" +- # FIXME: Setting linknames here is a bad hack. +- archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' +- # The linker will automatically build a .lib file if we build a DLL. +- old_archive_from_new_cmds='true' +- # FIXME: Should let the user specify the lib program. 
+- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' +- fix_srcfile_path='`cygpath -w "$srcfile"`' +- enable_shared_with_static_runtimes=yes ++ case $cc_basename in ++ cl*) ++ # Native MSVC ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ always_export_symbols=yes ++ file_list_spec='@' ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' ++ archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then ++ sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; ++ else ++ sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; ++ fi~ ++ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ ++ linknames=' ++ # The linker will not automatically build a static lib if we build a DLL. ++ # _LT_TAGVAR(old_archive_from_new_cmds, )='true' ++ enable_shared_with_static_runtimes=yes ++ export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' ++ # Don't use ranlib ++ old_postinstall_cmds='chmod 644 $oldlib' ++ postlink_cmds='lt_outputfile="@OUTPUT@"~ ++ lt_tool_outputfile="@TOOL_OUTPUT@"~ ++ case $lt_outputfile in ++ *.exe|*.EXE) ;; ++ *) ++ lt_outputfile="$lt_outputfile.exe" ++ lt_tool_outputfile="$lt_tool_outputfile.exe" ++ ;; ++ esac~ ++ if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then ++ $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; ++ $RM "$lt_outputfile.manifest"; ++ fi' ++ ;; ++ *) ++ # Assume MSVC wrapper ++ hardcode_libdir_flag_spec=' ' ++ allow_undefined_flag=unsupported ++ # Tell ltmain to make .lib files, not .a files. ++ libext=lib ++ # Tell ltmain to make .dll files, not .so files. ++ shrext_cmds=".dll" ++ # FIXME: Setting linknames here is a bad hack. ++ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' ++ # The linker will automatically build a .lib file if we build a DLL. ++ old_archive_from_new_cmds='true' ++ # FIXME: Should let the user specify the lib program. ++ old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' ++ enable_shared_with_static_runtimes=yes ++ ;; ++ esac + ;; + + darwin* | rhapsody*) +@@ -8867,7 +9450,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) +- archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no +@@ -8875,7 +9458,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux9*) + if test "$GCC" = yes; then +- archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' ++ archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi +@@ -8891,7 +9474,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi +@@ -8915,10 +9498,10 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) +- archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else +@@ -8997,26 +9580,39 @@ fi + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then +- archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" +- if test x$gcc_no_link = xyes; then ++ # This should be the same for all languages, so no per-tag cache variable. ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 ++$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... 
" >&6; } ++if ${lt_cv_irix_exported_symbol+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" ++ if test x$gcc_no_link = xyes; then + as_fn_error $? "Link tests are not allowed after GCC_NO_EXECUTABLES." "$LINENO" 5 + fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +-int foo(void) {} ++int foo (void) { return 0; } + _ACEOF + if ac_fn_c_try_link "$LINENO"; then : +- archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' +- ++ lt_cv_irix_exported_symbol=yes ++else ++ lt_cv_irix_exported_symbol=no + fi + rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +- LDFLAGS="$save_LDFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 ++$as_echo "$lt_cv_irix_exported_symbol" >&6; } ++ if test "$lt_cv_irix_exported_symbol" = yes; then ++ archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' ++ fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' +@@ -9101,7 +9697,7 @@ rm -f core conftest.err conftest.$ac_objext \ + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' +- archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' ++ archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' +@@ -9120,9 +9716,9 @@ rm -f core conftest.err conftest.$ac_objext \ + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' +- archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' ++ archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ +- $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ++ $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o 
$lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) +@@ -9698,8 +10294,9 @@ cygwin* | mingw* | pw32* | cegcc*) + need_version=no + need_lib_prefix=no + +- case $GCC,$host_os in +- yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*) ++ case $GCC,$cc_basename in ++ yes,*) ++ # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ +@@ -9732,13 +10329,71 @@ cygwin* | mingw* | pw32* | cegcc*) + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac ++ dynamic_linker='Win32 ld.exe' ++ ;; ++ ++ *,cl*) ++ # Native MSVC ++ libname_spec='$name' ++ soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' ++ library_names_spec='${libname}.dll.lib' ++ ++ case $build_os in ++ mingw*) ++ sys_lib_search_path_spec= ++ lt_save_ifs=$IFS ++ IFS=';' ++ for lt_path in $LIB ++ do ++ IFS=$lt_save_ifs ++ # Let DOS variable expansion print the short 8.3 style file name. ++ lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` ++ sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" ++ done ++ IFS=$lt_save_ifs ++ # Convert to MSYS style. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ++ ;; ++ cygwin*) ++ # Convert to unix form, then to dos form, then back to unix form ++ # but this time dos style (no spaces!) so that the unix form looks ++ # like /cygdrive/c/PROGRA~1:/cygdr... ++ sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` ++ sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` ++ sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ ;; ++ *) ++ sys_lib_search_path_spec="$LIB" ++ if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then ++ # It is most probably a Windows format PATH. ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` ++ else ++ sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ++ fi ++ # FIXME: find the short name or the path components, as spaces are ++ # common. (e.g. "Program Files" -> "PROGRA~1") ++ ;; ++ esac ++ ++ # DLL is installed to $(libdir)/../bin by postinstall_cmds ++ postinstall_cmds='base_file=`basename \${file}`~ ++ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ ++ dldir=$destdir/`dirname \$dlpath`~ ++ test -d \$dldir || mkdir -p \$dldir~ ++ $install_prog $dir/$dlname \$dldir/$dlname' ++ postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ ++ dlpath=$dir/\$dldll~ ++ $RM \$dlpath' ++ shlibpath_overrides_runpath=yes ++ dynamic_linker='Win32 link.exe' + ;; + + *) ++ # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' ++ dynamic_linker='Win32 ld.exe' + ;; + esac +- dynamic_linker='Win32 ld.exe' + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; +@@ -9830,7 +10485,7 @@ haiku*) + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes +- sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib' ++ sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +@@ -10644,7 +11299,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10647 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10688,10 +11343,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -10750,7 +11405,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10753 "configure" ++#line $LINENO "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10794,10 +11449,10 @@ else + /* When -fvisbility=hidden is used, assume the code has been annotated + correspondingly for the symbols needed. */ + #if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +-void fnord () __attribute__((visibility("default"))); ++int fnord () __attribute__((visibility("default"))); + #endif + +-void fnord () { int i=42; } ++int fnord () { return 42; } + int main () + { + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); +@@ -12267,13 +12922,20 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' + lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' + lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' + lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' ++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' + reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' + reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' + OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' + deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' + file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' ++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' ++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' ++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' ++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' + AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' + AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' ++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' + STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' + RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' + old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +@@ 
-12288,14 +12950,17 @@ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$de + lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' ++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' ++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' + objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' + MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +-lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' ++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' + lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' + lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' + need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' ++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' + DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' + NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' + LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +@@ -12328,12 +12993,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q + hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' + inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' + link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +-fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`' + always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' + export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' + exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' + include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' + prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' ++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' + file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' + variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' + need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +@@ -12388,8 +13053,13 @@ reload_flag \ + OBJDUMP \ + deplibs_check_method \ + file_magic_cmd \ ++file_magic_glob \ ++want_nocaseglob \ ++DLLTOOL \ ++sharedlib_from_linklib_cmd \ + AR \ + AR_FLAGS \ ++archiver_list_spec \ + STRIP \ + RANLIB \ + CC \ +@@ -12399,12 +13069,14 @@ lt_cv_sys_global_symbol_pipe \ + lt_cv_sys_global_symbol_to_cdecl \ + lt_cv_sys_global_symbol_to_c_name_address \ + lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ ++nm_file_list_spec \ + lt_prog_compiler_no_builtin_flag \ +-lt_prog_compiler_wl \ + lt_prog_compiler_pic \ ++lt_prog_compiler_wl \ + lt_prog_compiler_static \ + 
lt_cv_prog_compiler_c_o \ + need_locks \ ++MANIFEST_TOOL \ + DSYMUTIL \ + NMEDIT \ + LIPO \ +@@ -12420,7 +13092,6 @@ no_undefined_flag \ + hardcode_libdir_flag_spec \ + hardcode_libdir_flag_spec_ld \ + hardcode_libdir_separator \ +-fix_srcfile_path \ + exclude_expsyms \ + include_expsyms \ + file_list_spec \ +@@ -12456,6 +13127,7 @@ module_cmds \ + module_expsym_cmds \ + export_symbols_cmds \ + prelink_cmds \ ++postlink_cmds \ + postinstall_cmds \ + postuninstall_cmds \ + finish_cmds \ +@@ -13054,7 +13726,8 @@ $as_echo X"$file" | + # NOTE: Changes made to this file will be lost: look at ltmain.sh. + # + # Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +-# 2006, 2007, 2008, 2009 Free Software Foundation, Inc. ++# 2006, 2007, 2008, 2009, 2010 Free Software Foundation, ++# Inc. + # Written by Gordon Matzigkeit, 1996 + # + # This file is part of GNU Libtool. +@@ -13157,19 +13830,42 @@ SP2NL=$lt_lt_SP2NL + # turn newlines into spaces. + NL2SP=$lt_lt_NL2SP + ++# convert \$build file names to \$host format. ++to_host_file_cmd=$lt_cv_to_host_file_cmd ++ ++# convert \$build files to toolchain format. ++to_tool_file_cmd=$lt_cv_to_tool_file_cmd ++ + # An object symbol dumper. + OBJDUMP=$lt_OBJDUMP + + # Method to check whether dependent libraries are shared objects. + deplibs_check_method=$lt_deplibs_check_method + +-# Command to use when deplibs_check_method == "file_magic". ++# Command to use when deplibs_check_method = "file_magic". + file_magic_cmd=$lt_file_magic_cmd + ++# How to find potential files when deplibs_check_method = "file_magic". ++file_magic_glob=$lt_file_magic_glob ++ ++# Find potential files using nocaseglob when deplibs_check_method = "file_magic". ++want_nocaseglob=$lt_want_nocaseglob ++ ++# DLL creation program. ++DLLTOOL=$lt_DLLTOOL ++ ++# Command to associate shared and link libraries. ++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd ++ + # The archiver. + AR=$lt_AR ++ ++# Flags to create an archive. + AR_FLAGS=$lt_AR_FLAGS + ++# How to feed a file listing to the archiver. ++archiver_list_spec=$lt_archiver_list_spec ++ + # A symbol stripping program. + STRIP=$lt_STRIP + +@@ -13199,6 +13895,12 @@ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + # Transform the output of nm in a C name address pair when lib prefix is needed. + global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + ++# Specify filename containing input files for \$NM. ++nm_file_list_spec=$lt_nm_file_list_spec ++ ++# The root where to search for dependent libraries,and in which our libraries should be installed. ++lt_sysroot=$lt_sysroot ++ + # The name of the directory that contains temporary libtool files. + objdir=$objdir + +@@ -13208,6 +13910,9 @@ MAGIC_CMD=$MAGIC_CMD + # Must we lock files when doing compilation? + need_locks=$lt_need_locks + ++# Manifest tool. ++MANIFEST_TOOL=$lt_MANIFEST_TOOL ++ + # Tool to manipulate archived DWARF debug symbol files on Mac OS X. + DSYMUTIL=$lt_DSYMUTIL + +@@ -13322,12 +14027,12 @@ with_gcc=$GCC + # Compiler flag to turn off builtin functions. + no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +-# How to pass a linker flag through the compiler. +-wl=$lt_lt_prog_compiler_wl +- + # Additional compiler flags for building library objects. + pic_flag=$lt_lt_prog_compiler_pic + ++# How to pass a linker flag through the compiler. ++wl=$lt_lt_prog_compiler_wl ++ + # Compiler flag to prevent dynamic linking. 
+ link_static_flag=$lt_lt_prog_compiler_static + +@@ -13414,9 +14119,6 @@ inherit_rpath=$inherit_rpath + # Whether libtool must link a program against all its dependency libraries. + link_all_deplibs=$link_all_deplibs + +-# Fix the shell variable \$srcfile for the compiler. +-fix_srcfile_path=$lt_fix_srcfile_path +- + # Set to "yes" if exported symbols are required. + always_export_symbols=$always_export_symbols + +@@ -13432,6 +14134,9 @@ include_expsyms=$lt_include_expsyms + # Commands necessary for linking programs (against libraries) with templates. + prelink_cmds=$lt_prelink_cmds + ++# Commands necessary for finishing linking programs. ++postlink_cmds=$lt_postlink_cmds ++ + # Specify filename containing input files. + file_list_spec=$lt_file_list_spec + +@@ -13464,210 +14169,169 @@ ltmain="$ac_aux_dir/ltmain.sh" + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? +- sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- case $xsi_shell in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result="${1##*/}" +-} +- +-# func_dirname_and_basename file append nondir_replacement +-# perform func_basename and func_dirname in a single function +-# call: +-# dirname: Compute the dirname of FILE. If nonempty, +-# add APPEND to the result, otherwise set result +-# to NONDIR_REPLACEMENT. +-# value returned in "$func_dirname_result" +-# basename: Compute filename of FILE. +-# value retuned in "$func_basename_result" +-# Implementation must be kept synchronized with func_dirname +-# and func_basename. For efficiency, we do not delegate to +-# those functions but instead duplicate the functionality here. +-func_dirname_and_basename () +-{ +- case ${1} in +- */*) func_dirname_result="${1%/*}${2}" ;; +- * ) func_dirname_result="${3}" ;; +- esac +- func_basename_result="${1##*/}" +-} +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-func_stripname () +-{ +- # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are +- # positional parameters, so assign one to ordinary parameter first. +- func_stripname_result=${3} +- func_stripname_result=${func_stripname_result#"${1}"} +- func_stripname_result=${func_stripname_result%"${2}"} +-} +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=${1%%=*} +- func_opt_split_arg=${1#*=} +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- case ${1} in +- *.lo) func_lo2o_result=${1%.lo}.${objext} ;; +- *) func_lo2o_result=${1} ;; +- esac +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=${1%.*}.lo +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=$(( $* )) +-} +- +-# func_len string +-# STRING may not start with a hyphen. 
+-func_len () +-{ +- func_len_result=${#1} +-} +- +-_LT_EOF +- ;; +- *) # Bourne compatible functions. +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_dirname file append nondir_replacement +-# Compute the dirname of FILE. If nonempty, add APPEND to the result, +-# otherwise set result to NONDIR_REPLACEMENT. +-func_dirname () +-{ +- # Extract subdirectory from the argument. +- func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` +- if test "X$func_dirname_result" = "X${1}"; then +- func_dirname_result="${3}" +- else +- func_dirname_result="$func_dirname_result${2}" +- fi +-} +- +-# func_basename file +-func_basename () +-{ +- func_basename_result=`$ECHO "${1}" | $SED "$basename"` +-} +- +- +-# func_stripname prefix suffix name +-# strip PREFIX and SUFFIX off of NAME. +-# PREFIX and SUFFIX must not contain globbing or regex special +-# characters, hashes, percent signs, but SUFFIX may contain a leading +-# dot (in which case that matches only a dot). +-# func_strip_suffix prefix name +-func_stripname () +-{ +- case ${2} in +- .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; +- *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; +- esac +-} +- +-# sed scripts: +-my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q' +-my_sed_long_arg='1s/^-[^=]*=//' +- +-# func_opt_split +-func_opt_split () +-{ +- func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"` +- func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"` +-} +- +-# func_lo2o object +-func_lo2o () +-{ +- func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +-} +- +-# func_xform libobj-or-source +-func_xform () +-{ +- func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +-} +- +-# func_arith arithmetic-term... +-func_arith () +-{ +- func_arith_result=`expr "$@"` +-} +- +-# func_len string +-# STRING may not start with a hyphen. +-func_len () +-{ +- func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` +-} +- +-_LT_EOF +-esac +- +-case $lt_shell_append in +- yes) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1+=\$2" +-} +-_LT_EOF +- ;; +- *) +- cat << \_LT_EOF >> "$cfgfile" +- +-# func_append var value +-# Append VALUE to the end of shell variable VAR. +-func_append () +-{ +- eval "$1=\$$1\$2" +-} +- +-_LT_EOF +- ;; +- esac +- +- +- sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \ +- || (rm -f "$cfgfile"; exit 1) +- +- mv -f "$cfgfile" "$ofile" || ++ sed '$q' "$ltmain" >> "$cfgfile" \ ++ || (rm -f "$cfgfile"; exit 1) ++ ++ if test x"$xsi_shell" = xyes; then ++ sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ ++func_dirname ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_basename ()$/,/^} # func_basename /c\ ++func_basename ()\ ++{\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ ++func_dirname_and_basename ()\ ++{\ ++\ case ${1} in\ ++\ */*) func_dirname_result="${1%/*}${2}" ;;\ ++\ * ) func_dirname_result="${3}" ;;\ ++\ esac\ ++\ func_basename_result="${1##*/}"\ ++} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ ++func_stripname ()\ ++{\ ++\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ ++\ # positional parameters, so assign one to ordinary parameter first.\ ++\ func_stripname_result=${3}\ ++\ func_stripname_result=${func_stripname_result#"${1}"}\ ++\ func_stripname_result=${func_stripname_result%"${2}"}\ ++} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ ++func_split_long_opt ()\ ++{\ ++\ func_split_long_opt_name=${1%%=*}\ ++\ func_split_long_opt_arg=${1#*=}\ ++} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ ++func_split_short_opt ()\ ++{\ ++\ func_split_short_opt_arg=${1#??}\ ++\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ ++} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ ++func_lo2o ()\ ++{\ ++\ case ${1} in\ ++\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ ++\ *) func_lo2o_result=${1} ;;\ ++\ esac\ ++} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_xform ()$/,/^} # func_xform /c\ ++func_xform ()\ ++{\ ++ func_xform_result=${1%.*}.lo\ ++} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_arith ()$/,/^} # func_arith /c\ ++func_arith ()\ ++{\ ++ func_arith_result=$(( $* ))\ ++} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_len ()$/,/^} # func_len /c\ ++func_len ()\ ++{\ ++ func_len_result=${#1}\ ++} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? 
|| _lt_function_replace_fail=: ++ ++fi ++ ++if test x"$lt_shell_append" = xyes; then ++ sed -e '/^func_append ()$/,/^} # func_append /c\ ++func_append ()\ ++{\ ++ eval "${1}+=\\${2}"\ ++} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ ++func_append_quoted ()\ ++{\ ++\ func_quote_for_eval "${2}"\ ++\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ ++} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++test 0 -eq $? || _lt_function_replace_fail=: ++ ++ ++ # Save a `func_append' function call where possible by direct use of '+=' ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++else ++ # Save a `func_append' function call even when '+=' is not available ++ sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ ++ && mv -f "$cfgfile.tmp" "$cfgfile" \ ++ || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") ++ test 0 -eq $? || _lt_function_replace_fail=: ++fi ++ ++if test x"$_lt_function_replace_fail" = x":"; then ++ { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 ++$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} ++fi ++ ++ ++ mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0013-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch b/poky/meta/recipes-devtools/binutils/binutils/0013-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch deleted file mode 100644 index 463de8063..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0013-fix-the-incorrect-assembling-for-ppc-wait-mnemonic.patch +++ /dev/null @@ -1,33 +0,0 @@ -From ef4ad1cb9ff1b5a871ffa792a71b3ad6d14eb3dc Mon Sep 17 00:00:00 2001 -From: Zhenhua Luo -Date: Sat, 11 Jun 2016 22:08:29 -0500 -Subject: [PATCH] fix the incorrect assembling for ppc wait mnemonic - -Signed-off-by: Zhenhua Luo - -Upstream-Status: Pending ---- - opcodes/ppc-opc.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - -diff --git a/opcodes/ppc-opc.c b/opcodes/ppc-opc.c -index 7ef91d819b..145953d3c4 100644 ---- a/opcodes/ppc-opc.c -+++ b/opcodes/ppc-opc.c -@@ -5709,7 +5709,6 @@ const struct powerpc_opcode powerpc_opcodes[] = { - {"ldepx", X(31,29), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, - - {"waitasec", X(31,30), XRTRARB_MASK, POWER8, POWER9, {0}}, --{"wait", X(31,30), XWC_MASK, POWER9, 0, {WC}}, - - {"lwepx", X(31,31), X_MASK, E500MC|PPCA2, 0, {RT, RA0, RB}}, - -@@ -5763,7 +5762,7 @@ const struct powerpc_opcode powerpc_opcodes[] = { - - {"waitrsv", X(31,62)|(1<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, - {"waitimpl", X(31,62)|(2<<21), 0xffffffff, E500MC|PPCA2, 0, {0}}, --{"wait", X(31,62), XWC_MASK, E500MC|PPCA2, 0, {WC}}, -+{"wait", X(31,62), XWC_MASK, E500MC|PPCA2|POWER9, 0, {WC}}, - - {"dcbstep", XRT(31,63,0), XRT_MASK, E500MC|PPCA2, 0, {RA0, 
RB}}, - diff --git a/poky/meta/recipes-devtools/binutils/binutils/0014-Detect-64-bit-MIPS-targets.patch b/poky/meta/recipes-devtools/binutils/binutils/0014-Detect-64-bit-MIPS-targets.patch deleted file mode 100644 index 6acde1f78..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0014-Detect-64-bit-MIPS-targets.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 2f7f2389764ef1d699f6ad32057314024b7e84e7 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 31 Mar 2017 11:42:03 -0700 -Subject: [PATCH] Detect 64-bit MIPS targets - -Add mips64 target triplets and default to N64 - -Upstream-Status: Submitted -https://sourceware.org/ml/binutils/2016-08/msg00048.html - -Signed-off-by: Khem Raj ---- - gold/configure.tgt | 14 ++++++++++++++ - 1 file changed, 14 insertions(+) - -diff --git a/gold/configure.tgt b/gold/configure.tgt -index aa7ec552ae..470515062e 100644 ---- a/gold/configure.tgt -+++ b/gold/configure.tgt -@@ -153,6 +153,13 @@ aarch64*-*) - targ_big_endian=false - targ_extra_big_endian=true - ;; -+mips*64*el*-*-*|mips*64*le*-*-*) -+ targ_obj=mips -+ targ_machine=EM_MIPS_RS3_LE -+ targ_size=64 -+ targ_big_endian=false -+ targ_extra_big_endian=true -+ ;; - mips*el*-*-*|mips*le*-*-*) - targ_obj=mips - targ_machine=EM_MIPS_RS3_LE -@@ -160,6 +167,13 @@ mips*el*-*-*|mips*le*-*-*) - targ_big_endian=false - targ_extra_big_endian=true - ;; -+mips*64*-*-*) -+ targ_obj=mips -+ targ_machine=EM_MIPS -+ targ_size=64 -+ targ_big_endian=true -+ targ_extra_big_endian=false -+ ;; - mips*-*-*) - targ_obj=mips - targ_machine=EM_MIPS diff --git a/poky/meta/recipes-devtools/binutils/binutils/0014-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch b/poky/meta/recipes-devtools/binutils/binutils/0014-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch new file mode 100644 index 000000000..69a238378 --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils/0014-Fix-rpath-in-libtool-when-sysroot-is-enabled.patch @@ -0,0 +1,52 @@ +From f791a5d84475c02356f16679b7f4ee9c9c3408aa Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 2 Mar 2015 01:42:38 +0000 +Subject: [PATCH 14/17] Fix rpath in libtool when sysroot is enabled + +Enabling sysroot support in libtool exposed a bug where the final +library had an RPATH encoded into it which still pointed to the +sysroot. This works around the issue until it gets sorted out +upstream. 
+ +Fix suggested by Richard Purdie + +Upstream-Status: Inappropriate [embedded specific] + +Signed-off-by: Scott Garman +Signed-off-by: Khem Raj +--- + ltmain.sh | 10 ++++++++-- + 1 file changed, 8 insertions(+), 2 deletions(-) + +diff --git a/ltmain.sh b/ltmain.sh +index 70e856e0659..11ee684cccf 100644 +--- a/ltmain.sh ++++ b/ltmain.sh +@@ -8035,9 +8035,11 @@ EOF + test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then ++ func_replace_sysroot "$libdir" ++ libdir=$func_replace_sysroot_result ++ func_stripname '=' '' "$libdir" ++ libdir=$func_stripname_result + if test -n "$hardcode_libdir_separator"; then +- func_replace_sysroot "$libdir" +- libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else +@@ -8770,6 +8772,10 @@ EOF + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then ++ func_replace_sysroot "$libdir" ++ libdir=$func_replace_sysroot_result ++ func_stripname '=' '' "$libdir" ++ libdir=$func_stripname_result + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0015-sync-with-OE-libtool-changes.patch b/poky/meta/recipes-devtools/binutils/binutils/0015-sync-with-OE-libtool-changes.patch index a794719bd..e848f146b 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0015-sync-with-OE-libtool-changes.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0015-sync-with-OE-libtool-changes.patch @@ -1,7 +1,7 @@ -From 392d474a72d37b669f53ab9f0fa913b958af93f6 Mon Sep 17 00:00:00 2001 +From e087f96e219d47c4d2244c3f32397e56d41bfdec Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Mon, 6 Mar 2017 23:33:27 -0800 -Subject: [PATCH] sync with OE libtool changes +Subject: [PATCH 15/17] sync with OE libtool changes Apply these patches from our libtool patches as not only are redundant RPATHs a waste of space but they can cause incorrect linking when native packages are @@ -26,7 +26,7 @@ Signed-off-by: Khem Raj 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/ltmain.sh b/ltmain.sh -index 11ee684ccc..3b19ac1532 100644 +index 11ee684cccf..3b19ac15328 100644 --- a/ltmain.sh +++ b/ltmain.sh @@ -8053,8 +8053,16 @@ EOF @@ -84,3 +84,6 @@ index 11ee684ccc..3b19ac1532 100644 fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/binutils/binutils/0016-Check-for-clang-before-checking-gcc-version.patch b/poky/meta/recipes-devtools/binutils/binutils/0016-Check-for-clang-before-checking-gcc-version.patch index c694b42dc..400b79823 100644 --- a/poky/meta/recipes-devtools/binutils/binutils/0016-Check-for-clang-before-checking-gcc-version.patch +++ b/poky/meta/recipes-devtools/binutils/binutils/0016-Check-for-clang-before-checking-gcc-version.patch @@ -1,7 +1,7 @@ -From 67590a44c1256491fa674426f0170d5d05377d05 Mon Sep 17 00:00:00 2001 +From ae5a7d622dc9addb2ca9fc85889c45964c025a9c Mon Sep 17 00:00:00 2001 From: Khem Raj Date: Wed, 15 Apr 2020 14:17:20 -0700 -Subject: [PATCH 16/16] Check for clang before checking gcc version +Subject: [PATCH 16/17] Check for clang before checking gcc version Clang advertises itself to be gcc 4.2.1, so when compiling this test here fails since gcc < 4.4.5 did not support -static-libstdc++ but thats @@ -18,10 +18,10 @@ Signed-off-by: Khem Raj 2 files changed, 2 
insertions(+), 2 deletions(-) diff --git a/configure b/configure -index 590b03c2da0..46f116fdb54 100755 +index 6782f8b6ab8..72f5766b363 100755 --- a/configure +++ b/configure -@@ -5140,7 +5140,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +@@ -5143,7 +5143,7 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -31,10 +31,10 @@ index 590b03c2da0..46f116fdb54 100755 #endif int main() {} diff --git a/configure.ac b/configure.ac -index d3f85e6f5d5..c0eb1343121 100644 +index 55beb1dea46..3f9e613e2d9 100644 --- a/configure.ac +++ b/configure.ac -@@ -1309,7 +1309,7 @@ if test "$GCC" = yes; then +@@ -1312,7 +1312,7 @@ if test "$GCC" = yes; then AC_MSG_CHECKING([whether g++ accepts -static-libstdc++ -static-libgcc]) AC_LANG_PUSH(C++) AC_LINK_IFELSE([AC_LANG_SOURCE([ @@ -44,5 +44,5 @@ index d3f85e6f5d5..c0eb1343121 100644 #endif int main() {}])], -- -2.26.1 +2.28.0 diff --git a/poky/meta/recipes-devtools/binutils/binutils/0017-binutils-drop-redundant-program_name-definition-fno-.patch b/poky/meta/recipes-devtools/binutils/binutils/0017-binutils-drop-redundant-program_name-definition-fno-.patch deleted file mode 100644 index be59f9dca..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/0017-binutils-drop-redundant-program_name-definition-fno-.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 57a3e055605b28a9449b1b27eda7125737c42b00 Mon Sep 17 00:00:00 2001 -From: Sergei Trofimovich -Date: Sat, 1 Feb 2020 23:16:11 +0000 -Subject: [PATCH] binutils: drop redundant 'program_name' definition - (-fno-common) - - * coffdump.c (program_name): Drop redundant definition. - * srconv.c (program_name): Likewise - * sysdump.c (program_name): Likewise - -Upstream-Status: Backport (commit 0b398d69ac) -Signed-off-by: Sergei Trofimovich -Signed-off-by: Richard Leitner ---- - binutils/coffdump.c | 2 -- - binutils/srconv.c | 2 -- - binutils/sysdump.c | 2 -- - 3 files changed, 6 deletions(-) - -diff --git a/binutils/coffdump.c b/binutils/coffdump.c -index 531a4e46c3..336da57ca2 100644 ---- a/binutils/coffdump.c -+++ b/binutils/coffdump.c -@@ -456,8 +456,6 @@ coff_dump (struct coff_ofile *ptr) - dump_coff_section (ptr->sections + i); - } - --char * program_name; -- - static void - show_usage (FILE *file, int status) - { -diff --git a/binutils/srconv.c b/binutils/srconv.c -index 5742b16759..f071794f0a 100644 ---- a/binutils/srconv.c -+++ b/binutils/srconv.c -@@ -1687,8 +1687,6 @@ prescan (struct coff_ofile *otree) - } - } - --char *program_name; -- - ATTRIBUTE_NORETURN static void - show_usage (FILE *ffile, int status) - { -diff --git a/binutils/sysdump.c b/binutils/sysdump.c -index d433e71ed9..7eebbd61d3 100644 ---- a/binutils/sysdump.c -+++ b/binutils/sysdump.c -@@ -633,8 +633,6 @@ module (void) - } - } - --char *program_name; -- - ATTRIBUTE_NORETURN static void - show_usage (FILE *ffile, int status) - { --- -2.26.2 - diff --git a/poky/meta/recipes-devtools/binutils/binutils/CVE-2020-0551.patch b/poky/meta/recipes-devtools/binutils/binutils/CVE-2020-0551.patch deleted file mode 100644 index 53e3caf44..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils/CVE-2020-0551.patch +++ /dev/null @@ -1,549 +0,0 @@ -From ae531041c7c5956672342f89c486a011c84f027f Mon Sep 17 00:00:00 2001 -From: "H.J. 
Lu" -Date: Wed, 11 Mar 2020 09:46:19 -0700 -Subject: [PATCH 1/1] i386: Generate lfence with load/indirect branch/ret - [CVE-2020-0551] - -Add 3 command-line options to generate lfence for load, indirect near -branch and ret to help mitigate: - -https://www.intel.com/content/www/us/en/security-center/advisory/intel-sa-00334.html -http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-0551 - -1. -mlfence-after-load=[no|yes]: - -mlfence-after-load=yes generates lfence after load instructions. -2. -mlfence-before-indirect-branch=[none|all|memory|register]: - a. -mlfence-before-indirect-branch=all generates lfence before indirect - near branches via register and a warning before indirect near branches - via memory. - b. -mlfence-before-indirect-branch=memory issue a warning before - indirect near branches via memory. - c. -mlfence-before-indirect-branch=register generates lfence before - indirect near branches via register. -Note that lfence won't be generated before indirect near branches via -register with -mlfence-after-load=yes since lfence will be generated -after loading branch target register. -3. -mlfence-before-ret=[none|or|not] - a. -mlfence-before-ret=or generates or with lfence before ret. - b. -mlfence-before-ret=not generates not with lfence before ret. - -A warning will be issued and lfence won't be generated before indirect -near branch and ret if the previous item is a prefix or a constant -directive, which may be used to hardcode an instruction, since there -is no clear instruction boundary. - - * config/tc-i386.c (lfence_after_load): New. - (lfence_before_indirect_branch_kind): New. - (lfence_before_indirect_branch): New. - (lfence_before_ret_kind): New. - (lfence_before_ret): New. - (last_insn): New. - (load_insn_p): New. - (insert_lfence_after): New. - (insert_lfence_before): New. - (md_assemble): Call insert_lfence_before and insert_lfence_after. - Set last_insn. - (OPTION_MLFENCE_AFTER_LOAD): New. - (OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH): New. - (OPTION_MLFENCE_BEFORE_RET): New. - (md_longopts): Add -mlfence-after-load=, - -mlfence-before-indirect-branch= and -mlfence-before-ret=. - (md_parse_option): Handle -mlfence-after-load=, - -mlfence-before-indirect-branch= and -mlfence-before-ret=. - (md_show_usage): Display -mlfence-after-load=, - -mlfence-before-indirect-branch= and -mlfence-before-ret=. - (i386_cons_align): New. - * config/tc-i386.h (i386_cons_align): New. - (md_cons_align): New. - * doc/c-i386.texi: Document -mlfence-after-load=, - -mlfence-before-indirect-branch= and -mlfence-before-ret=. - -Signed-off-by: Anuj Mittal -Upstream-Status: Backport [https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=ae531041c7c5956672342f89c486a011c84f027f] -CVE: CVE-2020-0551 ---- -diff --git a/gas/config/tc-i386.c b/gas/config/tc-i386.c -index b020f39c863..09063f784b7 100644 ---- a/gas/config/tc-i386.c -+++ b/gas/config/tc-i386.c -@@ -629,7 +629,29 @@ static int omit_lock_prefix = 0; - "lock addl $0, (%{re}sp)". */ - static int avoid_fence = 0; - --/* Type of the previous instruction. */ -+/* 1 if lfence should be inserted after every load. */ -+static int lfence_after_load = 0; -+ -+/* Non-zero if lfence should be inserted before indirect branch. */ -+static enum lfence_before_indirect_branch_kind -+ { -+ lfence_branch_none = 0, -+ lfence_branch_register, -+ lfence_branch_memory, -+ lfence_branch_all -+ } -+lfence_before_indirect_branch; -+ -+/* Non-zero if lfence should be inserted before ret. 
*/ -+static enum lfence_before_ret_kind -+ { -+ lfence_before_ret_none = 0, -+ lfence_before_ret_not, -+ lfence_before_ret_or -+ } -+lfence_before_ret; -+ -+/* Types of previous instruction is .byte or prefix. */ - static struct - { - segT seg; -@@ -4311,6 +4333,283 @@ optimize_encoding (void) - } - } - -+/* Return non-zero for load instruction. */ -+ -+static int -+load_insn_p (void) -+{ -+ unsigned int dest; -+ int any_vex_p = is_any_vex_encoding (&i.tm); -+ unsigned int base_opcode = i.tm.base_opcode | 1; -+ -+ if (!any_vex_p) -+ { -+ /* lea */ -+ if (i.tm.base_opcode == 0x8d) -+ return 0; -+ -+ /* pop */ -+ if ((i.tm.base_opcode & ~7) == 0x58 -+ || (i.tm.base_opcode == 0x8f && i.tm.extension_opcode == 0)) -+ return 1; -+ -+ /* movs, cmps, lods, scas. */ -+ if ((i.tm.base_opcode | 0xb) == 0xaf) -+ return 1; -+ -+ /* outs */ -+ if (base_opcode == 0x6f) -+ return 1; -+ } -+ -+ /* No memory operand. */ -+ if (!i.mem_operands) -+ return 0; -+ -+ if (any_vex_p) -+ { -+ /* vldmxcsr. */ -+ if (i.tm.base_opcode == 0xae -+ && i.tm.opcode_modifier.vex -+ && i.tm.opcode_modifier.vexopcode == VEX0F -+ && i.tm.extension_opcode == 2) -+ return 1; -+ } -+ else -+ { -+ /* test, not, neg, mul, imul, div, idiv. */ -+ if ((i.tm.base_opcode == 0xf6 || i.tm.base_opcode == 0xf7) -+ && i.tm.extension_opcode != 1) -+ return 1; -+ -+ /* inc, dec. */ -+ if (base_opcode == 0xff && i.tm.extension_opcode <= 1) -+ return 1; -+ -+ /* add, or, adc, sbb, and, sub, xor, cmp. */ -+ if (i.tm.base_opcode >= 0x80 && i.tm.base_opcode <= 0x83) -+ return 1; -+ -+ /* bt, bts, btr, btc. */ -+ if (i.tm.base_opcode == 0xfba -+ && (i.tm.extension_opcode >= 4 && i.tm.extension_opcode <= 7)) -+ return 1; -+ -+ /* rol, ror, rcl, rcr, shl/sal, shr, sar. */ -+ if ((base_opcode == 0xc1 -+ || (i.tm.base_opcode >= 0xd0 && i.tm.base_opcode <= 0xd3)) -+ && i.tm.extension_opcode != 6) -+ return 1; -+ -+ /* cmpxchg8b, cmpxchg16b, xrstors. */ -+ if (i.tm.base_opcode == 0xfc7 -+ && (i.tm.extension_opcode == 1 || i.tm.extension_opcode == 3)) -+ return 1; -+ -+ /* fxrstor, ldmxcsr, xrstor. */ -+ if (i.tm.base_opcode == 0xfae -+ && (i.tm.extension_opcode == 1 -+ || i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 5)) -+ return 1; -+ -+ /* lgdt, lidt, lmsw. */ -+ if (i.tm.base_opcode == 0xf01 -+ && (i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 3 -+ || i.tm.extension_opcode == 6)) -+ return 1; -+ -+ /* vmptrld */ -+ if (i.tm.base_opcode == 0xfc7 -+ && i.tm.extension_opcode == 6) -+ return 1; -+ -+ /* Check for x87 instructions. */ -+ if (i.tm.base_opcode >= 0xd8 && i.tm.base_opcode <= 0xdf) -+ { -+ /* Skip fst, fstp, fstenv, fstcw. */ -+ if (i.tm.base_opcode == 0xd9 -+ && (i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 3 -+ || i.tm.extension_opcode == 6 -+ || i.tm.extension_opcode == 7)) -+ return 0; -+ -+ /* Skip fisttp, fist, fistp, fstp. */ -+ if (i.tm.base_opcode == 0xdb -+ && (i.tm.extension_opcode == 1 -+ || i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 3 -+ || i.tm.extension_opcode == 7)) -+ return 0; -+ -+ /* Skip fisttp, fst, fstp, fsave, fstsw. */ -+ if (i.tm.base_opcode == 0xdd -+ && (i.tm.extension_opcode == 1 -+ || i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 3 -+ || i.tm.extension_opcode == 6 -+ || i.tm.extension_opcode == 7)) -+ return 0; -+ -+ /* Skip fisttp, fist, fistp, fbstp, fistp. 
*/ -+ if (i.tm.base_opcode == 0xdf -+ && (i.tm.extension_opcode == 1 -+ || i.tm.extension_opcode == 2 -+ || i.tm.extension_opcode == 3 -+ || i.tm.extension_opcode == 6 -+ || i.tm.extension_opcode == 7)) -+ return 0; -+ -+ return 1; -+ } -+ } -+ -+ dest = i.operands - 1; -+ -+ /* Check fake imm8 operand and 3 source operands. */ -+ if ((i.tm.opcode_modifier.immext -+ || i.tm.opcode_modifier.vexsources == VEX3SOURCES) -+ && i.types[dest].bitfield.imm8) -+ dest--; -+ -+ /* add, or, adc, sbb, and, sub, xor, cmp, test, xchg, xadd */ -+ if (!any_vex_p -+ && (base_opcode == 0x1 -+ || base_opcode == 0x9 -+ || base_opcode == 0x11 -+ || base_opcode == 0x19 -+ || base_opcode == 0x21 -+ || base_opcode == 0x29 -+ || base_opcode == 0x31 -+ || base_opcode == 0x39 -+ || (i.tm.base_opcode >= 0x84 && i.tm.base_opcode <= 0x87) -+ || base_opcode == 0xfc1)) -+ return 1; -+ -+ /* Check for load instruction. */ -+ return (i.types[dest].bitfield.class != ClassNone -+ || i.types[dest].bitfield.instance == Accum); -+} -+ -+/* Output lfence, 0xfaee8, after instruction. */ -+ -+static void -+insert_lfence_after (void) -+{ -+ if (lfence_after_load && load_insn_p ()) -+ { -+ char *p = frag_more (3); -+ *p++ = 0xf; -+ *p++ = 0xae; -+ *p = 0xe8; -+ } -+} -+ -+/* Output lfence, 0xfaee8, before instruction. */ -+ -+static void -+insert_lfence_before (void) -+{ -+ char *p; -+ -+ if (is_any_vex_encoding (&i.tm)) -+ return; -+ -+ if (i.tm.base_opcode == 0xff -+ && (i.tm.extension_opcode == 2 || i.tm.extension_opcode == 4)) -+ { -+ /* Insert lfence before indirect branch if needed. */ -+ -+ if (lfence_before_indirect_branch == lfence_branch_none) -+ return; -+ -+ if (i.operands != 1) -+ abort (); -+ -+ if (i.reg_operands == 1) -+ { -+ /* Indirect branch via register. Don't insert lfence with -+ -mlfence-after-load=yes. */ -+ if (lfence_after_load -+ || lfence_before_indirect_branch == lfence_branch_memory) -+ return; -+ } -+ else if (i.mem_operands == 1 -+ && lfence_before_indirect_branch != lfence_branch_register) -+ { -+ as_warn (_("indirect `%s` with memory operand should be avoided"), -+ i.tm.name); -+ return; -+ } -+ else -+ return; -+ -+ if (last_insn.kind != last_insn_other -+ && last_insn.seg == now_seg) -+ { -+ as_warn_where (last_insn.file, last_insn.line, -+ _("`%s` skips -mlfence-before-indirect-branch on `%s`"), -+ last_insn.name, i.tm.name); -+ return; -+ } -+ -+ p = frag_more (3); -+ *p++ = 0xf; -+ *p++ = 0xae; -+ *p = 0xe8; -+ return; -+ } -+ -+ /* Output or/not and lfence before ret. */ -+ if (lfence_before_ret != lfence_before_ret_none -+ && (i.tm.base_opcode == 0xc2 -+ || i.tm.base_opcode == 0xc3 -+ || i.tm.base_opcode == 0xca -+ || i.tm.base_opcode == 0xcb)) -+ { -+ if (last_insn.kind != last_insn_other -+ && last_insn.seg == now_seg) -+ { -+ as_warn_where (last_insn.file, last_insn.line, -+ _("`%s` skips -mlfence-before-ret on `%s`"), -+ last_insn.name, i.tm.name); -+ return; -+ } -+ if (lfence_before_ret == lfence_before_ret_or) -+ { -+ /* orl: 0x830c2400. */ -+ p = frag_more ((flag_code == CODE_64BIT ? 1 : 0) + 4 + 3); -+ if (flag_code == CODE_64BIT) -+ *p++ = 0x48; -+ *p++ = 0x83; -+ *p++ = 0xc; -+ *p++ = 0x24; -+ *p++ = 0x0; -+ } -+ else -+ { -+ p = frag_more ((flag_code == CODE_64BIT ? 2 : 0) + 6 + 3); -+ /* notl: 0xf71424. */ -+ if (flag_code == CODE_64BIT) -+ *p++ = 0x48; -+ *p++ = 0xf7; -+ *p++ = 0x14; -+ *p++ = 0x24; -+ /* notl: 0xf71424. 
*/ -+ if (flag_code == CODE_64BIT) -+ *p++ = 0x48; -+ *p++ = 0xf7; -+ *p++ = 0x14; -+ *p++ = 0x24; -+ } -+ *p++ = 0xf; -+ *p++ = 0xae; -+ *p = 0xe8; -+ } -+} -+ - /* This is the guts of the machine-dependent assembler. LINE points to a - machine dependent instruction. This function is supposed to emit - the frags/bytes it assembles to. */ -@@ -4628,9 +4927,13 @@ md_assemble (char *line) - if (i.rex != 0) - add_prefix (REX_OPCODE | i.rex); - -+ insert_lfence_before (); -+ - /* We are ready to output the insn. */ - output_insn (); - -+ insert_lfence_after (); -+ - last_insn.seg = now_seg; - - if (i.tm.opcode_modifier.isprefix) -@@ -12250,6 +12553,9 @@ const char *md_shortopts = "qnO::"; - #define OPTION_MALIGN_BRANCH_PREFIX_SIZE (OPTION_MD_BASE + 28) - #define OPTION_MALIGN_BRANCH (OPTION_MD_BASE + 29) - #define OPTION_MBRANCHES_WITH_32B_BOUNDARIES (OPTION_MD_BASE + 30) -+#define OPTION_MLFENCE_AFTER_LOAD (OPTION_MD_BASE + 31) -+#define OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH (OPTION_MD_BASE + 32) -+#define OPTION_MLFENCE_BEFORE_RET (OPTION_MD_BASE + 33) - - struct option md_longopts[] = - { -@@ -12289,6 +12595,10 @@ struct option md_longopts[] = - {"malign-branch-prefix-size", required_argument, NULL, OPTION_MALIGN_BRANCH_PREFIX_SIZE}, - {"malign-branch", required_argument, NULL, OPTION_MALIGN_BRANCH}, - {"mbranches-within-32B-boundaries", no_argument, NULL, OPTION_MBRANCHES_WITH_32B_BOUNDARIES}, -+ {"mlfence-after-load", required_argument, NULL, OPTION_MLFENCE_AFTER_LOAD}, -+ {"mlfence-before-indirect-branch", required_argument, NULL, -+ OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH}, -+ {"mlfence-before-ret", required_argument, NULL, OPTION_MLFENCE_BEFORE_RET}, - {"mamd64", no_argument, NULL, OPTION_MAMD64}, - {"mintel64", no_argument, NULL, OPTION_MINTEL64}, - {NULL, no_argument, NULL, 0} -@@ -12668,6 +12978,41 @@ md_parse_option (int c, const char *arg) - as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg); - break; - -+ case OPTION_MLFENCE_AFTER_LOAD: -+ if (strcasecmp (arg, "yes") == 0) -+ lfence_after_load = 1; -+ else if (strcasecmp (arg, "no") == 0) -+ lfence_after_load = 0; -+ else -+ as_fatal (_("invalid -mlfence-after-load= option: `%s'"), arg); -+ break; -+ -+ case OPTION_MLFENCE_BEFORE_INDIRECT_BRANCH: -+ if (strcasecmp (arg, "all") == 0) -+ lfence_before_indirect_branch = lfence_branch_all; -+ else if (strcasecmp (arg, "memory") == 0) -+ lfence_before_indirect_branch = lfence_branch_memory; -+ else if (strcasecmp (arg, "register") == 0) -+ lfence_before_indirect_branch = lfence_branch_register; -+ else if (strcasecmp (arg, "none") == 0) -+ lfence_before_indirect_branch = lfence_branch_none; -+ else -+ as_fatal (_("invalid -mlfence-before-indirect-branch= option: `%s'"), -+ arg); -+ break; -+ -+ case OPTION_MLFENCE_BEFORE_RET: -+ if (strcasecmp (arg, "or") == 0) -+ lfence_before_ret = lfence_before_ret_or; -+ else if (strcasecmp (arg, "not") == 0) -+ lfence_before_ret = lfence_before_ret_not; -+ else if (strcasecmp (arg, "none") == 0) -+ lfence_before_ret = lfence_before_ret_none; -+ else -+ as_fatal (_("invalid -mlfence-before-ret= option: `%s'"), -+ arg); -+ break; -+ - case OPTION_MRELAX_RELOCATIONS: - if (strcasecmp (arg, "yes") == 0) - generate_relax_relocations = 1; -@@ -13025,6 +13370,15 @@ md_show_usage (FILE *stream) - -mbranches-within-32B-boundaries\n\ - align branches within 32 byte boundary\n")); - fprintf (stream, _("\ -+ -mlfence-after-load=[no|yes] (default: no)\n\ -+ generate lfence after load\n")); -+ fprintf (stream, _("\ -+ 
-mlfence-before-indirect-branch=[none|all|register|memory] (default: none)\n\ -+ generate lfence before indirect near branch\n")); -+ fprintf (stream, _("\ -+ -mlfence-before-ret=[none|or|not] (default: none)\n\ -+ generate lfence before ret\n")); -+ fprintf (stream, _("\ - -mamd64 accept only AMD64 ISA [default]\n")); - fprintf (stream, _("\ - -mintel64 accept only Intel64 ISA\n")); -@@ -13254,6 +13608,16 @@ i386_cons_align (int ignore ATTRIBUTE_UNUSED) - last_insn.kind = last_insn_directive; - last_insn.name = "constant directive"; - last_insn.file = as_where (&last_insn.line); -+ if (lfence_before_ret != lfence_before_ret_none) -+ { -+ if (lfence_before_indirect_branch != lfence_branch_none) -+ as_warn (_("constant directive skips -mlfence-before-ret " -+ "and -mlfence-before-indirect-branch")); -+ else -+ as_warn (_("constant directive skips -mlfence-before-ret")); -+ } -+ else if (lfence_before_indirect_branch != lfence_branch_none) -+ as_warn (_("constant directive skips -mlfence-before-indirect-branch")); - } - } - -diff --git a/gas/doc/c-i386.texi b/gas/doc/c-i386.texi -index c536759cb38..1dd99f91bb0 100644 ---- a/gas/doc/c-i386.texi -+++ b/gas/doc/c-i386.texi -@@ -464,6 +464,49 @@ on an instruction. It is equivalent to - @option{-malign-branch-prefix-size=5}. - The default doesn't align branches. - -+@cindex @samp{-mlfence-after-load=} option, i386 -+@cindex @samp{-mlfence-after-load=} option, x86-64 -+@item -mlfence-after-load=@var{no} -+@itemx -mlfence-after-load=@var{yes} -+These options control whether the assembler should generate lfence -+after load instructions. @option{-mlfence-after-load=@var{yes}} will -+generate lfence. @option{-mlfence-after-load=@var{no}} will not generate -+lfence, which is the default. -+ -+@cindex @samp{-mlfence-before-indirect-branch=} option, i386 -+@cindex @samp{-mlfence-before-indirect-branch=} option, x86-64 -+@item -mlfence-before-indirect-branch=@var{none} -+@item -mlfence-before-indirect-branch=@var{all} -+@item -mlfence-before-indirect-branch=@var{register} -+@itemx -mlfence-before-indirect-branch=@var{memory} -+These options control whether the assembler should generate lfence -+after indirect near branch instructions. -+@option{-mlfence-before-indirect-branch=@var{all}} will generate lfence -+after indirect near branch via register and issue a warning before -+indirect near branch via memory. -+@option{-mlfence-before-indirect-branch=@var{register}} will generate -+lfence after indirect near branch via register. -+@option{-mlfence-before-indirect-branch=@var{memory}} will issue a -+warning before indirect near branch via memory. -+@option{-mlfence-before-indirect-branch=@var{none}} will not generate -+lfence nor issue warning, which is the default. Note that lfence won't -+be generated before indirect near branch via register with -+@option{-mlfence-after-load=@var{yes}} since lfence will be generated -+after loading branch target register. -+ -+@cindex @samp{-mlfence-before-ret=} option, i386 -+@cindex @samp{-mlfence-before-ret=} option, x86-64 -+@item -mlfence-before-ret=@var{none} -+@item -mlfence-before-ret=@var{or} -+@itemx -mlfence-before-ret=@var{not} -+These options control whether the assembler should generate lfence -+before ret. @option{-mlfence-before-ret=@var{or}} will generate -+generate or instruction with lfence. -+@option{-mlfence-before-ret=@var{not}} will generate not instruction -+with lfence. -+@option{-mlfence-before-ret=@var{none}} will not generate lfence, -+which is the default. 
-+ - @cindex @samp{-mx86-used-note=} option, i386 - @cindex @samp{-mx86-used-note=} option, x86-64 - @item -mx86-used-note=@var{no} --- -2.18.2 diff --git a/poky/meta/recipes-devtools/binutils/binutils_2.34.bb b/poky/meta/recipes-devtools/binutils/binutils_2.34.bb deleted file mode 100644 index 2e645e1ed..000000000 --- a/poky/meta/recipes-devtools/binutils/binutils_2.34.bb +++ /dev/null @@ -1,65 +0,0 @@ -require binutils.inc -require binutils-${PV}.inc - -DEPENDS += "flex bison zlib" - -EXTRA_OECONF += "--with-sysroot=/ \ - --enable-install-libbfd \ - --enable-install-libiberty \ - --enable-shared \ - --with-system-zlib \ - " - -EXTRA_OEMAKE_append_libc-musl = "\ - gt_cv_func_gnugettext1_libc=yes \ - gt_cv_func_gnugettext2_libc=yes \ - " -EXTRA_OECONF_class-native = "--enable-targets=all \ - --enable-64-bit-bfd \ - --enable-install-libiberty \ - --enable-install-libbfd \ - --disable-gdb \ - --disable-gdbserver \ - --disable-libdecnumber \ - --disable-readline \ - --disable-sim \ - --disable-werror" - -# gcc9.0 end up mis-compiling libbfd.so with O2 which then crashes on target -# So remove -O2 and use -Os as workaround -SELECTED_OPTIMIZATION_remove_mipsarch = "-O2" -SELECTED_OPTIMIZATION_append_mipsarch = " -Os" - -do_install_class-native () { - autotools_do_install - - # Install the libiberty header - install -d ${D}${includedir} - install -m 644 ${S}/include/ansidecl.h ${D}${includedir} - install -m 644 ${S}/include/libiberty.h ${D}${includedir} - - # We only want libiberty, libbfd and libopcodes - rm -rf ${D}${bindir} - rm -rf ${D}${prefix}/${TARGET_SYS} - rm -rf ${D}${prefix}/lib/ldscripts - rm -rf ${D}${prefix}/share/info - rm -rf ${D}${prefix}/share/locale - rm -rf ${D}${prefix}/share/man - rmdir ${D}${prefix}/share || : - rmdir ${D}/${libdir}/gcc-lib || : - rmdir ${D}/${libdir}64/gcc-lib || : - rmdir ${D}/${libdir} || : - rmdir ${D}/${libdir}64 || : -} - -# Split out libbfd-*.so so including perf doesn't include extra stuff -PACKAGE_BEFORE_PN += "libbfd" -FILES_libbfd = "${libdir}/libbfd-*.so.* ${libdir}/libbfd-*.so" - -SRC_URI_append_class-nativesdk = " file://0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch " - -USE_ALTERNATIVES_FOR_class-nativesdk = "" -FILES_${PN}_append_class-nativesdk = " ${bindir}" - -BBCLASSEXTEND = "native nativesdk" - diff --git a/poky/meta/recipes-devtools/binutils/binutils_2.35.bb b/poky/meta/recipes-devtools/binutils/binutils_2.35.bb new file mode 100644 index 000000000..2e645e1ed --- /dev/null +++ b/poky/meta/recipes-devtools/binutils/binutils_2.35.bb @@ -0,0 +1,65 @@ +require binutils.inc +require binutils-${PV}.inc + +DEPENDS += "flex bison zlib" + +EXTRA_OECONF += "--with-sysroot=/ \ + --enable-install-libbfd \ + --enable-install-libiberty \ + --enable-shared \ + --with-system-zlib \ + " + +EXTRA_OEMAKE_append_libc-musl = "\ + gt_cv_func_gnugettext1_libc=yes \ + gt_cv_func_gnugettext2_libc=yes \ + " +EXTRA_OECONF_class-native = "--enable-targets=all \ + --enable-64-bit-bfd \ + --enable-install-libiberty \ + --enable-install-libbfd \ + --disable-gdb \ + --disable-gdbserver \ + --disable-libdecnumber \ + --disable-readline \ + --disable-sim \ + --disable-werror" + +# gcc9.0 end up mis-compiling libbfd.so with O2 which then crashes on target +# So remove -O2 and use -Os as workaround +SELECTED_OPTIMIZATION_remove_mipsarch = "-O2" +SELECTED_OPTIMIZATION_append_mipsarch = " -Os" + +do_install_class-native () { + autotools_do_install + + # Install the libiberty header + install -d ${D}${includedir} + install -m 644 ${S}/include/ansidecl.h 
${D}${includedir} + install -m 644 ${S}/include/libiberty.h ${D}${includedir} + + # We only want libiberty, libbfd and libopcodes + rm -rf ${D}${bindir} + rm -rf ${D}${prefix}/${TARGET_SYS} + rm -rf ${D}${prefix}/lib/ldscripts + rm -rf ${D}${prefix}/share/info + rm -rf ${D}${prefix}/share/locale + rm -rf ${D}${prefix}/share/man + rmdir ${D}${prefix}/share || : + rmdir ${D}/${libdir}/gcc-lib || : + rmdir ${D}/${libdir}64/gcc-lib || : + rmdir ${D}/${libdir} || : + rmdir ${D}/${libdir}64 || : +} + +# Split out libbfd-*.so so including perf doesn't include extra stuff +PACKAGE_BEFORE_PN += "libbfd" +FILES_libbfd = "${libdir}/libbfd-*.so.* ${libdir}/libbfd-*.so" + +SRC_URI_append_class-nativesdk = " file://0003-binutils-nativesdk-Search-for-alternative-ld.so.conf.patch " + +USE_ALTERNATIVES_FOR_class-nativesdk = "" +FILES_${PN}_append_class-nativesdk = " ${bindir}" + +BBCLASSEXTEND = "native nativesdk" + diff --git a/poky/meta/recipes-devtools/build-compare/build-compare_git.bb b/poky/meta/recipes-devtools/build-compare/build-compare_git.bb deleted file mode 100644 index 74a954db4..000000000 --- a/poky/meta/recipes-devtools/build-compare/build-compare_git.bb +++ /dev/null @@ -1,29 +0,0 @@ -SUMMARY = "Build Result Compare Script" -DESCRIPTION = "This package contains scripts to find out if the build result\ -differs to a former build." -HOMEPAGE = "https://github.com/openSUSE/build-compare" -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe" - -SRC_URI = "git://github.com/openSUSE/build-compare.git" - -# Date matches entry in build-compare.changes and date of SRCREV. -# -SRCREV = "102d844ce052c6dec6c56ee1d471bde72a1b4633" -PE = "1" -PV = "2020.05.29+git${SRCPV}" -UPSTREAM_CHECK_COMMITS = "1" - -S = "${WORKDIR}/git" - -BBCLASSEXTEND = "native nativesdk" - -do_install() { - install -d ${D}/${bindir} - install -m 755 functions.sh ${D}/${bindir} - install -m 755 pkg-diff.sh ${D}/${bindir} - install -m 755 same-build-result.sh ${D}/${bindir} - install -m 755 srpm-check.sh ${D}/${bindir} -} - -RDEPENDS_${PN} += "bash" diff --git a/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb b/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb deleted file mode 100644 index 852583dfd..000000000 --- a/poky/meta/recipes-devtools/ccache/ccache_3.7.10.bb +++ /dev/null @@ -1,32 +0,0 @@ -SUMMARY = "a fast C/C++ compiler cache" -DESCRIPTION = "ccache is a compiler cache. It speeds up recompilation \ -by caching the result of previous compilations and detecting when the \ -same compilation is being done again. Supported languages are C, C\+\+, \ -Objective-C and Objective-C++." -HOMEPAGE = "http://ccache.samba.org" -SECTION = "devel" - -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://LICENSE.adoc;md5=22d514dbc01fdf9a9784334b6b59417a" - -DEPENDS = "zlib" - -SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz" -SRC_URI[sha256sum] = "447ddf21a5f0ffa6b6d26839ae876a6d17d0d7e3533926cdf78ecd11dad793f8" - -UPSTREAM_CHECK_URI = "https://github.com/ccache/ccache/releases/" - -inherit autotools - -# Remove ccache-native's dependencies, so that it can be used widely by -# other native recipes. 
-DEPENDS_class-native = "" -EXTRA_OECONF_class-native = "--with-bundled-zlib" -INHIBIT_AUTOTOOLS_DEPS_class-native = "1" -PATCHTOOL = "patch" - -BBCLASSEXTEND = "native" - -do_configure_class-native() { - oe_runconf -} diff --git a/poky/meta/recipes-devtools/ccache/ccache_3.7.11.bb b/poky/meta/recipes-devtools/ccache/ccache_3.7.11.bb new file mode 100644 index 000000000..fd004f4c9 --- /dev/null +++ b/poky/meta/recipes-devtools/ccache/ccache_3.7.11.bb @@ -0,0 +1,32 @@ +SUMMARY = "a fast C/C++ compiler cache" +DESCRIPTION = "ccache is a compiler cache. It speeds up recompilation \ +by caching the result of previous compilations and detecting when the \ +same compilation is being done again. Supported languages are C, C\+\+, \ +Objective-C and Objective-C++." +HOMEPAGE = "http://ccache.samba.org" +SECTION = "devel" + +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://LICENSE.adoc;md5=22d514dbc01fdf9a9784334b6b59417a" + +DEPENDS = "zlib" + +SRC_URI = "https://github.com/ccache/ccache/releases/download/v${PV}/${BP}.tar.gz" +SRC_URI[sha256sum] = "34309a59d4b6b6b33756366aa9d3144a4655587be9f914476b4c0e2d36365f01" + +UPSTREAM_CHECK_URI = "https://github.com/ccache/ccache/releases/" + +inherit autotools + +# Remove ccache-native's dependencies, so that it can be used widely by +# other native recipes. +DEPENDS_class-native = "" +EXTRA_OECONF_class-native = "--with-bundled-zlib" +INHIBIT_AUTOTOOLS_DEPS_class-native = "1" +PATCHTOOL = "patch" + +BBCLASSEXTEND = "native" + +do_configure_class-native() { + oe_runconf +} diff --git a/poky/meta/recipes-devtools/distcc/distcc_3.3.3.bb b/poky/meta/recipes-devtools/distcc/distcc_3.3.3.bb index c52f136be..d92b9c304 100644 --- a/poky/meta/recipes-devtools/distcc/distcc_3.3.3.bb +++ b/poky/meta/recipes-devtools/distcc/distcc_3.3.3.bb @@ -17,6 +17,7 @@ RRECOMMENDS_${PN}-server = "avahi-daemon" SRC_URI = "git://github.com/distcc/distcc.git \ file://fix-gnome.patch \ file://separatebuilddir.patch \ + file://0001-Fix-build-with-gcc-10-which-defaults-to-fno-common-c.patch \ file://default \ file://distcc \ file://distcc.service" diff --git a/poky/meta/recipes-devtools/distcc/files/0001-Fix-build-with-gcc-10-which-defaults-to-fno-common-c.patch b/poky/meta/recipes-devtools/distcc/files/0001-Fix-build-with-gcc-10-which-defaults-to-fno-common-c.patch new file mode 100644 index 000000000..86c07c11f --- /dev/null +++ b/poky/meta/recipes-devtools/distcc/files/0001-Fix-build-with-gcc-10-which-defaults-to-fno-common-c.patch @@ -0,0 +1,34 @@ +From 98530865795300008fe06bb647a6fc2dddfc7967 Mon Sep 17 00:00:00 2001 +From: Romain Geissler +Date: Mon, 27 Jan 2020 09:28:43 +0000 +Subject: [PATCH] Fix build with gcc 10 which defaults to -fno-common (cf + https://gcc.gnu.org/gcc-10/porting_to.html) + +This fixes the following link error I see when I use the latest gcc 10 +git branch: +/opt/1A/toolchain/x86_64-v20.0.7/lib/gcc/x86_64-1a-linux-gnu/10.0.1/../../../../x86_64-1a-linux-gnu/bin/ld: src/serve.o:(.bss+0x0): multiple definition of `stats_text'; src/prefork.o:(.bss+0x0): first defined here +/opt/1A/toolchain/x86_64-v20.0.7/lib/gcc/x86_64-1a-linux-gnu/10.0.1/../../../../x86_64-1a-linux-gnu/bin/ld: src/stats.o:(.data+0x20): multiple definition of `stats_text'; src/prefork.o:(.bss+0x0): first defined here +collect2: error: ld returned 1 exit status + +Upstream-Status: Backport [https://github.com/distcc/distcc/pull/373] +Signed-off-by: Khem Raj +--- + src/stats.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/stats.h b/src/stats.h +index 9bde285..74d4690 
100644 +--- a/src/stats.h ++++ b/src/stats.h +@@ -33,7 +33,7 @@ enum stats_e { STATS_TCP_ACCEPT, STATS_REJ_BAD_REQ, STATS_REJ_OVERLOAD, + STATS_COMPILE_OK, STATS_COMPILE_ERROR, STATS_COMPILE_TIMEOUT, + STATS_CLI_DISCONN, STATS_OTHER, STATS_ENUM_MAX }; + +-const char *stats_text[20]; ++extern const char *stats_text[20]; + + int dcc_stats_init(void); + void dcc_stats_init_kid(void); +-- +2.28.0 + diff --git a/poky/meta/recipes-devtools/flex/flex/check-funcs.patch b/poky/meta/recipes-devtools/flex/flex/check-funcs.patch index 037ca81fd..762275e7f 100644 --- a/poky/meta/recipes-devtools/flex/flex/check-funcs.patch +++ b/poky/meta/recipes-devtools/flex/flex/check-funcs.patch @@ -1,10 +1,4 @@ -Upstream-Status: Backport -Signed-off-by: Ross Burton - -From c42de062bbdc7c31d7181c10a74202d493280ada Mon Sep 17 00:00:00 2001 -From: Explorer09 -Date: Tue, 27 Feb 2018 09:10:12 +0800 -Subject: [PATCH] build: Move dnl comments out of AC_CHECK_FUNCS +Subject: build: Move dnl comments out of AC_CHECK_FUNCS Due to a bug, autoheader (2.69) will treat M4 dnl comments in a quoted argument of AC_CHECK_FUNCS as function tokens and generate a lot of @@ -20,28 +14,20 @@ I have reported the autoheader bug here: As a workaround, let's move comments out of AC_CHECK_FUNCS. +Upstream-Status: Backport +Signed-off-by: Ross Burton + Signed-off-by: Kang-Che Sung +Signed-off-by: Zang Ruochen --- - configure.ac | 29 +++++++++++++---------------- - 1 file changed, 13 insertions(+), 16 deletions(-) + configure.ac | 28 +++++++++++++--------------- + 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/configure.ac b/configure.ac -index 55e774b0..d0f3b7da 100644 +index 55e774b..5ea3a93 100644 --- a/configure.ac +++ b/configure.ac -@@ -166,6 +166,7 @@ strtol dnl - AC_CHECK_FUNCS([dnl - pow dnl Used only by "examples/manual/expr" - setlocale dnl Needed only if NLS is enabled -+reallocarr dnl NetBSD function. Use reallocarray if not available. - reallocarray dnl OpenBSD function. We have replacement if not available. - ]) - -diff --git a/configure.ac b/configure.ac -index 3c977a4e..9c53590f 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -158,22 +158,19 @@ AC_FUNC_REALLOC +@@ -153,21 +153,19 @@ AC_FUNC_REALLOC AS_IF([test "$cross_compiling" = yes], AC_MSG_WARN([result $ac_cv_func_realloc_0_nonnull guessed because of cross compilation])) @@ -58,7 +44,6 @@ index 3c977a4e..9c53590f 100644 -AC_CHECK_FUNCS([dnl -pow dnl Used only by "examples/manual/expr" -setlocale dnl Needed only if NLS is enabled --reallocarr dnl NetBSD function. Use reallocarray if not available. -reallocarray dnl OpenBSD function. We have replacement if not available. -]) +dnl Autoheader (<= 2.69) bug: "dnl" comments in a quoted argument of @@ -77,3 +62,6 @@ index 3c977a4e..9c53590f 100644 AC_CONFIG_FILES( Makefile +-- +2.25.1 + diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1.inc b/poky/meta/recipes-devtools/gcc/gcc-10.1.inc deleted file mode 100644 index 5f310301b..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1.inc +++ /dev/null @@ -1,121 +0,0 @@ -require gcc-common.inc - -# Third digit in PV should be incremented after a minor release - -PV = "10.1.0" - -# BINV should be incremented to a revision after a minor gcc release - -BINV = "10.1.0" - -FILESEXTRAPATHS =. 
"${FILE_DIRNAME}/gcc-10.1:${FILE_DIRNAME}/gcc-10.1/backport:" - -DEPENDS =+ "mpfr gmp libmpc zlib flex-native" -NATIVEDEPS = "mpfr-native gmp-native libmpc-native zlib-native flex-native" - -LICENSE = "GPL-3.0-with-GCC-exception & GPLv3" - -LIC_FILES_CHKSUM = "\ - file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \ - file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \ - file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \ - file://COPYING.LIB;md5=2d5025d4aa3495befef8f17206a5b0a1 \ - file://COPYING.RUNTIME;md5=fe60d87048567d4fe8c8a0ed2448bcc8 \ -" - -BASEURI ?= "${GNU_MIRROR}/gcc/gcc-${PV}/gcc-${PV}.tar.xz" -#RELEASE ?= "93a49d2d2292893b9b7f38132df949c70942838c" -#BASEURI ?= "https://github.com/gcc-mirror/gcc/archive/${RELEASE}.zip;downloadfilename=gcc-${PV}-${RELEASE}.zip" -SRC_URI = "\ - ${BASEURI} \ - file://0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch \ - file://0002-gcc-poison-system-directories.patch \ - file://0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch \ - file://0004-64-bit-multilib-hack.patch \ - file://0005-optional-libstdc.patch \ - file://0006-COLLECT_GCC_OPTIONS.patch \ - file://0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch \ - file://0008-fortran-cross-compile-hack.patch \ - file://0009-cpp-honor-sysroot.patch \ - file://0010-MIPS64-Default-to-N64-ABI.patch \ - file://0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch \ - file://0012-gcc-Fix-argument-list-too-long-error.patch \ - file://0013-Disable-sdt.patch \ - file://0014-libtool.patch \ - file://0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch \ - file://0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch \ - file://0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch \ - file://0018-export-CPP.patch \ - file://0019-Ensure-target-gcc-headers-can-be-included.patch \ - file://0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch \ - file://0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch \ - file://0022-aarch64-Add-support-for-musl-ldso.patch \ - file://0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch \ - file://0024-handle-sysroot-support-for-nativesdk-gcc.patch \ - file://0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch \ - file://0026-Fix-various-_FOR_BUILD-and-related-variables.patch \ - file://0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch \ - file://0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch \ - file://0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch \ - file://0030-sync-gcc-stddef.h-with-musl.patch \ - file://0031-fix-segmentation-fault-in-precompiled-header-generat.patch \ - file://0032-Fix-for-testsuite-failure.patch \ - file://0033-Re-introduce-spe-commandline-options.patch \ - file://0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch \ - file://0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch \ - file://0036-Enable-CET-in-cross-compiler-if-possible.patch \ - file://0037-mingw32-Enable-operation_not_supported.patch \ - file://0038-libatomic-Do-not-enforce-march-on-aarch64.patch \ - file://0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch \ - file://0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch \ - file://0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch \ - file://pr96130.patch \ -" -SRC_URI[sha256sum] = "b6898a23844b656f1b68691c5c012036c2e694ac4b53a8918d4712ad876e7ea2" - -S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}" -# For dev release snapshotting -#S = 
"${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${RELEASE}" -#B = "${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}" - -# Language Overrides -FORTRAN = "" -JAVA = "" - -LTO = "--enable-lto" -SSP ?= "--disable-libssp" -SSP_mingw32 = "--enable-libssp" - -EXTRA_OECONF_BASE = "\ - ${LTO} \ - ${SSP} \ - --enable-libitm \ - --disable-bootstrap \ - --with-system-zlib \ - ${@'--with-linker-hash-style=${LINKER_HASH_STYLE}' if '${LINKER_HASH_STYLE}' else ''} \ - --enable-linker-build-id \ - --with-ppl=no \ - --with-cloog=no \ - --enable-checking=release \ - --enable-cheaders=c_global \ - --without-isl \ -" - -EXTRA_OECONF_INITIAL = "\ - --disable-libgomp \ - --disable-libitm \ - --disable-libquadmath \ - --with-system-zlib \ - --disable-lto \ - --disable-plugin \ - --enable-linker-build-id \ - --enable-decimal-float=no \ - --without-isl \ - --disable-libssp \ -" - -EXTRA_OECONF_PATHS = "\ - --with-gxx-include-dir=/not/exist{target_includedir}/c++/${BINV} \ - --with-sysroot=/not/exist \ - --with-build-sysroot=${STAGING_DIR_TARGET} \ -" diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch deleted file mode 100644 index 73de4c759..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch +++ /dev/null @@ -1,202 +0,0 @@ -CVE: CVE-2020-13844 -Upstream-Status: Backport -Signed-off-by: Ross Burton - -From 1ff243934ac443b5f58cd02a5012ce58ecc31fb2 Mon Sep 17 00:00:00 2001 -From: Matthew Malcomson -Date: Thu, 9 Jul 2020 09:11:58 +0100 -Subject: [PATCH 1/3] aarch64: New Straight Line Speculation (SLS) mitigation - flags - -Here we introduce the flags that will be used for straight line speculation. - -The new flag introduced is `-mharden-sls=`. -This flag can take arguments of `none`, `all`, or a comma seperated list of one -or more of `retbr` or `blr`. -`none` indicates no special mitigation of the straight line speculation -vulnerability. -`all` requests all mitigations currently implemented. -`retbr` requests that the RET and BR instructions have a speculation barrier -inserted after them. -`blr` requests that BLR instructions are replaced by a BL to a function stub -using a BR with a speculation barrier after it. - -Setting this on a per-function basis using attributes or the like is not -enabled, but may be in the future. - -gcc/ChangeLog: - -2020-06-02 Matthew Malcomson - - * config/aarch64/aarch64-protos.h (aarch64_harden_sls_retbr_p): - New. - (aarch64_harden_sls_blr_p): New. - * config/aarch64/aarch64.c (enum aarch64_sls_hardening_type): - New. - (aarch64_harden_sls_retbr_p): New. - (aarch64_harden_sls_blr_p): New. - (aarch64_validate_sls_mitigation): New. - (aarch64_override_options): Parse options for SLS mitigation. - * config/aarch64/aarch64.opt (-mharden-sls): New option. - * doc/invoke.texi: Document new option. 
---- - gcc/config/aarch64/aarch64-protos.h | 3 ++ - gcc/config/aarch64/aarch64.c | 76 +++++++++++++++++++++++++++++++++++++ - gcc/config/aarch64/aarch64.opt | 4 ++ - gcc/doc/invoke.texi | 12 ++++++ - 4 files changed, 95 insertions(+) - -diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h -index 723d9ba..eb5f4b4 100644 ---- a/gcc/config/aarch64/aarch64-protos.h -+++ b/gcc/config/aarch64/aarch64-protos.h -@@ -781,4 +781,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; - - tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); - -+extern bool aarch64_harden_sls_retbr_p (void); -+extern bool aarch64_harden_sls_blr_p (void); -+ - #endif /* GCC_AARCH64_PROTOS_H */ -diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c -index b86434a..437a9cf 100644 ---- a/gcc/config/aarch64/aarch64.c -+++ b/gcc/config/aarch64/aarch64.c -@@ -14494,6 +14494,79 @@ aarch64_validate_mcpu (const char *str, const struct processor **res, - return false; - } - -+/* Straight line speculation indicators. */ -+enum aarch64_sls_hardening_type -+{ -+ SLS_NONE = 0, -+ SLS_RETBR = 1, -+ SLS_BLR = 2, -+ SLS_ALL = 3, -+}; -+static enum aarch64_sls_hardening_type aarch64_sls_hardening; -+ -+/* Return whether we should mitigatate Straight Line Speculation for the RET -+ and BR instructions. */ -+bool -+aarch64_harden_sls_retbr_p (void) -+{ -+ return aarch64_sls_hardening & SLS_RETBR; -+} -+ -+/* Return whether we should mitigatate Straight Line Speculation for the BLR -+ instruction. */ -+bool -+aarch64_harden_sls_blr_p (void) -+{ -+ return aarch64_sls_hardening & SLS_BLR; -+} -+ -+/* As of yet we only allow setting these options globally, in the future we may -+ allow setting them per function. */ -+static void -+aarch64_validate_sls_mitigation (const char *const_str) -+{ -+ char *token_save = NULL; -+ char *str = NULL; -+ -+ if (strcmp (const_str, "none") == 0) -+ { -+ aarch64_sls_hardening = SLS_NONE; -+ return; -+ } -+ if (strcmp (const_str, "all") == 0) -+ { -+ aarch64_sls_hardening = SLS_ALL; -+ return; -+ } -+ -+ char *str_root = xstrdup (const_str); -+ str = strtok_r (str_root, ",", &token_save); -+ if (!str) -+ error ("invalid argument given to %<-mharden-sls=%>"); -+ -+ int temp = SLS_NONE; -+ while (str) -+ { -+ if (strcmp (str, "blr") == 0) -+ temp |= SLS_BLR; -+ else if (strcmp (str, "retbr") == 0) -+ temp |= SLS_RETBR; -+ else if (strcmp (str, "none") == 0 || strcmp (str, "all") == 0) -+ { -+ error ("%<%s%> must be by itself for %<-mharden-sls=%>", str); -+ break; -+ } -+ else -+ { -+ error ("invalid argument %<%s%> for %<-mharden-sls=%>", str); -+ break; -+ } -+ str = strtok_r (NULL, ",", &token_save); -+ } -+ aarch64_sls_hardening = (aarch64_sls_hardening_type) temp; -+ free (str_root); -+} -+ - /* Parses CONST_STR for branch protection features specified in - aarch64_branch_protect_types, and set any global variables required. 
Returns - the parsing result and assigns LAST_STR to the last processed token from -@@ -14738,6 +14811,9 @@ aarch64_override_options (void) - selected_arch = NULL; - selected_tune = NULL; - -+ if (aarch64_harden_sls_string) -+ aarch64_validate_sls_mitigation (aarch64_harden_sls_string); -+ - if (aarch64_branch_protection_string) - aarch64_validate_mbranch_protection (aarch64_branch_protection_string); - -diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt -index d99d14c..5170361 100644 ---- a/gcc/config/aarch64/aarch64.opt -+++ b/gcc/config/aarch64/aarch64.opt -@@ -71,6 +71,10 @@ mgeneral-regs-only - Target Report RejectNegative Mask(GENERAL_REGS_ONLY) Save - Generate code which uses only the general registers. - -+mharden-sls= -+Target RejectNegative Joined Var(aarch64_harden_sls_string) -+Generate code to mitigate against straight line speculation. -+ - mfix-cortex-a53-835769 - Target Report Var(aarch64_fix_a53_err835769) Init(2) Save - Workaround for ARM Cortex-A53 Erratum number 835769. -diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi -index a2794a6..bd5b77a 100644 ---- a/gcc/doc/invoke.texi -+++ b/gcc/doc/invoke.texi -@@ -696,6 +696,7 @@ Objective-C and Objective-C++ Dialects}. - -msign-return-address=@var{scope} @gol - -mbranch-protection=@var{none}|@var{standard}|@var{pac-ret}[+@var{leaf} - +@var{b-key}]|@var{bti} @gol -+-mharden-sls=@var{opts} @gol - -march=@var{name} -mcpu=@var{name} -mtune=@var{name} @gol - -moverride=@var{string} -mverbose-cost-dump @gol - -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg} @gol -@@ -17065,6 +17066,17 @@ functions. The optional argument @samp{b-key} can be used to sign the functions - with the B-key instead of the A-key. - @samp{bti} turns on branch target identification mechanism. - -+@item -mharden-sls=@var{opts} -+@opindex mharden-sls -+Enable compiler hardening against straight line speculation (SLS). -+@var{opts} is a comma-separated list of the following options: -+@table @samp -+@item retbr -+@item blr -+@end table -+In addition, @samp{-mharden-sls=all} enables all SLS hardening while -+@samp{-mharden-sls=none} disables all SLS hardening. -+ - @item -msve-vector-bits=@var{bits} - @opindex msve-vector-bits - Specify the number of bits in an SVE vector register. This option only has --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch deleted file mode 100644 index 82ae9f8d1..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch +++ /dev/null @@ -1,39 +0,0 @@ -From f2a5dc3bc7e5727d6bf77e1c6e8a31a6f000883d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 08:37:11 +0400 -Subject: [PATCH] gcc-4.3.1: ARCH_FLAGS_FOR_TARGET - -Signed-off-by: Khem Raj - -Upstream-Status: Inappropriate [embedded specific] ---- - configure | 2 +- - configure.ac | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/configure b/configure -index 4cc938ebb7d..226a64939d1 100755 ---- a/configure -+++ b/configure -@@ -7722,7 +7722,7 @@ fi - # for target_alias and gcc doesn't manage it consistently. 
- target_configargs="--cache-file=./config.cache ${target_configargs}" - --FLAGS_FOR_TARGET= -+FLAGS_FOR_TARGET="$ARCH_FLAGS_FOR_TARGET" - case " $target_configdirs " in - *" newlib "*) - case " $target_configargs " in -diff --git a/configure.ac b/configure.ac -index c78d9cbea62..f024f4bac9b 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -3227,7 +3227,7 @@ fi - # for target_alias and gcc doesn't manage it consistently. - target_configargs="--cache-file=./config.cache ${target_configargs}" - --FLAGS_FOR_TARGET= -+FLAGS_FOR_TARGET="$ARCH_FLAGS_FOR_TARGET" - case " $target_configdirs " in - *" newlib "*) - case " $target_configargs " in diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch deleted file mode 100644 index 823cc8b66..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch +++ /dev/null @@ -1,607 +0,0 @@ -Upstream-Status: Backport -Signed-off-by: Ross Burton - -From b1204d16e1ec96a4aa89e44de8990e2499ffdb22 Mon Sep 17 00:00:00 2001 -From: Matthew Malcomson -Date: Thu, 9 Jul 2020 09:11:59 +0100 -Subject: [PATCH 2/3] aarch64: Introduce SLS mitigation for RET and BR - instructions - -Instructions following RET or BR are not necessarily executed. In order -to avoid speculation past RET and BR we can simply append a speculation -barrier. - -Since these speculation barriers will not be architecturally executed, -they are not expected to add a high performance penalty. - -The speculation barrier is to be SB when targeting architectures which -have this enabled, and DSB SY + ISB otherwise. - -We add tests for each of the cases where such an instruction was seen. - -This is implemented by modifying each machine description pattern that -emits either a RET or a BR instruction. We choose not to use something -like `TARGET_ASM_FUNCTION_EPILOGUE` since it does not affect the -`indirect_jump`, `jump`, `sibcall_insn` and `sibcall_value_insn` -patterns and we find it preferable to implement the functionality in the -same way for every pattern. - -There is one particular case which is slightly tricky. The -implementation of TARGET_ASM_TRAMPOLINE_TEMPLATE uses a BR which needs -to be mitigated against. The trampoline template is used *once* per -compilation unit, and the TRAMPOLINE_SIZE is exposed to the user via the -builtin macro __LIBGCC_TRAMPOLINE_SIZE__. -In the future we may implement function specific attributes to turn on -and off hardening on a per-function basis. -The fixed nature of the trampoline described above implies it will be -safer to ensure this speculation barrier is always used. - -Testing: - Bootstrap and regtest done on aarch64-none-linux - Used a temporary hack(1) to use these options on every test in the - testsuite and a script to check that the output never emitted an - unmitigated RET or BR. - -1) Temporary hack was a change to the testsuite to always use -`-save-temps` and run a script on the assembly output of those -compilations which produced one to ensure every RET or BR is immediately -followed by a speculation barrier. - -gcc/ChangeLog: - - * config/aarch64/aarch64-protos.h (aarch64_sls_barrier): New. - * config/aarch64/aarch64.c (aarch64_output_casesi): Emit - speculation barrier after BR instruction if needs be. - (aarch64_trampoline_init): Handle ptr_mode value & adjust size - of code copied. - (aarch64_sls_barrier): New. 
- (aarch64_asm_trampoline_template): Add needed barriers. - * config/aarch64/aarch64.h (AARCH64_ISA_SB): New. - (TARGET_SB): New. - (TRAMPOLINE_SIZE): Account for barrier. - * config/aarch64/aarch64.md (indirect_jump, *casesi_dispatch, - simple_return, *do_return, *sibcall_insn, *sibcall_value_insn): - Emit barrier if needs be, also account for possible barrier using - "sls_length" attribute. - (sls_length): New attribute. - (length): Determine default using any non-default sls_length - value. - -gcc/testsuite/ChangeLog: - - * gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c: New test. - * gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c: - New test. - * gcc.target/aarch64/sls-mitigation/sls-mitigation.exp: New file. - * lib/target-supports.exp (check_effective_target_aarch64_asm_sb_ok): - New proc. ---- - gcc/config/aarch64/aarch64-protos.h | 1 + - gcc/config/aarch64/aarch64.c | 41 ++++++- - gcc/config/aarch64/aarch64.h | 10 +- - gcc/config/aarch64/aarch64.md | 76 +++++++++---- - .../aarch64/sls-mitigation/sls-miti-retbr-pacret.c | 21 ++++ - .../aarch64/sls-mitigation/sls-miti-retbr.c | 119 +++++++++++++++++++++ - .../aarch64/sls-mitigation/sls-mitigation.exp | 73 +++++++++++++ - gcc/testsuite/lib/target-supports.exp | 2 +- - 8 files changed, 318 insertions(+), 25 deletions(-) - create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c - create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c - create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp - -diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h -index eb5f4b4..ee0ffde 100644 ---- a/gcc/config/aarch64/aarch64-protos.h -+++ b/gcc/config/aarch64/aarch64-protos.h -@@ -781,6 +781,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; - - tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); - -+const char *aarch64_sls_barrier (int); - extern bool aarch64_harden_sls_retbr_p (void); - extern bool aarch64_harden_sls_blr_p (void); - -diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c -index 437a9cf..44e3d1f 100644 ---- a/gcc/config/aarch64/aarch64.c -+++ b/gcc/config/aarch64/aarch64.c -@@ -10852,8 +10852,8 @@ aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) - static void - aarch64_asm_trampoline_template (FILE *f) - { -- int offset1 = 16; -- int offset2 = 20; -+ int offset1 = 24; -+ int offset2 = 28; - - if (aarch64_bti_enabled ()) - { -@@ -10876,6 +10876,17 @@ aarch64_asm_trampoline_template (FILE *f) - } - asm_fprintf (f, "\tbr\t%s\n", reg_names [IP1_REGNUM]); - -+ /* We always emit a speculation barrier. -+ This is because the same trampoline template is used for every nested -+ function. Since nested functions are not particularly common or -+ performant we don't worry too much about the extra instructions to copy -+ around. -+ This is not yet a problem, since we have not yet implemented function -+ specific attributes to choose between hardening against straight line -+ speculation or not, but such function specific attributes are likely to -+ happen in the future. */ -+ asm_fprintf (f, "\tdsb\tsy\n\tisb\n"); -+ - /* The trampoline needs an extra padding instruction. In case if BTI is - enabled the padding instruction is replaced by the BTI instruction at - the beginning. 
*/ -@@ -10890,10 +10901,14 @@ static void - aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) - { - rtx fnaddr, mem, a_tramp; -- const int tramp_code_sz = 16; -+ const int tramp_code_sz = 24; - - /* Don't need to copy the trailing D-words, we fill those in below. */ -- emit_block_move (m_tramp, assemble_trampoline_template (), -+ /* We create our own memory address in Pmode so that `emit_block_move` can -+ use parts of the backend which expect Pmode addresses. */ -+ rtx temp = convert_memory_address (Pmode, XEXP (m_tramp, 0)); -+ emit_block_move (gen_rtx_MEM (BLKmode, temp), -+ assemble_trampoline_template (), - GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL); - mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz); - fnaddr = XEXP (DECL_RTL (fndecl), 0); -@@ -11084,6 +11099,8 @@ aarch64_output_casesi (rtx *operands) - output_asm_insn (buf, operands); - output_asm_insn (patterns[index][1], operands); - output_asm_insn ("br\t%3", operands); -+ output_asm_insn (aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()), -+ operands); - assemble_label (asm_out_file, label); - return ""; - } -@@ -22924,6 +22941,22 @@ aarch64_file_end_indicate_exec_stack () - #undef GNU_PROPERTY_AARCH64_FEATURE_1_BTI - #undef GNU_PROPERTY_AARCH64_FEATURE_1_AND - -+/* Helper function for straight line speculation. -+ Return what barrier should be emitted for straight line speculation -+ mitigation. -+ When not mitigating against straight line speculation this function returns -+ an empty string. -+ When mitigating against straight line speculation, use: -+ * SB when the v8.5-A SB extension is enabled. -+ * DSB+ISB otherwise. */ -+const char * -+aarch64_sls_barrier (int mitigation_required) -+{ -+ return mitigation_required -+ ? (TARGET_SB ? "sb" : "dsb\tsy\n\tisb") -+ : ""; -+} -+ - /* Target-specific selftests. */ - - #if CHECKING_P -diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h -index 1ce23c6..c21015f 100644 ---- a/gcc/config/aarch64/aarch64.h -+++ b/gcc/config/aarch64/aarch64.h -@@ -281,6 +281,7 @@ extern unsigned aarch64_architecture_version; - #define AARCH64_ISA_F32MM (aarch64_isa_flags & AARCH64_FL_F32MM) - #define AARCH64_ISA_F64MM (aarch64_isa_flags & AARCH64_FL_F64MM) - #define AARCH64_ISA_BF16 (aarch64_isa_flags & AARCH64_FL_BF16) -+#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB) - - /* Crypto is an optional extension to AdvSIMD. */ - #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO) -@@ -378,6 +379,9 @@ extern unsigned aarch64_architecture_version; - #define TARGET_FIX_ERR_A53_835769_DEFAULT 1 - #endif - -+/* SB instruction is enabled through +sb. */ -+#define TARGET_SB (AARCH64_ISA_SB) -+ - /* Apply the workaround for Cortex-A53 erratum 835769. */ - #define TARGET_FIX_ERR_A53_835769 \ - ((aarch64_fix_a53_err835769 == 2) \ -@@ -1058,8 +1062,10 @@ typedef struct - - #define RETURN_ADDR_RTX aarch64_return_addr - --/* BTI c + 3 insns + 2 pointer-sized entries. */ --#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32) -+/* BTI c + 3 insns -+ + sls barrier of DSB + ISB. -+ + 2 pointer-sized entries. */ -+#define TRAMPOLINE_SIZE (24 + (TARGET_ILP32 ? 8 : 16)) - - /* Trampolines contain dwords, so must be dword aligned. */ - #define TRAMPOLINE_ALIGNMENT 64 -diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md -index 8c8be3c..dda04ee 100644 ---- a/gcc/config/aarch64/aarch64.md -+++ b/gcc/config/aarch64/aarch64.md -@@ -407,10 +407,25 @@ - ;; Attribute that specifies whether the alternative uses MOVPRFX. 
- (define_attr "movprfx" "no,yes" (const_string "no")) - -+;; Attribute to specify that an alternative has the length of a single -+;; instruction plus a speculation barrier. -+(define_attr "sls_length" "none,retbr,casesi" (const_string "none")) -+ - (define_attr "length" "" - (cond [(eq_attr "movprfx" "yes") - (const_int 8) -- ] (const_int 4))) -+ -+ (eq_attr "sls_length" "retbr") -+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 4) -+ (match_test "TARGET_SB") (const_int 8)] -+ (const_int 12)) -+ -+ (eq_attr "sls_length" "casesi") -+ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 16) -+ (match_test "TARGET_SB") (const_int 20)] -+ (const_int 24)) -+ ] -+ (const_int 4))) - - ;; Strictly for compatibility with AArch32 in pipeline models, since AArch64 has - ;; no predicated insns. -@@ -447,8 +462,12 @@ - (define_insn "indirect_jump" - [(set (pc) (match_operand:DI 0 "register_operand" "r"))] - "" -- "br\\t%0" -- [(set_attr "type" "branch")] -+ { -+ output_asm_insn ("br\\t%0", operands); -+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); -+ } -+ [(set_attr "type" "branch") -+ (set_attr "sls_length" "retbr")] - ) - - (define_insn "jump" -@@ -765,7 +784,7 @@ - "* - return aarch64_output_casesi (operands); - " -- [(set_attr "length" "16") -+ [(set_attr "sls_length" "casesi") - (set_attr "type" "branch")] - ) - -@@ -844,18 +863,23 @@ - [(return)] - "" - { -+ const char *ret = NULL; - if (aarch64_return_address_signing_enabled () - && TARGET_ARMV8_3 - && !crtl->calls_eh_return) - { - if (aarch64_ra_sign_key == AARCH64_KEY_B) -- return "retab"; -+ ret = "retab"; - else -- return "retaa"; -+ ret = "retaa"; - } -- return "ret"; -+ else -+ ret = "ret"; -+ output_asm_insn (ret, operands); -+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); - } -- [(set_attr "type" "branch")] -+ [(set_attr "type" "branch") -+ (set_attr "sls_length" "retbr")] - ) - - (define_expand "return" -@@ -867,8 +891,12 @@ - (define_insn "simple_return" - [(simple_return)] - "" -- "ret" -- [(set_attr "type" "branch")] -+ { -+ output_asm_insn ("ret", operands); -+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); -+ } -+ [(set_attr "type" "branch") -+ (set_attr "sls_length" "retbr")] - ) - - (define_insn "*cb1" -@@ -1066,10 +1094,16 @@ - (unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI) - (return)] - "SIBLING_CALL_P (insn)" -- "@ -- br\\t%0 -- b\\t%c0" -- [(set_attr "type" "branch, branch")] -+ { -+ if (which_alternative == 0) -+ { -+ output_asm_insn ("br\\t%0", operands); -+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); -+ } -+ return "b\\t%c0"; -+ } -+ [(set_attr "type" "branch, branch") -+ (set_attr "sls_length" "retbr,none")] - ) - - (define_insn "*sibcall_value_insn" -@@ -1080,10 +1114,16 @@ - (unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI) - (return)] - "SIBLING_CALL_P (insn)" -- "@ -- br\\t%1 -- b\\t%c1" -- [(set_attr "type" "branch, branch")] -+ { -+ if (which_alternative == 0) -+ { -+ output_asm_insn ("br\\t%1", operands); -+ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); -+ } -+ return "b\\t%c1"; -+ } -+ [(set_attr "type" "branch, branch") -+ (set_attr "sls_length" "retbr,none")] - ) - - ;; Call subroutine returning any type. 
-diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c -new file mode 100644 -index 0000000..fa1887a ---- /dev/null -+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c -@@ -0,0 +1,21 @@ -+/* Avoid ILP32 since pacret is only available for LP64 */ -+/* { dg-do compile { target { ! ilp32 } } } */ -+/* { dg-additional-options "-mharden-sls=retbr -mbranch-protection=pac-ret -march=armv8.3-a" } */ -+ -+/* Testing the do_return pattern for retaa and retab. */ -+long retbr_subcall(void); -+long retbr_do_return_retaa(void) -+{ -+ return retbr_subcall()+1; -+} -+ -+__attribute__((target("branch-protection=pac-ret+b-key"))) -+long retbr_do_return_retab(void) -+{ -+ return retbr_subcall()+1; -+} -+ -+/* Ensure there are no BR or RET instructions which are not directly followed -+ by a speculation barrier. */ -+/* { dg-final { scan-assembler-not {\t(br|ret|retaa|retab)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb)} } } */ -+/* { dg-final { scan-assembler-not {ret\t} } } */ -diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c -new file mode 100644 -index 0000000..76b8d03 ---- /dev/null -+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c -@@ -0,0 +1,119 @@ -+/* We ensure that -Wpedantic is off since it complains about the trampolines -+ we explicitly want to test. */ -+/* { dg-additional-options "-mharden-sls=retbr -Wno-pedantic " } */ -+/* -+ Ensure that the SLS hardening of RET and BR leaves no unprotected RET/BR -+ instructions. -+ */ -+typedef int (foo) (int, int); -+typedef void (bar) (int, int); -+struct sls_testclass { -+ foo *x; -+ bar *y; -+ int left; -+ int right; -+}; -+ -+int -+retbr_sibcall_value_insn (struct sls_testclass x) -+{ -+ return x.x(x.left, x.right); -+} -+ -+void -+retbr_sibcall_insn (struct sls_testclass x) -+{ -+ x.y(x.left, x.right); -+} -+ -+/* Aim to test two different returns. -+ One that introduces a tail call in the middle of the function, and one that -+ has a normal return. */ -+int -+retbr_multiple_returns (struct sls_testclass x) -+{ -+ int temp; -+ if (x.left % 10) -+ return x.x(x.left, 100); -+ else if (x.right % 20) -+ { -+ return x.x(x.left * x.right, 100); -+ } -+ temp = x.left % x.right; -+ temp *= 100; -+ temp /= 2; -+ return temp % 3; -+} -+ -+void -+retbr_multiple_returns_void (struct sls_testclass x) -+{ -+ if (x.left % 10) -+ { -+ x.y(x.left, 100); -+ } -+ else if (x.right % 20) -+ { -+ x.y(x.left * x.right, 100); -+ } -+ return; -+} -+ -+/* Testing the casesi jump via register. */ -+__attribute__ ((optimize ("Os"))) -+int -+retbr_casesi_dispatch (struct sls_testclass x) -+{ -+ switch (x.left) -+ { -+ case -5: -+ return -2; -+ case -3: -+ return -1; -+ case 0: -+ return 0; -+ case 3: -+ return 1; -+ case 5: -+ break; -+ default: -+ __builtin_unreachable (); -+ } -+ return x.right; -+} -+ -+/* Testing the BR in trampolines is mitigated against. */ -+void f1 (void *); -+void f3 (void *, void (*)(void *)); -+void f2 (void *); -+ -+int -+retbr_trampolines (void *a, int b) -+{ -+ if (!b) -+ { -+ f1 (a); -+ return 1; -+ } -+ if (b) -+ { -+ void retbr_tramp_internal (void *c) -+ { -+ if (c == a) -+ f2 (c); -+ } -+ f3 (a, retbr_tramp_internal); -+ } -+ return 0; -+} -+ -+/* Testing the indirect_jump pattern. 
*/ -+void -+retbr_indirect_jump (int *buf) -+{ -+ __builtin_longjmp(buf, 1); -+} -+ -+/* Ensure there are no BR or RET instructions which are not directly followed -+ by a speculation barrier. */ -+/* { dg-final { scan-assembler-not {\t(br|ret|retaa|retab)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb|sb)} } } */ -diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp -new file mode 100644 -index 0000000..8122503 ---- /dev/null -+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp -@@ -0,0 +1,73 @@ -+# Regression driver for SLS mitigation on AArch64. -+# Copyright (C) 2020 Free Software Foundation, Inc. -+# Contributed by ARM Ltd. -+# -+# This file is part of GCC. -+# -+# GCC is free software; you can redistribute it and/or modify it -+# under the terms of the GNU General Public License as published by -+# the Free Software Foundation; either version 3, or (at your option) -+# any later version. -+# -+# GCC is distributed in the hope that it will be useful, but -+# WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+# General Public License for more details. -+# -+# You should have received a copy of the GNU General Public License -+# along with GCC; see the file COPYING3. If not see -+# . */ -+ -+# Exit immediately if this isn't an AArch64 target. -+if {![istarget aarch64*-*-*] } then { -+ return -+} -+ -+# Load support procs. -+load_lib gcc-dg.exp -+load_lib torture-options.exp -+ -+# If a testcase doesn't have special options, use these. -+global DEFAULT_CFLAGS -+if ![info exists DEFAULT_CFLAGS] then { -+ set DEFAULT_CFLAGS " " -+} -+ -+# Initialize `dg'. -+dg-init -+torture-init -+ -+# Use different architectures as well as the normal optimisation options. -+# (i.e. use both SB and DSB+ISB barriers). -+ -+set save-dg-do-what-default ${dg-do-what-default} -+# Main loop. -+# Run with torture tests (i.e. a bunch of different optimisation levels) just -+# to increase test coverage. -+set dg-do-what-default assemble -+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \ -+ "-save-temps" $DEFAULT_CFLAGS -+ -+# Run the same tests but this time with SB extension. -+# Since not all supported assemblers will support that extension we decide -+# whether to assemble or just compile based on whether the extension is -+# supported for the available assembler. -+ -+set templist {} -+foreach x $DG_TORTURE_OPTIONS { -+ lappend templist "$x -march=armv8.3-a+sb " -+ lappend templist "$x -march=armv8-a+sb " -+} -+set-torture-options $templist -+if { [check_effective_target_aarch64_asm_sb_ok] } { -+ set dg-do-what-default assemble -+} else { -+ set dg-do-what-default compile -+} -+gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \ -+ "-save-temps" $DEFAULT_CFLAGS -+set dg-do-what-default ${save-dg-do-what-default} -+ -+# All done. -+torture-finish -+dg-finish -diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp -index 8a186dd..9d2e093 100644 ---- a/gcc/testsuite/lib/target-supports.exp -+++ b/gcc/testsuite/lib/target-supports.exp -@@ -9432,7 +9432,7 @@ proc check_effective_target_aarch64_tiny { } { - # various architecture extensions via the .arch_extension pseudo-op. 
- - foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve" -- "i8mm" "f32mm" "f64mm" "bf16" } { -+ "i8mm" "f32mm" "f64mm" "bf16" "sb" } { - eval [string map [list FUNC $aarch64_ext] { - proc check_effective_target_aarch64_asm_FUNC_ok { } { - if { [istarget aarch64*-*-*] } { --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-gcc-poison-system-directories.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-gcc-poison-system-directories.patch deleted file mode 100644 index 30a848601..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0002-gcc-poison-system-directories.patch +++ /dev/null @@ -1,200 +0,0 @@ -From 74cc21f474402cf3578e37e1d7a1a22bbd070f6a Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 08:59:00 +0400 -Subject: [PATCH] gcc: poison-system-directories - -Add /sw/include and /opt/include based on the original -zecke-no-host-includes.patch patch. The original patch checked for -/usr/include, /sw/include and /opt/include and then triggered a failure and -aborted. - -Instead, we add the two missing items to the current scan. If the user -wants this to be a failure, they can add "-Werror=poison-system-directories". - -Signed-off-by: Mark Hatle -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - gcc/common.opt | 4 ++++ - gcc/config.in | 6 ++++++ - gcc/configure | 16 ++++++++++++++++ - gcc/configure.ac | 10 ++++++++++ - gcc/doc/invoke.texi | 9 +++++++++ - gcc/gcc.c | 2 ++ - gcc/incpath.c | 21 +++++++++++++++++++++ - 7 files changed, 68 insertions(+) - -diff --git a/gcc/common.opt b/gcc/common.opt -index 65a82410abc..415f38fa1f4 100644 ---- a/gcc/common.opt -+++ b/gcc/common.opt -@@ -682,6 +682,10 @@ Wreturn-local-addr - Common Var(warn_return_local_addr) Init(1) Warning - Warn about returning a pointer/reference to a local or temporary variable. - -+Wpoison-system-directories -+Common Var(flag_poison_system_directories) Init(1) Warning -+Warn for -I and -L options using system directories if cross compiling -+ - Wshadow - Common Var(warn_shadow) Warning - Warn when one variable shadows another. Same as -Wshadow=global. -diff --git a/gcc/config.in b/gcc/config.in -index 809e7b26823..5adeaeed36b 100644 ---- a/gcc/config.in -+++ b/gcc/config.in -@@ -224,6 +224,12 @@ - #endif - - -+/* Define to warn for use of native system header directories */ -+#ifndef USED_FOR_TARGET -+#undef ENABLE_POISON_SYSTEM_DIRECTORIES -+#endif -+ -+ - /* Define if you want all operations on RTL (the basic data structure of the - optimizer and back end) to be checked for dynamic type safety at runtime. - This is quite expensive. */ -diff --git a/gcc/configure b/gcc/configure -index cd3d9516fce..8de766a942c 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -1010,6 +1010,7 @@ with_system_zlib - enable_maintainer_mode - enable_link_mutex - enable_version_specific_runtime_libs -+enable_poison_system_directories - enable_plugin - enable_host_shared - enable_libquadmath_support -@@ -1766,6 +1767,8 @@ Optional Features: - --enable-version-specific-runtime-libs - specify that runtime libraries should be installed - in a compiler-specific directory -+ --enable-poison-system-directories -+ warn for use of native system header directories - --enable-plugin enable plugin support - --enable-host-shared build host code as shared libraries - --disable-libquadmath-support -@@ -30235,6 +30238,19 @@ if test "${enable_version_specific_runtime_libs+set}" = set; then : - fi - - -+# Check whether --enable-poison-system-directories was given. 
-+if test "${enable_poison_system_directories+set}" = set; then : -+ enableval=$enable_poison_system_directories; -+else -+ enable_poison_system_directories=no -+fi -+ -+if test "x${enable_poison_system_directories}" = "xyes"; then -+ -+$as_echo "#define ENABLE_POISON_SYSTEM_DIRECTORIES 1" >>confdefs.h -+ -+fi -+ - # Substitute configuration variables - - -diff --git a/gcc/configure.ac b/gcc/configure.ac -index 0de3b4bf97b..8bfd6feb780 100644 ---- a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -6595,6 +6595,16 @@ AC_ARG_ENABLE(version-specific-runtime-libs, - [specify that runtime libraries should be - installed in a compiler-specific directory])]) - -+AC_ARG_ENABLE([poison-system-directories], -+ AS_HELP_STRING([--enable-poison-system-directories], -+ [warn for use of native system header directories]),, -+ [enable_poison_system_directories=no]) -+if test "x${enable_poison_system_directories}" = "xyes"; then -+ AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES], -+ [1], -+ [Define to warn for use of native system header directories]) -+fi -+ - # Substitute configuration variables - AC_SUBST(subdirs) - AC_SUBST(srcdir) -diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi -index f98161391a0..f12d8d12150 100644 ---- a/gcc/doc/invoke.texi -+++ b/gcc/doc/invoke.texi -@@ -348,6 +348,7 @@ Objective-C and Objective-C++ Dialects}. - -Wpacked -Wno-packed-bitfield-compat -Wpacked-not-aligned -Wpadded @gol - -Wparentheses -Wno-pedantic-ms-format @gol - -Wpointer-arith -Wno-pointer-compare -Wno-pointer-to-int-cast @gol -+-Wno-poison-system-directories @gol - -Wno-pragmas -Wno-prio-ctor-dtor -Wredundant-decls @gol - -Wrestrict -Wno-return-local-addr -Wreturn-type @gol - -Wno-scalar-storage-order -Wsequence-point @gol -@@ -6924,6 +6925,14 @@ made up of data only and thus requires no special treatment. But, for - most targets, it is made up of code and thus requires the stack to be - made executable in order for the program to work properly. - -+@item -Wno-poison-system-directories -+@opindex Wno-poison-system-directories -+Do not warn for @option{-I} or @option{-L} options using system -+directories such as @file{/usr/include} when cross compiling. This -+option is intended for use in chroot environments when such -+directories contain the correct headers and libraries for the target -+system rather than the host. -+ - @item -Wfloat-equal - @opindex Wfloat-equal - @opindex Wno-float-equal -diff --git a/gcc/gcc.c b/gcc/gcc.c -index 9f790db0daf..b2200c5185a 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -1041,6 +1041,8 @@ proper position among the other output files. */ - "%{fuse-ld=*:-fuse-ld=%*} " LINK_COMPRESS_DEBUG_SPEC \ - "%X %{o*} %{e*} %{N} %{n} %{r}\ - %{s} %{t} %{u*} %{z} %{Z} %{!nostdlib:%{!r:%{!nostartfiles:%S}}} \ -+ %{Wno-poison-system-directories:--no-poison-system-directories} \ -+ %{Werror=poison-system-directories:--error-poison-system-directories} \ - %{static|no-pie|static-pie:} %@{L*} %(mfwrap) %(link_libgcc) " \ - VTABLE_VERIFICATION_SPEC " " SANITIZER_EARLY_SPEC " %o "" \ - %{fopenacc|fopenmp|%:gt(%{ftree-parallelize-loops=*:%*} 1):\ -diff --git a/gcc/incpath.c b/gcc/incpath.c -index 8a2bda00f80..9098ab044ab 100644 ---- a/gcc/incpath.c -+++ b/gcc/incpath.c -@@ -26,6 +26,7 @@ - #include "intl.h" - #include "incpath.h" - #include "cppdefault.h" -+#include "diagnostic-core.h" - - /* Microsoft Windows does not natively support inodes. - VMS has non-numeric inodes. 
*/ -@@ -393,6 +394,26 @@ merge_include_chains (const char *sysroot, cpp_reader *pfile, int verbose) - } - fprintf (stderr, _("End of search list.\n")); - } -+ -+#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES -+ if (flag_poison_system_directories) -+ { -+ struct cpp_dir *p; -+ -+ for (p = heads[INC_QUOTE]; p; p = p->next) -+ { -+ if ((!strncmp (p->name, "/usr/include", 12)) -+ || (!strncmp (p->name, "/usr/local/include", 18)) -+ || (!strncmp (p->name, "/usr/X11R6/include", 18)) -+ || (!strncmp (p->name, "/sw/include", 11)) -+ || (!strncmp (p->name, "/opt/include", 12))) -+ warning (OPT_Wpoison_system_directories, -+ "include location \"%s\" is unsafe for " -+ "cross-compilation", -+ p->name); -+ } -+ } -+#endif - } - - /* Use given -I paths for #include "..." but not #include <...>, and diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch deleted file mode 100644 index 716a36717..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch +++ /dev/null @@ -1,658 +0,0 @@ -Upstream-Status: Backport -Signed-off-by: Ross Burton - -From a5e7efc40ed841934c1d913f39476afa17d8e5f7 Mon Sep 17 00:00:00 2001 -From: Matthew Malcomson -Date: Thu, 9 Jul 2020 09:11:59 +0100 -Subject: [PATCH 3/3] aarch64: Mitigate SLS for BLR instruction - -This patch introduces the mitigation for Straight Line Speculation past -the BLR instruction. - -This mitigation replaces BLR instructions with a BL to a stub which uses -a BR to jump to the original value. These function stubs are then -appended with a speculation barrier to ensure no straight line -speculation happens after these jumps. - -When optimising for speed we use a set of stubs for each function since -this should help the branch predictor make more accurate predictions -about where a stub should branch. - -When optimising for size we use one set of stubs for all functions. -This set of stubs can have human readable names, and we are using -`__call_indirect_x` for register x. - -When BTI branch protection is enabled the BLR instruction can jump to a -`BTI c` instruction using any register, while the BR instruction can -only jump to a `BTI c` instruction using the x16 or x17 registers. -Hence, in order to ensure this transformation is safe we mov the value -of the original register into x16 and use x16 for the BR. - -As an example when optimising for size: -a - BLR x0 -instruction would get transformed to something like - BL __call_indirect_x0 -where __call_indirect_x0 labels a thunk that contains -__call_indirect_x0: - MOV X16, X0 - BR X16 - - -The first version of this patch used local symbols specific to a -compilation unit to try and avoid relocations. -This was mistaken since functions coming from the same compilation unit -can still be in different sections, and the assembler will insert -relocations at jumps between sections. - -On any relocation the linker is permitted to emit a veneer to handle -jumps between symbols that are very far apart. The registers x16 and -x17 may be clobbered by these veneers. -Hence the function stubs cannot rely on the values of x16 and x17 being -the same as just before the function stub is called. - -Similar can be said for the hot/cold partitioning of single functions, -so function-local stubs have the same restriction. 
- -This updated version of the patch never emits function stubs for x16 and -x17, and instead forces other registers to be used. - -Given the above, there is now no benefit to local symbols (since they -are not enough to avoid dealing with linker intricacies). This patch -now uses global symbols with hidden visibility each stored in their own -COMDAT section. This means stubs can be shared between compilation -units while still avoiding the PLT indirection. - -This patch also removes the `__call_indirect_x30` stub (and -function-local equivalent) which would simply jump back to the original -location. - -The function-local stubs are emitted to the assembly output file in one -chunk, which means we need not add the speculation barrier directly -after each one. -This is because we know for certain that the instructions directly after -the BR in all but the last function stub will be from another one of -these stubs and hence will not contain a speculation gadget. -Instead we add a speculation barrier at the end of the sequence of -stubs. - -The global stubs are emitted in COMDAT/.linkonce sections by -themselves so that the linker can remove duplicates from multiple object -files. This means they are not emitted in one chunk, and each one must -include the speculation barrier. - -Another difference is that since the global stubs are shared across -compilation units we do not know that all functions will be targeting an -architecture supporting the SB instruction. -Rather than provide multiple stubs for each architecture, we provide a -stub that will work for all architectures -- using the DSB+ISB barrier. - -This mitigation does not apply for BLR instructions in the following -places: -- Some accesses to thread-local variables use a code sequence with a BLR - instruction. This code sequence is part of the binary interface between - compiler and linker. If this BLR instruction needs to be mitigated, it'd - probably be best to do so in the linker. It seems that the code sequence - for thread-local variable access is unlikely to lead to a Spectre Revalation - Gadget. -- PLT stubs are produced by the linker and each contain a BLR instruction. - It seems that at most only after the last PLT stub a Spectre Revalation - Gadget might appear. - -Testing: - Bootstrap and regtest on AArch64 - (with BOOT_CFLAGS="-mharden-sls=retbr,blr") - Used a temporary hack(1) in gcc-dg.exp to use these options on every - test in the testsuite, a slight modification to emit the speculation - barrier after every function stub, and a script to check that the - output never emitted a BLR, or unmitigated BR or RET instruction. - Similar on an aarch64-none-elf cross-compiler. - -1) Temporary hack emitted a speculation barrier at the end of every stub -function, and used a script to ensure that: - a) Every RET or BR is immediately followed by a speculation barrier. - b) No BLR instruction is emitted by compiler. - -gcc/ChangeLog: - - * config/aarch64/aarch64-protos.h (aarch64_indirect_call_asm): - New declaration. - * config/aarch64/aarch64.c (aarch64_regno_regclass): Handle new - stub registers class. - (aarch64_class_max_nregs): Likewise. - (aarch64_register_move_cost): Likewise. - (aarch64_sls_shared_thunks): Global array to store stub labels. - (aarch64_sls_emit_function_stub): New. - (aarch64_create_blr_label): New. - (aarch64_sls_emit_blr_function_thunks): New. - (aarch64_sls_emit_shared_blr_thunks): New. - (aarch64_asm_file_end): New. - (aarch64_indirect_call_asm): New. 
- (TARGET_ASM_FILE_END): Use aarch64_asm_file_end. - (TARGET_ASM_FUNCTION_EPILOGUE): Use - aarch64_sls_emit_blr_function_thunks. - * config/aarch64/aarch64.h (STB_REGNUM_P): New. - (enum reg_class): Add STUB_REGS class. - (machine_function): Introduce `call_via` array for - function-local stub labels. - * config/aarch64/aarch64.md (*call_insn, *call_value_insn): Use - aarch64_indirect_call_asm to emit code when hardening BLR - instructions. - * config/aarch64/constraints.md (Ucr): New constraint - representing registers for indirect calls. Is GENERAL_REGS - usually, and STUB_REGS when hardening BLR instruction against - SLS. - * config/aarch64/predicates.md (aarch64_general_reg): STUB_REGS class - is also a general register. - -gcc/testsuite/ChangeLog: - - * gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c: New test. - * gcc.target/aarch64/sls-mitigation/sls-miti-blr.c: New test. ---- - gcc/config/aarch64/aarch64-protos.h | 1 + - gcc/config/aarch64/aarch64.c | 225 ++++++++++++++++++++- - gcc/config/aarch64/aarch64.h | 15 ++ - gcc/config/aarch64/aarch64.md | 11 +- - gcc/config/aarch64/constraints.md | 9 + - gcc/config/aarch64/predicates.md | 3 +- - .../aarch64/sls-mitigation/sls-miti-blr-bti.c | 40 ++++ - .../aarch64/sls-mitigation/sls-miti-blr.c | 33 +++ - 8 files changed, 328 insertions(+), 9 deletions(-) - create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c - create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c - -diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h -index ee0ffde..839f801 100644 ---- a/gcc/config/aarch64/aarch64-protos.h -+++ b/gcc/config/aarch64/aarch64-protos.h -@@ -782,6 +782,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; - tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); - - const char *aarch64_sls_barrier (int); -+const char *aarch64_indirect_call_asm (rtx); - extern bool aarch64_harden_sls_retbr_p (void); - extern bool aarch64_harden_sls_blr_p (void); - -diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c -index 2389d49..0f7bba3 100644 ---- a/gcc/config/aarch64/aarch64.c -+++ b/gcc/config/aarch64/aarch64.c -@@ -10605,6 +10605,9 @@ aarch64_label_mentioned_p (rtx x) - enum reg_class - aarch64_regno_regclass (unsigned regno) - { -+ if (STUB_REGNUM_P (regno)) -+ return STUB_REGS; -+ - if (GP_REGNUM_P (regno)) - return GENERAL_REGS; - -@@ -10939,6 +10942,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode) - unsigned int nregs, vec_flags; - switch (regclass) - { -+ case STUB_REGS: - case TAILCALL_ADDR_REGS: - case POINTER_REGS: - case GENERAL_REGS: -@@ -13155,10 +13159,12 @@ aarch64_register_move_cost (machine_mode mode, - = aarch64_tune_params.regmove_cost; - - /* Caller save and pointer regs are equivalent to GENERAL_REGS. */ -- if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS) -+ if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS -+ || to == STUB_REGS) - to = GENERAL_REGS; - -- if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS) -+ if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS -+ || from == STUB_REGS) - from = GENERAL_REGS; - - /* Make RDFFR very expensive. 
In particular, if we know that the FFR -@@ -22957,6 +22963,215 @@ aarch64_sls_barrier (int mitigation_required) - : ""; - } - -+static GTY (()) tree aarch64_sls_shared_thunks[30]; -+static GTY (()) bool aarch64_sls_shared_thunks_needed = false; -+const char *indirect_symbol_names[30] = { -+ "__call_indirect_x0", -+ "__call_indirect_x1", -+ "__call_indirect_x2", -+ "__call_indirect_x3", -+ "__call_indirect_x4", -+ "__call_indirect_x5", -+ "__call_indirect_x6", -+ "__call_indirect_x7", -+ "__call_indirect_x8", -+ "__call_indirect_x9", -+ "__call_indirect_x10", -+ "__call_indirect_x11", -+ "__call_indirect_x12", -+ "__call_indirect_x13", -+ "__call_indirect_x14", -+ "__call_indirect_x15", -+ "", /* "__call_indirect_x16", */ -+ "", /* "__call_indirect_x17", */ -+ "__call_indirect_x18", -+ "__call_indirect_x19", -+ "__call_indirect_x20", -+ "__call_indirect_x21", -+ "__call_indirect_x22", -+ "__call_indirect_x23", -+ "__call_indirect_x24", -+ "__call_indirect_x25", -+ "__call_indirect_x26", -+ "__call_indirect_x27", -+ "__call_indirect_x28", -+ "__call_indirect_x29", -+}; -+ -+/* Function to create a BLR thunk. This thunk is used to mitigate straight -+ line speculation. Instead of a simple BLR that can be speculated past, -+ we emit a BL to this thunk, and this thunk contains a BR to the relevant -+ register. These thunks have the relevant speculation barries put after -+ their indirect branch so that speculation is blocked. -+ -+ We use such a thunk so the speculation barriers are kept off the -+ architecturally executed path in order to reduce the performance overhead. -+ -+ When optimizing for size we use stubs shared by the linked object. -+ When optimizing for performance we emit stubs for each function in the hope -+ that the branch predictor can better train on jumps specific for a given -+ function. */ -+rtx -+aarch64_sls_create_blr_label (int regnum) -+{ -+ gcc_assert (STUB_REGNUM_P (regnum)); -+ if (optimize_function_for_size_p (cfun)) -+ { -+ /* For the thunks shared between different functions in this compilation -+ unit we use a named symbol -- this is just for users to more easily -+ understand the generated assembly. */ -+ aarch64_sls_shared_thunks_needed = true; -+ const char *thunk_name = indirect_symbol_names[regnum]; -+ if (aarch64_sls_shared_thunks[regnum] == NULL) -+ { -+ /* Build a decl representing this function stub and record it for -+ later. We build a decl here so we can use the GCC machinery for -+ handling sections automatically (through `get_named_section` and -+ `make_decl_one_only`). That saves us a lot of trouble handling -+ the specifics of different output file formats. */ -+ tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, -+ get_identifier (thunk_name), -+ build_function_type_list (void_type_node, -+ NULL_TREE)); -+ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL, -+ NULL_TREE, void_type_node); -+ TREE_PUBLIC (decl) = 1; -+ TREE_STATIC (decl) = 1; -+ DECL_IGNORED_P (decl) = 1; -+ DECL_ARTIFICIAL (decl) = 1; -+ make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl)); -+ resolve_unique_section (decl, 0, false); -+ aarch64_sls_shared_thunks[regnum] = decl; -+ } -+ -+ return gen_rtx_SYMBOL_REF (Pmode, thunk_name); -+ } -+ -+ if (cfun->machine->call_via[regnum] == NULL) -+ cfun->machine->call_via[regnum] -+ = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ()); -+ return cfun->machine->call_via[regnum]; -+} -+ -+/* Helper function for aarch64_sls_emit_blr_function_thunks and -+ aarch64_sls_emit_shared_blr_thunks below. 
*/ -+static void -+aarch64_sls_emit_function_stub (FILE *out_file, int regnum) -+{ -+ /* Save in x16 and branch to that function so this transformation does -+ not prevent jumping to `BTI c` instructions. */ -+ asm_fprintf (out_file, "\tmov\tx16, x%d\n", regnum); -+ asm_fprintf (out_file, "\tbr\tx16\n"); -+} -+ -+/* Emit all BLR stubs for this particular function. -+ Here we emit all the BLR stubs needed for the current function. Since we -+ emit these stubs in a consecutive block we know there will be no speculation -+ gadgets between each stub, and hence we only emit a speculation barrier at -+ the end of the stub sequences. -+ -+ This is called in the TARGET_ASM_FUNCTION_EPILOGUE hook. */ -+void -+aarch64_sls_emit_blr_function_thunks (FILE *out_file) -+{ -+ if (! aarch64_harden_sls_blr_p ()) -+ return; -+ -+ bool any_functions_emitted = false; -+ /* We must save and restore the current function section since this assembly -+ is emitted at the end of the function. This means it can be emitted *just -+ after* the cold section of a function. That cold part would be emitted in -+ a different section. That switch would trigger a `.cfi_endproc` directive -+ to be emitted in the original section and a `.cfi_startproc` directive to -+ be emitted in the new section. Switching to the original section without -+ restoring would mean that the `.cfi_endproc` emitted as a function ends -+ would happen in a different section -- leaving an unmatched -+ `.cfi_startproc` in the cold text section and an unmatched `.cfi_endproc` -+ in the standard text section. */ -+ section *save_text_section = in_section; -+ switch_to_section (function_section (current_function_decl)); -+ for (int regnum = 0; regnum < 30; ++regnum) -+ { -+ rtx specu_label = cfun->machine->call_via[regnum]; -+ if (specu_label == NULL) -+ continue; -+ -+ targetm.asm_out.print_operand (out_file, specu_label, 0); -+ asm_fprintf (out_file, ":\n"); -+ aarch64_sls_emit_function_stub (out_file, regnum); -+ any_functions_emitted = true; -+ } -+ if (any_functions_emitted) -+ /* Can use the SB if needs be here, since this stub will only be used -+ by the current function, and hence for the current target. */ -+ asm_fprintf (out_file, "\t%s\n", aarch64_sls_barrier (true)); -+ switch_to_section (save_text_section); -+} -+ -+/* Emit shared BLR stubs for the current compilation unit. -+ Over the course of compiling this unit we may have converted some BLR -+ instructions to a BL to a shared stub function. This is where we emit those -+ stub functions. -+ This function is for the stubs shared between different functions in this -+ compilation unit. We share when optimizing for size instead of speed. -+ -+ This function is called through the TARGET_ASM_FILE_END hook. */ -+void -+aarch64_sls_emit_shared_blr_thunks (FILE *out_file) -+{ -+ if (! aarch64_sls_shared_thunks_needed) -+ return; -+ -+ for (int regnum = 0; regnum < 30; ++regnum) -+ { -+ tree decl = aarch64_sls_shared_thunks[regnum]; -+ if (!decl) -+ continue; -+ -+ const char *name = indirect_symbol_names[regnum]; -+ switch_to_section (get_named_section (decl, NULL, 0)); -+ ASM_OUTPUT_ALIGN (out_file, 2); -+ targetm.asm_out.globalize_label (out_file, name); -+ /* Only emits if the compiler is configured for an assembler that can -+ handle visibility directives. 
*/ -+ targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN); -+ ASM_OUTPUT_TYPE_DIRECTIVE (out_file, name, "function"); -+ ASM_OUTPUT_LABEL (out_file, name); -+ aarch64_sls_emit_function_stub (out_file, regnum); -+ /* Use the most conservative target to ensure it can always be used by any -+ function in the translation unit. */ -+ asm_fprintf (out_file, "\tdsb\tsy\n\tisb\n"); -+ ASM_DECLARE_FUNCTION_SIZE (out_file, name, decl); -+ } -+} -+ -+/* Implement TARGET_ASM_FILE_END. */ -+void -+aarch64_asm_file_end () -+{ -+ aarch64_sls_emit_shared_blr_thunks (asm_out_file); -+ /* Since this function will be called for the ASM_FILE_END hook, we ensure -+ that what would be called otherwise (e.g. `file_end_indicate_exec_stack` -+ for FreeBSD) still gets called. */ -+#ifdef TARGET_ASM_FILE_END -+ TARGET_ASM_FILE_END (); -+#endif -+} -+ -+const char * -+aarch64_indirect_call_asm (rtx addr) -+{ -+ gcc_assert (REG_P (addr)); -+ if (aarch64_harden_sls_blr_p ()) -+ { -+ rtx stub_label = aarch64_sls_create_blr_label (REGNO (addr)); -+ output_asm_insn ("bl\t%0", &stub_label); -+ } -+ else -+ output_asm_insn ("blr\t%0", &addr); -+ return ""; -+} -+ - /* Target-specific selftests. */ - - #if CHECKING_P -@@ -23507,6 +23722,12 @@ aarch64_libgcc_floating_mode_supported_p - #undef TARGET_MD_ASM_ADJUST - #define TARGET_MD_ASM_ADJUST arm_md_asm_adjust - -+#undef TARGET_ASM_FILE_END -+#define TARGET_ASM_FILE_END aarch64_asm_file_end -+ -+#undef TARGET_ASM_FUNCTION_EPILOGUE -+#define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks -+ - struct gcc_target targetm = TARGET_INITIALIZER; - - #include "gt-aarch64.h" -diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h -index 8e0fc37..7331450 100644 ---- a/gcc/config/aarch64/aarch64.h -+++ b/gcc/config/aarch64/aarch64.h -@@ -643,6 +643,16 @@ extern unsigned aarch64_architecture_version; - #define GP_REGNUM_P(REGNO) \ - (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM)) - -+/* Registers known to be preserved over a BL instruction. This consists of the -+ GENERAL_REGS without x16, x17, and x30. The x30 register is changed by the -+ BL instruction itself, while the x16 and x17 registers may be used by -+ veneers which can be inserted by the linker. */ -+#define STUB_REGNUM_P(REGNO) \ -+ (GP_REGNUM_P (REGNO) \ -+ && (REGNO) != R16_REGNUM \ -+ && (REGNO) != R17_REGNUM \ -+ && (REGNO) != R30_REGNUM) \ -+ - #define FP_REGNUM_P(REGNO) \ - (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM)) - -@@ -667,6 +677,7 @@ enum reg_class - { - NO_REGS, - TAILCALL_ADDR_REGS, -+ STUB_REGS, - GENERAL_REGS, - STACK_REG, - POINTER_REGS, -@@ -689,6 +700,7 @@ enum reg_class - { \ - "NO_REGS", \ - "TAILCALL_ADDR_REGS", \ -+ "STUB_REGS", \ - "GENERAL_REGS", \ - "STACK_REG", \ - "POINTER_REGS", \ -@@ -708,6 +720,7 @@ enum reg_class - { \ - { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ - { 0x00030000, 0x00000000, 0x00000000 }, /* TAILCALL_ADDR_REGS */\ -+ { 0x3ffcffff, 0x00000000, 0x00000000 }, /* STUB_REGS */ \ - { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \ - { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \ - { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \ -@@ -862,6 +875,8 @@ typedef struct GTY (()) machine_function - struct aarch64_frame frame; - /* One entry for each hard register. */ - bool reg_is_wrapped_separately[LAST_SAVED_REGNUM]; -+ /* One entry for each general purpose register. 
*/ -+ rtx call_via[SP_REGNUM]; - bool label_is_assembled; - } machine_function; - #endif -diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md -index dda04ee..43da754 100644 ---- a/gcc/config/aarch64/aarch64.md -+++ b/gcc/config/aarch64/aarch64.md -@@ -1022,16 +1022,15 @@ - ) - - (define_insn "*call_insn" -- [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "r, Usf")) -+ [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf")) - (match_operand 1 "" "")) - (unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI) - (clobber (reg:DI LR_REGNUM))] - "" - "@ -- blr\\t%0 -+ * return aarch64_indirect_call_asm (operands[0]); - bl\\t%c0" -- [(set_attr "type" "call, call")] --) -+ [(set_attr "type" "call, call")]) - - (define_expand "call_value" - [(parallel -@@ -1050,13 +1049,13 @@ - - (define_insn "*call_value_insn" - [(set (match_operand 0 "" "") -- (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "r, Usf")) -+ (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf")) - (match_operand 2 "" ""))) - (unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI) - (clobber (reg:DI LR_REGNUM))] - "" - "@ -- blr\\t%1 -+ * return aarch64_indirect_call_asm (operands[1]); - bl\\t%c1" - [(set_attr "type" "call, call")] - ) -diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md -index d993268..8cc6f50 100644 ---- a/gcc/config/aarch64/constraints.md -+++ b/gcc/config/aarch64/constraints.md -@@ -24,6 +24,15 @@ - (define_register_constraint "Ucs" "TAILCALL_ADDR_REGS" - "@internal Registers suitable for an indirect tail call") - -+(define_register_constraint "Ucr" -+ "aarch64_harden_sls_blr_p () ? STUB_REGS : GENERAL_REGS" -+ "@internal Registers to be used for an indirect call. -+ This is usually the general registers, but when we are hardening against -+ Straight Line Speculation we disallow x16, x17, and x30 so we can use -+ indirection stubs. These indirection stubs cannot use the above registers -+ since they will be reached by a BL that may have to go through a linker -+ veneer.") -+ - (define_register_constraint "w" "FP_REGS" - "Floating point and SIMD vector registers.") - -diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md -index 215fcec..1754b1e 100644 ---- a/gcc/config/aarch64/predicates.md -+++ b/gcc/config/aarch64/predicates.md -@@ -32,7 +32,8 @@ - - (define_predicate "aarch64_general_reg" - (and (match_operand 0 "register_operand") -- (match_test "REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS"))) -+ (match_test "REGNO_REG_CLASS (REGNO (op)) == STUB_REGS -+ || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS"))) - - ;; Return true if OP a (const_int 0) operand. - (define_predicate "const0_operand" -diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c -new file mode 100644 -index 0000000..b1fb754 ---- /dev/null -+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c -@@ -0,0 +1,40 @@ -+/* { dg-do compile } */ -+/* { dg-additional-options "-mharden-sls=blr -mbranch-protection=bti" } */ -+/* -+ Ensure that the SLS hardening of BLR leaves no BLR instructions. -+ Here we also check that there are no BR instructions with anything except an -+ x16 or x17 register. 
This is because a `BTI c` instruction can be branched -+ to using a BLR instruction using any register, but can only be branched to -+ with a BR using an x16 or x17 register. -+ */ -+typedef int (foo) (int, int); -+typedef void (bar) (int, int); -+struct sls_testclass { -+ foo *x; -+ bar *y; -+ int left; -+ int right; -+}; -+ -+/* We test both RTL patterns for a call which returns a value and a call which -+ does not. */ -+int blr_call_value (struct sls_testclass x) -+{ -+ int retval = x.x(x.left, x.right); -+ if (retval % 10) -+ return 100; -+ return 9; -+} -+ -+int blr_call (struct sls_testclass x) -+{ -+ x.y(x.left, x.right); -+ if (x.left % 10) -+ return 100; -+ return 9; -+} -+ -+/* { dg-final { scan-assembler-not {\tblr\t} } } */ -+/* { dg-final { scan-assembler-not {\tbr\tx(?!16|17)} } } */ -+/* { dg-final { scan-assembler {\tbr\tx(16|17)} } } */ -+ -diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c -new file mode 100644 -index 0000000..88bafff ---- /dev/null -+++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c -@@ -0,0 +1,33 @@ -+/* { dg-additional-options "-mharden-sls=blr -save-temps" } */ -+/* Ensure that the SLS hardening of BLR leaves no BLR instructions. -+ We only test that all BLR instructions have been removed, not that the -+ resulting code makes sense. */ -+typedef int (foo) (int, int); -+typedef void (bar) (int, int); -+struct sls_testclass { -+ foo *x; -+ bar *y; -+ int left; -+ int right; -+}; -+ -+/* We test both RTL patterns for a call which returns a value and a call which -+ does not. */ -+int blr_call_value (struct sls_testclass x) -+{ -+ int retval = x.x(x.left, x.right); -+ if (retval % 10) -+ return 100; -+ return 9; -+} -+ -+int blr_call (struct sls_testclass x) -+{ -+ x.y(x.left, x.right); -+ if (x.left % 10) -+ return 100; -+ return 9; -+} -+ -+/* { dg-final { scan-assembler-not {\tblr\t} } } */ -+/* { dg-final { scan-assembler {\tbr\tx[0-9][0-9]?} } } */ --- -2.7.4 - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch deleted file mode 100644 index 27237feb5..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch +++ /dev/null @@ -1,70 +0,0 @@ -From 6e3395c0bc933bdc3242d1dead4896d0aa4e11a8 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:08:31 +0400 -Subject: [PATCH] gcc-4.3.3: SYSROOT_CFLAGS_FOR_TARGET - -Before committing, I noticed that PR/32161 was marked as a dup of PR/32009, but my previous patch did not fix it. - -This alternative patch is better because it lets you just use CFLAGS_FOR_TARGET to set the compilation flags for libgcc. Since bootstrapped target libraries are never compiled with the native compiler, it makes little sense to use different flags for stage1 and later stages. And it also makes little sense to use a different variable than CFLAGS_FOR_TARGET. - -Other changes I had to do include: - -- moving the creation of default CFLAGS_FOR_TARGET from Makefile.am to configure.ac, because otherwise the BOOT_CFLAGS are substituted into CFLAGS_FOR_TARGET (which is "-O2 -g $(CFLAGS)") via $(CFLAGS). It is also cleaner this way though. - -- passing the right CFLAGS to configure scripts as exported environment variables - -I also stopped passing LIBCFLAGS to configure scripts since they are unused in the whole src tree. 
And I updated the documentation as H-P reminded me to do. - -Bootstrapped/regtested i686-pc-linux-gnu, will commit to 4.4 shortly. Ok for 4.3? - -Signed-off-by: Paolo Bonzini -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - configure | 32 ++++++++++++++++++++++++++++++++ - 1 file changed, 32 insertions(+) - -diff --git a/configure b/configure -index 226a64939d1..b31dc137fc9 100755 ---- a/configure -+++ b/configure -@@ -6971,6 +6971,38 @@ fi - - - -+# During gcc bootstrap, if we use some random cc for stage1 then CFLAGS -+# might be empty or "-g". We don't require a C++ compiler, so CXXFLAGS -+# might also be empty (or "-g", if a non-GCC C++ compiler is in the path). -+# We want to ensure that TARGET libraries (which we know are built with -+# gcc) are built with "-O2 -g", so include those options when setting -+# CFLAGS_FOR_TARGET and CXXFLAGS_FOR_TARGET. -+if test "x$CFLAGS_FOR_TARGET" = x; then -+ CFLAGS_FOR_TARGET=$CFLAGS -+ case " $CFLAGS " in -+ *" -O2 "*) ;; -+ *) CFLAGS_FOR_TARGET="-O2 $CFLAGS" ;; -+ esac -+ case " $CFLAGS " in -+ *" -g "* | *" -g3 "*) ;; -+ *) CFLAGS_FOR_TARGET="-g $CFLAGS" ;; -+ esac -+fi -+ -+ -+if test "x$CXXFLAGS_FOR_TARGET" = x; then -+ CXXFLAGS_FOR_TARGET=$CXXFLAGS -+ case " $CXXFLAGS " in -+ *" -O2 "*) ;; -+ *) CXXFLAGS_FOR_TARGET="-O2 $CXXFLAGS" ;; -+ esac -+ case " $CXXFLAGS " in -+ *" -g "* | *" -g3 "*) ;; -+ *) CXXFLAGS_FOR_TARGET="-g $CXXFLAGS" ;; -+ esac -+fi -+ -+ - # Handle --with-headers=XXX. If the value is not "yes", the contents of - # the named directory are copied to $(tooldir)/sys-include. - if test x"${with_headers}" != x && test x"${with_headers}" != xno ; then diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0004-64-bit-multilib-hack.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0004-64-bit-multilib-hack.patch deleted file mode 100644 index 7c751bef6..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0004-64-bit-multilib-hack.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 85a7c5aeb82ed61e6ef6d8e061b9da9e6a4a652c Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:10:06 +0400 -Subject: [PATCH] 64-bit multilib hack. - -GCC has internal multilib handling code but it assumes a very specific rigid directory -layout. The build system implementation of multilib layout is very generic and allows -complete customisation of the library directories. - -This patch is a partial solution to allow any custom directories to be passed into gcc -and handled correctly. It forces gcc to use the base_libdir (which is the current -directory, "."). We need to do this for each multilib that is configured as we don't -know which compiler options may be being passed into the compiler. Since we have a compiler -per mulitlib at this point that isn't an issue. - -The one problem is the target compiler is only going to work for the default multlilib at -this point. Ideally we'd figure out which multilibs were being enabled with which paths -and be able to patch these entries with a complete set of correct paths but this we -don't have such code at this point. This is something the target gcc recipe should do -and override these platform defaults in its build config. 
- -Do same for riscv64 and aarch64 - -RP 15/8/11 - -Upstream-Status: Inappropriate[OE-Specific] - -Signed-off-by: Khem Raj -Signed-off-by: Elvis Dowson -Signed-off-by: Mark Hatle ---- - gcc/config/aarch64/t-aarch64-linux | 8 ++++---- - gcc/config/i386/t-linux64 | 6 ++---- - gcc/config/mips/t-linux64 | 10 +++------- - gcc/config/riscv/t-linux | 6 ++++-- - gcc/config/rs6000/t-linux64 | 5 ++--- - 5 files changed, 15 insertions(+), 20 deletions(-) - -diff --git a/gcc/config/aarch64/t-aarch64-linux b/gcc/config/aarch64/t-aarch64-linux -index 83e59e33b85..b1356be1fb4 100644 ---- a/gcc/config/aarch64/t-aarch64-linux -+++ b/gcc/config/aarch64/t-aarch64-linux -@@ -21,8 +21,8 @@ - LIB1ASMSRC = aarch64/lib1funcs.asm - LIB1ASMFUNCS = _aarch64_sync_cache_range - --AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be) --MULTILIB_OSDIRNAMES = mabi.lp64=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu) --MULTIARCH_DIRNAME = $(call if_multiarch,aarch64$(AARCH_BE)-linux-gnu) -+#AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be) -+#MULTILIB_OSDIRNAMES = mabi.lp64=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu) -+#MULTIARCH_DIRNAME = $(call if_multiarch,aarch64$(AARCH_BE)-linux-gnu) - --MULTILIB_OSDIRNAMES += mabi.ilp32=../libilp32$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu_ilp32) -+#MULTILIB_OSDIRNAMES += mabi.ilp32=../libilp32$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu_ilp32) -diff --git a/gcc/config/i386/t-linux64 b/gcc/config/i386/t-linux64 -index 1171e218578..5e057b7e5db 100644 ---- a/gcc/config/i386/t-linux64 -+++ b/gcc/config/i386/t-linux64 -@@ -32,7 +32,5 @@ - # - comma=, - MULTILIB_OPTIONS = $(subst $(comma),/,$(TM_MULTILIB_CONFIG)) --MULTILIB_DIRNAMES = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS))) --MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-linux-gnu) --MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-linux-gnu) --MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-linux-gnux32) -+MULTILIB_DIRNAMES = . . -+MULTILIB_OSDIRNAMES = ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) -diff --git a/gcc/config/mips/t-linux64 b/gcc/config/mips/t-linux64 -index ceb58d3b5f3..43fe2bf28ab 100644 ---- a/gcc/config/mips/t-linux64 -+++ b/gcc/config/mips/t-linux64 -@@ -17,10 +17,6 @@ - # . - - MULTILIB_OPTIONS = mabi=n32/mabi=32/mabi=64 --MULTILIB_DIRNAMES = n32 32 64 --MIPS_EL = $(if $(filter %el, $(firstword $(subst -, ,$(target)))),el) --MIPS_SOFT = $(if $(strip $(filter MASK_SOFT_FLOAT_ABI, $(target_cpu_default)) $(filter soft, $(with_float))),soft) --MULTILIB_OSDIRNAMES = \ -- ../lib32$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabin32$(MIPS_SOFT)) \ -- ../lib$(call if_multiarch,:mips$(MIPS_EL)-linux-gnu$(MIPS_SOFT)) \ -- ../lib64$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabi64$(MIPS_SOFT)) -+MULTILIB_DIRNAMES = . . . -+MULTILIB_OSDIRNAMES = ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) -+ -diff --git a/gcc/config/riscv/t-linux b/gcc/config/riscv/t-linux -index 216d2776a18..e4d817621fc 100644 ---- a/gcc/config/riscv/t-linux -+++ b/gcc/config/riscv/t-linux -@@ -1,3 +1,5 @@ - # Only XLEN and ABI affect Linux multilib dir names, e.g. 
/lib32/ilp32d/ --MULTILIB_DIRNAMES := $(patsubst rv32%,lib32,$(patsubst rv64%,lib64,$(MULTILIB_DIRNAMES))) --MULTILIB_OSDIRNAMES := $(patsubst lib%,../lib%,$(MULTILIB_DIRNAMES)) -+#MULTILIB_DIRNAMES := $(patsubst rv32%,lib32,$(patsubst rv64%,lib64,$(MULTILIB_DIRNAMES))) -+MULTILIB_DIRNAMES := . . -+#MULTILIB_OSDIRNAMES := $(patsubst lib%,../lib%,$(MULTILIB_DIRNAMES)) -+MULTILIB_OSDIRNAMES := ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) -diff --git a/gcc/config/rs6000/t-linux64 b/gcc/config/rs6000/t-linux64 -index 264a7e27524..dc9d440f66b 100644 ---- a/gcc/config/rs6000/t-linux64 -+++ b/gcc/config/rs6000/t-linux64 -@@ -26,10 +26,9 @@ - # MULTILIB_OSDIRNAMES according to what is found on the target. - - MULTILIB_OPTIONS := m64/m32 --MULTILIB_DIRNAMES := 64 32 -+MULTILIB_DIRNAMES := . . - MULTILIB_EXTRA_OPTS := --MULTILIB_OSDIRNAMES := m64=../lib64$(call if_multiarch,:powerpc64-linux-gnu) --MULTILIB_OSDIRNAMES += m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:powerpc-linux-gnu) -+MULTILIB_OSDIRNAMES := ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) - - rs6000-linux.o: $(srcdir)/config/rs6000/rs6000-linux.c - $(COMPILE) $< diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0005-optional-libstdc.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0005-optional-libstdc.patch deleted file mode 100644 index 4020c9e3c..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0005-optional-libstdc.patch +++ /dev/null @@ -1,122 +0,0 @@ -From 6ddfb0bfcd1eea71acd37ab06f7a4510b9f1d12b Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:12:56 +0400 -Subject: [PATCH] optional libstdc - -gcc-runtime builds libstdc++ separately from gcc-cross-*. Its configure tests using g++ -will not run correctly since by default the linker will try to link against libstdc++ -which shouldn't exist yet. We need an option to disable -lstdc++ -option whilst leaving -lc, -lgcc and other automatic library dependencies added by gcc -driver. This patch adds such an option which only disables the -lstdc++. - -A "standard" gcc build uses xgcc and hence avoids this. We should ask upstream how to -do this officially, the likely answer is don't build libstdc++ separately. - -RP 29/6/10 - -Signed-off-by: Khem Raj - -Upstream-Status: Inappropriate [embedded specific] ---- - gcc/c-family/c.opt | 4 ++++ - gcc/cp/g++spec.c | 1 + - gcc/doc/invoke.texi | 32 +++++++++++++++++++++++++++++++- - gcc/gcc.c | 1 + - 4 files changed, 37 insertions(+), 1 deletion(-) - -diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt -index c49da99d395..35f712e2c84 100644 ---- a/gcc/c-family/c.opt -+++ b/gcc/c-family/c.opt -@@ -2025,6 +2025,10 @@ nostdinc++ - C++ ObjC++ - Do not search standard system include directories for C++. - -+nostdlib++ -+Driver -+Do not link standard C++ runtime library -+ - o - C ObjC C++ ObjC++ Joined Separate - ; Documented in common.opt -diff --git a/gcc/cp/g++spec.c b/gcc/cp/g++spec.c -index 0ab63bcd211..7b081e9e4f0 100644 ---- a/gcc/cp/g++spec.c -+++ b/gcc/cp/g++spec.c -@@ -137,6 +137,7 @@ lang_specific_driver (struct cl_decoded_option **in_decoded_options, - switch (decoded_options[i].opt_index) - { - case OPT_nostdlib: -+ case OPT_nostdlib__: - case OPT_nodefaultlibs: - library = -1; - break; -diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi -index f12d8d12150..cf6cb428e7d 100644 ---- a/gcc/doc/invoke.texi -+++ b/gcc/doc/invoke.texi -@@ -230,6 +230,9 @@ in the following sections. 
- -fno-weak -nostdinc++ @gol - -fvisibility-inlines-hidden @gol - -fvisibility-ms-compat @gol -+-fvtable-verify=@r{[}std@r{|}preinit@r{|}none@r{]} @gol -+-fvtv-counts -fvtv-debug @gol -+-nostdlib++ @gol - -fext-numeric-literals @gol - -Wabi-tag -Wcatch-value -Wcatch-value=@var{n} @gol - -Wno-class-conversion -Wclass-memaccess @gol -@@ -599,7 +602,7 @@ Objective-C and Objective-C++ Dialects}. - -pie -pthread -r -rdynamic @gol - -s -static -static-pie -static-libgcc -static-libstdc++ @gol - -static-libasan -static-libtsan -static-liblsan -static-libubsan @gol ---shared -shared-libgcc -symbolic @gol -+-shared -shared-libgcc -symbolic -nostdlib++ @gol - -T @var{script} -Wl,@var{option} -Xlinker @var{option} @gol - -u @var{symbol} -z @var{keyword}} - -@@ -14407,6 +14410,33 @@ Specify that the program entry point is @var{entry}. The argument is - interpreted by the linker; the GNU linker accepts either a symbol name - or an address. - -+@item -nostdlib++ -+@opindex nostdlib++ -+Do not use the standard system C++ runtime libraries when linking. -+Only the libraries you specify will be passed to the linker. -+ -+@cindex @option{-lgcc}, use with @option{-nostdlib} -+@cindex @option{-nostdlib} and unresolved references -+@cindex unresolved references and @option{-nostdlib} -+@cindex @option{-lgcc}, use with @option{-nodefaultlibs} -+@cindex @option{-nodefaultlibs} and unresolved references -+@cindex unresolved references and @option{-nodefaultlibs} -+One of the standard libraries bypassed by @option{-nostdlib} and -+@option{-nodefaultlibs} is @file{libgcc.a}, a library of internal subroutines -+which GCC uses to overcome shortcomings of particular machines, or special -+needs for some languages. -+(@xref{Interface,,Interfacing to GCC Output,gccint,GNU Compiler -+Collection (GCC) Internals}, -+for more discussion of @file{libgcc.a}.) -+In most cases, you need @file{libgcc.a} even when you want to avoid -+other standard libraries. In other words, when you specify @option{-nostdlib} -+or @option{-nodefaultlibs} you should usually specify @option{-lgcc} as well. -+This ensures that you have no unresolved references to internal GCC -+library subroutines. -+(An example of such an internal subroutine is @code{__main}, used to ensure C++ -+constructors are called; @pxref{Collect2,,@code{collect2}, gccint, -+GNU Compiler Collection (GCC) Internals}.) -+ - @item -pie - @opindex pie - Produce a dynamically linked position independent executable on targets -diff --git a/gcc/gcc.c b/gcc/gcc.c -index b2200c5185a..f8be58ce0a6 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -1051,6 +1051,7 @@ proper position among the other output files. */ - %(mflib) " STACK_SPLIT_SPEC "\ - %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} " SANITIZER_SPEC " \ - %{!nostdlib:%{!r:%{!nodefaultlibs:%(link_ssp) %(link_gcc_c_sequence)}}}\ -+ %{!nostdlib++:}\ - %{!nostdlib:%{!r:%{!nostartfiles:%E}}} %{T*} \n%(post_link) }}}}}}" - #endif - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0006-COLLECT_GCC_OPTIONS.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0006-COLLECT_GCC_OPTIONS.patch deleted file mode 100644 index 9fbbe8070..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0006-COLLECT_GCC_OPTIONS.patch +++ /dev/null @@ -1,35 +0,0 @@ -From a6c90d3a9c5010b4aa7cc30467cf81ca7e0f430e Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:16:28 +0400 -Subject: [PATCH] COLLECT_GCC_OPTIONS - -This patch adds --sysroot into COLLECT_GCC_OPTIONS which is used to -invoke collect2. 
- -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - gcc/gcc.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/gcc/gcc.c b/gcc/gcc.c -index f8be58ce0a6..48b0f9dde81 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -4806,6 +4806,15 @@ set_collect_gcc_options (void) - sizeof ("COLLECT_GCC_OPTIONS=") - 1); - - first_time = TRUE; -+#ifdef HAVE_LD_SYSROOT -+ if (target_system_root_changed && target_system_root) -+ { -+ obstack_grow (&collect_obstack, "'--sysroot=", sizeof("'--sysroot=")-1); -+ obstack_grow (&collect_obstack, target_system_root,strlen(target_system_root)); -+ obstack_grow (&collect_obstack, "'", 1); -+ first_time = FALSE; -+ } -+#endif - for (i = 0; (int) i < n_switches; i++) - { - const char *const *args; diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch deleted file mode 100644 index a764bdd0f..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch +++ /dev/null @@ -1,92 +0,0 @@ -From 5670d4489f119d2da661734895ac0be99b606d1b Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:17:25 +0400 -Subject: [PATCH] Use the defaults.h in ${B} instead of ${S}, and t-oe in ${B} - -Use the defaults.h in ${B} instead of ${S}, and t-oe in ${B}, so that -the source can be shared between gcc-cross-initial, -gcc-cross-intermediate, gcc-cross, gcc-runtime, and also the sdk build. - -Signed-off-by: Khem Raj - -Upstream-Status: Pending - -While compiling gcc-crosssdk-initial-x86_64 on some host, there is -occasionally failure that test the existance of default.h doesn't -work, the reason is tm_include_list='** defaults.h' rather than -tm_include_list='** ./defaults.h' - -So we add the test condition for this situation. 
-Signed-off-by: Hongxu Jia ---- - gcc/Makefile.in | 2 +- - gcc/configure | 4 ++-- - gcc/configure.ac | 4 ++-- - gcc/mkconfig.sh | 4 ++-- - 4 files changed, 7 insertions(+), 7 deletions(-) - -diff --git a/gcc/Makefile.in b/gcc/Makefile.in -index 543b477ff18..a67d2cc18d6 100644 ---- a/gcc/Makefile.in -+++ b/gcc/Makefile.in -@@ -540,7 +540,7 @@ TARGET_SYSTEM_ROOT = @TARGET_SYSTEM_ROOT@ - TARGET_SYSTEM_ROOT_DEFINE = @TARGET_SYSTEM_ROOT_DEFINE@ - - xmake_file=@xmake_file@ --tmake_file=@tmake_file@ -+tmake_file=@tmake_file@ ./t-oe - TM_ENDIAN_CONFIG=@TM_ENDIAN_CONFIG@ - TM_MULTILIB_CONFIG=@TM_MULTILIB_CONFIG@ - TM_MULTILIB_EXCEPTIONS_CONFIG=@TM_MULTILIB_EXCEPTIONS_CONFIG@ -diff --git a/gcc/configure b/gcc/configure -index 8de766a942c..b26e8fc7fee 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -12705,8 +12705,8 @@ for f in $tm_file; do - tm_include_list="${tm_include_list} $f" - ;; - defaults.h ) -- tm_file_list="${tm_file_list} \$(srcdir)/$f" -- tm_include_list="${tm_include_list} $f" -+ tm_file_list="${tm_file_list} ./$f" -+ tm_include_list="${tm_include_list} ./$f" - ;; - * ) - tm_file_list="${tm_file_list} \$(srcdir)/config/$f" -diff --git a/gcc/configure.ac b/gcc/configure.ac -index 8bfd6feb780..26fa46802c7 100644 ---- a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -2138,8 +2138,8 @@ for f in $tm_file; do - tm_include_list="${tm_include_list} $f" - ;; - defaults.h ) -- tm_file_list="${tm_file_list} \$(srcdir)/$f" -- tm_include_list="${tm_include_list} $f" -+ tm_file_list="${tm_file_list} ./$f" -+ tm_include_list="${tm_include_list} ./$f" - ;; - * ) - tm_file_list="${tm_file_list} \$(srcdir)/config/$f" -diff --git a/gcc/mkconfig.sh b/gcc/mkconfig.sh -index d2c677a4a42..d03852481cb 100644 ---- a/gcc/mkconfig.sh -+++ b/gcc/mkconfig.sh -@@ -77,7 +77,7 @@ if [ -n "$HEADERS" ]; then - if [ $# -ge 1 ]; then - echo '#ifdef IN_GCC' >> ${output}T - for file in "$@"; do -- if test x"$file" = x"defaults.h"; then -+ if test x"$file" = x"./defaults.h" -o x"$file" = x"defaults.h"; then - postpone_defaults_h="yes" - else - echo "# include \"$file\"" >> ${output}T -@@ -106,7 +106,7 @@ esac - - # If we postponed including defaults.h, add the #include now. - if test x"$postpone_defaults_h" = x"yes"; then -- echo "# include \"defaults.h\"" >> ${output}T -+ echo "# include \"./defaults.h\"" >> ${output}T - fi - - # Add multiple inclusion protection guard, part two. diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0008-fortran-cross-compile-hack.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0008-fortran-cross-compile-hack.patch deleted file mode 100644 index 714db3bef..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0008-fortran-cross-compile-hack.patch +++ /dev/null @@ -1,43 +0,0 @@ -From f05062625e7a4751be723595a2f7a4b7fbeff311 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:20:01 +0400 -Subject: [PATCH] fortran cross-compile hack. - -* Fortran would have searched for arm-angstrom-gnueabi-gfortran but would have used -used gfortan. For gcc_4.2.2.bb we want to use the gfortran compiler from our cross -directory. 
- -Signed-off-by: Khem Raj - -Upstream-Status: Inappropriate [embedded specific] ---- - libgfortran/configure | 2 +- - libgfortran/configure.ac | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/libgfortran/configure b/libgfortran/configure -index b4cf854ddb3..e8e0ac3b1cf 100755 ---- a/libgfortran/configure -+++ b/libgfortran/configure -@@ -13090,7 +13090,7 @@ esac - - # We need gfortran to compile parts of the library - #AC_PROG_FC(gfortran) --FC="$GFORTRAN" -+#FC="$GFORTRAN" - ac_ext=${ac_fc_srcext-f} - ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' - ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' -diff --git a/libgfortran/configure.ac b/libgfortran/configure.ac -index 711dc60ff78..3c9bbfbf47d 100644 ---- a/libgfortran/configure.ac -+++ b/libgfortran/configure.ac -@@ -258,7 +258,7 @@ AC_SUBST(enable_static) - - # We need gfortran to compile parts of the library - #AC_PROG_FC(gfortran) --FC="$GFORTRAN" -+#FC="$GFORTRAN" - AC_PROG_FC(gfortran) - - # extra LD Flags which are required for targets diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0009-cpp-honor-sysroot.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0009-cpp-honor-sysroot.patch deleted file mode 100644 index 8ad6853d8..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0009-cpp-honor-sysroot.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 1d76de7f1f5c99f1fa1a4b14aedad3d702e4e136 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:22:00 +0400 -Subject: [PATCH] cpp: honor sysroot. - -Currently, if the gcc toolchain is relocated and installed from sstate, then you try and compile -preprocessed source (.i or .ii files), the compiler will try and access the builtin sysroot location -rather than the --sysroot option specified on the commandline. If access to that directory is -permission denied (unreadable), gcc will error. - -This happens when ccache is in use due to the fact it uses preprocessed source files. - -The fix below adds %I to the cpp-output spec macro so the default substitutions for -iprefix, --isystem, -isysroot happen and the correct sysroot is used. - -[YOCTO #2074] - -RP 2012/04/13 - -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - gcc/cp/lang-specs.h | 2 +- - gcc/gcc.c | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/gcc/cp/lang-specs.h b/gcc/cp/lang-specs.h -index 0ad4a33b93e..16c744f4f90 100644 ---- a/gcc/cp/lang-specs.h -+++ b/gcc/cp/lang-specs.h -@@ -66,5 +66,5 @@ along with GCC; see the file COPYING3. 
If not see - {".ii", "@c++-cpp-output", 0, 0, 0}, - {"@c++-cpp-output", - "%{!E:%{!M:%{!MM:" -- " cc1plus -fpreprocessed %i %(cc1_options) %2" -+ " cc1plus -fpreprocessed %i %I %(cc1_options) %2" - " %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, -diff --git a/gcc/gcc.c b/gcc/gcc.c -index 48b0f9dde81..c87f603955f 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -1348,7 +1348,7 @@ static const struct compiler default_compilers[] = - %W{o*:--output-pch=%*}}%V}}}}}}}", 0, 0, 0}, - {".i", "@cpp-output", 0, 0, 0}, - {"@cpp-output", -- "%{!M:%{!MM:%{!E:cc1 -fpreprocessed %i %(cc1_options) %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, -+ "%{!M:%{!MM:%{!E:cc1 -fpreprocessed %i %I %(cc1_options) %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, - {".s", "@assembler", 0, 0, 0}, - {"@assembler", - "%{!M:%{!MM:%{!E:%{!S:as %(asm_debug) %(asm_options) %i %A }}}}", 0, 0, 0}, diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0010-MIPS64-Default-to-N64-ABI.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0010-MIPS64-Default-to-N64-ABI.patch deleted file mode 100644 index 625e2d870..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0010-MIPS64-Default-to-N64-ABI.patch +++ /dev/null @@ -1,54 +0,0 @@ -From 4fad4433c96bc9d0d9d124f9674fb3389f6f426e Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:23:08 +0400 -Subject: [PATCH] MIPS64: Default to N64 ABI - -MIPS64 defaults to n32 ABI, this patch makes it -so that it defaults to N64 ABI - -Signed-off-by: Khem Raj - -Upstream-Status: Inappropriate [OE config specific] ---- - gcc/config.gcc | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - -diff --git a/gcc/config.gcc b/gcc/config.gcc -index cf1a87e2efd..37c4221a39f 100644 ---- a/gcc/config.gcc -+++ b/gcc/config.gcc -@@ -2511,29 +2511,29 @@ mips*-*-linux*) # Linux MIPS, either endian. 
- default_mips_arch=mips32 - ;; - mips64el-st-linux-gnu) -- default_mips_abi=n32 -+ default_mips_abi=64 - tm_file="${tm_file} mips/st.h" - tmake_file="${tmake_file} mips/t-st" - enable_mips_multilibs="yes" - ;; - mips64octeon*-*-linux*) -- default_mips_abi=n32 -+ default_mips_abi=64 - tm_defines="${tm_defines} MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\"" - target_cpu_default=MASK_SOFT_FLOAT_ABI - enable_mips_multilibs="yes" - ;; - mipsisa64r6*-*-linux*) -- default_mips_abi=n32 -+ default_mips_abi=64 - default_mips_arch=mips64r6 - enable_mips_multilibs="yes" - ;; - mipsisa64r2*-*-linux*) -- default_mips_abi=n32 -+ default_mips_abi=64 - default_mips_arch=mips64r2 - enable_mips_multilibs="yes" - ;; - mips64*-*-linux* | mipsisa64*-*-linux*) -- default_mips_abi=n32 -+ default_mips_abi=64 - enable_mips_multilibs="yes" - ;; - esac diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch deleted file mode 100644 index e35797633..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch +++ /dev/null @@ -1,243 +0,0 @@ -From 8fc016a53c22c19feccbfa13ebdf19090dc67058 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:24:50 +0400 -Subject: [PATCH] Define GLIBC_DYNAMIC_LINKER and UCLIBC_DYNAMIC_LINKER - relative to SYSTEMLIBS_DIR - -This patch defines GLIBC_DYNAMIC_LINKER and UCLIBC_DYNAMIC_LINKER -relative to SYSTEMLIBS_DIR which can be set in generated headers -This breaks the assumption of hardcoded multilib in gcc -Change is only for the supported architectures in OE including -SH, sparc, alpha for possible future support (if any) - -Removes the do_headerfix task in metadata - -Signed-off-by: Khem Raj - -Upstream-Status: Inappropriate [OE configuration] ---- - gcc/config/alpha/linux-elf.h | 4 ++-- - gcc/config/arm/linux-eabi.h | 4 ++-- - gcc/config/arm/linux-elf.h | 2 +- - gcc/config/i386/linux.h | 2 +- - gcc/config/i386/linux64.h | 6 +++--- - gcc/config/linux.h | 8 ++++---- - gcc/config/mips/linux.h | 12 ++++++------ - gcc/config/riscv/linux.h | 2 +- - gcc/config/rs6000/linux64.h | 15 +++++---------- - gcc/config/sh/linux.h | 2 +- - gcc/config/sparc/linux.h | 2 +- - gcc/config/sparc/linux64.h | 4 ++-- - 12 files changed, 29 insertions(+), 34 deletions(-) - -diff --git a/gcc/config/alpha/linux-elf.h b/gcc/config/alpha/linux-elf.h -index e25fcac3c59..01aca0c6542 100644 ---- a/gcc/config/alpha/linux-elf.h -+++ b/gcc/config/alpha/linux-elf.h -@@ -23,8 +23,8 @@ along with GCC; see the file COPYING3. If not see - #define EXTRA_SPECS \ - { "elf_dynamic_linker", ELF_DYNAMIC_LINKER }, - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" --#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" -+#define UCLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-uClibc.so.0" - #if DEFAULT_LIBC == LIBC_UCLIBC - #define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" - #elif DEFAULT_LIBC == LIBC_GLIBC -diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h -index 5bdcfa0c5d3..0c0332f317f 100644 ---- a/gcc/config/arm/linux-eabi.h -+++ b/gcc/config/arm/linux-eabi.h -@@ -65,8 +65,8 @@ - GLIBC_DYNAMIC_LINKER_DEFAULT and TARGET_DEFAULT_FLOAT_ABI. 
*/ - - #undef GLIBC_DYNAMIC_LINKER --#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT "/lib/ld-linux.so.3" --#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT "/lib/ld-linux-armhf.so.3" -+#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT SYSTEMLIBS_DIR "ld-linux.so.3" -+#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT SYSTEMLIBS_DIR "ld-linux-armhf.so.3" - #define GLIBC_DYNAMIC_LINKER_DEFAULT GLIBC_DYNAMIC_LINKER_SOFT_FLOAT - - #define GLIBC_DYNAMIC_LINKER \ -diff --git a/gcc/config/arm/linux-elf.h b/gcc/config/arm/linux-elf.h -index 0ec3aa53189..abfa9566d74 100644 ---- a/gcc/config/arm/linux-elf.h -+++ b/gcc/config/arm/linux-elf.h -@@ -60,7 +60,7 @@ - - #define LIBGCC_SPEC "%{mfloat-abi=soft*:-lfloat} -lgcc" - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" - - #define LINUX_TARGET_LINK_SPEC "%{h*} \ - %{static:-Bstatic} \ -diff --git a/gcc/config/i386/linux.h b/gcc/config/i386/linux.h -index 9f823f125ed..e0390b7d5e3 100644 ---- a/gcc/config/i386/linux.h -+++ b/gcc/config/i386/linux.h -@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see - . */ - - #define GNU_USER_LINK_EMULATION "elf_i386" --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" - - #undef MUSL_DYNAMIC_LINKER - #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-i386.so.1" -diff --git a/gcc/config/i386/linux64.h b/gcc/config/i386/linux64.h -index 6cb68d1ccfa..7de09ec857c 100644 ---- a/gcc/config/i386/linux64.h -+++ b/gcc/config/i386/linux64.h -@@ -27,9 +27,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - #define GNU_USER_LINK_EMULATION64 "elf_x86_64" - #define GNU_USER_LINK_EMULATIONX32 "elf32_x86_64" - --#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" --#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux-x86-64.so.2" --#define GLIBC_DYNAMIC_LINKERX32 "/libx32/ld-linux-x32.so.2" -+#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-linux-x86-64.so.2" -+#define GLIBC_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ld-linux-x32.so.2" - - #undef MUSL_DYNAMIC_LINKER32 - #define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-i386.so.1" -diff --git a/gcc/config/linux.h b/gcc/config/linux.h -index 95654bcdb5a..0c1a8118a26 100644 ---- a/gcc/config/linux.h -+++ b/gcc/config/linux.h -@@ -94,10 +94,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - GLIBC_DYNAMIC_LINKER must be defined for each target using them, or - GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets - supporting both 32-bit and 64-bit compilation. */ --#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" --#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" --#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" --#define UCLIBC_DYNAMIC_LINKERX32 "/lib/ldx32-uClibc.so.0" -+#define UCLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-uClibc.so.0" -+#define UCLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-uClibc.so.0" -+#define UCLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld64-uClibc.so.0" -+#define UCLIBC_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ldx32-uClibc.so.0" - #define BIONIC_DYNAMIC_LINKER "/system/bin/linker" - #define BIONIC_DYNAMIC_LINKER32 "/system/bin/linker" - #define BIONIC_DYNAMIC_LINKER64 "/system/bin/linker64" -diff --git a/gcc/config/mips/linux.h b/gcc/config/mips/linux.h -index 54446e58e5f..4786ee304c1 100644 ---- a/gcc/config/mips/linux.h -+++ b/gcc/config/mips/linux.h -@@ -22,20 +22,20 @@ along with GCC; see the file COPYING3. 
If not see - #define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32" - - #define GLIBC_DYNAMIC_LINKER32 \ -- "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" - #define GLIBC_DYNAMIC_LINKER64 \ -- "%{mnan=2008:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" - #define GLIBC_DYNAMIC_LINKERN32 \ -- "%{mnan=2008:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" - - #undef UCLIBC_DYNAMIC_LINKER32 - #define UCLIBC_DYNAMIC_LINKER32 \ -- "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld-uClibc.so.0}" - #undef UCLIBC_DYNAMIC_LINKER64 - #define UCLIBC_DYNAMIC_LINKER64 \ -- "%{mnan=2008:/lib/ld64-uClibc-mipsn8.so.0;:/lib/ld64-uClibc.so.0}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld64-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld64-uClibc.so.0}" - #define UCLIBC_DYNAMIC_LINKERN32 \ -- "%{mnan=2008:/lib32/ld-uClibc-mipsn8.so.0;:/lib32/ld-uClibc.so.0}" -+ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld-uClibc.so.0}" - - #undef MUSL_DYNAMIC_LINKER32 - #define MUSL_DYNAMIC_LINKER32 \ -diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h -index 4afef7c228c..01997330741 100644 ---- a/gcc/config/riscv/linux.h -+++ b/gcc/config/riscv/linux.h -@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see - GNU_USER_TARGET_OS_CPP_BUILTINS(); \ - } while (0) - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1" - - #define MUSL_ABI_SUFFIX \ - "%{mabi=ilp32:-sf}" \ -diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h -index 34776c8421e..967c1c43c63 100644 ---- a/gcc/config/rs6000/linux64.h -+++ b/gcc/config/rs6000/linux64.h -@@ -419,24 +419,19 @@ extern int dot_symbols; - #undef LINK_OS_DEFAULT_SPEC - #define LINK_OS_DEFAULT_SPEC "%(link_os_linux)" - --#define GLIBC_DYNAMIC_LINKER32 "%(dynamic_linker_prefix)/lib/ld.so.1" -- -+#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld.so.1" - #ifdef LINUX64_DEFAULT_ABI_ELFv2 --#define GLIBC_DYNAMIC_LINKER64 \ --"%{mabi=elfv1:%(dynamic_linker_prefix)/lib64/ld64.so.1;" \ --":%(dynamic_linker_prefix)/lib64/ld64.so.2}" -+#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv1:" SYSTEMLIBS_DIR "ld64.so.1;:" SYSTEMLIBS_DIR "ld64.so.2}" - #else --#define GLIBC_DYNAMIC_LINKER64 \ --"%{mabi=elfv2:%(dynamic_linker_prefix)/lib64/ld64.so.2;" \ --":%(dynamic_linker_prefix)/lib64/ld64.so.1}" -+#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv2:" SYSTEMLIBS_DIR "ld64.so.2;:" SYSTEMLIBS_DIR "ld64.so.1}" - #endif - - #undef MUSL_DYNAMIC_LINKER32 - #define MUSL_DYNAMIC_LINKER32 \ -- "/lib/ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" -+ SYSTEMLIBS_DIR "ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" - #undef MUSL_DYNAMIC_LINKER64 - #define MUSL_DYNAMIC_LINKER64 \ -- "/lib/ld-musl-powerpc64" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" -+ SYSTEMLIBS_DIR "ld-musl-powerpc64" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" - - #undef DEFAULT_ASM_ENDIAN - #if (TARGET_DEFAULT & MASK_LITTLE_ENDIAN) -diff --git a/gcc/config/sh/linux.h b/gcc/config/sh/linux.h -index c1d0441d488..81373eb8336 100644 ---- a/gcc/config/sh/linux.h -+++ b/gcc/config/sh/linux.h -@@ 
-64,7 +64,7 @@ along with GCC; see the file COPYING3. If not see - "/lib/ld-musl-sh" MUSL_DYNAMIC_LINKER_E MUSL_DYNAMIC_LINKER_FP \ - "%{mfdpic:-fdpic}.so.1" - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" - - #undef SUBTARGET_LINK_EMUL_SUFFIX - #define SUBTARGET_LINK_EMUL_SUFFIX "%{mfdpic:_fd;:_linux}" -diff --git a/gcc/config/sparc/linux.h b/gcc/config/sparc/linux.h -index 81201e67a2f..8b6fc577594 100644 ---- a/gcc/config/sparc/linux.h -+++ b/gcc/config/sparc/linux.h -@@ -84,7 +84,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv); - When the -shared link option is used a final link is not being - done. */ - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" - - #undef LINK_SPEC - #define LINK_SPEC "-m elf32_sparc %{shared:-shared} \ -diff --git a/gcc/config/sparc/linux64.h b/gcc/config/sparc/linux64.h -index a1a0efd8f28..85d1084afc2 100644 ---- a/gcc/config/sparc/linux64.h -+++ b/gcc/config/sparc/linux64.h -@@ -84,8 +84,8 @@ along with GCC; see the file COPYING3. If not see - When the -shared link option is used a final link is not being - done. */ - --#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" --#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-linux.so.2" -+#define GLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-linux.so.2" - - #ifdef SPARC_BI_ARCH - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0012-gcc-Fix-argument-list-too-long-error.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0012-gcc-Fix-argument-list-too-long-error.patch deleted file mode 100644 index acbd75f13..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0012-gcc-Fix-argument-list-too-long-error.patch +++ /dev/null @@ -1,37 +0,0 @@ -From a22d1264049d29b90663cf5667049ae6f9b7a5ce Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:26:37 +0400 -Subject: [PATCH] gcc: Fix argument list too long error. - -There would be an "Argument list too long" error when the -build directory is longer than 200, this is caused by: - -headers=`echo $(PLUGIN_HEADERS) | tr ' ' '\012' | sort -u` - -The PLUGIN_HEADERS is too long before sort, so the "echo" can't handle -it, use the $(sort list) of GNU make which can handle the too long list -would fix the problem, the header would be short enough after sorted. -The "tr ' ' '\012'" was used for translating the space to "\n", the -$(sort list) doesn't need this. - -Signed-off-by: Robert Yang -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - gcc/Makefile.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/gcc/Makefile.in b/gcc/Makefile.in -index a67d2cc18d6..480c9366418 100644 ---- a/gcc/Makefile.in -+++ b/gcc/Makefile.in -@@ -3606,7 +3606,7 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype - # We keep the directory structure for files in config or c-family and .def - # files. All other files are flattened to a single directory. 
- $(mkinstalldirs) $(DESTDIR)$(plugin_includedir) -- headers=`echo $(PLUGIN_HEADERS) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \ -+ headers="$(sort $(PLUGIN_HEADERS) $$(cd $(srcdir); echo *.h *.def))"; \ - srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`; \ - for file in $$headers; do \ - if [ -f $$file ] ; then \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0013-Disable-sdt.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0013-Disable-sdt.patch deleted file mode 100644 index 207cdb57a..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0013-Disable-sdt.patch +++ /dev/null @@ -1,110 +0,0 @@ -From fa47586935a18ecfc2ad5586802e326e21741b7b Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:28:10 +0400 -Subject: [PATCH] Disable sdt. - -We don't list dtrace in DEPENDS so we shouldn't be depending on this header. -It may or may not exist from preivous builds though. To be determinstic, disable -sdt.h usage always. This avoids build failures if the header is removed after configure -but before libgcc is compiled for example. - -RP 2012/8/7 - -Signed-off-by: Khem Raj - -Disable sdt for libstdc++-v3. - -Signed-off-by: Robert Yang - -Upstream-Status: Inappropriate [hack] ---- - gcc/configure | 12 ++++++------ - gcc/configure.ac | 18 +++++++++--------- - libstdc++-v3/configure | 6 +++--- - libstdc++-v3/configure.ac | 2 +- - 4 files changed, 19 insertions(+), 19 deletions(-) - -diff --git a/gcc/configure b/gcc/configure -index b26e8fc7fee..6080f86145e 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -29789,12 +29789,12 @@ fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking sys/sdt.h in the target C library" >&5 - $as_echo_n "checking sys/sdt.h in the target C library... " >&6; } - have_sys_sdt_h=no --if test -f $target_header_dir/sys/sdt.h; then -- have_sys_sdt_h=yes -- --$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h -- --fi -+#if test -f $target_header_dir/sys/sdt.h; then -+# have_sys_sdt_h=yes -+# -+#$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h -+# -+#fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_sys_sdt_h" >&5 - $as_echo "$have_sys_sdt_h" >&6; } - -diff --git a/gcc/configure.ac b/gcc/configure.ac -index 26fa46802c7..42be5252778 100644 ---- a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -6190,15 +6190,15 @@ fi - AC_SUBST([enable_default_ssp]) - - # Test for on the target. --GCC_TARGET_TEMPLATE([HAVE_SYS_SDT_H]) --AC_MSG_CHECKING(sys/sdt.h in the target C library) --have_sys_sdt_h=no --if test -f $target_header_dir/sys/sdt.h; then -- have_sys_sdt_h=yes -- AC_DEFINE(HAVE_SYS_SDT_H, 1, -- [Define if your target C library provides sys/sdt.h]) --fi --AC_MSG_RESULT($have_sys_sdt_h) -+#GCC_TARGET_TEMPLATE([HAVE_SYS_SDT_H]) -+#AC_MSG_CHECKING(sys/sdt.h in the target C library) -+#have_sys_sdt_h=no -+#if test -f $target_header_dir/sys/sdt.h; then -+# have_sys_sdt_h=yes -+# AC_DEFINE(HAVE_SYS_SDT_H, 1, -+# [Define if your target C library provides sys/sdt.h]) -+#fi -+#AC_MSG_RESULT($have_sys_sdt_h) - - # Check if TFmode long double should be used by default or not. 
- # Some glibc targets used DFmode long double, but with glibc 2.4 -diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure -index 9f9c5a2419a..71ed13b815b 100755 ---- a/libstdc++-v3/configure -+++ b/libstdc++-v3/configure -@@ -22615,11 +22615,11 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' - ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' - ac_compiler_gnu=$ac_cv_c_compiler_gnu - -- if test $glibcxx_cv_sys_sdt_h = yes; then -+# if test $glibcxx_cv_sys_sdt_h = yes; then - --$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h -+#$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h - -- fi -+# fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $glibcxx_cv_sys_sdt_h" >&5 - $as_echo "$glibcxx_cv_sys_sdt_h" >&6; } - -diff --git a/libstdc++-v3/configure.ac b/libstdc++-v3/configure.ac -index 699e55fd829..5c7a7bda439 100644 ---- a/libstdc++-v3/configure.ac -+++ b/libstdc++-v3/configure.ac -@@ -241,7 +241,7 @@ GLIBCXX_CHECK_SC_NPROCESSORS_ONLN - GLIBCXX_CHECK_SC_NPROC_ONLN - GLIBCXX_CHECK_PTHREADS_NUM_PROCESSORS_NP - GLIBCXX_CHECK_SYSCTL_HW_NCPU --GLIBCXX_CHECK_SDT_H -+#GLIBCXX_CHECK_SDT_H - - # Check for available headers. - AC_CHECK_HEADERS([endian.h execinfo.h float.h fp.h ieeefp.h inttypes.h \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0014-libtool.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0014-libtool.patch deleted file mode 100644 index f4e70c3b1..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0014-libtool.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 6ecd478881468934444ff85611fd43f7033b1e81 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:29:11 +0400 -Subject: [PATCH] libtool - -libstdc++ from gcc-runtime gets created with -rpath=/usr/lib/../lib for qemux86-64 -when running on am x86_64 build host. - -This patch stops this speading to libdir in the libstdc++.la file within libtool. -Arguably, it shouldn't be passing this into libtool in the first place but -for now this resolves the nastiest problems this causes. - -func_normal_abspath would resolve an empty path to `pwd` so we need -to filter the zero case. - -RP 2012/8/24 - -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - ltmain.sh | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/ltmain.sh b/ltmain.sh -index 70990740b6c..ee938056bef 100644 ---- a/ltmain.sh -+++ b/ltmain.sh -@@ -6359,6 +6359,10 @@ func_mode_link () - func_warning "ignoring multiple \`-rpath's for a libtool library" - - install_libdir="$1" -+ if test -n "$install_libdir"; then -+ func_normal_abspath "$install_libdir" -+ install_libdir=$func_normal_abspath_result -+ fi - - oldlibs= - if test -z "$rpath"; then diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch deleted file mode 100644 index bc2674abc..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch +++ /dev/null @@ -1,40 +0,0 @@ -From de4427fa49c07dc651ee6ceaf5c5078700ca3b08 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:30:32 +0400 -Subject: [PATCH] gcc: armv4: pass fix-v4bx to linker to support EABI. 
- -The LINK_SPEC for linux gets overwritten by linux-eabi.h which -means the value of TARGET_FIX_V4BX_SPEC gets lost and as a result -the option is not passed to linker when chosing march=armv4 -This patch redefines this in linux-eabi.h and reinserts it -for eabi defaulting toolchains. - -We might want to send it upstream. - -Signed-off-by: Khem Raj - -Upstream-Status: Pending ---- - gcc/config/arm/linux-eabi.h | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - -diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h -index 0c0332f317f..7b3769e8459 100644 ---- a/gcc/config/arm/linux-eabi.h -+++ b/gcc/config/arm/linux-eabi.h -@@ -91,10 +91,14 @@ - #define MUSL_DYNAMIC_LINKER \ - "/lib/ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1" - -+/* For armv4 we pass --fix-v4bx to linker to support EABI */ -+#undef TARGET_FIX_V4BX_SPEC -+#define TARGET_FIX_V4BX_SPEC "%{mcpu=arm8|mcpu=arm810|mcpu=strongarm*|march=armv4: --fix-v4bx}" -+ - /* At this point, bpabi.h will have clobbered LINK_SPEC. We want to - use the GNU/Linux version, not the generic BPABI version. */ - #undef LINK_SPEC --#define LINK_SPEC EABI_LINK_SPEC \ -+#define LINK_SPEC TARGET_FIX_V4BX_SPEC EABI_LINK_SPEC \ - LINUX_OR_ANDROID_LD (LINUX_TARGET_LINK_SPEC, \ - LINUX_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch deleted file mode 100644 index 1dc4bb859..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 6b363c2c1c089ee900efa6013aefba1003840a37 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 29 Mar 2013 09:33:04 +0400 -Subject: [PATCH] Use the multilib config files from ${B} instead of using the - ones from ${S} - -Use the multilib config files from ${B} instead of using the ones from ${S} -so that the source can be shared between gcc-cross-initial, -gcc-cross-intermediate, gcc-cross, gcc-runtime, and also the sdk build. 
- -Signed-off-by: Khem Raj -Signed-off-by: Constantin Musca - -Upstream-Status: Inappropriate [configuration] ---- - gcc/configure | 22 ++++++++++++++++++---- - gcc/configure.ac | 22 ++++++++++++++++++---- - 2 files changed, 36 insertions(+), 8 deletions(-) - -diff --git a/gcc/configure b/gcc/configure -index 6080f86145e..825a9652329 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -12685,10 +12685,20 @@ done - tmake_file_= - for f in ${tmake_file} - do -- if test -f ${srcdir}/config/$f -- then -- tmake_file_="${tmake_file_} \$(srcdir)/config/$f" -- fi -+ case $f in -+ */t-linux64 ) -+ if test -f ./config/$f -+ then -+ tmake_file_="${tmake_file_} ./config/$f" -+ fi -+ ;; -+ * ) -+ if test -f ${srcdir}/config/$f -+ then -+ tmake_file_="${tmake_file_} \$(srcdir)/config/$f" -+ fi -+ ;; -+ esac - done - tmake_file="${tmake_file_}${omp_device_property_tmake_file}" - -@@ -12699,6 +12709,10 @@ tm_file_list="options.h" - tm_include_list="options.h insn-constants.h" - for f in $tm_file; do - case $f in -+ */linux64.h ) -+ tm_file_list="${tm_file_list} ./config/$f" -+ tm_include_list="${tm_include_list} ./config/$f" -+ ;; - ./* ) - f=`echo $f | sed 's/^..//'` - tm_file_list="${tm_file_list} $f" -diff --git a/gcc/configure.ac b/gcc/configure.ac -index 42be5252778..6099eb3251f 100644 ---- a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -2118,10 +2118,20 @@ done - tmake_file_= - for f in ${tmake_file} - do -- if test -f ${srcdir}/config/$f -- then -- tmake_file_="${tmake_file_} \$(srcdir)/config/$f" -- fi -+ case $f in -+ */t-linux64 ) -+ if test -f ./config/$f -+ then -+ tmake_file_="${tmake_file_} ./config/$f" -+ fi -+ ;; -+ * ) -+ if test -f ${srcdir}/config/$f -+ then -+ tmake_file_="${tmake_file_} \$(srcdir)/config/$f" -+ fi -+ ;; -+ esac - done - tmake_file="${tmake_file_}${omp_device_property_tmake_file}" - -@@ -2132,6 +2142,10 @@ tm_file_list="options.h" - tm_include_list="options.h insn-constants.h" - for f in $tm_file; do - case $f in -+ */linux64.h ) -+ tm_file_list="${tm_file_list} ./config/$f" -+ tm_include_list="${tm_include_list} ./config/$f" -+ ;; - ./* ) - f=`echo $f | sed 's/^..//'` - tm_file_list="${tm_file_list} $f" diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch deleted file mode 100644 index 05f12847e..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 08752c2f1d21553301bee5757c453c6a36cbe03c Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 20 Feb 2015 09:39:38 +0000 -Subject: [PATCH] Avoid using libdir from .la which usually points to a host - path - -Upstream-Status: Inappropriate [embedded specific] - -Signed-off-by: Jonathan Liu -Signed-off-by: Khem Raj ---- - ltmain.sh | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/ltmain.sh b/ltmain.sh -index ee938056bef..9ebc7e3d1e0 100644 ---- a/ltmain.sh -+++ b/ltmain.sh -@@ -5628,6 +5628,9 @@ func_mode_link () - absdir="$abs_ladir" - libdir="$abs_ladir" - else -+ # Instead of using libdir from .la which usually points to a host path, -+ # use the path the .la is contained in. 
-+ libdir="$abs_ladir" - dir="$libdir" - absdir="$libdir" - fi diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0018-export-CPP.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0018-export-CPP.patch deleted file mode 100644 index 886a1221d..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0018-export-CPP.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 5c3d66378c7ff60ca11a875aa4aa6f8a8529d43a Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 20 Feb 2015 09:40:59 +0000 -Subject: [PATCH] export CPP - -The OE environment sets and exports CPP as being the target gcc. When -building gcc-cross-canadian for a mingw targetted sdk, the following can be found -in build.x86_64-pokysdk-mingw32.i586-poky-linux/build-x86_64-linux/libiberty/config.log: - -configure:3641: checking for _FILE_OFFSET_BITS value needed for large files -configure:3666: gcc -c -isystem/media/build1/poky/build/tmp/sysroots/x86_64-linux/usr/include -O2 -pipe conftest.c >&5 -configure:3666: $? = 0 -configure:3698: result: no -configure:3786: checking how to run the C preprocessor -configure:3856: result: x86_64-pokysdk-mingw32-gcc -E --sysroot=/media/build1/poky/build/tmp/sysroots/x86_64-nativesdk-mingw32-pokysdk-mingw32 -configure:3876: x86_64-pokysdk-mingw32-gcc -E --sysroot=/media/build1/poky/build/tmp/sysroots/x86_64-nativesdk-mingw32-pokysdk-mingw32 conftest.c -configure:3876: $? = 0 - -Note this is a *build* target (in build-x86_64-linux) so it should be -using the host "gcc", not x86_64-pokysdk-mingw32-gcc. Since the mingw32 -headers are very different, using the wrong cpp is a real problem. It is leaking -into configure through the CPP variable. Ultimately this leads to build -failures related to not being able to include a process.h file for pem-unix.c. - -The fix is to ensure we export a sane CPP value into the build -environment when using build targets. We could define a CPP_FOR_BUILD value which may be -the version which needs to be upstreamed but for now, this fix is good enough to -avoid the problem. - -RP 22/08/2013 - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - Makefile.in | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/Makefile.in b/Makefile.in -index 36e369df6e7..c717903bb13 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -149,6 +149,7 @@ BUILD_EXPORTS = \ - AR="$(AR_FOR_BUILD)"; export AR; \ - AS="$(AS_FOR_BUILD)"; export AS; \ - CC="$(CC_FOR_BUILD)"; export CC; \ -+ CPP="$(CC_FOR_BUILD) -E"; export CPP; \ - CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ - CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ - CXX="$(CXX_FOR_BUILD)"; export CXX; \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0019-Ensure-target-gcc-headers-can-be-included.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0019-Ensure-target-gcc-headers-can-be-included.patch deleted file mode 100644 index 2797b2c22..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0019-Ensure-target-gcc-headers-can-be-included.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 378b752c5d9a3dba4e58cdadf8b4b4f34ea99a76 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 20 Feb 2015 10:25:11 +0000 -Subject: [PATCH] Ensure target gcc headers can be included - -There are a few headers installed as part of the OpenEmbedded -gcc-runtime target (omp.h, ssp/*.h). Being installed from a recipe -built for the target architecture, these are within the target -sysroot and not cross/nativesdk; thus they weren't able to be -found by gcc with the existing search paths. 
Add support for -picking up these headers under the sysroot supplied on the gcc -command line in order to resolve this. - -Upstream-Status: Pending - -Signed-off-by: Paul Eggleton -Signed-off-by: Khem Raj ---- - gcc/Makefile.in | 2 ++ - gcc/cppdefault.c | 4 ++++ - 2 files changed, 6 insertions(+) - -diff --git a/gcc/Makefile.in b/gcc/Makefile.in -index 480c9366418..011c7ac2db6 100644 ---- a/gcc/Makefile.in -+++ b/gcc/Makefile.in -@@ -618,6 +618,7 @@ libexecdir = @libexecdir@ - - # Directory in which the compiler finds libraries etc. - libsubdir = $(libdir)/gcc/$(real_target_noncanonical)/$(version)$(accel_dir_suffix) -+libsubdir_target = $(target_noncanonical)/$(version) - # Directory in which the compiler finds executables - libexecsubdir = $(libexecdir)/gcc/$(real_target_noncanonical)/$(version)$(accel_dir_suffix) - # Directory in which all plugin resources are installed -@@ -2946,6 +2947,7 @@ CFLAGS-intl.o += -DLOCALEDIR=\"$(localedir)\" - - PREPROCESSOR_DEFINES = \ - -DGCC_INCLUDE_DIR=\"$(libsubdir)/include\" \ -+ -DGCC_INCLUDE_SUBDIR_TARGET=\"$(libsubdir_target)/include\" \ - -DFIXED_INCLUDE_DIR=\"$(libsubdir)/include-fixed\" \ - -DGPLUSPLUS_INCLUDE_DIR=\"$(gcc_gxx_include_dir)\" \ - -DGPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT=$(gcc_gxx_include_dir_add_sysroot) \ -diff --git a/gcc/cppdefault.c b/gcc/cppdefault.c -index af38cc494ea..2f43b88a0c3 100644 ---- a/gcc/cppdefault.c -+++ b/gcc/cppdefault.c -@@ -59,6 +59,10 @@ const struct default_include cpp_include_defaults[] - /* This is the dir for gcc's private headers. */ - { GCC_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, - #endif -+#ifdef GCC_INCLUDE_SUBDIR_TARGET -+ /* This is the dir for gcc's private headers under the specified sysroot. */ -+ { STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET, "GCC", 0, 0, 1, 0 }, -+#endif - #ifdef LOCAL_INCLUDE_DIR - /* /usr/local/include comes before the fixincluded header files. */ - { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 }, diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch deleted file mode 100644 index c3baf8b45..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 870e805d705d99d9b9d7dbd09727f9c1d2ad9c1d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 3 Mar 2015 08:21:19 +0000 -Subject: [PATCH] Don't search host directory during "relink" if $inst_prefix - is provided - -http://lists.gnu.org/archive/html/libtool-patches/2011-01/msg00026.html - -Upstream-Status: Submitted - -Signed-off-by: Khem Raj ---- - ltmain.sh | 5 +++-- - 1 file changed, 3 insertions(+), 2 deletions(-) - -diff --git a/ltmain.sh b/ltmain.sh -index 9ebc7e3d1e0..7ea79fa8be6 100644 ---- a/ltmain.sh -+++ b/ltmain.sh -@@ -6004,12 +6004,13 @@ func_mode_link () - fi - else - # We cannot seem to hardcode it, guess we'll fake it. -+ # Default if $libdir is not relative to the prefix: - add_dir="-L$libdir" -- # Try looking first in the location we're being installed to. 
-+ - if test -n "$inst_prefix_dir"; then - case $libdir in - [\\/]*) -- add_dir="$add_dir -L$inst_prefix_dir$libdir" -+ add_dir="-L$inst_prefix_dir$libdir" - ;; - esac - fi diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch deleted file mode 100644 index abee48669..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch +++ /dev/null @@ -1,26 +0,0 @@ -From aba42de763a619355471efd1573561b0cbf51162 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 28 Apr 2015 23:15:27 -0700 -Subject: [PATCH] Use SYSTEMLIBS_DIR replacement instead of hardcoding - base_libdir - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - gcc/config/aarch64/aarch64-linux.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h -index e587e2e9ad6..ddc62895693 100644 ---- a/gcc/config/aarch64/aarch64-linux.h -+++ b/gcc/config/aarch64/aarch64-linux.h -@@ -21,7 +21,7 @@ - #ifndef GCC_AARCH64_LINUX_H - #define GCC_AARCH64_LINUX_H - --#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" -+#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" - - #undef MUSL_DYNAMIC_LINKER - #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0022-aarch64-Add-support-for-musl-ldso.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0022-aarch64-Add-support-for-musl-ldso.patch deleted file mode 100644 index c55b66d4b..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0022-aarch64-Add-support-for-musl-ldso.patch +++ /dev/null @@ -1,25 +0,0 @@ -From d63820a78d92f302410358293546f01c7ad17bd8 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 28 Apr 2015 23:18:39 -0700 -Subject: [PATCH] aarch64: Add support for musl ldso - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - gcc/config/aarch64/aarch64-linux.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h -index ddc62895693..b301825313a 100644 ---- a/gcc/config/aarch64/aarch64-linux.h -+++ b/gcc/config/aarch64/aarch64-linux.h -@@ -24,7 +24,7 @@ - #define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" - - #undef MUSL_DYNAMIC_LINKER --#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" -+#define MUSL_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" - - #undef ASAN_CC1_SPEC - #define ASAN_CC1_SPEC "%{%:sanitize(address):-funwind-tables}" diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch deleted file mode 100644 index 80c4d2292..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 3474e16ad4ea8cf4e0e330568e3bc9039e723dce Mon Sep 17 00:00:00 2001 -From: Robert Yang -Date: Sun, 5 Jul 2015 20:25:18 -0700 -Subject: [PATCH] libcc1: fix libcc1's install path and rpath - -* Install libcc1.so and libcc1plugin.so into 
- $(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version), as what we - had done to lto-plugin. -* Fix bad RPATH iussue: - gcc-5.2.0: package gcc-plugins contains bad RPATH /patht/to/tmp/sysroots/qemux86-64/usr/lib64/../lib64 in file - /path/to/gcc/5.2.0-r0/packages-split/gcc-plugins/usr/lib64/gcc/x86_64-poky-linux/5.2.0/plugin/libcc1plugin.so.0.0.0 - [rpaths] - -Upstream-Status: Inappropriate [OE configuration] - -Signed-off-by: Robert Yang ---- - libcc1/Makefile.am | 4 ++-- - libcc1/Makefile.in | 4 ++-- - 2 files changed, 4 insertions(+), 4 deletions(-) - -diff --git a/libcc1/Makefile.am b/libcc1/Makefile.am -index c005b0dad4a..ec31d35b7b9 100644 ---- a/libcc1/Makefile.am -+++ b/libcc1/Makefile.am -@@ -37,8 +37,8 @@ libiberty = $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ - $(Wc)$(libiberty_normal))) - libiberty_dep = $(patsubst $(Wc)%,%,$(libiberty)) - --plugindir = $(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/plugin --cc1libdir = $(libdir)/$(libsuffix) -+cc1libdir = $(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version) -+plugindir = $(cc1libdir) - - if ENABLE_PLUGIN - plugin_LTLIBRARIES = libcc1plugin.la libcp1plugin.la -diff --git a/libcc1/Makefile.in b/libcc1/Makefile.in -index 7104b649026..2103c477468 100644 ---- a/libcc1/Makefile.in -+++ b/libcc1/Makefile.in -@@ -393,8 +393,8 @@ libiberty = $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ - $(Wc)$(libiberty_normal))) - - libiberty_dep = $(patsubst $(Wc)%,%,$(libiberty)) --plugindir = $(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/plugin --cc1libdir = $(libdir)/$(libsuffix) -+cc1libdir = $(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version) -+plugindir = $(cc1libdir) - @ENABLE_PLUGIN_TRUE@plugin_LTLIBRARIES = libcc1plugin.la libcp1plugin.la - @ENABLE_PLUGIN_TRUE@cc1lib_LTLIBRARIES = libcc1.la - shared_source = callbacks.cc callbacks.hh connection.cc connection.hh \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0024-handle-sysroot-support-for-nativesdk-gcc.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0024-handle-sysroot-support-for-nativesdk-gcc.patch deleted file mode 100644 index 1a65ece7b..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0024-handle-sysroot-support-for-nativesdk-gcc.patch +++ /dev/null @@ -1,346 +0,0 @@ -From bb1f359e34649516e61305e9748534cce7e0ee70 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 7 Dec 2015 23:39:54 +0000 -Subject: [PATCH] handle sysroot support for nativesdk-gcc - -Being able to build a nativesdk gcc is useful, particularly in cases -where the host compiler may be of an incompatible version (or a 32 -bit compiler is needed). - -Sadly, building nativesdk-gcc is not straight forward. We install -nativesdk-gcc into a relocatable location and this means that its -library locations can change. "Normal" sysroot support doesn't help -in this case since the values of paths like "libdir" change, not just -base root directory of the system. - -In order to handle this we do two things: - -a) Add %r into spec file markup which can be used for injected paths - such as SYSTEMLIBS_DIR (see gcc_multilib_setup()). -b) Add other paths which need relocation into a .gccrelocprefix section - which the relocation code will notice and adjust automatically. - -Upstream-Status: Inappropriate -RP 2015/7/28 - -Signed-off-by: Khem Raj - -Added PREFIXVAR and EXEC_PREFIXVAR to support runtime relocation. Without -these as part of the gccrelocprefix the system can't do runtime relocation -if the executable is moved. 
(These paths were missed in the original -implementation.) - -Signed-off-by: Mark Hatle ---- - gcc/c-family/c-opts.c | 4 +-- - gcc/cppdefault.c | 63 ++++++++++++++++++++++++++----------------- - gcc/cppdefault.h | 13 ++++----- - gcc/gcc.c | 20 +++++++++----- - gcc/incpath.c | 12 ++++----- - gcc/prefix.c | 6 +++-- - 6 files changed, 70 insertions(+), 48 deletions(-) - -diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c -index 58ba0948e79..806bbcfb7a5 100644 ---- a/gcc/c-family/c-opts.c -+++ b/gcc/c-family/c-opts.c -@@ -1409,8 +1409,8 @@ add_prefixed_path (const char *suffix, incpath_kind chain) - size_t prefix_len, suffix_len; - - suffix_len = strlen (suffix); -- prefix = iprefix ? iprefix : cpp_GCC_INCLUDE_DIR; -- prefix_len = iprefix ? strlen (iprefix) : cpp_GCC_INCLUDE_DIR_len; -+ prefix = iprefix ? iprefix : GCC_INCLUDE_DIRVAR; -+ prefix_len = iprefix ? strlen (iprefix) : strlen(GCC_INCLUDE_DIRVAR) - 7; - - path = (char *) xmalloc (prefix_len + suffix_len + 1); - memcpy (path, prefix, prefix_len); -diff --git a/gcc/cppdefault.c b/gcc/cppdefault.c -index 2f43b88a0c3..6b6be04686c 100644 ---- a/gcc/cppdefault.c -+++ b/gcc/cppdefault.c -@@ -35,6 +35,30 @@ - # undef CROSS_INCLUDE_DIR - #endif - -+static char GPLUSPLUS_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_INCLUDE_DIR; -+char GCC_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GCC_INCLUDE_DIR; -+static char GPLUSPLUS_TOOL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_TOOL_INCLUDE_DIR; -+static char GPLUSPLUS_BACKWARD_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_BACKWARD_INCLUDE_DIR; -+static char STANDARD_STARTFILE_PREFIX_2VAR[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET; -+#ifdef LOCAL_INCLUDE_DIR -+static char LOCAL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = LOCAL_INCLUDE_DIR; -+#endif -+#ifdef PREFIX_INCLUDE_DIR -+static char PREFIX_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX_INCLUDE_DIR; -+#endif -+#ifdef FIXED_INCLUDE_DIR -+static char FIXED_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = FIXED_INCLUDE_DIR; -+#endif -+#ifdef CROSS_INCLUDE_DIR -+static char CROSS_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = CROSS_INCLUDE_DIR; -+#endif -+#ifdef TOOL_INCLUDE_DIR -+static char TOOL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = TOOL_INCLUDE_DIR; -+#endif -+#ifdef NATIVE_SYSTEM_HEADER_DIR -+static char NATIVE_SYSTEM_HEADER_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = NATIVE_SYSTEM_HEADER_DIR; -+#endif -+ - const struct default_include cpp_include_defaults[] - #ifdef INCLUDE_DEFAULTS - = INCLUDE_DEFAULTS; -@@ -42,38 +66,38 @@ const struct default_include cpp_include_defaults[] - = { - #ifdef GPLUSPLUS_INCLUDE_DIR - /* Pick up GNU C++ generic include files. */ -- { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1, -+ { GPLUSPLUS_INCLUDE_DIRVAR, "G++", 1, 1, - GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, - #endif - #ifdef GPLUSPLUS_TOOL_INCLUDE_DIR - /* Pick up GNU C++ target-dependent include files. */ -- { GPLUSPLUS_TOOL_INCLUDE_DIR, "G++", 1, 1, -+ { GPLUSPLUS_TOOL_INCLUDE_DIRVAR, "G++", 1, 1, - GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 1 }, - #endif - #ifdef GPLUSPLUS_BACKWARD_INCLUDE_DIR - /* Pick up GNU C++ backward and deprecated include files. 
*/ -- { GPLUSPLUS_BACKWARD_INCLUDE_DIR, "G++", 1, 1, -+ { GPLUSPLUS_BACKWARD_INCLUDE_DIRVAR, "G++", 1, 1, - GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, - #endif - #ifdef GCC_INCLUDE_DIR - /* This is the dir for gcc's private headers. */ -- { GCC_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, -+ { GCC_INCLUDE_DIRVAR, "GCC", 0, 0, 0, 0 }, - #endif - #ifdef GCC_INCLUDE_SUBDIR_TARGET - /* This is the dir for gcc's private headers under the specified sysroot. */ -- { STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET, "GCC", 0, 0, 1, 0 }, -+ { STANDARD_STARTFILE_PREFIX_2VAR, "GCC", 0, 0, 1, 0 }, - #endif - #ifdef LOCAL_INCLUDE_DIR - /* /usr/local/include comes before the fixincluded header files. */ -- { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 }, -- { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 0 }, -+ { LOCAL_INCLUDE_DIRVAR, 0, 0, 1, 1, 2 }, -+ { LOCAL_INCLUDE_DIRVAR, 0, 0, 1, 1, 0 }, - #endif - #ifdef PREFIX_INCLUDE_DIR -- { PREFIX_INCLUDE_DIR, 0, 0, 1, 0, 0 }, -+ { PREFIX_INCLUDE_DIRVAR, 0, 0, 1, 0, 0 }, - #endif - #ifdef FIXED_INCLUDE_DIR - /* This is the dir for fixincludes. */ -- { FIXED_INCLUDE_DIR, "GCC", 0, 0, 0, -+ { FIXED_INCLUDE_DIRVAR, "GCC", 0, 0, 0, - /* A multilib suffix needs adding if different multilibs use - different headers. */ - #ifdef SYSROOT_HEADERS_SUFFIX_SPEC -@@ -85,33 +109,24 @@ const struct default_include cpp_include_defaults[] - #endif - #ifdef CROSS_INCLUDE_DIR - /* One place the target system's headers might be. */ -- { CROSS_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, -+ { CROSS_INCLUDE_DIRVAR, "GCC", 0, 0, 0, 0 }, - #endif - #ifdef TOOL_INCLUDE_DIR - /* Another place the target system's headers might be. */ -- { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1, 0, 0 }, -+ { TOOL_INCLUDE_DIRVAR, "BINUTILS", 0, 1, 0, 0 }, - #endif - #ifdef NATIVE_SYSTEM_HEADER_DIR - /* /usr/include comes dead last. */ -- { NATIVE_SYSTEM_HEADER_DIR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 2 }, -- { NATIVE_SYSTEM_HEADER_DIR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 0 }, -+ { NATIVE_SYSTEM_HEADER_DIRVAR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 2 }, -+ { NATIVE_SYSTEM_HEADER_DIRVAR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 0 }, - #endif - { 0, 0, 0, 0, 0, 0 } - }; - #endif /* no INCLUDE_DEFAULTS */ - --#ifdef GCC_INCLUDE_DIR --const char cpp_GCC_INCLUDE_DIR[] = GCC_INCLUDE_DIR; --const size_t cpp_GCC_INCLUDE_DIR_len = sizeof GCC_INCLUDE_DIR - 8; --#else --const char cpp_GCC_INCLUDE_DIR[] = ""; --const size_t cpp_GCC_INCLUDE_DIR_len = 0; --#endif -- - /* The configured prefix. */ --const char cpp_PREFIX[] = PREFIX; --const size_t cpp_PREFIX_len = sizeof PREFIX - 1; --const char cpp_EXEC_PREFIX[] = STANDARD_EXEC_PREFIX; -+char PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX; -+char EXEC_PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_EXEC_PREFIX; - - /* This value is set by cpp_relocated at runtime */ - const char *gcc_exec_prefix; -diff --git a/gcc/cppdefault.h b/gcc/cppdefault.h -index a681264f75e..5e10a2fa140 100644 ---- a/gcc/cppdefault.h -+++ b/gcc/cppdefault.h -@@ -33,7 +33,8 @@ - - struct default_include - { -- const char *const fname; /* The name of the directory. */ -+ const char *fname; /* The name of the directory. */ -+ - const char *const component; /* The component containing the directory - (see update_path in prefix.c) */ - const char cplusplus; /* Only look here if we're compiling C++. 
*/ -@@ -50,17 +51,13 @@ struct default_include - }; - - extern const struct default_include cpp_include_defaults[]; --extern const char cpp_GCC_INCLUDE_DIR[]; --extern const size_t cpp_GCC_INCLUDE_DIR_len; -+extern char GCC_INCLUDE_DIRVAR[] __attribute__ ((section (".gccrelocprefix"))); - - /* The configure-time prefix, i.e., the value supplied as the argument - to --prefix=. */ --extern const char cpp_PREFIX[]; -+extern char PREFIXVAR[] __attribute__ ((section (".gccrelocprefix"))); - /* The length of the configure-time prefix. */ --extern const size_t cpp_PREFIX_len; --/* The configure-time execution prefix. This is typically the lib/gcc -- subdirectory of cpp_PREFIX. */ --extern const char cpp_EXEC_PREFIX[]; -+extern char EXEC_PREFIXVAR[] __attribute__ ((section (".gccrelocprefix"))); - /* The run-time execution prefix. This is typically the lib/gcc - subdirectory of the actual installation. */ - extern const char *gcc_exec_prefix; -diff --git a/gcc/gcc.c b/gcc/gcc.c -index c87f603955f..535d5c3bb65 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -252,6 +252,8 @@ FILE *report_times_to_file = NULL; - #endif - static const char *target_system_root = DEFAULT_TARGET_SYSTEM_ROOT; - -+static char target_relocatable_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = SYSTEMLIBS_DIR; -+ - /* Nonzero means pass the updated target_system_root to the compiler. */ - - static int target_system_root_changed; -@@ -526,6 +528,7 @@ or with constant text in a single argument. - %G process LIBGCC_SPEC as a spec. - %R Output the concatenation of target_system_root and - target_sysroot_suffix. -+ %r Output the base path target_relocatable_prefix - %S process STARTFILE_SPEC as a spec. A capital S is actually used here. - %E process ENDFILE_SPEC as a spec. A capital E is actually used here. - %C process CPP_SPEC as a spec. -@@ -1499,10 +1502,10 @@ static const char *gcc_libexec_prefix; - gcc_exec_prefix is set because, in that case, we know where the - compiler has been installed, and use paths relative to that - location instead. */ --static const char *const standard_exec_prefix = STANDARD_EXEC_PREFIX; --static const char *const standard_libexec_prefix = STANDARD_LIBEXEC_PREFIX; --static const char *const standard_bindir_prefix = STANDARD_BINDIR_PREFIX; --static const char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; -+static char standard_exec_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_EXEC_PREFIX; -+static char standard_libexec_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_LIBEXEC_PREFIX; -+static char standard_bindir_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_BINDIR_PREFIX; -+static char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; - - /* For native compilers, these are well-known paths containing - components that may be provided by the system. 
For cross -@@ -1510,9 +1513,9 @@ static const char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; - static const char *md_exec_prefix = MD_EXEC_PREFIX; - static const char *md_startfile_prefix = MD_STARTFILE_PREFIX; - static const char *md_startfile_prefix_1 = MD_STARTFILE_PREFIX_1; --static const char *const standard_startfile_prefix_1 -+static char standard_startfile_prefix_1[4096] __attribute__ ((section (".gccrelocprefix"))) - = STANDARD_STARTFILE_PREFIX_1; --static const char *const standard_startfile_prefix_2 -+static char standard_startfile_prefix_2[4096] __attribute__ ((section (".gccrelocprefix"))) - = STANDARD_STARTFILE_PREFIX_2; - - /* A relative path to be used in finding the location of tools -@@ -5952,6 +5955,11 @@ do_spec_1 (const char *spec, int inswitch, const char *soft_matched_part) - } - break; - -+ case 'r': -+ obstack_grow (&obstack, target_relocatable_prefix, -+ strlen (target_relocatable_prefix)); -+ break; -+ - case 'S': - value = do_spec_1 (startfile_spec, 0, NULL); - if (value != 0) -diff --git a/gcc/incpath.c b/gcc/incpath.c -index 9098ab044ab..bfad4ebe382 100644 ---- a/gcc/incpath.c -+++ b/gcc/incpath.c -@@ -131,7 +131,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, - int relocated = cpp_relocated (); - size_t len; - -- if (iprefix && (len = cpp_GCC_INCLUDE_DIR_len) != 0) -+ if (iprefix && (len = strlen(GCC_INCLUDE_DIRVAR) - 7) != 0) - { - /* Look for directories that start with the standard prefix. - "Translate" them, i.e. replace /usr/local/lib/gcc... with -@@ -145,7 +145,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, - now. */ - if (sysroot && p->add_sysroot) - continue; -- if (!filename_ncmp (p->fname, cpp_GCC_INCLUDE_DIR, len)) -+ if (!filename_ncmp (p->fname, GCC_INCLUDE_DIRVAR, len)) - { - char *str = concat (iprefix, p->fname + len, NULL); - if (p->multilib == 1 && imultilib) -@@ -185,7 +185,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, - free (sysroot_no_trailing_dir_separator); - } - else if (!p->add_sysroot && relocated -- && !filename_ncmp (p->fname, cpp_PREFIX, cpp_PREFIX_len)) -+ && !filename_ncmp (p->fname, PREFIXVAR, strlen(PREFIXVAR))) - { - static const char *relocated_prefix; - char *ostr; -@@ -202,12 +202,12 @@ add_standard_paths (const char *sysroot, const char *iprefix, - dummy = concat (gcc_exec_prefix, "dummy", NULL); - relocated_prefix - = make_relative_prefix (dummy, -- cpp_EXEC_PREFIX, -- cpp_PREFIX); -+ EXEC_PREFIXVAR, -+ PREFIXVAR); - free (dummy); - } - ostr = concat (relocated_prefix, -- p->fname + cpp_PREFIX_len, -+ p->fname + strlen(PREFIXVAR), - NULL); - str = update_path (ostr, p->component); - free (ostr); -diff --git a/gcc/prefix.c b/gcc/prefix.c -index 1a403e535bd..3257ed3cd3e 100644 ---- a/gcc/prefix.c -+++ b/gcc/prefix.c -@@ -72,7 +72,9 @@ License along with GCC; see the file COPYING3. 
If not see - #include "prefix.h" - #include "common/common-target.h" - --static const char *std_prefix = PREFIX; -+static const char PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX; -+ -+static const char *std_prefix = PREFIXVAR; - - static const char *get_key_value (char *); - static char *translate_name (char *); -@@ -212,7 +214,7 @@ translate_name (char *name) - prefix = getenv (key); - - if (prefix == 0) -- prefix = PREFIX; -+ prefix = PREFIXVAR; - - /* We used to strip trailing DIR_SEPARATORs here, but that can - sometimes yield a result with no separator when one was coded diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch deleted file mode 100644 index abf1f8491..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch +++ /dev/null @@ -1,99 +0,0 @@ -From 9c0c73ee48dbee2aad57f4dcdad1b7b74e77b944 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 7 Dec 2015 23:41:45 +0000 -Subject: [PATCH] Search target sysroot gcc version specific dirs with - multilib. - -We install the gcc libraries (such as crtbegin.p) into -//5.2.0/ -which is a default search path for GCC (aka multi_suffix in the -code below). is 'machine' in gcc's terminology. We use -these directories so that multiple gcc versions could in theory -co-exist on target. - -We only want to build one gcc-cross-canadian per arch and have this work -for all multilibs. can be handled by mapping the multilib - to the one used by gcc-cross-canadian, e.g. -mips64-polkmllib32-linux -is symlinked to by mips64-poky-linux. - -The default gcc search path in the target sysroot for a "lib64" mutlilib -is: - -/lib32/mips64-poky-linux/5.2.0/ -/lib32/../lib64/ -/usr/lib32/mips64-poky-linux/5.2.0/ -/usr/lib32/../lib64/ -/lib32/ -/usr/lib32/ - -which means that the lib32 crtbegin.o will be found and the lib64 ones -will not which leads to compiler failures. - -This patch injects a multilib version of that path first so the lib64 -binaries can be found first. With this change the search path becomes: - -/lib32/../lib64/mips64-poky-linux/5.2.0/ -/lib32/mips64-poky-linux/5.2.0/ -/lib32/../lib64/ -/usr/lib32/../lib64/mips64-poky-linux/5.2.0/ -/usr/lib32/mips64-poky-linux/5.2.0/ -/usr/lib32/../lib64/ -/lib32/ -/usr/lib32/ - -Upstream-Status: Pending -RP 2015/7/31 - -Signed-off-by: Khem Raj ---- - gcc/gcc.c | 29 ++++++++++++++++++++++++++++- - 1 file changed, 28 insertions(+), 1 deletion(-) - -diff --git a/gcc/gcc.c b/gcc/gcc.c -index 535d5c3bb65..04647ae812d 100644 ---- a/gcc/gcc.c -+++ b/gcc/gcc.c -@@ -2616,7 +2616,7 @@ for_each_path (const struct path_prefix *paths, - if (path == NULL) - { - len = paths->max_len + extra_space + 1; -- len += MAX (MAX (suffix_len, multi_os_dir_len), multiarch_len); -+ len += MAX ((suffix_len + multi_os_dir_len), multiarch_len); - path = XNEWVEC (char, len); - } - -@@ -2628,6 +2628,33 @@ for_each_path (const struct path_prefix *paths, - /* Look first in MACHINE/VERSION subdirectory. */ - if (!skip_multi_dir) - { -+ if (!(pl->os_multilib ? 
skip_multi_os_dir : skip_multi_dir)) -+ { -+ const char *this_multi; -+ size_t this_multi_len; -+ -+ if (pl->os_multilib) -+ { -+ this_multi = multi_os_dir; -+ this_multi_len = multi_os_dir_len; -+ } -+ else -+ { -+ this_multi = multi_dir; -+ this_multi_len = multi_dir_len; -+ } -+ -+ /* Look in multilib MACHINE/VERSION subdirectory first */ -+ if (this_multi_len) -+ { -+ memcpy (path + len, this_multi, this_multi_len + 1); -+ memcpy (path + len + this_multi_len, multi_suffix, suffix_len + 1); -+ ret = callback (path, callback_info); -+ if (ret) -+ break; -+ } -+ } -+ - memcpy (path + len, multi_suffix, suffix_len + 1); - ret = callback (path, callback_info); - if (ret) diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0026-Fix-various-_FOR_BUILD-and-related-variables.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0026-Fix-various-_FOR_BUILD-and-related-variables.patch deleted file mode 100644 index 97bf2f3a7..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0026-Fix-various-_FOR_BUILD-and-related-variables.patch +++ /dev/null @@ -1,134 +0,0 @@ -From 3a003af8804dda90fdf4862eca5f66cb12faaf02 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Mon, 7 Dec 2015 23:42:45 +0000 -Subject: [PATCH] Fix various _FOR_BUILD and related variables - -When doing a FOR_BUILD thing, you have to override CFLAGS with -CFLAGS_FOR_BUILD. And if you use C++, you also have to override -CXXFLAGS with CXXFLAGS_FOR_BUILD. -Without this, when building for mingw, you end up trying to use -the mingw headers for a host build. - -The same goes for other variables as well, such as CPPFLAGS, -CPP, and GMPINC. - -Upstream-Status: Pending - -Signed-off-by: Peter Seebach -Signed-off-by: Mark Hatle -Signed-off-by: Khem Raj ---- - Makefile.in | 6 ++++++ - Makefile.tpl | 5 +++++ - gcc/Makefile.in | 2 +- - gcc/configure | 2 +- - gcc/configure.ac | 2 +- - 5 files changed, 14 insertions(+), 3 deletions(-) - -diff --git a/Makefile.in b/Makefile.in -index c717903bb13..5abc649868d 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -152,6 +152,7 @@ BUILD_EXPORTS = \ - CPP="$(CC_FOR_BUILD) -E"; export CPP; \ - CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ - CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ -+ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \ - CXX="$(CXX_FOR_BUILD)"; export CXX; \ - CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \ - GFORTRAN="$(GFORTRAN_FOR_BUILD)"; export GFORTRAN; \ -@@ -171,6 +172,9 @@ BUILD_EXPORTS = \ - # built for the build system to override those in BASE_FLAGS_TO_PASS. - EXTRA_BUILD_FLAGS = \ - CFLAGS="$(CFLAGS_FOR_BUILD)" \ -+ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \ -+ CPP="$(CC_FOR_BUILD) -E" \ -+ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \ - LDFLAGS="$(LDFLAGS_FOR_BUILD)" - - # This is the list of directories to built for the host system. 
-@@ -188,6 +192,7 @@ HOST_SUBDIR = @host_subdir@ - HOST_EXPORTS = \ - $(BASE_EXPORTS) \ - CC="$(CC)"; export CC; \ -+ CPP="$(CC) -E"; export CPP; \ - ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \ - CFLAGS="$(CFLAGS)"; export CFLAGS; \ - CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ -@@ -776,6 +781,7 @@ BASE_FLAGS_TO_PASS = \ - "CC_FOR_BUILD=$(CC_FOR_BUILD)" \ - "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \ - "CXX_FOR_BUILD=$(CXX_FOR_BUILD)" \ -+ "CXXFLAGS_FOR_BUILD=$(CXXFLAGS_FOR_BUILD)" \ - "EXPECT=$(EXPECT)" \ - "FLEX=$(FLEX)" \ - "INSTALL=$(INSTALL)" \ -diff --git a/Makefile.tpl b/Makefile.tpl -index efed1511750..778beb705b4 100644 ---- a/Makefile.tpl -+++ b/Makefile.tpl -@@ -154,6 +154,7 @@ BUILD_EXPORTS = \ - CC="$(CC_FOR_BUILD)"; export CC; \ - CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ - CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ -+ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \ - CXX="$(CXX_FOR_BUILD)"; export CXX; \ - CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \ - GFORTRAN="$(GFORTRAN_FOR_BUILD)"; export GFORTRAN; \ -@@ -173,6 +174,9 @@ BUILD_EXPORTS = \ - # built for the build system to override those in BASE_FLAGS_TO_PASS. - EXTRA_BUILD_FLAGS = \ - CFLAGS="$(CFLAGS_FOR_BUILD)" \ -+ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \ -+ CPP="$(CC_FOR_BUILD) -E" \ -+ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \ - LDFLAGS="$(LDFLAGS_FOR_BUILD)" - - # This is the list of directories to built for the host system. -@@ -190,6 +194,7 @@ HOST_SUBDIR = @host_subdir@ - HOST_EXPORTS = \ - $(BASE_EXPORTS) \ - CC="$(CC)"; export CC; \ -+ CPP="$(CC) -E"; export CPP; \ - ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \ - CFLAGS="$(CFLAGS)"; export CFLAGS; \ - CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ -diff --git a/gcc/Makefile.in b/gcc/Makefile.in -index 011c7ac2db6..2f1165f7b5e 100644 ---- a/gcc/Makefile.in -+++ b/gcc/Makefile.in -@@ -805,7 +805,7 @@ BUILD_LDFLAGS=@BUILD_LDFLAGS@ - BUILD_NO_PIE_FLAG = @BUILD_NO_PIE_FLAG@ - BUILD_LDFLAGS += $(BUILD_NO_PIE_FLAG) - BUILD_CPPFLAGS= -I. -I$(@D) -I$(srcdir) -I$(srcdir)/$(@D) \ -- -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS) -+ -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS_FOR_BUILD) - - # Actual name to use when installing a native compiler. 
- GCC_INSTALL_NAME := $(shell echo gcc|sed '$(program_transform_name)') -diff --git a/gcc/configure b/gcc/configure -index 825a9652329..ff46cf58960 100755 ---- a/gcc/configure -+++ b/gcc/configure -@@ -12314,7 +12314,7 @@ else - CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ - CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \ - LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \ -- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \ -+ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \ - ${realsrcdir}/configure \ - --enable-languages=${enable_languages-all} \ - ${enable_obsolete+--enable-obsolete="$enable_obsolete"} \ -diff --git a/gcc/configure.ac b/gcc/configure.ac -index 6099eb3251f..b3c345b61dc 100644 ---- a/gcc/configure.ac -+++ b/gcc/configure.ac -@@ -1898,7 +1898,7 @@ else - CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ - CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \ - LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \ -- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \ -+ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \ - ${realsrcdir}/configure \ - --enable-languages=${enable_languages-all} \ - ${enable_obsolete+--enable-obsolete="$enable_obsolete"} \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch deleted file mode 100644 index 3cd75b718..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch +++ /dev/null @@ -1,25 +0,0 @@ -From 4e53d0ae70af85af0e112a48a3e4dfe4c39f4a8d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 2 Feb 2016 10:26:10 -0800 -Subject: [PATCH] nios2: Define MUSL_DYNAMIC_LINKER - -Upstream-Status: Pending - -Signed-off-by: Marek Vasut -Signed-off-by: Khem Raj ---- - gcc/config/nios2/linux.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/gcc/config/nios2/linux.h b/gcc/config/nios2/linux.h -index 4bdcdcca1f0..e7943a9d640 100644 ---- a/gcc/config/nios2/linux.h -+++ b/gcc/config/nios2/linux.h -@@ -30,6 +30,7 @@ - #define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" - - #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-nios2.so.1" -+#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-nios2.so.1" - - #undef LINK_SPEC - #define LINK_SPEC LINK_SPEC_ENDIAN \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch deleted file mode 100644 index 2a6769a82..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 5db0404eb770ac477fd99d444226bcf021067584 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 27 Jun 2017 18:10:54 -0700 -Subject: [PATCH] Add ssp_nonshared to link commandline for musl targets - -when -fstack-protector options are enabled we need to -link with ssp_shared on musl since it does not provide -the __stack_chk_fail_local() so essentially it provides -libssp but not libssp_nonshared something like -TARGET_LIBC_PROVIDES_SSP_BUT_NOT_SSP_NONSHARED - where-as for glibc the needed symbols -are already present in libc_nonshared library therefore -we do not need any library helper on glibc based systems -but musl needs the libssp_noshared from gcc - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - gcc/config/linux.h | 7 +++++++ - 
gcc/config/rs6000/linux.h | 10 ++++++++++ - gcc/config/rs6000/linux64.h | 10 ++++++++++ - 3 files changed, 27 insertions(+) - -diff --git a/gcc/config/linux.h b/gcc/config/linux.h -index 0c1a8118a26..bdc2a2d0659 100644 ---- a/gcc/config/linux.h -+++ b/gcc/config/linux.h -@@ -195,6 +195,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see - { GCC_INCLUDE_DIR, "GCC", 0, 1, 0, 0 }, \ - { 0, 0, 0, 0, 0, 0 } \ - } -+#ifdef TARGET_LIBC_PROVIDES_SSP -+#undef LINK_SSP_SPEC -+#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ -+ "|fstack-protector-strong|fstack-protector-explicit" \ -+ ":-lssp_nonshared}" -+#endif -+ - #endif - - #if (DEFAULT_LIBC == LIBC_UCLIBC) && defined (SINGLE_LIBC) /* uClinux */ -diff --git a/gcc/config/rs6000/linux.h b/gcc/config/rs6000/linux.h -index b7026fcbee7..dd54d6c393e 100644 ---- a/gcc/config/rs6000/linux.h -+++ b/gcc/config/rs6000/linux.h -@@ -94,6 +94,16 @@ - " -m elf32ppclinux") - #endif - -+/* link libssp_nonshared.a with musl */ -+#if DEFAULT_LIBC == LIBC_MUSL -+#ifdef TARGET_LIBC_PROVIDES_SSP -+#undef LINK_SSP_SPEC -+#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ -+ "|fstack-protector-strong|fstack-protector-explicit" \ -+ ":-lssp_nonshared}" -+#endif -+#endif -+ - #undef LINK_OS_LINUX_SPEC - #define LINK_OS_LINUX_SPEC LINK_OS_LINUX_EMUL " %{!shared: %{!static: \ - %{!static-pie: \ -diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h -index 967c1c43c63..dc5e4d97975 100644 ---- a/gcc/config/rs6000/linux64.h -+++ b/gcc/config/rs6000/linux64.h -@@ -452,6 +452,16 @@ extern int dot_symbols; - " -m elf64ppc") - #endif - -+/* link libssp_nonshared.a with musl */ -+#if DEFAULT_LIBC == LIBC_MUSL -+#ifdef TARGET_LIBC_PROVIDES_SSP -+#undef LINK_SSP_SPEC -+#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ -+ "|fstack-protector-strong|fstack-protector-explicit" \ -+ ":-lssp_nonshared}" -+#endif -+#endif -+ - #define LINK_OS_LINUX_SPEC32 LINK_OS_LINUX_EMUL32 " %{!shared: %{!static: \ - %{!static-pie: \ - %{rdynamic:-export-dynamic} \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch deleted file mode 100644 index 767cba038..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch +++ /dev/null @@ -1,26 +0,0 @@ -From fbc926dbf6a47fa623b9c94cd9b09a0e90448fdc Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 4 May 2016 21:11:34 -0700 -Subject: [PATCH] Link libgcc using LDFLAGS, not just SHLIB_LDFLAGS - -Upstream-Status: Pending - -Signed-off-by: Christopher Larson -Signed-off-by: Khem Raj ---- - libgcc/config/t-slibgcc | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/libgcc/config/t-slibgcc b/libgcc/config/t-slibgcc -index c997553447c..330352c2c81 100644 ---- a/libgcc/config/t-slibgcc -+++ b/libgcc/config/t-slibgcc -@@ -32,7 +32,7 @@ SHLIB_INSTALL_SOLINK = $(LN_S) $(SHLIB_SONAME) \ - $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK) - - SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \ -- $(SHLIB_LDFLAGS) \ -+ $(LDFLAGS) $(SHLIB_LDFLAGS) \ - -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp @multilib_flags@ \ - $(SHLIB_OBJS) $(SHLIB_LC) && \ - rm -f $(SHLIB_DIR)/$(SHLIB_SOLINK) && \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0030-sync-gcc-stddef.h-with-musl.patch 
b/poky/meta/recipes-devtools/gcc/gcc-10.1/0030-sync-gcc-stddef.h-with-musl.patch deleted file mode 100644 index 4f18907a1..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0030-sync-gcc-stddef.h-with-musl.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 24dc04dc8d69e3bf61322615b3ef18e02ccd311e Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Fri, 3 Feb 2017 12:56:00 -0800 -Subject: [PATCH] sync gcc stddef.h with musl - -musl defines ptrdiff_t size_t and wchar_t -so dont define them here if musl is definining them - -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - gcc/ginclude/stddef.h | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/gcc/ginclude/stddef.h b/gcc/ginclude/stddef.h -index 9d67eac4947..6cb5edbedb4 100644 ---- a/gcc/ginclude/stddef.h -+++ b/gcc/ginclude/stddef.h -@@ -128,6 +128,7 @@ _TYPE_wchar_t; - #ifndef ___int_ptrdiff_t_h - #ifndef _GCC_PTRDIFF_T - #ifndef _PTRDIFF_T_DECLARED /* DragonFly */ -+#ifndef __DEFINED_ptrdiff_t /* musl */ - #define _PTRDIFF_T - #define _T_PTRDIFF_ - #define _T_PTRDIFF -@@ -137,10 +138,12 @@ _TYPE_wchar_t; - #define ___int_ptrdiff_t_h - #define _GCC_PTRDIFF_T - #define _PTRDIFF_T_DECLARED -+#define __DEFINED_ptrdiff_t /* musl */ - #ifndef __PTRDIFF_TYPE__ - #define __PTRDIFF_TYPE__ long int - #endif - typedef __PTRDIFF_TYPE__ ptrdiff_t; -+#endif /* __DEFINED_ptrdiff_t */ - #endif /* _PTRDIFF_T_DECLARED */ - #endif /* _GCC_PTRDIFF_T */ - #endif /* ___int_ptrdiff_t_h */ -@@ -178,6 +181,7 @@ typedef __PTRDIFF_TYPE__ ptrdiff_t; - #ifndef _GCC_SIZE_T - #ifndef _SIZET_ - #ifndef __size_t -+#ifndef __DEFINED_size_t /* musl */ - #define __size_t__ /* BeOS */ - #define __SIZE_T__ /* Cray Unicos/Mk */ - #define _SIZE_T -@@ -194,6 +198,7 @@ typedef __PTRDIFF_TYPE__ ptrdiff_t; - #define ___int_size_t_h - #define _GCC_SIZE_T - #define _SIZET_ -+#define __DEFINED_size_t /* musl */ - #if defined (__FreeBSD__) \ - || defined(__DragonFly__) \ - || defined(__FreeBSD_kernel__) \ -@@ -228,6 +233,7 @@ typedef long ssize_t; - #endif /* _SIZE_T */ - #endif /* __SIZE_T__ */ - #endif /* __size_t__ */ -+#endif /* __DEFINED_size_t */ - #undef __need_size_t - #endif /* _STDDEF_H or __need_size_t. */ - -@@ -257,6 +263,7 @@ typedef long ssize_t; - #ifndef ___int_wchar_t_h - #ifndef __INT_WCHAR_T_H - #ifndef _GCC_WCHAR_T -+#ifndef __DEFINED_wchar_t /* musl */ - #define __wchar_t__ /* BeOS */ - #define __WCHAR_T__ /* Cray Unicos/Mk */ - #define _WCHAR_T -@@ -272,6 +279,7 @@ typedef long ssize_t; - #define __INT_WCHAR_T_H - #define _GCC_WCHAR_T - #define _WCHAR_T_DECLARED -+#define __DEFINED_wchar_t /* musl */ - - /* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ - instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other -@@ -337,6 +345,7 @@ typedef __WCHAR_TYPE__ wchar_t; - #endif - #endif /* __WCHAR_T__ */ - #endif /* __wchar_t__ */ -+#endif /* __DEFINED_wchar_t musl */ - #undef __need_wchar_t - #endif /* _STDDEF_H or __need_wchar_t. 
*/ - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0031-fix-segmentation-fault-in-precompiled-header-generat.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0031-fix-segmentation-fault-in-precompiled-header-generat.patch deleted file mode 100644 index 702279af0..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0031-fix-segmentation-fault-in-precompiled-header-generat.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 27b8ba5555ada2dab076988529bfb84d00a4b901 Mon Sep 17 00:00:00 2001 -From: Juro Bystricky -Date: Mon, 19 Mar 2018 22:31:20 -0700 -Subject: [PATCH] fix segmentation fault in precompiled header generation - -Prevent a segmentation fault which occurs when using incorrect -structure trying to access name of some named operators, such as -CPP_NOT, CPP_AND etc. "token->val.node.spelling" cannot be used in -those cases, as is may not be initialized at all. - -[YOCTO #11738] - -Upstream-Status: Pending - -Signed-off-by: Juro Bystricky -Signed-off-by: Khem Raj ---- - libcpp/lex.c | 26 +++++++++++++++++++++----- - 1 file changed, 21 insertions(+), 5 deletions(-) - -diff --git a/libcpp/lex.c b/libcpp/lex.c -index 56ac3a1dd73..73a951148b3 100644 ---- a/libcpp/lex.c -+++ b/libcpp/lex.c -@@ -3311,11 +3311,27 @@ cpp_spell_token (cpp_reader *pfile, const cpp_token *token, - spell_ident: - case SPELL_IDENT: - if (forstring) -- { -- memcpy (buffer, NODE_NAME (token->val.node.spelling), -- NODE_LEN (token->val.node.spelling)); -- buffer += NODE_LEN (token->val.node.spelling); -- } -+ { -+ if (token->type == CPP_NAME) -+ { -+ memcpy (buffer, NODE_NAME (token->val.node.spelling), -+ NODE_LEN (token->val.node.spelling)); -+ buffer += NODE_LEN (token->val.node.spelling); -+ break; -+ } -+ /* NAMED_OP, cannot use node.spelling */ -+ if (token->flags & NAMED_OP) -+ { -+ const char *str = cpp_named_operator2name (token->type); -+ if (str) -+ { -+ size_t len = strlen(str); -+ memcpy(buffer, str, len); -+ buffer += len; -+ } -+ break; -+ } -+ } - else - buffer = _cpp_spell_ident_ucns (buffer, token->val.node.node); - break; diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0032-Fix-for-testsuite-failure.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0032-Fix-for-testsuite-failure.patch deleted file mode 100644 index 0a0767b44..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0032-Fix-for-testsuite-failure.patch +++ /dev/null @@ -1,255 +0,0 @@ -From 2512aacf023c679d86d8f40caff4f6ff412b32ff Mon Sep 17 00:00:00 2001 -From: RAGHUNATH LOLUR -Date: Wed, 6 Dec 2017 22:52:26 -0800 -Subject: [PATCH] Fix for testsuite failure - -2017-11-16 Raghunath Lolur - - * gcc.dg/pr56275.c: If SSE is disabled, ensure that - "-mfpmath" is not set to use SSE. Set "-mfpmath=387". - * gcc.dg/pr68306.c: Likewise - * gcc.dg/pr68306-2.c: Likewise - * gcc.dg/pr68306-3.c: Likewise - * gcc.dg/pr69634.c: Likewise - * gcc.target/i386/amd64-abi-1.c: Likewise - * gcc.target/i386/funcspec-6.c: Likewise - * gcc.target/i386/interrupt-387-err-1.c: Likewise - * gcc.target/i386/isa-14.c: Likewise - * gcc.target/i386/pr44948-2b.c: Likewise - * gcc.target/i386/pr53425-1.c: Likewise - * gcc.target/i386/pr53425-2.c: Likewise - * gcc.target/i386/pr55247.c: Likewise - * gcc.target/i386/pr59644.c: Likewise - * gcc.target/i386/pr62120.c: Likewise - * gcc.target/i386/pr70467-1.c: Likewise - * gcc.target/i386/warn-vect-op-1.c: Likewise - -If -Wall, -Werror are used during compilation various test cases fail -to compile. - -If SSE is disabled, be sure to -mfpmath=387 to resolve this. 
- -This patch removes the changes to Changelog from the original patch. -This will help us avoid conflicts. - -Upstream-Status: Pending - -Signed-off-by: Mark Hatle ---- - gcc/testsuite/gcc.dg/pr56275.c | 2 +- - gcc/testsuite/gcc.dg/pr68306-2.c | 2 +- - gcc/testsuite/gcc.dg/pr68306-3.c | 2 +- - gcc/testsuite/gcc.dg/pr68306.c | 2 +- - gcc/testsuite/gcc.dg/pr69634.c | 2 +- - gcc/testsuite/gcc.target/i386/amd64-abi-1.c | 2 +- - gcc/testsuite/gcc.target/i386/funcspec-6.c | 1 + - gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c | 2 +- - gcc/testsuite/gcc.target/i386/isa-14.c | 2 +- - gcc/testsuite/gcc.target/i386/pr44948-2b.c | 2 +- - gcc/testsuite/gcc.target/i386/pr53425-1.c | 2 +- - gcc/testsuite/gcc.target/i386/pr53425-2.c | 2 +- - gcc/testsuite/gcc.target/i386/pr55247.c | 2 +- - gcc/testsuite/gcc.target/i386/pr59644.c | 2 +- - gcc/testsuite/gcc.target/i386/pr62120.c | 2 +- - gcc/testsuite/gcc.target/i386/pr70467-1.c | 2 +- - gcc/testsuite/gcc.target/i386/warn-vect-op-1.c | 2 +- - 17 files changed, 17 insertions(+), 16 deletions(-) - -diff --git a/gcc/testsuite/gcc.dg/pr56275.c b/gcc/testsuite/gcc.dg/pr56275.c -index b901bb2b199..a4f6c95e1a1 100644 ---- a/gcc/testsuite/gcc.dg/pr56275.c -+++ b/gcc/testsuite/gcc.dg/pr56275.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O2" } */ --/* { dg-additional-options "-mno-sse" { target { i?86-*-* x86_64-*-* } } } */ -+/* { dg-additional-options "-mno-sse -mfpmath=387" { target { i?86-*-* x86_64-*-* } } } */ - - typedef long long v2tw __attribute__ ((vector_size (2 * sizeof (long long)))); - -diff --git a/gcc/testsuite/gcc.dg/pr68306-2.c b/gcc/testsuite/gcc.dg/pr68306-2.c -index 4672ebe7987..2a368c484b6 100644 ---- a/gcc/testsuite/gcc.dg/pr68306-2.c -+++ b/gcc/testsuite/gcc.dg/pr68306-2.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O3" } */ --/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ -+/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ - - struct { - int tz_minuteswest; -diff --git a/gcc/testsuite/gcc.dg/pr68306-3.c b/gcc/testsuite/gcc.dg/pr68306-3.c -index f5a8c102cf8..df3390c64c2 100644 ---- a/gcc/testsuite/gcc.dg/pr68306-3.c -+++ b/gcc/testsuite/gcc.dg/pr68306-3.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O3" } */ --/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ -+/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ - /* { dg-additional-options "-mno-altivec -mno-vsx" { target powerpc*-*-* } } */ - - extern void fn2(); -diff --git a/gcc/testsuite/gcc.dg/pr68306.c b/gcc/testsuite/gcc.dg/pr68306.c -index 54e5b40f221..0813389e2c1 100644 ---- a/gcc/testsuite/gcc.dg/pr68306.c -+++ b/gcc/testsuite/gcc.dg/pr68306.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O3" } */ --/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ -+/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ - - enum powerpc_pmc_type { PPC_PMC_IBM }; - struct { -diff --git a/gcc/testsuite/gcc.dg/pr69634.c b/gcc/testsuite/gcc.dg/pr69634.c -index 60a56149463..bcc23f9ccd6 100644 ---- a/gcc/testsuite/gcc.dg/pr69634.c -+++ b/gcc/testsuite/gcc.dg/pr69634.c -@@ -1,6 +1,6 @@ - /* { dg-do compile } */ - /* { dg-options "-O2 -fno-dce -fschedule-insns -fno-tree-vrp -fcompare-debug -Wno-psabi" } */ --/* { dg-additional-options "-mno-sse" { target i?86-*-* x86_64-*-* } } */ -+/* { 
dg-additional-options "-mno-sse -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ - /* { dg-require-effective-target scheduling } */ - - typedef unsigned short u16; -diff --git a/gcc/testsuite/gcc.target/i386/amd64-abi-1.c b/gcc/testsuite/gcc.target/i386/amd64-abi-1.c -index 69fde57bf06..7f1f1c03edf 100644 ---- a/gcc/testsuite/gcc.target/i386/amd64-abi-1.c -+++ b/gcc/testsuite/gcc.target/i386/amd64-abi-1.c -@@ -1,5 +1,5 @@ - /* { dg-do compile { target { ! ia32 } } } */ --/* { dg-options "-mno-sse" } */ -+/* { dg-options "-mno-sse -mfpmath=387" } */ - /* { dg-additional-options "-mabi=sysv" { target *-*-mingw* } } */ - - double foo(void) { return 0; } /* { dg-error "SSE disabled" } */ -diff --git a/gcc/testsuite/gcc.target/i386/funcspec-6.c b/gcc/testsuite/gcc.target/i386/funcspec-6.c -index ea896b7ebfd..bf15569b826 100644 ---- a/gcc/testsuite/gcc.target/i386/funcspec-6.c -+++ b/gcc/testsuite/gcc.target/i386/funcspec-6.c -@@ -1,6 +1,7 @@ - /* Test whether all of the 64-bit function specific options are accepted - without error. */ - /* { dg-do compile { target { ! ia32 } } } */ -+/* { dg-additional-options "-mfpmath=387" } */ - - #include "funcspec-56.inc" - -diff --git a/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c b/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c -index 8561a3c26d6..6377f814645 100644 ---- a/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c -+++ b/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c -@@ -1,5 +1,5 @@ - /* { dg-do compile } */ --/* { dg-options "-O2 -mgeneral-regs-only -mno-cld -mno-iamcu -m80387" } */ -+/* { dg-options "-O2 -mgeneral-regs-only -mno-cld -mno-iamcu -m80387 -mfpmath=387" } */ - - typedef unsigned int uword_t __attribute__ ((mode (__word__))); - -diff --git a/gcc/testsuite/gcc.target/i386/isa-14.c b/gcc/testsuite/gcc.target/i386/isa-14.c -index 5d49e6e77fe..1de2db92bdd 100644 ---- a/gcc/testsuite/gcc.target/i386/isa-14.c -+++ b/gcc/testsuite/gcc.target/i386/isa-14.c -@@ -1,5 +1,5 @@ - /* { dg-do run } */ --/* { dg-options "-march=x86-64 -msse4a -mfma4 -mno-sse" } */ -+/* { dg-options "-march=x86-64 -msse4a -mfma4 -mno-sse -mfpmath=387" } */ - - extern void abort (void); - -diff --git a/gcc/testsuite/gcc.target/i386/pr44948-2b.c b/gcc/testsuite/gcc.target/i386/pr44948-2b.c -index fa1769b62fb..f79fb12726f 100644 ---- a/gcc/testsuite/gcc.target/i386/pr44948-2b.c -+++ b/gcc/testsuite/gcc.target/i386/pr44948-2b.c -@@ -1,5 +1,5 @@ - /* { dg-do compile } */ --/* { dg-options "-O -mno-sse -Wno-psabi -mtune=generic" } */ -+/* { dg-options "-O -mno-sse -Wno-psabi -mtune=generic -mfpmath=387" } */ - - struct A - { -diff --git a/gcc/testsuite/gcc.target/i386/pr53425-1.c b/gcc/testsuite/gcc.target/i386/pr53425-1.c -index 2e89ff7d81d..6339bf6b736 100644 ---- a/gcc/testsuite/gcc.target/i386/pr53425-1.c -+++ b/gcc/testsuite/gcc.target/i386/pr53425-1.c -@@ -1,6 +1,6 @@ - /* PR target/53425 */ - /* { dg-do compile { target { ! ia32 } } } */ --/* { dg-options "-O2 -mno-sse" } */ -+/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ - /* { dg-skip-if "no SSE vector" { x86_64-*-mingw* } } */ - - typedef double __v2df __attribute__ ((__vector_size__ (16))); -diff --git a/gcc/testsuite/gcc.target/i386/pr53425-2.c b/gcc/testsuite/gcc.target/i386/pr53425-2.c -index 61f6283dbe9..2c5a55f0ac3 100644 ---- a/gcc/testsuite/gcc.target/i386/pr53425-2.c -+++ b/gcc/testsuite/gcc.target/i386/pr53425-2.c -@@ -1,6 +1,6 @@ - /* PR target/53425 */ - /* { dg-do compile { target { ! 
ia32 } } } */ --/* { dg-options "-O2 -mno-sse" } */ -+/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ - /* { dg-skip-if "no SSE vector" { x86_64-*-mingw* } } */ - - typedef float __v2sf __attribute__ ((__vector_size__ (8))); -diff --git a/gcc/testsuite/gcc.target/i386/pr55247.c b/gcc/testsuite/gcc.target/i386/pr55247.c -index 23366d0909d..9810e3abb76 100644 ---- a/gcc/testsuite/gcc.target/i386/pr55247.c -+++ b/gcc/testsuite/gcc.target/i386/pr55247.c -@@ -1,6 +1,6 @@ - /* { dg-do compile { target { ! ia32 } } } */ - /* { dg-require-effective-target maybe_x32 } */ --/* { dg-options "-O2 -mno-sse -mno-mmx -mx32 -maddress-mode=long" } */ -+/* { dg-options "-O2 -mno-sse -mno-mmx -mx32 -maddress-mode=long -mfpmath=387" } */ - - typedef unsigned int uint32_t; - typedef uint32_t Elf32_Word; -diff --git a/gcc/testsuite/gcc.target/i386/pr59644.c b/gcc/testsuite/gcc.target/i386/pr59644.c -index 96006b3e338..4287e4538bf 100644 ---- a/gcc/testsuite/gcc.target/i386/pr59644.c -+++ b/gcc/testsuite/gcc.target/i386/pr59644.c -@@ -1,6 +1,6 @@ - /* PR target/59644 */ - /* { dg-do run { target lp64 } } */ --/* { dg-options "-O2 -ffreestanding -mno-sse -mpreferred-stack-boundary=3 -maccumulate-outgoing-args -mno-red-zone" } */ -+/* { dg-options "-O2 -ffreestanding -mno-sse -mpreferred-stack-boundary=3 -maccumulate-outgoing-args -mno-red-zone -mfpmath=387" } */ - - /* This test uses __builtin_trap () instead of e.g. abort, - because due to -mpreferred-stack-boundary=3 it should not call -diff --git a/gcc/testsuite/gcc.target/i386/pr62120.c b/gcc/testsuite/gcc.target/i386/pr62120.c -index 28d85d37712..c93266bd4bc 100644 ---- a/gcc/testsuite/gcc.target/i386/pr62120.c -+++ b/gcc/testsuite/gcc.target/i386/pr62120.c -@@ -1,5 +1,5 @@ - /* { dg-do compile } */ --/* { dg-options "-mno-sse" } */ -+/* { dg-options "-mno-sse -mfpmath=387" } */ - - void foo () - { -diff --git a/gcc/testsuite/gcc.target/i386/pr70467-1.c b/gcc/testsuite/gcc.target/i386/pr70467-1.c -index 4e112c88d07..bcfb396a68d 100644 ---- a/gcc/testsuite/gcc.target/i386/pr70467-1.c -+++ b/gcc/testsuite/gcc.target/i386/pr70467-1.c -@@ -1,6 +1,6 @@ - /* PR rtl-optimization/70467 */ - /* { dg-do compile } */ --/* { dg-options "-O2 -mno-sse" } */ -+/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ - - void foo (unsigned long long *); - -diff --git a/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c b/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c -index 6cda1534311..26e37f5b8ba 100644 ---- a/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c -+++ b/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c -@@ -1,5 +1,5 @@ - /* { dg-do compile { target { ! 
ia32 } } } */ --/* { dg-options "-mno-sse -Wvector-operation-performance" } */ -+/* { dg-options "-mno-sse -Wvector-operation-performance -mfpmath=387" } */ - #define vector(elcount, type) \ - __attribute__((vector_size((elcount)*sizeof(type)))) type - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0033-Re-introduce-spe-commandline-options.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0033-Re-introduce-spe-commandline-options.patch deleted file mode 100644 index ba7c2b8fd..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0033-Re-introduce-spe-commandline-options.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 3fc06241ce37e2e4b3ed21ace28d347eb511448d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 6 Jun 2018 12:10:22 -0700 -Subject: [PATCH] Re-introduce spe commandline options - -This should ensure that we keep accepting -spe options - -Upstream-Status: Inappropriate [SPE port is removed from rs600 port] - -Signed-off-by: Khem Raj ---- - gcc/config/rs6000/rs6000.opt | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - -diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt -index f95b8279270..0e52d51409d 100644 ---- a/gcc/config/rs6000/rs6000.opt -+++ b/gcc/config/rs6000/rs6000.opt -@@ -344,6 +344,18 @@ mdebug= - Target RejectNegative Joined - -mdebug= Enable debug output. - -+mspe -+Target Var(rs6000_spe) Save -+Generate SPE SIMD instructions on E500. -+ -+mabi=spe -+Target RejectNegative Var(rs6000_spe_abi) Save -+Use the SPE ABI extensions. -+ -+mabi=no-spe -+Target RejectNegative Var(rs6000_spe_abi, 0) -+Do not use the SPE ABI extensions. -+ - mabi=altivec - Target RejectNegative Var(rs6000_altivec_abi) Save - Use the AltiVec ABI extensions. diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch deleted file mode 100644 index 4ce9dc6de..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch +++ /dev/null @@ -1,83 +0,0 @@ -From b42ff59c3fe2967d37815c8db72a47b9b7f585b4 Mon Sep 17 00:00:00 2001 -From: Szabolcs Nagy -Date: Sat, 24 Oct 2015 20:09:53 +0000 -Subject: [PATCH] libgcc_s: Use alias for __cpu_indicator_init instead of - symver - -Adapter from - -https://gcc.gnu.org/ml/gcc-patches/2015-05/msg00899.html - -This fix was debated but hasnt been applied gcc upstream since -they expect musl to support '@' in symbol versioning which is -a sun/gnu versioning extention. This patch however avoids the -need for the '@' symbols at all - -libgcc/Changelog: - -2015-05-11 Szabolcs Nagy - - * config/i386/cpuinfo.c (__cpu_indicator_init_local): Add. - (__cpu_indicator_init@GCC_4.8.0, __cpu_model@GCC_4.8.0): Remove. - - * config/i386/t-linux (HOST_LIBGCC2_CFLAGS): Remove -DUSE_ELF_SYMVER. - -gcc/Changelog: - -2015-05-11 Szabolcs Nagy - - * config/i386/i386-expand.c (ix86_expand_builtin): Make __builtin_cpu_init - call __cpu_indicator_init_local instead of __cpu_indicator_init. 
- -Upstream-Status: Pending - -Signed-off-by: Khem Raj ---- - gcc/config/i386/i386-expand.c | 4 ++-- - libgcc/config/i386/cpuinfo.c | 6 +++--- - libgcc/config/i386/t-linux | 2 +- - 3 files changed, 6 insertions(+), 6 deletions(-) - -diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c -index 48f00c5fcfc..468f5f71fac 100644 ---- a/gcc/config/i386/i386-expand.c -+++ b/gcc/config/i386/i386-expand.c -@@ -10941,10 +10941,10 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget, - { - case IX86_BUILTIN_CPU_INIT: - { -- /* Make it call __cpu_indicator_init in libgcc. */ -+ /* Make it call __cpu_indicator_init_local in libgcc.a. */ - tree call_expr, fndecl, type; - type = build_function_type_list (integer_type_node, NULL_TREE); -- fndecl = build_fn_decl ("__cpu_indicator_init", type); -+ fndecl = build_fn_decl ("__cpu_indicator_init_local", type); - call_expr = build_call_expr (fndecl, 0); - return expand_expr (call_expr, target, mode, EXPAND_NORMAL); - } -diff --git a/libgcc/config/i386/cpuinfo.c b/libgcc/config/i386/cpuinfo.c -index 00322c58622..f42bbb8af98 100644 ---- a/libgcc/config/i386/cpuinfo.c -+++ b/libgcc/config/i386/cpuinfo.c -@@ -508,7 +508,7 @@ __cpu_indicator_init (void) - return 0; - } - --#if defined SHARED && defined USE_ELF_SYMVER --__asm__ (".symver __cpu_indicator_init, __cpu_indicator_init@GCC_4.8.0"); --__asm__ (".symver __cpu_model, __cpu_model@GCC_4.8.0"); -+#ifndef SHARED -+int __cpu_indicator_init_local (void) -+ __attribute__ ((weak, alias ("__cpu_indicator_init"))); - #endif -diff --git a/libgcc/config/i386/t-linux b/libgcc/config/i386/t-linux -index 8506a635790..564296f788e 100644 ---- a/libgcc/config/i386/t-linux -+++ b/libgcc/config/i386/t-linux -@@ -3,5 +3,5 @@ - # t-slibgcc-elf-ver and t-linux - SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/i386/libgcc-glibc.ver - --HOST_LIBGCC2_CFLAGS += -mlong-double-80 -DUSE_ELF_SYMVER $(CET_FLAGS) -+HOST_LIBGCC2_CFLAGS += -mlong-double-80 $(CET_FLAGS) - CRTSTUFF_T_CFLAGS += $(CET_FLAGS) diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch deleted file mode 100644 index dd1bf6ded..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch +++ /dev/null @@ -1,182 +0,0 @@ -From 0395060a7dcf98c5f5a65103f6aaa71d6b862259 Mon Sep 17 00:00:00 2001 -From: Richard Purdie -Date: Tue, 10 Mar 2020 08:26:53 -0700 -Subject: [PATCH] gentypes/genmodes: Do not use __LINE__ for maintaining - reproducibility - -Inserting line numbers into generated code means its not always reproducible wth -differing versions of host gcc. Void the issue by not adding these. - -Upstream-Status: Inappropriate [OE Reproducibility specific] - -Signed-off-by: Richard Purdie -Signed-off-by: Khem Raj ---- - gcc/gengtype.c | 6 +++--- - gcc/genmodes.c | 32 ++++++++++++++++---------------- - 2 files changed, 19 insertions(+), 19 deletions(-) - -diff --git a/gcc/gengtype.c b/gcc/gengtype.c -index 981577481af..d5700fff401 100644 ---- a/gcc/gengtype.c -+++ b/gcc/gengtype.c -@@ -991,7 +991,7 @@ create_field_at (pair_p next, type_p type, const char *name, options_p opt, - /* Create a fake field with the given type and name. NEXT is the next - field in the chain. 
*/ - #define create_field(next,type,name) \ -- create_field_all (next,type,name, 0, this_file, __LINE__) -+ create_field_all (next,type,name, 0, this_file, 0) - - /* Like create_field, but the field is only valid when condition COND - is true. */ -@@ -1024,7 +1024,7 @@ create_optional_field_ (pair_p next, type_p type, const char *name, - } - - #define create_optional_field(next,type,name,cond) \ -- create_optional_field_(next,type,name,cond,__LINE__) -+ create_optional_field_(next,type,name,cond,0) - - /* Reverse a linked list of 'struct pair's in place. */ - pair_p -@@ -5187,7 +5187,7 @@ main (int argc, char **argv) - /* These types are set up with #define or else outside of where - we can see them. We should initialize them before calling - read_input_list. */ --#define POS_HERE(Call) do { pos.file = this_file; pos.line = __LINE__; \ -+#define POS_HERE(Call) do { pos.file = this_file; pos.line = 0; \ - Call;} while (0) - POS_HERE (do_scalar_typedef ("CUMULATIVE_ARGS", &pos)); - POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos)); -diff --git a/gcc/genmodes.c b/gcc/genmodes.c -index bd78310ea24..dbd02c51a4c 100644 ---- a/gcc/genmodes.c -+++ b/gcc/genmodes.c -@@ -430,7 +430,7 @@ complete_all_modes (void) - } - - /* For each mode in class CLASS, construct a corresponding complex mode. */ --#define COMPLEX_MODES(C) make_complex_modes (MODE_##C, __FILE__, __LINE__) -+#define COMPLEX_MODES(C) make_complex_modes (MODE_##C, __FILE__, 0) - static void - make_complex_modes (enum mode_class cl, - const char *file, unsigned int line) -@@ -489,7 +489,7 @@ make_complex_modes (enum mode_class cl, - having as many components as necessary. ORDER is the sorting order - of the mode, with smaller numbers indicating a higher priority. */ - #define VECTOR_MODES_WITH_PREFIX(PREFIX, C, W, ORDER) \ -- make_vector_modes (MODE_##C, #PREFIX, W, ORDER, __FILE__, __LINE__) -+ make_vector_modes (MODE_##C, #PREFIX, W, ORDER, __FILE__, 0) - #define VECTOR_MODES(C, W) VECTOR_MODES_WITH_PREFIX (V, C, W, 0) - static void ATTRIBUTE_UNUSED - make_vector_modes (enum mode_class cl, const char *prefix, unsigned int width, -@@ -541,7 +541,7 @@ make_vector_modes (enum mode_class cl, const char *prefix, unsigned int width, - /* Create a vector of booleans called NAME with COUNT elements and - BYTESIZE bytes in total. */ - #define VECTOR_BOOL_MODE(NAME, COUNT, BYTESIZE) \ -- make_vector_bool_mode (#NAME, COUNT, BYTESIZE, __FILE__, __LINE__) -+ make_vector_bool_mode (#NAME, COUNT, BYTESIZE, __FILE__, 0) - static void ATTRIBUTE_UNUSED - make_vector_bool_mode (const char *name, unsigned int count, - unsigned int bytesize, const char *file, -@@ -563,7 +563,7 @@ make_vector_bool_mode (const char *name, unsigned int count, - /* Input. 
*/ - - #define _SPECIAL_MODE(C, N) \ -- make_special_mode (MODE_##C, #N, __FILE__, __LINE__) -+ make_special_mode (MODE_##C, #N, __FILE__, 0) - #define RANDOM_MODE(N) _SPECIAL_MODE (RANDOM, N) - #define CC_MODE(N) _SPECIAL_MODE (CC, N) - -@@ -576,7 +576,7 @@ make_special_mode (enum mode_class cl, const char *name, - - #define INT_MODE(N, Y) FRACTIONAL_INT_MODE (N, -1U, Y) - #define FRACTIONAL_INT_MODE(N, B, Y) \ -- make_int_mode (#N, B, Y, __FILE__, __LINE__) -+ make_int_mode (#N, B, Y, __FILE__, 0) - - static void - make_int_mode (const char *name, -@@ -589,16 +589,16 @@ make_int_mode (const char *name, - } - - #define FRACT_MODE(N, Y, F) \ -- make_fixed_point_mode (MODE_FRACT, #N, Y, 0, F, __FILE__, __LINE__) -+ make_fixed_point_mode (MODE_FRACT, #N, Y, 0, F, __FILE__, 0) - - #define UFRACT_MODE(N, Y, F) \ -- make_fixed_point_mode (MODE_UFRACT, #N, Y, 0, F, __FILE__, __LINE__) -+ make_fixed_point_mode (MODE_UFRACT, #N, Y, 0, F, __FILE__, 0) - - #define ACCUM_MODE(N, Y, I, F) \ -- make_fixed_point_mode (MODE_ACCUM, #N, Y, I, F, __FILE__, __LINE__) -+ make_fixed_point_mode (MODE_ACCUM, #N, Y, I, F, __FILE__, 0) - - #define UACCUM_MODE(N, Y, I, F) \ -- make_fixed_point_mode (MODE_UACCUM, #N, Y, I, F, __FILE__, __LINE__) -+ make_fixed_point_mode (MODE_UACCUM, #N, Y, I, F, __FILE__, 0) - - /* Create a fixed-point mode by setting CL, NAME, BYTESIZE, IBIT, FBIT, - FILE, and LINE. */ -@@ -619,7 +619,7 @@ make_fixed_point_mode (enum mode_class cl, - - #define FLOAT_MODE(N, Y, F) FRACTIONAL_FLOAT_MODE (N, -1U, Y, F) - #define FRACTIONAL_FLOAT_MODE(N, B, Y, F) \ -- make_float_mode (#N, B, Y, #F, __FILE__, __LINE__) -+ make_float_mode (#N, B, Y, #F, __FILE__, 0) - - static void - make_float_mode (const char *name, -@@ -636,7 +636,7 @@ make_float_mode (const char *name, - #define DECIMAL_FLOAT_MODE(N, Y, F) \ - FRACTIONAL_DECIMAL_FLOAT_MODE (N, -1U, Y, F) - #define FRACTIONAL_DECIMAL_FLOAT_MODE(N, B, Y, F) \ -- make_decimal_float_mode (#N, B, Y, #F, __FILE__, __LINE__) -+ make_decimal_float_mode (#N, B, Y, #F, __FILE__, 0) - - static void - make_decimal_float_mode (const char *name, -@@ -651,7 +651,7 @@ make_decimal_float_mode (const char *name, - } - - #define RESET_FLOAT_FORMAT(N, F) \ -- reset_float_format (#N, #F, __FILE__, __LINE__) -+ reset_float_format (#N, #F, __FILE__, 0) - static void ATTRIBUTE_UNUSED - reset_float_format (const char *name, const char *format, - const char *file, unsigned int line) -@@ -672,7 +672,7 @@ reset_float_format (const char *name, const char *format, - - /* __intN support. */ - #define INT_N(M,PREC) \ -- make_int_n (#M, PREC, __FILE__, __LINE__) -+ make_int_n (#M, PREC, __FILE__, 0) - static void ATTRIBUTE_UNUSED - make_int_n (const char *m, int bitsize, - const char *file, unsigned int line) -@@ -701,7 +701,7 @@ make_int_n (const char *m, int bitsize, - /* Partial integer modes are specified by relation to a full integer - mode. */ - #define PARTIAL_INT_MODE(M,PREC,NAME) \ -- make_partial_integer_mode (#M, #NAME, PREC, __FILE__, __LINE__) -+ make_partial_integer_mode (#M, #NAME, PREC, __FILE__, 0) - static void ATTRIBUTE_UNUSED - make_partial_integer_mode (const char *base, const char *name, - unsigned int precision, -@@ -728,7 +728,7 @@ make_partial_integer_mode (const char *base, const char *name, - /* A single vector mode can be specified by naming its component - mode and the number of components. 
*/ - #define VECTOR_MODE(C, M, N) \ -- make_vector_mode (MODE_##C, #M, N, __FILE__, __LINE__); -+ make_vector_mode (MODE_##C, #M, N, __FILE__, 0); - static void ATTRIBUTE_UNUSED - make_vector_mode (enum mode_class bclass, - const char *base, -@@ -771,7 +771,7 @@ make_vector_mode (enum mode_class bclass, - - /* Adjustability. */ - #define _ADD_ADJUST(A, M, X, C1, C2) \ -- new_adjust (#M, &adj_##A, #A, #X, MODE_##C1, MODE_##C2, __FILE__, __LINE__) -+ new_adjust (#M, &adj_##A, #A, #X, MODE_##C1, MODE_##C2, __FILE__, 0) - - #define ADJUST_NUNITS(M, X) _ADD_ADJUST (nunits, M, X, RANDOM, RANDOM) - #define ADJUST_BYTESIZE(M, X) _ADD_ADJUST (bytesize, M, X, RANDOM, RANDOM) diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0036-Enable-CET-in-cross-compiler-if-possible.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0036-Enable-CET-in-cross-compiler-if-possible.patch deleted file mode 100644 index af8064282..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0036-Enable-CET-in-cross-compiler-if-possible.patch +++ /dev/null @@ -1,150 +0,0 @@ -From 87a5ace658f2294b025e0420ef03ade1f45d926c Mon Sep 17 00:00:00 2001 -From: "H.J. Lu" -Date: Fri, 8 May 2020 06:11:05 -0700 -Subject: [PATCH] Enable CET in cross compiler if possible - -Don't perform CET run-time check for host when cross compiling. Instead, -enable CET in cross compiler if possible so that it will run on both CET -and non-CET hosts. - -config/ - - PR bootstrap/94998 - * cet.m4 (GCC_CET_HOST_FLAGS): Enable CET in cross compiler if - possible. - -libiberty/ - - PR bootstrap/94998 - * configure: Regenerated. - -lto-plugin/ - - PR bootstrap/94998 - * configure: Regenerated. - -Upstream-Status: Submitted [https://patchwork.ozlabs.org/project/gcc/patch/20200508131105.170077-1-hjl.tools@gmail.com/] -Signed-off-by: Khem Raj ---- - config/cet.m4 | 18 ++++++++++++------ - libiberty/configure | 12 +++++++++--- - lto-plugin/configure | 16 +++++++++++----- - 3 files changed, 32 insertions(+), 14 deletions(-) - -diff --git a/config/cet.m4 b/config/cet.m4 -index ea616b728a9..d9608699cd5 100644 ---- a/config/cet.m4 -+++ b/config/cet.m4 -@@ -111,7 +111,8 @@ if test x$may_have_cet = xyes; then - fi - - if test x$may_have_cet = xyes; then -- AC_TRY_RUN([ -+ if test x$cross_compiling = xno; then -+ AC_TRY_RUN([ - static void - foo (void) - { -@@ -137,12 +138,17 @@ main () - bar (); - return 0; - } -- ], -- [have_cet=no], -- [have_cet=yes]) -- if test x$enable_cet = xno -a x$have_cet = xyes; then -- AC_MSG_ERROR([Intel CET must be enabled on Intel CET enabled host]) -+ ], -+ [have_cet=no], -+ [have_cet=yes]) -+ if test x$enable_cet = xno -a x$have_cet = xyes; then -+ AC_MSG_ERROR([Intel CET must be enabled on Intel CET enabled host]) -+ fi - fi -+else -+ # Enable CET in cross compiler if possible so that it will run on both -+ # CET and non-CET hosts. -+ have_cet=yes - fi - if test x$enable_cet = xyes; then - $1="-fcf-protection" -diff --git a/libiberty/configure b/libiberty/configure -index bb76cf1b823..3f82c5bb865 100755 ---- a/libiberty/configure -+++ b/libiberty/configure -@@ -5375,7 +5375,8 @@ rm -f core conftest.err conftest.$ac_objext \ - fi - - if test x$may_have_cet = xyes; then -- if test "$cross_compiling" = yes; then : -+ if test x$cross_compiling = xno; then -+ if test "$cross_compiling" = yes; then : - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 - $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - as_fn_error $? 
"cannot run test program while cross compiling -@@ -5420,9 +5421,14 @@ rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext - fi - -- if test x$enable_cet = xno -a x$have_cet = xyes; then -- as_fn_error $? "Intel CET must be enabled on Intel CET enabled host" "$LINENO" 5 -+ if test x$enable_cet = xno -a x$have_cet = xyes; then -+ as_fn_error $? "Intel CET must be enabled on Intel CET enabled host" "$LINENO" 5 -+ fi - fi -+else -+ # Enable CET in cross compiler if possible so that it will run on both -+ # CET and non-CET hosts. -+ have_cet=yes - fi - if test x$enable_cet = xyes; then - CET_HOST_FLAGS="-fcf-protection" -diff --git a/lto-plugin/configure b/lto-plugin/configure -index 1baf6cc70b8..36c6ecc5875 100755 ---- a/lto-plugin/configure -+++ b/lto-plugin/configure -@@ -5768,7 +5768,8 @@ rm -f core conftest.err conftest.$ac_objext \ - fi - - if test x$may_have_cet = xyes; then -- if test "$cross_compiling" = yes; then : -+ if test x$cross_compiling = xno; then -+ if test "$cross_compiling" = yes; then : - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 - $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - as_fn_error $? "cannot run test program while cross compiling -@@ -5813,9 +5814,14 @@ rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext - fi - -- if test x$enable_cet = xno -a x$have_cet = xyes; then -- as_fn_error $? "Intel CET must be enabled on Intel CET enabled host" "$LINENO" 5 -+ if test x$enable_cet = xno -a x$have_cet = xyes; then -+ as_fn_error $? "Intel CET must be enabled on Intel CET enabled host" "$LINENO" 5 -+ fi - fi -+else -+ # Enable CET in cross compiler if possible so that it will run on both -+ # CET and non-CET hosts. 
-+ have_cet=yes - fi - if test x$enable_cet = xyes; then - CET_HOST_FLAGS="-fcf-protection" -@@ -11941,7 +11947,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 11944 "configure" -+#line 11950 "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H -@@ -12047,7 +12053,7 @@ else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF --#line 12050 "configure" -+#line 12056 "configure" - #include "confdefs.h" - - #if HAVE_DLFCN_H diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0037-mingw32-Enable-operation_not_supported.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0037-mingw32-Enable-operation_not_supported.patch deleted file mode 100644 index de82a3a53..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0037-mingw32-Enable-operation_not_supported.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 6f87a095f0e696bec07a50df789c9db8bdbca43d Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Tue, 12 May 2020 10:39:09 -0700 -Subject: [PATCH] mingw32: Enable operation_not_supported - -Fixes nativesdk build errors on mingw32 gcc-runtime - -Upstream-Status: Pending -Signed-off-by: Khem Raj ---- - libstdc++-v3/config/os/mingw32/error_constants.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/libstdc++-v3/config/os/mingw32/error_constants.h b/libstdc++-v3/config/os/mingw32/error_constants.h -index 68ac72a78fb..71cd5815b81 100644 ---- a/libstdc++-v3/config/os/mingw32/error_constants.h -+++ b/libstdc++-v3/config/os/mingw32/error_constants.h -@@ -107,7 +107,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION - #ifdef _GLIBCXX_HAVE_EPERM - operation_not_permitted = EPERM, - #endif --// operation_not_supported = EOPNOTSUPP, -+ operation_not_supported = EOPNOTSUPP, - #ifdef _GLIBCXX_HAVE_EWOULDBLOCK - operation_would_block = EWOULDBLOCK, - #endif diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/0038-libatomic-Do-not-enforce-march-on-aarch64.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/0038-libatomic-Do-not-enforce-march-on-aarch64.patch deleted file mode 100644 index 3946acea1..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/0038-libatomic-Do-not-enforce-march-on-aarch64.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 38d262bfe7c0c894c364dc6e4dc7971e78a73974 Mon Sep 17 00:00:00 2001 -From: Khem Raj -Date: Wed, 13 May 2020 15:10:38 -0700 -Subject: [PATCH] libatomic: Do not enforce march on aarch64 - -OE passes the right options via gcc compiler cmdline via TUNE_CCARGS -this can conflict between -mcpu settings and -march setting here, since --mcpu will translate into an appropriate -march, lets depend on that -instead of setting it explicitly - -Upstream-Status: Inappropriate [OE-Specific] - -Signed-off-by: Khem Raj ---- - libatomic/Makefile.am | 1 - - libatomic/Makefile.in | 1 - - 2 files changed, 2 deletions(-) - -diff --git a/libatomic/Makefile.am b/libatomic/Makefile.am -index 133fbbca77e..ac1ca645876 100644 ---- a/libatomic/Makefile.am -+++ b/libatomic/Makefile.am -@@ -125,7 +125,6 @@ libatomic_la_LIBADD = $(foreach s,$(SIZES),$(addsuffix _$(s)_.lo,$(SIZEOBJS))) - ## On a target-specific basis, include alternates to be selected by IFUNC. 
- if HAVE_IFUNC - if ARCH_AARCH64_LINUX --IFUNC_OPTIONS = -march=armv8-a+lse - libatomic_la_LIBADD += $(foreach s,$(SIZES),$(addsuffix _$(s)_1_.lo,$(SIZEOBJS))) - endif - if ARCH_ARM_LINUX -diff --git a/libatomic/Makefile.in b/libatomic/Makefile.in -index a51807e95c9..97df2d7ff03 100644 ---- a/libatomic/Makefile.in -+++ b/libatomic/Makefile.in -@@ -431,7 +431,6 @@ M_SRC = $(firstword $(filter %/$(M_FILE), $(all_c_files))) - libatomic_la_LIBADD = $(foreach s,$(SIZES),$(addsuffix \ - _$(s)_.lo,$(SIZEOBJS))) $(am__append_1) $(am__append_2) \ - $(am__append_3) $(am__append_4) --@ARCH_AARCH64_LINUX_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=armv8-a+lse - @ARCH_ARM_LINUX_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=armv7-a+fp -DHAVE_KERNEL64 - @ARCH_I386_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=i586 - @ARCH_X86_64_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -mcx16 diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.1/pr96130.patch b/poky/meta/recipes-devtools/gcc/gcc-10.1/pr96130.patch deleted file mode 100644 index f0e6f85e2..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-10.1/pr96130.patch +++ /dev/null @@ -1,106 +0,0 @@ -From 0d03c0ee5213703ec6d9ffa632fa5298d83adaaa Mon Sep 17 00:00:00 2001 -From: Jakub Jelinek -Date: Mon, 13 Jul 2020 18:25:53 +0200 -Subject: [PATCH] ipa-fnsummary: Fix ICE with switch predicates [PR96130] - -The following testcase ICEs since r10-3199. -There is a switch with default label, where the controlling expression has -range just 0..7 and there are case labels for all those 8 values, but -nothing has yet optimized away the default. -Since r10-3199, set_switch_stmt_execution_predicate sets the switch to -default label's edge's predicate to a false predicate and then -compute_bb_predicates propagates the predicates through the cfg, but false -predicates aren't really added. The caller of compute_bb_predicates -in one place handles NULL bb->aux as false predicate: - if (fbi.info) - { - if (bb->aux) - bb_predicate = *(predicate *) bb->aux; - else - bb_predicate = false; - } - else - bb_predicate = true; -but then in two further spots that the patch below is changing -it assumes bb->aux must be non-NULL. Those two spots are guarded by a -condition that is only true if fbi.info is non-NULL, so I think the right -fix is to treat NULL aux as false predicate in those spots too. - -2020-07-13 Jakub Jelinek - - PR ipa/96130 - * ipa-fnsummary.c (analyze_function_body): Treat NULL bb->aux - as false predicate. - - * gcc.dg/torture/pr96130.c: New test. 
- -(cherry picked from commit 776e48e0931db69f158f40e5cb8e15463d879a42) ---- - gcc/ipa-fnsummary.c | 10 ++++++++-- - gcc/testsuite/gcc.dg/torture/pr96130.c | 26 ++++++++++++++++++++++++++ - 2 files changed, 34 insertions(+), 2 deletions(-) - create mode 100644 gcc/testsuite/gcc.dg/torture/pr96130.c - -diff --git a/gcc/ipa-fnsummary.c b/gcc/ipa-fnsummary.c -index 045a0ecf766..55a0b272a96 100644 ---- a/gcc/ipa-fnsummary.c -+++ b/gcc/ipa-fnsummary.c -@@ -2766,7 +2766,10 @@ analyze_function_body (struct cgraph_node *node, bool early) - edge ex; - unsigned int j; - class tree_niter_desc niter_desc; -- bb_predicate = *(predicate *) loop->header->aux; -+ if (loop->header->aux) -+ bb_predicate = *(predicate *) loop->header->aux; -+ else -+ bb_predicate = false; - - exits = get_loop_exit_edges (loop); - FOR_EACH_VEC_ELT (exits, j, ex) -@@ -2799,7 +2802,10 @@ analyze_function_body (struct cgraph_node *node, bool early) - for (unsigned i = 0; i < loop->num_nodes; i++) - { - gimple_stmt_iterator gsi; -- bb_predicate = *(predicate *) body[i]->aux; -+ if (body[i]->aux) -+ bb_predicate = *(predicate *) body[i]->aux; -+ else -+ bb_predicate = false; - for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); - gsi_next (&gsi)) - { -diff --git a/gcc/testsuite/gcc.dg/torture/pr96130.c b/gcc/testsuite/gcc.dg/torture/pr96130.c -new file mode 100644 -index 00000000000..f722b9ad2a9 ---- /dev/null -+++ b/gcc/testsuite/gcc.dg/torture/pr96130.c -@@ -0,0 +1,26 @@ -+/* PR ipa/96130 */ -+/* { dg-do compile } */ -+ -+struct S { unsigned j : 3; }; -+int k, l, m; -+ -+void -+foo (struct S x) -+{ -+ while (l != 5) -+ switch (x.j) -+ { -+ case 1: -+ case 3: -+ case 4: -+ case 6: -+ case 2: -+ case 5: -+ l = m; -+ case 7: -+ case 0: -+ k = 0; -+ default: -+ break; -+ } -+} --- -2.18.4 - diff --git a/poky/meta/recipes-devtools/gcc/gcc-10.2.inc b/poky/meta/recipes-devtools/gcc/gcc-10.2.inc new file mode 100644 index 000000000..7625af511 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-10.2.inc @@ -0,0 +1,120 @@ +require gcc-common.inc + +# Third digit in PV should be incremented after a minor release + +PV = "10.2.0" + +# BINV should be incremented to a revision after a minor gcc release + +BINV = "10.2.0" + +FILESEXTRAPATHS =. 
"${FILE_DIRNAME}/gcc:${FILE_DIRNAME}/gcc/backport:" + +DEPENDS =+ "mpfr gmp libmpc zlib flex-native" +NATIVEDEPS = "mpfr-native gmp-native libmpc-native zlib-native flex-native" + +LICENSE = "GPL-3.0-with-GCC-exception & GPLv3" + +LIC_FILES_CHKSUM = "\ + file://COPYING;md5=59530bdf33659b29e73d4adb9f9f6552 \ + file://COPYING3;md5=d32239bcb673463ab874e80d47fae504 \ + file://COPYING3.LIB;md5=6a6a8e020838b23406c81b19c1d46df6 \ + file://COPYING.LIB;md5=2d5025d4aa3495befef8f17206a5b0a1 \ + file://COPYING.RUNTIME;md5=fe60d87048567d4fe8c8a0ed2448bcc8 \ +" + +BASEURI ?= "${GNU_MIRROR}/gcc/gcc-${PV}/gcc-${PV}.tar.xz" +#RELEASE ?= "93a49d2d2292893b9b7f38132df949c70942838c" +#BASEURI ?= "https://github.com/gcc-mirror/gcc/archive/${RELEASE}.zip;downloadfilename=gcc-${PV}-${RELEASE}.zip" +SRC_URI = "\ + ${BASEURI} \ + file://0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch \ + file://0002-gcc-poison-system-directories.patch \ + file://0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch \ + file://0004-64-bit-multilib-hack.patch \ + file://0005-optional-libstdc.patch \ + file://0006-COLLECT_GCC_OPTIONS.patch \ + file://0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch \ + file://0008-fortran-cross-compile-hack.patch \ + file://0009-cpp-honor-sysroot.patch \ + file://0010-MIPS64-Default-to-N64-ABI.patch \ + file://0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch \ + file://0012-gcc-Fix-argument-list-too-long-error.patch \ + file://0013-Disable-sdt.patch \ + file://0014-libtool.patch \ + file://0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch \ + file://0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch \ + file://0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch \ + file://0018-export-CPP.patch \ + file://0019-Ensure-target-gcc-headers-can-be-included.patch \ + file://0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch \ + file://0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch \ + file://0022-aarch64-Add-support-for-musl-ldso.patch \ + file://0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch \ + file://0024-handle-sysroot-support-for-nativesdk-gcc.patch \ + file://0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch \ + file://0026-Fix-various-_FOR_BUILD-and-related-variables.patch \ + file://0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch \ + file://0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch \ + file://0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch \ + file://0030-sync-gcc-stddef.h-with-musl.patch \ + file://0031-fix-segmentation-fault-in-precompiled-header-generat.patch \ + file://0032-Fix-for-testsuite-failure.patch \ + file://0033-Re-introduce-spe-commandline-options.patch \ + file://0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch \ + file://0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch \ + file://0036-mingw32-Enable-operation_not_supported.patch \ + file://0037-libatomic-Do-not-enforce-march-on-aarch64.patch \ + file://0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch \ + file://0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch \ + file://0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch \ + file://0001-aarch64-Fix-up-__aarch64_cas16_acq_rel-fallback.patch \ +" +SRC_URI[sha256sum] = "b8dd4368bb9c7f0b98188317ee0254dd8cc99d1e3a18d0ff146c855fe16c1d8c" + +S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${PV}" +# For dev release snapshotting +#S = "${TMPDIR}/work-shared/gcc-${PV}-${PR}/gcc-${RELEASE}" +#B = 
"${WORKDIR}/gcc-${PV}/build.${HOST_SYS}.${TARGET_SYS}" + +# Language Overrides +FORTRAN = "" +JAVA = "" + +LTO = "--enable-lto" +SSP ?= "--disable-libssp" +SSP_mingw32 = "--enable-libssp" + +EXTRA_OECONF_BASE = "\ + ${LTO} \ + ${SSP} \ + --enable-libitm \ + --disable-bootstrap \ + --with-system-zlib \ + ${@'--with-linker-hash-style=${LINKER_HASH_STYLE}' if '${LINKER_HASH_STYLE}' else ''} \ + --enable-linker-build-id \ + --with-ppl=no \ + --with-cloog=no \ + --enable-checking=release \ + --enable-cheaders=c_global \ + --without-isl \ +" + +EXTRA_OECONF_INITIAL = "\ + --disable-libgomp \ + --disable-libitm \ + --disable-libquadmath \ + --with-system-zlib \ + --disable-lto \ + --disable-plugin \ + --enable-linker-build-id \ + --enable-decimal-float=no \ + --without-isl \ + --disable-libssp \ +" + +EXTRA_OECONF_PATHS = "\ + --with-gxx-include-dir=/not/exist{target_includedir}/c++/${BINV} \ + --with-sysroot=/not/exist \ + --with-build-sysroot=${STAGING_DIR_TARGET} \ +" diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc index 553ef7fe6..db17ae468 100644 --- a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc +++ b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc @@ -106,7 +106,7 @@ do_install () { rm -f ${D}${bindir}/*c++ # We don't care about the gcc- copies - rm -f ${D}${bindir}/*gcc-?.?* + rm -f ${D}${bindir}/*gcc-${BINV}* # Cleanup empty directories which are not shipped # we use rmdir instead of 'rm -f' to ensure the non empty directories are not deleted diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.1.bb deleted file mode 100644 index bf53c5cd7..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.1.bb +++ /dev/null @@ -1,5 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require gcc-cross-canadian.inc - - - diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.2.bb new file mode 100644 index 000000000..bf53c5cd7 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian_10.2.bb @@ -0,0 +1,5 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require gcc-cross-canadian.inc + + + diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-cross_10.1.bb deleted file mode 100644 index b43cca0c5..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-cross_10.1.bb +++ /dev/null @@ -1,3 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require gcc-cross.inc - diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-cross_10.2.bb new file mode 100644 index 000000000..b43cca0c5 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-cross_10.2.bb @@ -0,0 +1,3 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require gcc-cross.inc + diff --git a/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.1.bb deleted file mode 100644 index 40a6c4fef..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.1.bb +++ /dev/null @@ -1,2 +0,0 @@ -require recipes-devtools/gcc/gcc-cross_${PV}.bb -require gcc-crosssdk.inc diff --git a/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.2.bb new file mode 100644 index 000000000..40a6c4fef --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-crosssdk_10.2.bb @@ -0,0 +1,2 @@ +require recipes-devtools/gcc/gcc-cross_${PV}.bb +require 
gcc-crosssdk.inc diff --git a/poky/meta/recipes-devtools/gcc/gcc-runtime.inc b/poky/meta/recipes-devtools/gcc/gcc-runtime.inc index df1432e68..9dcce2653 100644 --- a/poky/meta/recipes-devtools/gcc/gcc-runtime.inc +++ b/poky/meta/recipes-devtools/gcc/gcc-runtime.inc @@ -138,6 +138,10 @@ do_install_append_class-target () { ln -s ../${TARGET_SYS}/ext ${D}${includedir}/c++/${BINV}/${TARGET_ARCH}${TARGET_VENDOR_MULTILIB_ORIGINAL}-${TARGET_OS}/ext fi + if [ "${TARGET_ARCH}" == "x86_64" -a "${MULTILIB_VARIANTS}" != "" ];then + ln -sf ../${X86ARCH32}${TARGET_VENDOR}-${TARGET_OS} ${D}${includedir}/c++/${BINV}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}/32 + fi + if [ "${TCLIBC}" != "glibc" ]; then case "${TARGET_OS}" in "linux-musl" | "linux-*spe") extra_target_os="linux";; diff --git a/poky/meta/recipes-devtools/gcc/gcc-runtime_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-runtime_10.1.bb deleted file mode 100644 index dd430b57e..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-runtime_10.1.bb +++ /dev/null @@ -1,2 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require gcc-runtime.inc diff --git a/poky/meta/recipes-devtools/gcc/gcc-runtime_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-runtime_10.2.bb new file mode 100644 index 000000000..dd430b57e --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-runtime_10.2.bb @@ -0,0 +1,2 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require gcc-runtime.inc diff --git a/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.1.bb deleted file mode 100644 index f3c705811..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.1.bb +++ /dev/null @@ -1,7 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require gcc-sanitizers.inc - -# Building with thumb enabled on armv4t armv5t fails with -# sanitizer_linux.s:5749: Error: lo register required -- `ldr ip,[sp],#8' -ARM_INSTRUCTION_SET_armv4 = "arm" -ARM_INSTRUCTION_SET_armv5 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.2.bb new file mode 100644 index 000000000..f3c705811 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-sanitizers_10.2.bb @@ -0,0 +1,7 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require gcc-sanitizers.inc + +# Building with thumb enabled on armv4t armv5t fails with +# sanitizer_linux.s:5749: Error: lo register required -- `ldr ip,[sp],#8' +ARM_INSTRUCTION_SET_armv4 = "arm" +ARM_INSTRUCTION_SET_armv5 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/gcc-source_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc-source_10.1.bb deleted file mode 100644 index b890fa33e..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc-source_10.1.bb +++ /dev/null @@ -1,4 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require recipes-devtools/gcc/gcc-source.inc - -EXCLUDE_FROM_WORLD = "1" diff --git a/poky/meta/recipes-devtools/gcc/gcc-source_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc-source_10.2.bb new file mode 100644 index 000000000..b890fa33e --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc-source_10.2.bb @@ -0,0 +1,4 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require recipes-devtools/gcc/gcc-source.inc + +EXCLUDE_FROM_WORLD = "1" diff --git a/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-Fix-up-__aarch64_cas16_acq_rel-fallback.patch b/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-Fix-up-__aarch64_cas16_acq_rel-fallback.patch new file mode 100644 index 000000000..c060accd9 --- /dev/null +++ 
b/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-Fix-up-__aarch64_cas16_acq_rel-fallback.patch @@ -0,0 +1,66 @@ +Upstream-Status: Backport +Signed-off-by: Ross Burton + +From fd2ec4542fd2975e6d3f2f1c1a2639945a84f9e1 Mon Sep 17 00:00:00 2001 +From: Jakub Jelinek +Date: Mon, 3 Aug 2020 22:55:28 +0200 +Subject: [PATCH] aarch64: Fix up __aarch64_cas16_acq_rel fallback + +As mentioned in the PR, the fallback path when LSE is unavailable writes +incorrect registers to the memory if the previous content compares equal +to x0, x1 - it writes copy of x0, x1 from the start of function, but it +should write x2, x3. + +2020-08-03 Jakub Jelinek + + PR target/96402 + * config/aarch64/lse.S (__aarch64_cas16_acq_rel): Use x2, x3 instead + of x(tmp0), x(tmp1) in STXP arguments. + + * gcc.target/aarch64/pr96402.c: New test. + +(cherry picked from commit 90b43856fdff7d96d93d22970eca8a86c56e0ddc) +--- + gcc/testsuite/gcc.target/aarch64/pr96402.c | 16 ++++++++++++++++ + libgcc/config/aarch64/lse.S | 2 +- + 2 files changed, 17 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.target/aarch64/pr96402.c + +diff --git a/gcc/testsuite/gcc.target/aarch64/pr96402.c b/gcc/testsuite/gcc.target/aarch64/pr96402.c +new file mode 100644 +index 00000000000..fa2dddfac15 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/pr96402.c +@@ -0,0 +1,16 @@ ++/* PR target/96402 */ ++/* { dg-do run { target int128 } } */ ++/* { dg-options "-moutline-atomics" } */ ++ ++int ++main () ++{ ++ __int128 a = 0; ++ __sync_val_compare_and_swap (&a, (__int128) 0, (__int128) 1); ++ if (a != 1) ++ __builtin_abort (); ++ __sync_val_compare_and_swap (&a, (__int128) 1, (((__int128) 0xdeadbeeffeedbac1ULL) << 64) | 0xabadcafe00c0ffeeULL); ++ if (a != ((((__int128) 0xdeadbeeffeedbac1ULL) << 64) | 0xabadcafe00c0ffeeULL)) ++ __builtin_abort (); ++ return 0; ++} +diff --git a/libgcc/config/aarch64/lse.S b/libgcc/config/aarch64/lse.S +index 64691c601c1..c8fbfbce4fd 100644 +--- a/libgcc/config/aarch64/lse.S ++++ b/libgcc/config/aarch64/lse.S +@@ -203,7 +203,7 @@ STARTFN NAME(cas) + cmp x0, x(tmp0) + ccmp x1, x(tmp1), #0, eq + bne 1f +- STXP w(tmp2), x(tmp0), x(tmp1), [x4] ++ STXP w(tmp2), x2, x3, [x4] + cbnz w(tmp2), 0b + 1: ret + +-- +2.26.2 + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch b/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch new file mode 100644 index 000000000..73de4c759 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0001-aarch64-New-Straight-Line-Speculation-SLS-mitigation.patch @@ -0,0 +1,202 @@ +CVE: CVE-2020-13844 +Upstream-Status: Backport +Signed-off-by: Ross Burton + +From 1ff243934ac443b5f58cd02a5012ce58ecc31fb2 Mon Sep 17 00:00:00 2001 +From: Matthew Malcomson +Date: Thu, 9 Jul 2020 09:11:58 +0100 +Subject: [PATCH 1/3] aarch64: New Straight Line Speculation (SLS) mitigation + flags + +Here we introduce the flags that will be used for straight line speculation. + +The new flag introduced is `-mharden-sls=`. +This flag can take arguments of `none`, `all`, or a comma seperated list of one +or more of `retbr` or `blr`. +`none` indicates no special mitigation of the straight line speculation +vulnerability. +`all` requests all mitigations currently implemented. +`retbr` requests that the RET and BR instructions have a speculation barrier +inserted after them. +`blr` requests that BLR instructions are replaced by a BL to a function stub +using a BR with a speculation barrier after it. 
+ +Setting this on a per-function basis using attributes or the like is not +enabled, but may be in the future. + +gcc/ChangeLog: + +2020-06-02 Matthew Malcomson + + * config/aarch64/aarch64-protos.h (aarch64_harden_sls_retbr_p): + New. + (aarch64_harden_sls_blr_p): New. + * config/aarch64/aarch64.c (enum aarch64_sls_hardening_type): + New. + (aarch64_harden_sls_retbr_p): New. + (aarch64_harden_sls_blr_p): New. + (aarch64_validate_sls_mitigation): New. + (aarch64_override_options): Parse options for SLS mitigation. + * config/aarch64/aarch64.opt (-mharden-sls): New option. + * doc/invoke.texi: Document new option. +--- + gcc/config/aarch64/aarch64-protos.h | 3 ++ + gcc/config/aarch64/aarch64.c | 76 +++++++++++++++++++++++++++++++++++++ + gcc/config/aarch64/aarch64.opt | 4 ++ + gcc/doc/invoke.texi | 12 ++++++ + 4 files changed, 95 insertions(+) + +diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h +index 723d9ba..eb5f4b4 100644 +--- a/gcc/config/aarch64/aarch64-protos.h ++++ b/gcc/config/aarch64/aarch64-protos.h +@@ -781,4 +781,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; + + tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); + ++extern bool aarch64_harden_sls_retbr_p (void); ++extern bool aarch64_harden_sls_blr_p (void); ++ + #endif /* GCC_AARCH64_PROTOS_H */ +diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c +index b86434a..437a9cf 100644 +--- a/gcc/config/aarch64/aarch64.c ++++ b/gcc/config/aarch64/aarch64.c +@@ -14494,6 +14494,79 @@ aarch64_validate_mcpu (const char *str, const struct processor **res, + return false; + } + ++/* Straight line speculation indicators. */ ++enum aarch64_sls_hardening_type ++{ ++ SLS_NONE = 0, ++ SLS_RETBR = 1, ++ SLS_BLR = 2, ++ SLS_ALL = 3, ++}; ++static enum aarch64_sls_hardening_type aarch64_sls_hardening; ++ ++/* Return whether we should mitigatate Straight Line Speculation for the RET ++ and BR instructions. */ ++bool ++aarch64_harden_sls_retbr_p (void) ++{ ++ return aarch64_sls_hardening & SLS_RETBR; ++} ++ ++/* Return whether we should mitigatate Straight Line Speculation for the BLR ++ instruction. */ ++bool ++aarch64_harden_sls_blr_p (void) ++{ ++ return aarch64_sls_hardening & SLS_BLR; ++} ++ ++/* As of yet we only allow setting these options globally, in the future we may ++ allow setting them per function. 
*/ ++static void ++aarch64_validate_sls_mitigation (const char *const_str) ++{ ++ char *token_save = NULL; ++ char *str = NULL; ++ ++ if (strcmp (const_str, "none") == 0) ++ { ++ aarch64_sls_hardening = SLS_NONE; ++ return; ++ } ++ if (strcmp (const_str, "all") == 0) ++ { ++ aarch64_sls_hardening = SLS_ALL; ++ return; ++ } ++ ++ char *str_root = xstrdup (const_str); ++ str = strtok_r (str_root, ",", &token_save); ++ if (!str) ++ error ("invalid argument given to %<-mharden-sls=%>"); ++ ++ int temp = SLS_NONE; ++ while (str) ++ { ++ if (strcmp (str, "blr") == 0) ++ temp |= SLS_BLR; ++ else if (strcmp (str, "retbr") == 0) ++ temp |= SLS_RETBR; ++ else if (strcmp (str, "none") == 0 || strcmp (str, "all") == 0) ++ { ++ error ("%<%s%> must be by itself for %<-mharden-sls=%>", str); ++ break; ++ } ++ else ++ { ++ error ("invalid argument %<%s%> for %<-mharden-sls=%>", str); ++ break; ++ } ++ str = strtok_r (NULL, ",", &token_save); ++ } ++ aarch64_sls_hardening = (aarch64_sls_hardening_type) temp; ++ free (str_root); ++} ++ + /* Parses CONST_STR for branch protection features specified in + aarch64_branch_protect_types, and set any global variables required. Returns + the parsing result and assigns LAST_STR to the last processed token from +@@ -14738,6 +14811,9 @@ aarch64_override_options (void) + selected_arch = NULL; + selected_tune = NULL; + ++ if (aarch64_harden_sls_string) ++ aarch64_validate_sls_mitigation (aarch64_harden_sls_string); ++ + if (aarch64_branch_protection_string) + aarch64_validate_mbranch_protection (aarch64_branch_protection_string); + +diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt +index d99d14c..5170361 100644 +--- a/gcc/config/aarch64/aarch64.opt ++++ b/gcc/config/aarch64/aarch64.opt +@@ -71,6 +71,10 @@ mgeneral-regs-only + Target Report RejectNegative Mask(GENERAL_REGS_ONLY) Save + Generate code which uses only the general registers. + ++mharden-sls= ++Target RejectNegative Joined Var(aarch64_harden_sls_string) ++Generate code to mitigate against straight line speculation. ++ + mfix-cortex-a53-835769 + Target Report Var(aarch64_fix_a53_err835769) Init(2) Save + Workaround for ARM Cortex-A53 Erratum number 835769. +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index a2794a6..bd5b77a 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -696,6 +696,7 @@ Objective-C and Objective-C++ Dialects}. + -msign-return-address=@var{scope} @gol + -mbranch-protection=@var{none}|@var{standard}|@var{pac-ret}[+@var{leaf} + +@var{b-key}]|@var{bti} @gol ++-mharden-sls=@var{opts} @gol + -march=@var{name} -mcpu=@var{name} -mtune=@var{name} @gol + -moverride=@var{string} -mverbose-cost-dump @gol + -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{sysreg} @gol +@@ -17065,6 +17066,17 @@ functions. The optional argument @samp{b-key} can be used to sign the functions + with the B-key instead of the A-key. + @samp{bti} turns on branch target identification mechanism. + ++@item -mharden-sls=@var{opts} ++@opindex mharden-sls ++Enable compiler hardening against straight line speculation (SLS). ++@var{opts} is a comma-separated list of the following options: ++@table @samp ++@item retbr ++@item blr ++@end table ++In addition, @samp{-mharden-sls=all} enables all SLS hardening while ++@samp{-mharden-sls=none} disables all SLS hardening. ++ + @item -msve-vector-bits=@var{bits} + @opindex msve-vector-bits + Specify the number of bits in an SVE vector register. 
This option only has +-- +2.7.4 + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch b/poky/meta/recipes-devtools/gcc/gcc/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch new file mode 100644 index 000000000..82ae9f8d1 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0001-gcc-4.3.1-ARCH_FLAGS_FOR_TARGET.patch @@ -0,0 +1,39 @@ +From f2a5dc3bc7e5727d6bf77e1c6e8a31a6f000883d Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 08:37:11 +0400 +Subject: [PATCH] gcc-4.3.1: ARCH_FLAGS_FOR_TARGET + +Signed-off-by: Khem Raj + +Upstream-Status: Inappropriate [embedded specific] +--- + configure | 2 +- + configure.ac | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/configure b/configure +index 4cc938ebb7d..226a64939d1 100755 +--- a/configure ++++ b/configure +@@ -7722,7 +7722,7 @@ fi + # for target_alias and gcc doesn't manage it consistently. + target_configargs="--cache-file=./config.cache ${target_configargs}" + +-FLAGS_FOR_TARGET= ++FLAGS_FOR_TARGET="$ARCH_FLAGS_FOR_TARGET" + case " $target_configdirs " in + *" newlib "*) + case " $target_configargs " in +diff --git a/configure.ac b/configure.ac +index c78d9cbea62..f024f4bac9b 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -3227,7 +3227,7 @@ fi + # for target_alias and gcc doesn't manage it consistently. + target_configargs="--cache-file=./config.cache ${target_configargs}" + +-FLAGS_FOR_TARGET= ++FLAGS_FOR_TARGET="$ARCH_FLAGS_FOR_TARGET" + case " $target_configdirs " in + *" newlib "*) + case " $target_configargs " in diff --git a/poky/meta/recipes-devtools/gcc/gcc/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch b/poky/meta/recipes-devtools/gcc/gcc/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch new file mode 100644 index 000000000..823cc8b66 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0002-aarch64-Introduce-SLS-mitigation-for-RET-and-BR-inst.patch @@ -0,0 +1,607 @@ +Upstream-Status: Backport +Signed-off-by: Ross Burton + +From b1204d16e1ec96a4aa89e44de8990e2499ffdb22 Mon Sep 17 00:00:00 2001 +From: Matthew Malcomson +Date: Thu, 9 Jul 2020 09:11:59 +0100 +Subject: [PATCH 2/3] aarch64: Introduce SLS mitigation for RET and BR + instructions + +Instructions following RET or BR are not necessarily executed. In order +to avoid speculation past RET and BR we can simply append a speculation +barrier. + +Since these speculation barriers will not be architecturally executed, +they are not expected to add a high performance penalty. + +The speculation barrier is to be SB when targeting architectures which +have this enabled, and DSB SY + ISB otherwise. + +We add tests for each of the cases where such an instruction was seen. + +This is implemented by modifying each machine description pattern that +emits either a RET or a BR instruction. We choose not to use something +like `TARGET_ASM_FUNCTION_EPILOGUE` since it does not affect the +`indirect_jump`, `jump`, `sibcall_insn` and `sibcall_value_insn` +patterns and we find it preferable to implement the functionality in the +same way for every pattern. + +There is one particular case which is slightly tricky. The +implementation of TARGET_ASM_TRAMPOLINE_TEMPLATE uses a BR which needs +to be mitigated against. The trampoline template is used *once* per +compilation unit, and the TRAMPOLINE_SIZE is exposed to the user via the +builtin macro __LIBGCC_TRAMPOLINE_SIZE__. +In the future we may implement function specific attributes to turn on +and off hardening on a per-function basis. 
+The fixed nature of the trampoline described above implies it will be +safer to ensure this speculation barrier is always used. + +Testing: + Bootstrap and regtest done on aarch64-none-linux + Used a temporary hack(1) to use these options on every test in the + testsuite and a script to check that the output never emitted an + unmitigated RET or BR. + +1) Temporary hack was a change to the testsuite to always use +`-save-temps` and run a script on the assembly output of those +compilations which produced one to ensure every RET or BR is immediately +followed by a speculation barrier. + +gcc/ChangeLog: + + * config/aarch64/aarch64-protos.h (aarch64_sls_barrier): New. + * config/aarch64/aarch64.c (aarch64_output_casesi): Emit + speculation barrier after BR instruction if needs be. + (aarch64_trampoline_init): Handle ptr_mode value & adjust size + of code copied. + (aarch64_sls_barrier): New. + (aarch64_asm_trampoline_template): Add needed barriers. + * config/aarch64/aarch64.h (AARCH64_ISA_SB): New. + (TARGET_SB): New. + (TRAMPOLINE_SIZE): Account for barrier. + * config/aarch64/aarch64.md (indirect_jump, *casesi_dispatch, + simple_return, *do_return, *sibcall_insn, *sibcall_value_insn): + Emit barrier if needs be, also account for possible barrier using + "sls_length" attribute. + (sls_length): New attribute. + (length): Determine default using any non-default sls_length + value. + +gcc/testsuite/ChangeLog: + + * gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c: New test. + * gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c: + New test. + * gcc.target/aarch64/sls-mitigation/sls-mitigation.exp: New file. + * lib/target-supports.exp (check_effective_target_aarch64_asm_sb_ok): + New proc. +--- + gcc/config/aarch64/aarch64-protos.h | 1 + + gcc/config/aarch64/aarch64.c | 41 ++++++- + gcc/config/aarch64/aarch64.h | 10 +- + gcc/config/aarch64/aarch64.md | 76 +++++++++---- + .../aarch64/sls-mitigation/sls-miti-retbr-pacret.c | 21 ++++ + .../aarch64/sls-mitigation/sls-miti-retbr.c | 119 +++++++++++++++++++++ + .../aarch64/sls-mitigation/sls-mitigation.exp | 73 +++++++++++++ + gcc/testsuite/lib/target-supports.exp | 2 +- + 8 files changed, 318 insertions(+), 25 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c + create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c + create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp + +diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h +index eb5f4b4..ee0ffde 100644 +--- a/gcc/config/aarch64/aarch64-protos.h ++++ b/gcc/config/aarch64/aarch64-protos.h +@@ -781,6 +781,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; + + tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); + ++const char *aarch64_sls_barrier (int); + extern bool aarch64_harden_sls_retbr_p (void); + extern bool aarch64_harden_sls_blr_p (void); + +diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c +index 437a9cf..44e3d1f 100644 +--- a/gcc/config/aarch64/aarch64.c ++++ b/gcc/config/aarch64/aarch64.c +@@ -10852,8 +10852,8 @@ aarch64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) + static void + aarch64_asm_trampoline_template (FILE *f) + { +- int offset1 = 16; +- int offset2 = 20; ++ int offset1 = 24; ++ int offset2 = 28; + + if (aarch64_bti_enabled ()) + { +@@ -10876,6 +10876,17 @@ aarch64_asm_trampoline_template (FILE *f) + } + asm_fprintf (f, "\tbr\t%s\n", reg_names 
[IP1_REGNUM]); + ++ /* We always emit a speculation barrier. ++ This is because the same trampoline template is used for every nested ++ function. Since nested functions are not particularly common or ++ performant we don't worry too much about the extra instructions to copy ++ around. ++ This is not yet a problem, since we have not yet implemented function ++ specific attributes to choose between hardening against straight line ++ speculation or not, but such function specific attributes are likely to ++ happen in the future. */ ++ asm_fprintf (f, "\tdsb\tsy\n\tisb\n"); ++ + /* The trampoline needs an extra padding instruction. In case if BTI is + enabled the padding instruction is replaced by the BTI instruction at + the beginning. */ +@@ -10890,10 +10901,14 @@ static void + aarch64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) + { + rtx fnaddr, mem, a_tramp; +- const int tramp_code_sz = 16; ++ const int tramp_code_sz = 24; + + /* Don't need to copy the trailing D-words, we fill those in below. */ +- emit_block_move (m_tramp, assemble_trampoline_template (), ++ /* We create our own memory address in Pmode so that `emit_block_move` can ++ use parts of the backend which expect Pmode addresses. */ ++ rtx temp = convert_memory_address (Pmode, XEXP (m_tramp, 0)); ++ emit_block_move (gen_rtx_MEM (BLKmode, temp), ++ assemble_trampoline_template (), + GEN_INT (tramp_code_sz), BLOCK_OP_NORMAL); + mem = adjust_address (m_tramp, ptr_mode, tramp_code_sz); + fnaddr = XEXP (DECL_RTL (fndecl), 0); +@@ -11084,6 +11099,8 @@ aarch64_output_casesi (rtx *operands) + output_asm_insn (buf, operands); + output_asm_insn (patterns[index][1], operands); + output_asm_insn ("br\t%3", operands); ++ output_asm_insn (aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()), ++ operands); + assemble_label (asm_out_file, label); + return ""; + } +@@ -22924,6 +22941,22 @@ aarch64_file_end_indicate_exec_stack () + #undef GNU_PROPERTY_AARCH64_FEATURE_1_BTI + #undef GNU_PROPERTY_AARCH64_FEATURE_1_AND + ++/* Helper function for straight line speculation. ++ Return what barrier should be emitted for straight line speculation ++ mitigation. ++ When not mitigating against straight line speculation this function returns ++ an empty string. ++ When mitigating against straight line speculation, use: ++ * SB when the v8.5-A SB extension is enabled. ++ * DSB+ISB otherwise. */ ++const char * ++aarch64_sls_barrier (int mitigation_required) ++{ ++ return mitigation_required ++ ? (TARGET_SB ? "sb" : "dsb\tsy\n\tisb") ++ : ""; ++} ++ + /* Target-specific selftests. */ + + #if CHECKING_P +diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h +index 1ce23c6..c21015f 100644 +--- a/gcc/config/aarch64/aarch64.h ++++ b/gcc/config/aarch64/aarch64.h +@@ -281,6 +281,7 @@ extern unsigned aarch64_architecture_version; + #define AARCH64_ISA_F32MM (aarch64_isa_flags & AARCH64_FL_F32MM) + #define AARCH64_ISA_F64MM (aarch64_isa_flags & AARCH64_FL_F64MM) + #define AARCH64_ISA_BF16 (aarch64_isa_flags & AARCH64_FL_BF16) ++#define AARCH64_ISA_SB (aarch64_isa_flags & AARCH64_FL_SB) + + /* Crypto is an optional extension to AdvSIMD. */ + #define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO) +@@ -378,6 +379,9 @@ extern unsigned aarch64_architecture_version; + #define TARGET_FIX_ERR_A53_835769_DEFAULT 1 + #endif + ++/* SB instruction is enabled through +sb. */ ++#define TARGET_SB (AARCH64_ISA_SB) ++ + /* Apply the workaround for Cortex-A53 erratum 835769. 
*/ + #define TARGET_FIX_ERR_A53_835769 \ + ((aarch64_fix_a53_err835769 == 2) \ +@@ -1058,8 +1062,10 @@ typedef struct + + #define RETURN_ADDR_RTX aarch64_return_addr + +-/* BTI c + 3 insns + 2 pointer-sized entries. */ +-#define TRAMPOLINE_SIZE (TARGET_ILP32 ? 24 : 32) ++/* BTI c + 3 insns ++ + sls barrier of DSB + ISB. ++ + 2 pointer-sized entries. */ ++#define TRAMPOLINE_SIZE (24 + (TARGET_ILP32 ? 8 : 16)) + + /* Trampolines contain dwords, so must be dword aligned. */ + #define TRAMPOLINE_ALIGNMENT 64 +diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md +index 8c8be3c..dda04ee 100644 +--- a/gcc/config/aarch64/aarch64.md ++++ b/gcc/config/aarch64/aarch64.md +@@ -407,10 +407,25 @@ + ;; Attribute that specifies whether the alternative uses MOVPRFX. + (define_attr "movprfx" "no,yes" (const_string "no")) + ++;; Attribute to specify that an alternative has the length of a single ++;; instruction plus a speculation barrier. ++(define_attr "sls_length" "none,retbr,casesi" (const_string "none")) ++ + (define_attr "length" "" + (cond [(eq_attr "movprfx" "yes") + (const_int 8) +- ] (const_int 4))) ++ ++ (eq_attr "sls_length" "retbr") ++ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 4) ++ (match_test "TARGET_SB") (const_int 8)] ++ (const_int 12)) ++ ++ (eq_attr "sls_length" "casesi") ++ (cond [(match_test "!aarch64_harden_sls_retbr_p ()") (const_int 16) ++ (match_test "TARGET_SB") (const_int 20)] ++ (const_int 24)) ++ ] ++ (const_int 4))) + + ;; Strictly for compatibility with AArch32 in pipeline models, since AArch64 has + ;; no predicated insns. +@@ -447,8 +462,12 @@ + (define_insn "indirect_jump" + [(set (pc) (match_operand:DI 0 "register_operand" "r"))] + "" +- "br\\t%0" +- [(set_attr "type" "branch")] ++ { ++ output_asm_insn ("br\\t%0", operands); ++ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); ++ } ++ [(set_attr "type" "branch") ++ (set_attr "sls_length" "retbr")] + ) + + (define_insn "jump" +@@ -765,7 +784,7 @@ + "* + return aarch64_output_casesi (operands); + " +- [(set_attr "length" "16") ++ [(set_attr "sls_length" "casesi") + (set_attr "type" "branch")] + ) + +@@ -844,18 +863,23 @@ + [(return)] + "" + { ++ const char *ret = NULL; + if (aarch64_return_address_signing_enabled () + && TARGET_ARMV8_3 + && !crtl->calls_eh_return) + { + if (aarch64_ra_sign_key == AARCH64_KEY_B) +- return "retab"; ++ ret = "retab"; + else +- return "retaa"; ++ ret = "retaa"; + } +- return "ret"; ++ else ++ ret = "ret"; ++ output_asm_insn (ret, operands); ++ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); + } +- [(set_attr "type" "branch")] ++ [(set_attr "type" "branch") ++ (set_attr "sls_length" "retbr")] + ) + + (define_expand "return" +@@ -867,8 +891,12 @@ + (define_insn "simple_return" + [(simple_return)] + "" +- "ret" +- [(set_attr "type" "branch")] ++ { ++ output_asm_insn ("ret", operands); ++ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); ++ } ++ [(set_attr "type" "branch") ++ (set_attr "sls_length" "retbr")] + ) + + (define_insn "*cb1" +@@ -1066,10 +1094,16 @@ + (unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI) + (return)] + "SIBLING_CALL_P (insn)" +- "@ +- br\\t%0 +- b\\t%c0" +- [(set_attr "type" "branch, branch")] ++ { ++ if (which_alternative == 0) ++ { ++ output_asm_insn ("br\\t%0", operands); ++ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); ++ } ++ return "b\\t%c0"; ++ } ++ [(set_attr "type" "branch, branch") ++ (set_attr "sls_length" "retbr,none")] + ) + + (define_insn 
"*sibcall_value_insn" +@@ -1080,10 +1114,16 @@ + (unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI) + (return)] + "SIBLING_CALL_P (insn)" +- "@ +- br\\t%1 +- b\\t%c1" +- [(set_attr "type" "branch, branch")] ++ { ++ if (which_alternative == 0) ++ { ++ output_asm_insn ("br\\t%1", operands); ++ return aarch64_sls_barrier (aarch64_harden_sls_retbr_p ()); ++ } ++ return "b\\t%c1"; ++ } ++ [(set_attr "type" "branch, branch") ++ (set_attr "sls_length" "retbr,none")] + ) + + ;; Call subroutine returning any type. +diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c +new file mode 100644 +index 0000000..fa1887a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr-pacret.c +@@ -0,0 +1,21 @@ ++/* Avoid ILP32 since pacret is only available for LP64 */ ++/* { dg-do compile { target { ! ilp32 } } } */ ++/* { dg-additional-options "-mharden-sls=retbr -mbranch-protection=pac-ret -march=armv8.3-a" } */ ++ ++/* Testing the do_return pattern for retaa and retab. */ ++long retbr_subcall(void); ++long retbr_do_return_retaa(void) ++{ ++ return retbr_subcall()+1; ++} ++ ++__attribute__((target("branch-protection=pac-ret+b-key"))) ++long retbr_do_return_retab(void) ++{ ++ return retbr_subcall()+1; ++} ++ ++/* Ensure there are no BR or RET instructions which are not directly followed ++ by a speculation barrier. */ ++/* { dg-final { scan-assembler-not {\t(br|ret|retaa|retab)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb)} } } */ ++/* { dg-final { scan-assembler-not {ret\t} } } */ +diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c +new file mode 100644 +index 0000000..76b8d03 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-retbr.c +@@ -0,0 +1,119 @@ ++/* We ensure that -Wpedantic is off since it complains about the trampolines ++ we explicitly want to test. */ ++/* { dg-additional-options "-mharden-sls=retbr -Wno-pedantic " } */ ++/* ++ Ensure that the SLS hardening of RET and BR leaves no unprotected RET/BR ++ instructions. ++ */ ++typedef int (foo) (int, int); ++typedef void (bar) (int, int); ++struct sls_testclass { ++ foo *x; ++ bar *y; ++ int left; ++ int right; ++}; ++ ++int ++retbr_sibcall_value_insn (struct sls_testclass x) ++{ ++ return x.x(x.left, x.right); ++} ++ ++void ++retbr_sibcall_insn (struct sls_testclass x) ++{ ++ x.y(x.left, x.right); ++} ++ ++/* Aim to test two different returns. ++ One that introduces a tail call in the middle of the function, and one that ++ has a normal return. */ ++int ++retbr_multiple_returns (struct sls_testclass x) ++{ ++ int temp; ++ if (x.left % 10) ++ return x.x(x.left, 100); ++ else if (x.right % 20) ++ { ++ return x.x(x.left * x.right, 100); ++ } ++ temp = x.left % x.right; ++ temp *= 100; ++ temp /= 2; ++ return temp % 3; ++} ++ ++void ++retbr_multiple_returns_void (struct sls_testclass x) ++{ ++ if (x.left % 10) ++ { ++ x.y(x.left, 100); ++ } ++ else if (x.right % 20) ++ { ++ x.y(x.left * x.right, 100); ++ } ++ return; ++} ++ ++/* Testing the casesi jump via register. 
*/ ++__attribute__ ((optimize ("Os"))) ++int ++retbr_casesi_dispatch (struct sls_testclass x) ++{ ++ switch (x.left) ++ { ++ case -5: ++ return -2; ++ case -3: ++ return -1; ++ case 0: ++ return 0; ++ case 3: ++ return 1; ++ case 5: ++ break; ++ default: ++ __builtin_unreachable (); ++ } ++ return x.right; ++} ++ ++/* Testing the BR in trampolines is mitigated against. */ ++void f1 (void *); ++void f3 (void *, void (*)(void *)); ++void f2 (void *); ++ ++int ++retbr_trampolines (void *a, int b) ++{ ++ if (!b) ++ { ++ f1 (a); ++ return 1; ++ } ++ if (b) ++ { ++ void retbr_tramp_internal (void *c) ++ { ++ if (c == a) ++ f2 (c); ++ } ++ f3 (a, retbr_tramp_internal); ++ } ++ return 0; ++} ++ ++/* Testing the indirect_jump pattern. */ ++void ++retbr_indirect_jump (int *buf) ++{ ++ __builtin_longjmp(buf, 1); ++} ++ ++/* Ensure there are no BR or RET instructions which are not directly followed ++ by a speculation barrier. */ ++/* { dg-final { scan-assembler-not {\t(br|ret|retaa|retab)\tx[0-9][0-9]?\n\t(?!dsb\tsy\n\tisb|sb)} } } */ +diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp +new file mode 100644 +index 0000000..8122503 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-mitigation.exp +@@ -0,0 +1,73 @@ ++# Regression driver for SLS mitigation on AArch64. ++# Copyright (C) 2020 Free Software Foundation, Inc. ++# Contributed by ARM Ltd. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify it ++# under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, but ++# WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++# General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . */ ++ ++# Exit immediately if this isn't an AArch64 target. ++if {![istarget aarch64*-*-*] } then { ++ return ++} ++ ++# Load support procs. ++load_lib gcc-dg.exp ++load_lib torture-options.exp ++ ++# If a testcase doesn't have special options, use these. ++global DEFAULT_CFLAGS ++if ![info exists DEFAULT_CFLAGS] then { ++ set DEFAULT_CFLAGS " " ++} ++ ++# Initialize `dg'. ++dg-init ++torture-init ++ ++# Use different architectures as well as the normal optimisation options. ++# (i.e. use both SB and DSB+ISB barriers). ++ ++set save-dg-do-what-default ${dg-do-what-default} ++# Main loop. ++# Run with torture tests (i.e. a bunch of different optimisation levels) just ++# to increase test coverage. ++set dg-do-what-default assemble ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \ ++ "-save-temps" $DEFAULT_CFLAGS ++ ++# Run the same tests but this time with SB extension. ++# Since not all supported assemblers will support that extension we decide ++# whether to assemble or just compile based on whether the extension is ++# supported for the available assembler. 
++ ++set templist {} ++foreach x $DG_TORTURE_OPTIONS { ++ lappend templist "$x -march=armv8.3-a+sb " ++ lappend templist "$x -march=armv8-a+sb " ++} ++set-torture-options $templist ++if { [check_effective_target_aarch64_asm_sb_ok] } { ++ set dg-do-what-default assemble ++} else { ++ set dg-do-what-default compile ++} ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.\[cCS\]]] \ ++ "-save-temps" $DEFAULT_CFLAGS ++set dg-do-what-default ${save-dg-do-what-default} ++ ++# All done. ++torture-finish ++dg-finish +diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp +index 8a186dd..9d2e093 100644 +--- a/gcc/testsuite/lib/target-supports.exp ++++ b/gcc/testsuite/lib/target-supports.exp +@@ -9432,7 +9432,7 @@ proc check_effective_target_aarch64_tiny { } { + # various architecture extensions via the .arch_extension pseudo-op. + + foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve" +- "i8mm" "f32mm" "f64mm" "bf16" } { ++ "i8mm" "f32mm" "f64mm" "bf16" "sb" } { + eval [string map [list FUNC $aarch64_ext] { + proc check_effective_target_aarch64_asm_FUNC_ok { } { + if { [istarget aarch64*-*-*] } { +-- +2.7.4 + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0002-gcc-poison-system-directories.patch b/poky/meta/recipes-devtools/gcc/gcc/0002-gcc-poison-system-directories.patch new file mode 100644 index 000000000..30a848601 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0002-gcc-poison-system-directories.patch @@ -0,0 +1,200 @@ +From 74cc21f474402cf3578e37e1d7a1a22bbd070f6a Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 08:59:00 +0400 +Subject: [PATCH] gcc: poison-system-directories + +Add /sw/include and /opt/include based on the original +zecke-no-host-includes.patch patch. The original patch checked for +/usr/include, /sw/include and /opt/include and then triggered a failure and +aborted. + +Instead, we add the two missing items to the current scan. If the user +wants this to be a failure, they can add "-Werror=poison-system-directories". + +Signed-off-by: Mark Hatle +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + gcc/common.opt | 4 ++++ + gcc/config.in | 6 ++++++ + gcc/configure | 16 ++++++++++++++++ + gcc/configure.ac | 10 ++++++++++ + gcc/doc/invoke.texi | 9 +++++++++ + gcc/gcc.c | 2 ++ + gcc/incpath.c | 21 +++++++++++++++++++++ + 7 files changed, 68 insertions(+) + +diff --git a/gcc/common.opt b/gcc/common.opt +index 65a82410abc..415f38fa1f4 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -682,6 +682,10 @@ Wreturn-local-addr + Common Var(warn_return_local_addr) Init(1) Warning + Warn about returning a pointer/reference to a local or temporary variable. + ++Wpoison-system-directories ++Common Var(flag_poison_system_directories) Init(1) Warning ++Warn for -I and -L options using system directories if cross compiling ++ + Wshadow + Common Var(warn_shadow) Warning + Warn when one variable shadows another. Same as -Wshadow=global. +diff --git a/gcc/config.in b/gcc/config.in +index 809e7b26823..5adeaeed36b 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -224,6 +224,12 @@ + #endif + + ++/* Define to warn for use of native system header directories */ ++#ifndef USED_FOR_TARGET ++#undef ENABLE_POISON_SYSTEM_DIRECTORIES ++#endif ++ ++ + /* Define if you want all operations on RTL (the basic data structure of the + optimizer and back end) to be checked for dynamic type safety at runtime. + This is quite expensive. 
*/ +diff --git a/gcc/configure b/gcc/configure +index cd3d9516fce..8de766a942c 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -1010,6 +1010,7 @@ with_system_zlib + enable_maintainer_mode + enable_link_mutex + enable_version_specific_runtime_libs ++enable_poison_system_directories + enable_plugin + enable_host_shared + enable_libquadmath_support +@@ -1766,6 +1767,8 @@ Optional Features: + --enable-version-specific-runtime-libs + specify that runtime libraries should be installed + in a compiler-specific directory ++ --enable-poison-system-directories ++ warn for use of native system header directories + --enable-plugin enable plugin support + --enable-host-shared build host code as shared libraries + --disable-libquadmath-support +@@ -30235,6 +30238,19 @@ if test "${enable_version_specific_runtime_libs+set}" = set; then : + fi + + ++# Check whether --enable-poison-system-directories was given. ++if test "${enable_poison_system_directories+set}" = set; then : ++ enableval=$enable_poison_system_directories; ++else ++ enable_poison_system_directories=no ++fi ++ ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ ++$as_echo "#define ENABLE_POISON_SYSTEM_DIRECTORIES 1" >>confdefs.h ++ ++fi ++ + # Substitute configuration variables + + +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 0de3b4bf97b..8bfd6feb780 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -6595,6 +6595,16 @@ AC_ARG_ENABLE(version-specific-runtime-libs, + [specify that runtime libraries should be + installed in a compiler-specific directory])]) + ++AC_ARG_ENABLE([poison-system-directories], ++ AS_HELP_STRING([--enable-poison-system-directories], ++ [warn for use of native system header directories]),, ++ [enable_poison_system_directories=no]) ++if test "x${enable_poison_system_directories}" = "xyes"; then ++ AC_DEFINE([ENABLE_POISON_SYSTEM_DIRECTORIES], ++ [1], ++ [Define to warn for use of native system header directories]) ++fi ++ + # Substitute configuration variables + AC_SUBST(subdirs) + AC_SUBST(srcdir) +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index f98161391a0..f12d8d12150 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -348,6 +348,7 @@ Objective-C and Objective-C++ Dialects}. + -Wpacked -Wno-packed-bitfield-compat -Wpacked-not-aligned -Wpadded @gol + -Wparentheses -Wno-pedantic-ms-format @gol + -Wpointer-arith -Wno-pointer-compare -Wno-pointer-to-int-cast @gol ++-Wno-poison-system-directories @gol + -Wno-pragmas -Wno-prio-ctor-dtor -Wredundant-decls @gol + -Wrestrict -Wno-return-local-addr -Wreturn-type @gol + -Wno-scalar-storage-order -Wsequence-point @gol +@@ -6924,6 +6925,14 @@ made up of data only and thus requires no special treatment. But, for + most targets, it is made up of code and thus requires the stack to be + made executable in order for the program to work properly. + ++@item -Wno-poison-system-directories ++@opindex Wno-poison-system-directories ++Do not warn for @option{-I} or @option{-L} options using system ++directories such as @file{/usr/include} when cross compiling. This ++option is intended for use in chroot environments when such ++directories contain the correct headers and libraries for the target ++system rather than the host. ++ + @item -Wfloat-equal + @opindex Wfloat-equal + @opindex Wno-float-equal +diff --git a/gcc/gcc.c b/gcc/gcc.c +index 9f790db0daf..b2200c5185a 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -1041,6 +1041,8 @@ proper position among the other output files. 
*/ + "%{fuse-ld=*:-fuse-ld=%*} " LINK_COMPRESS_DEBUG_SPEC \ + "%X %{o*} %{e*} %{N} %{n} %{r}\ + %{s} %{t} %{u*} %{z} %{Z} %{!nostdlib:%{!r:%{!nostartfiles:%S}}} \ ++ %{Wno-poison-system-directories:--no-poison-system-directories} \ ++ %{Werror=poison-system-directories:--error-poison-system-directories} \ + %{static|no-pie|static-pie:} %@{L*} %(mfwrap) %(link_libgcc) " \ + VTABLE_VERIFICATION_SPEC " " SANITIZER_EARLY_SPEC " %o "" \ + %{fopenacc|fopenmp|%:gt(%{ftree-parallelize-loops=*:%*} 1):\ +diff --git a/gcc/incpath.c b/gcc/incpath.c +index 8a2bda00f80..9098ab044ab 100644 +--- a/gcc/incpath.c ++++ b/gcc/incpath.c +@@ -26,6 +26,7 @@ + #include "intl.h" + #include "incpath.h" + #include "cppdefault.h" ++#include "diagnostic-core.h" + + /* Microsoft Windows does not natively support inodes. + VMS has non-numeric inodes. */ +@@ -393,6 +394,26 @@ merge_include_chains (const char *sysroot, cpp_reader *pfile, int verbose) + } + fprintf (stderr, _("End of search list.\n")); + } ++ ++#ifdef ENABLE_POISON_SYSTEM_DIRECTORIES ++ if (flag_poison_system_directories) ++ { ++ struct cpp_dir *p; ++ ++ for (p = heads[INC_QUOTE]; p; p = p->next) ++ { ++ if ((!strncmp (p->name, "/usr/include", 12)) ++ || (!strncmp (p->name, "/usr/local/include", 18)) ++ || (!strncmp (p->name, "/usr/X11R6/include", 18)) ++ || (!strncmp (p->name, "/sw/include", 11)) ++ || (!strncmp (p->name, "/opt/include", 12))) ++ warning (OPT_Wpoison_system_directories, ++ "include location \"%s\" is unsafe for " ++ "cross-compilation", ++ p->name); ++ } ++ } ++#endif + } + + /* Use given -I paths for #include "..." but not #include <...>, and diff --git a/poky/meta/recipes-devtools/gcc/gcc/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch b/poky/meta/recipes-devtools/gcc/gcc/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch new file mode 100644 index 000000000..716a36717 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0003-aarch64-Mitigate-SLS-for-BLR-instruction.patch @@ -0,0 +1,658 @@ +Upstream-Status: Backport +Signed-off-by: Ross Burton + +From a5e7efc40ed841934c1d913f39476afa17d8e5f7 Mon Sep 17 00:00:00 2001 +From: Matthew Malcomson +Date: Thu, 9 Jul 2020 09:11:59 +0100 +Subject: [PATCH 3/3] aarch64: Mitigate SLS for BLR instruction + +This patch introduces the mitigation for Straight Line Speculation past +the BLR instruction. + +This mitigation replaces BLR instructions with a BL to a stub which uses +a BR to jump to the original value. These function stubs are then +appended with a speculation barrier to ensure no straight line +speculation happens after these jumps. + +When optimising for speed we use a set of stubs for each function since +this should help the branch predictor make more accurate predictions +about where a stub should branch. + +When optimising for size we use one set of stubs for all functions. +This set of stubs can have human readable names, and we are using +`__call_indirect_x` for register x. + +When BTI branch protection is enabled the BLR instruction can jump to a +`BTI c` instruction using any register, while the BR instruction can +only jump to a `BTI c` instruction using the x16 or x17 registers. +Hence, in order to ensure this transformation is safe we mov the value +of the original register into x16 and use x16 for the BR. 
+ +As an example when optimising for size: +a + BLR x0 +instruction would get transformed to something like + BL __call_indirect_x0 +where __call_indirect_x0 labels a thunk that contains +__call_indirect_x0: + MOV X16, X0 + BR X16 + + +The first version of this patch used local symbols specific to a +compilation unit to try and avoid relocations. +This was mistaken since functions coming from the same compilation unit +can still be in different sections, and the assembler will insert +relocations at jumps between sections. + +On any relocation the linker is permitted to emit a veneer to handle +jumps between symbols that are very far apart. The registers x16 and +x17 may be clobbered by these veneers. +Hence the function stubs cannot rely on the values of x16 and x17 being +the same as just before the function stub is called. + +Similar can be said for the hot/cold partitioning of single functions, +so function-local stubs have the same restriction. + +This updated version of the patch never emits function stubs for x16 and +x17, and instead forces other registers to be used. + +Given the above, there is now no benefit to local symbols (since they +are not enough to avoid dealing with linker intricacies). This patch +now uses global symbols with hidden visibility each stored in their own +COMDAT section. This means stubs can be shared between compilation +units while still avoiding the PLT indirection. + +This patch also removes the `__call_indirect_x30` stub (and +function-local equivalent) which would simply jump back to the original +location. + +The function-local stubs are emitted to the assembly output file in one +chunk, which means we need not add the speculation barrier directly +after each one. +This is because we know for certain that the instructions directly after +the BR in all but the last function stub will be from another one of +these stubs and hence will not contain a speculation gadget. +Instead we add a speculation barrier at the end of the sequence of +stubs. + +The global stubs are emitted in COMDAT/.linkonce sections by +themselves so that the linker can remove duplicates from multiple object +files. This means they are not emitted in one chunk, and each one must +include the speculation barrier. + +Another difference is that since the global stubs are shared across +compilation units we do not know that all functions will be targeting an +architecture supporting the SB instruction. +Rather than provide multiple stubs for each architecture, we provide a +stub that will work for all architectures -- using the DSB+ISB barrier. + +This mitigation does not apply for BLR instructions in the following +places: +- Some accesses to thread-local variables use a code sequence with a BLR + instruction. This code sequence is part of the binary interface between + compiler and linker. If this BLR instruction needs to be mitigated, it'd + probably be best to do so in the linker. It seems that the code sequence + for thread-local variable access is unlikely to lead to a Spectre Revalation + Gadget. +- PLT stubs are produced by the linker and each contain a BLR instruction. + It seems that at most only after the last PLT stub a Spectre Revalation + Gadget might appear. 
+ +Testing: + Bootstrap and regtest on AArch64 + (with BOOT_CFLAGS="-mharden-sls=retbr,blr") + Used a temporary hack(1) in gcc-dg.exp to use these options on every + test in the testsuite, a slight modification to emit the speculation + barrier after every function stub, and a script to check that the + output never emitted a BLR, or unmitigated BR or RET instruction. + Similar on an aarch64-none-elf cross-compiler. + +1) Temporary hack emitted a speculation barrier at the end of every stub +function, and used a script to ensure that: + a) Every RET or BR is immediately followed by a speculation barrier. + b) No BLR instruction is emitted by compiler. + +gcc/ChangeLog: + + * config/aarch64/aarch64-protos.h (aarch64_indirect_call_asm): + New declaration. + * config/aarch64/aarch64.c (aarch64_regno_regclass): Handle new + stub registers class. + (aarch64_class_max_nregs): Likewise. + (aarch64_register_move_cost): Likewise. + (aarch64_sls_shared_thunks): Global array to store stub labels. + (aarch64_sls_emit_function_stub): New. + (aarch64_create_blr_label): New. + (aarch64_sls_emit_blr_function_thunks): New. + (aarch64_sls_emit_shared_blr_thunks): New. + (aarch64_asm_file_end): New. + (aarch64_indirect_call_asm): New. + (TARGET_ASM_FILE_END): Use aarch64_asm_file_end. + (TARGET_ASM_FUNCTION_EPILOGUE): Use + aarch64_sls_emit_blr_function_thunks. + * config/aarch64/aarch64.h (STB_REGNUM_P): New. + (enum reg_class): Add STUB_REGS class. + (machine_function): Introduce `call_via` array for + function-local stub labels. + * config/aarch64/aarch64.md (*call_insn, *call_value_insn): Use + aarch64_indirect_call_asm to emit code when hardening BLR + instructions. + * config/aarch64/constraints.md (Ucr): New constraint + representing registers for indirect calls. Is GENERAL_REGS + usually, and STUB_REGS when hardening BLR instruction against + SLS. + * config/aarch64/predicates.md (aarch64_general_reg): STUB_REGS class + is also a general register. + +gcc/testsuite/ChangeLog: + + * gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c: New test. + * gcc.target/aarch64/sls-mitigation/sls-miti-blr.c: New test. 
+--- + gcc/config/aarch64/aarch64-protos.h | 1 + + gcc/config/aarch64/aarch64.c | 225 ++++++++++++++++++++- + gcc/config/aarch64/aarch64.h | 15 ++ + gcc/config/aarch64/aarch64.md | 11 +- + gcc/config/aarch64/constraints.md | 9 + + gcc/config/aarch64/predicates.md | 3 +- + .../aarch64/sls-mitigation/sls-miti-blr-bti.c | 40 ++++ + .../aarch64/sls-mitigation/sls-miti-blr.c | 33 +++ + 8 files changed, 328 insertions(+), 9 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c + create mode 100644 gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c + +diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h +index ee0ffde..839f801 100644 +--- a/gcc/config/aarch64/aarch64-protos.h ++++ b/gcc/config/aarch64/aarch64-protos.h +@@ -782,6 +782,7 @@ extern const atomic_ool_names aarch64_ool_ldeor_names; + tree aarch64_resolve_overloaded_builtin_general (location_t, tree, void *); + + const char *aarch64_sls_barrier (int); ++const char *aarch64_indirect_call_asm (rtx); + extern bool aarch64_harden_sls_retbr_p (void); + extern bool aarch64_harden_sls_blr_p (void); + +diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c +index 2389d49..0f7bba3 100644 +--- a/gcc/config/aarch64/aarch64.c ++++ b/gcc/config/aarch64/aarch64.c +@@ -10605,6 +10605,9 @@ aarch64_label_mentioned_p (rtx x) + enum reg_class + aarch64_regno_regclass (unsigned regno) + { ++ if (STUB_REGNUM_P (regno)) ++ return STUB_REGS; ++ + if (GP_REGNUM_P (regno)) + return GENERAL_REGS; + +@@ -10939,6 +10942,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode) + unsigned int nregs, vec_flags; + switch (regclass) + { ++ case STUB_REGS: + case TAILCALL_ADDR_REGS: + case POINTER_REGS: + case GENERAL_REGS: +@@ -13155,10 +13159,12 @@ aarch64_register_move_cost (machine_mode mode, + = aarch64_tune_params.regmove_cost; + + /* Caller save and pointer regs are equivalent to GENERAL_REGS. */ +- if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS) ++ if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS ++ || to == STUB_REGS) + to = GENERAL_REGS; + +- if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS) ++ if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS ++ || from == STUB_REGS) + from = GENERAL_REGS; + + /* Make RDFFR very expensive. In particular, if we know that the FFR +@@ -22957,6 +22963,215 @@ aarch64_sls_barrier (int mitigation_required) + : ""; + } + ++static GTY (()) tree aarch64_sls_shared_thunks[30]; ++static GTY (()) bool aarch64_sls_shared_thunks_needed = false; ++const char *indirect_symbol_names[30] = { ++ "__call_indirect_x0", ++ "__call_indirect_x1", ++ "__call_indirect_x2", ++ "__call_indirect_x3", ++ "__call_indirect_x4", ++ "__call_indirect_x5", ++ "__call_indirect_x6", ++ "__call_indirect_x7", ++ "__call_indirect_x8", ++ "__call_indirect_x9", ++ "__call_indirect_x10", ++ "__call_indirect_x11", ++ "__call_indirect_x12", ++ "__call_indirect_x13", ++ "__call_indirect_x14", ++ "__call_indirect_x15", ++ "", /* "__call_indirect_x16", */ ++ "", /* "__call_indirect_x17", */ ++ "__call_indirect_x18", ++ "__call_indirect_x19", ++ "__call_indirect_x20", ++ "__call_indirect_x21", ++ "__call_indirect_x22", ++ "__call_indirect_x23", ++ "__call_indirect_x24", ++ "__call_indirect_x25", ++ "__call_indirect_x26", ++ "__call_indirect_x27", ++ "__call_indirect_x28", ++ "__call_indirect_x29", ++}; ++ ++/* Function to create a BLR thunk. This thunk is used to mitigate straight ++ line speculation. 
Instead of a simple BLR that can be speculated past, ++ we emit a BL to this thunk, and this thunk contains a BR to the relevant ++ register. These thunks have the relevant speculation barries put after ++ their indirect branch so that speculation is blocked. ++ ++ We use such a thunk so the speculation barriers are kept off the ++ architecturally executed path in order to reduce the performance overhead. ++ ++ When optimizing for size we use stubs shared by the linked object. ++ When optimizing for performance we emit stubs for each function in the hope ++ that the branch predictor can better train on jumps specific for a given ++ function. */ ++rtx ++aarch64_sls_create_blr_label (int regnum) ++{ ++ gcc_assert (STUB_REGNUM_P (regnum)); ++ if (optimize_function_for_size_p (cfun)) ++ { ++ /* For the thunks shared between different functions in this compilation ++ unit we use a named symbol -- this is just for users to more easily ++ understand the generated assembly. */ ++ aarch64_sls_shared_thunks_needed = true; ++ const char *thunk_name = indirect_symbol_names[regnum]; ++ if (aarch64_sls_shared_thunks[regnum] == NULL) ++ { ++ /* Build a decl representing this function stub and record it for ++ later. We build a decl here so we can use the GCC machinery for ++ handling sections automatically (through `get_named_section` and ++ `make_decl_one_only`). That saves us a lot of trouble handling ++ the specifics of different output file formats. */ ++ tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, ++ get_identifier (thunk_name), ++ build_function_type_list (void_type_node, ++ NULL_TREE)); ++ DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL, ++ NULL_TREE, void_type_node); ++ TREE_PUBLIC (decl) = 1; ++ TREE_STATIC (decl) = 1; ++ DECL_IGNORED_P (decl) = 1; ++ DECL_ARTIFICIAL (decl) = 1; ++ make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl)); ++ resolve_unique_section (decl, 0, false); ++ aarch64_sls_shared_thunks[regnum] = decl; ++ } ++ ++ return gen_rtx_SYMBOL_REF (Pmode, thunk_name); ++ } ++ ++ if (cfun->machine->call_via[regnum] == NULL) ++ cfun->machine->call_via[regnum] ++ = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ()); ++ return cfun->machine->call_via[regnum]; ++} ++ ++/* Helper function for aarch64_sls_emit_blr_function_thunks and ++ aarch64_sls_emit_shared_blr_thunks below. */ ++static void ++aarch64_sls_emit_function_stub (FILE *out_file, int regnum) ++{ ++ /* Save in x16 and branch to that function so this transformation does ++ not prevent jumping to `BTI c` instructions. */ ++ asm_fprintf (out_file, "\tmov\tx16, x%d\n", regnum); ++ asm_fprintf (out_file, "\tbr\tx16\n"); ++} ++ ++/* Emit all BLR stubs for this particular function. ++ Here we emit all the BLR stubs needed for the current function. Since we ++ emit these stubs in a consecutive block we know there will be no speculation ++ gadgets between each stub, and hence we only emit a speculation barrier at ++ the end of the stub sequences. ++ ++ This is called in the TARGET_ASM_FUNCTION_EPILOGUE hook. */ ++void ++aarch64_sls_emit_blr_function_thunks (FILE *out_file) ++{ ++ if (! aarch64_harden_sls_blr_p ()) ++ return; ++ ++ bool any_functions_emitted = false; ++ /* We must save and restore the current function section since this assembly ++ is emitted at the end of the function. This means it can be emitted *just ++ after* the cold section of a function. That cold part would be emitted in ++ a different section. 
That switch would trigger a `.cfi_endproc` directive ++ to be emitted in the original section and a `.cfi_startproc` directive to ++ be emitted in the new section. Switching to the original section without ++ restoring would mean that the `.cfi_endproc` emitted as a function ends ++ would happen in a different section -- leaving an unmatched ++ `.cfi_startproc` in the cold text section and an unmatched `.cfi_endproc` ++ in the standard text section. */ ++ section *save_text_section = in_section; ++ switch_to_section (function_section (current_function_decl)); ++ for (int regnum = 0; regnum < 30; ++regnum) ++ { ++ rtx specu_label = cfun->machine->call_via[regnum]; ++ if (specu_label == NULL) ++ continue; ++ ++ targetm.asm_out.print_operand (out_file, specu_label, 0); ++ asm_fprintf (out_file, ":\n"); ++ aarch64_sls_emit_function_stub (out_file, regnum); ++ any_functions_emitted = true; ++ } ++ if (any_functions_emitted) ++ /* Can use the SB if needs be here, since this stub will only be used ++ by the current function, and hence for the current target. */ ++ asm_fprintf (out_file, "\t%s\n", aarch64_sls_barrier (true)); ++ switch_to_section (save_text_section); ++} ++ ++/* Emit shared BLR stubs for the current compilation unit. ++ Over the course of compiling this unit we may have converted some BLR ++ instructions to a BL to a shared stub function. This is where we emit those ++ stub functions. ++ This function is for the stubs shared between different functions in this ++ compilation unit. We share when optimizing for size instead of speed. ++ ++ This function is called through the TARGET_ASM_FILE_END hook. */ ++void ++aarch64_sls_emit_shared_blr_thunks (FILE *out_file) ++{ ++ if (! aarch64_sls_shared_thunks_needed) ++ return; ++ ++ for (int regnum = 0; regnum < 30; ++regnum) ++ { ++ tree decl = aarch64_sls_shared_thunks[regnum]; ++ if (!decl) ++ continue; ++ ++ const char *name = indirect_symbol_names[regnum]; ++ switch_to_section (get_named_section (decl, NULL, 0)); ++ ASM_OUTPUT_ALIGN (out_file, 2); ++ targetm.asm_out.globalize_label (out_file, name); ++ /* Only emits if the compiler is configured for an assembler that can ++ handle visibility directives. */ ++ targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN); ++ ASM_OUTPUT_TYPE_DIRECTIVE (out_file, name, "function"); ++ ASM_OUTPUT_LABEL (out_file, name); ++ aarch64_sls_emit_function_stub (out_file, regnum); ++ /* Use the most conservative target to ensure it can always be used by any ++ function in the translation unit. */ ++ asm_fprintf (out_file, "\tdsb\tsy\n\tisb\n"); ++ ASM_DECLARE_FUNCTION_SIZE (out_file, name, decl); ++ } ++} ++ ++/* Implement TARGET_ASM_FILE_END. */ ++void ++aarch64_asm_file_end () ++{ ++ aarch64_sls_emit_shared_blr_thunks (asm_out_file); ++ /* Since this function will be called for the ASM_FILE_END hook, we ensure ++ that what would be called otherwise (e.g. `file_end_indicate_exec_stack` ++ for FreeBSD) still gets called. */ ++#ifdef TARGET_ASM_FILE_END ++ TARGET_ASM_FILE_END (); ++#endif ++} ++ ++const char * ++aarch64_indirect_call_asm (rtx addr) ++{ ++ gcc_assert (REG_P (addr)); ++ if (aarch64_harden_sls_blr_p ()) ++ { ++ rtx stub_label = aarch64_sls_create_blr_label (REGNO (addr)); ++ output_asm_insn ("bl\t%0", &stub_label); ++ } ++ else ++ output_asm_insn ("blr\t%0", &addr); ++ return ""; ++} ++ + /* Target-specific selftests. 
*/ + + #if CHECKING_P +@@ -23507,6 +23722,12 @@ aarch64_libgcc_floating_mode_supported_p + #undef TARGET_MD_ASM_ADJUST + #define TARGET_MD_ASM_ADJUST arm_md_asm_adjust + ++#undef TARGET_ASM_FILE_END ++#define TARGET_ASM_FILE_END aarch64_asm_file_end ++ ++#undef TARGET_ASM_FUNCTION_EPILOGUE ++#define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks ++ + struct gcc_target targetm = TARGET_INITIALIZER; + + #include "gt-aarch64.h" +diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h +index 8e0fc37..7331450 100644 +--- a/gcc/config/aarch64/aarch64.h ++++ b/gcc/config/aarch64/aarch64.h +@@ -643,6 +643,16 @@ extern unsigned aarch64_architecture_version; + #define GP_REGNUM_P(REGNO) \ + (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM)) + ++/* Registers known to be preserved over a BL instruction. This consists of the ++ GENERAL_REGS without x16, x17, and x30. The x30 register is changed by the ++ BL instruction itself, while the x16 and x17 registers may be used by ++ veneers which can be inserted by the linker. */ ++#define STUB_REGNUM_P(REGNO) \ ++ (GP_REGNUM_P (REGNO) \ ++ && (REGNO) != R16_REGNUM \ ++ && (REGNO) != R17_REGNUM \ ++ && (REGNO) != R30_REGNUM) \ ++ + #define FP_REGNUM_P(REGNO) \ + (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM)) + +@@ -667,6 +677,7 @@ enum reg_class + { + NO_REGS, + TAILCALL_ADDR_REGS, ++ STUB_REGS, + GENERAL_REGS, + STACK_REG, + POINTER_REGS, +@@ -689,6 +700,7 @@ enum reg_class + { \ + "NO_REGS", \ + "TAILCALL_ADDR_REGS", \ ++ "STUB_REGS", \ + "GENERAL_REGS", \ + "STACK_REG", \ + "POINTER_REGS", \ +@@ -708,6 +720,7 @@ enum reg_class + { \ + { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \ + { 0x00030000, 0x00000000, 0x00000000 }, /* TAILCALL_ADDR_REGS */\ ++ { 0x3ffcffff, 0x00000000, 0x00000000 }, /* STUB_REGS */ \ + { 0x7fffffff, 0x00000000, 0x00000003 }, /* GENERAL_REGS */ \ + { 0x80000000, 0x00000000, 0x00000000 }, /* STACK_REG */ \ + { 0xffffffff, 0x00000000, 0x00000003 }, /* POINTER_REGS */ \ +@@ -862,6 +875,8 @@ typedef struct GTY (()) machine_function + struct aarch64_frame frame; + /* One entry for each hard register. */ + bool reg_is_wrapped_separately[LAST_SAVED_REGNUM]; ++ /* One entry for each general purpose register. 
*/ ++ rtx call_via[SP_REGNUM]; + bool label_is_assembled; + } machine_function; + #endif +diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md +index dda04ee..43da754 100644 +--- a/gcc/config/aarch64/aarch64.md ++++ b/gcc/config/aarch64/aarch64.md +@@ -1022,16 +1022,15 @@ + ) + + (define_insn "*call_insn" +- [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "r, Usf")) ++ [(call (mem:DI (match_operand:DI 0 "aarch64_call_insn_operand" "Ucr, Usf")) + (match_operand 1 "" "")) + (unspec:DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_CALLEE_ABI) + (clobber (reg:DI LR_REGNUM))] + "" + "@ +- blr\\t%0 ++ * return aarch64_indirect_call_asm (operands[0]); + bl\\t%c0" +- [(set_attr "type" "call, call")] +-) ++ [(set_attr "type" "call, call")]) + + (define_expand "call_value" + [(parallel +@@ -1050,13 +1049,13 @@ + + (define_insn "*call_value_insn" + [(set (match_operand 0 "" "") +- (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "r, Usf")) ++ (call (mem:DI (match_operand:DI 1 "aarch64_call_insn_operand" "Ucr, Usf")) + (match_operand 2 "" ""))) + (unspec:DI [(match_operand:DI 3 "const_int_operand")] UNSPEC_CALLEE_ABI) + (clobber (reg:DI LR_REGNUM))] + "" + "@ +- blr\\t%1 ++ * return aarch64_indirect_call_asm (operands[1]); + bl\\t%c1" + [(set_attr "type" "call, call")] + ) +diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md +index d993268..8cc6f50 100644 +--- a/gcc/config/aarch64/constraints.md ++++ b/gcc/config/aarch64/constraints.md +@@ -24,6 +24,15 @@ + (define_register_constraint "Ucs" "TAILCALL_ADDR_REGS" + "@internal Registers suitable for an indirect tail call") + ++(define_register_constraint "Ucr" ++ "aarch64_harden_sls_blr_p () ? STUB_REGS : GENERAL_REGS" ++ "@internal Registers to be used for an indirect call. ++ This is usually the general registers, but when we are hardening against ++ Straight Line Speculation we disallow x16, x17, and x30 so we can use ++ indirection stubs. These indirection stubs cannot use the above registers ++ since they will be reached by a BL that may have to go through a linker ++ veneer.") ++ + (define_register_constraint "w" "FP_REGS" + "Floating point and SIMD vector registers.") + +diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md +index 215fcec..1754b1e 100644 +--- a/gcc/config/aarch64/predicates.md ++++ b/gcc/config/aarch64/predicates.md +@@ -32,7 +32,8 @@ + + (define_predicate "aarch64_general_reg" + (and (match_operand 0 "register_operand") +- (match_test "REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS"))) ++ (match_test "REGNO_REG_CLASS (REGNO (op)) == STUB_REGS ++ || REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS"))) + + ;; Return true if OP a (const_int 0) operand. + (define_predicate "const0_operand" +diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c +new file mode 100644 +index 0000000..b1fb754 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr-bti.c +@@ -0,0 +1,40 @@ ++/* { dg-do compile } */ ++/* { dg-additional-options "-mharden-sls=blr -mbranch-protection=bti" } */ ++/* ++ Ensure that the SLS hardening of BLR leaves no BLR instructions. ++ Here we also check that there are no BR instructions with anything except an ++ x16 or x17 register. 
This is because a `BTI c` instruction can be branched ++ to using a BLR instruction using any register, but can only be branched to ++ with a BR using an x16 or x17 register. ++ */ ++typedef int (foo) (int, int); ++typedef void (bar) (int, int); ++struct sls_testclass { ++ foo *x; ++ bar *y; ++ int left; ++ int right; ++}; ++ ++/* We test both RTL patterns for a call which returns a value and a call which ++ does not. */ ++int blr_call_value (struct sls_testclass x) ++{ ++ int retval = x.x(x.left, x.right); ++ if (retval % 10) ++ return 100; ++ return 9; ++} ++ ++int blr_call (struct sls_testclass x) ++{ ++ x.y(x.left, x.right); ++ if (x.left % 10) ++ return 100; ++ return 9; ++} ++ ++/* { dg-final { scan-assembler-not {\tblr\t} } } */ ++/* { dg-final { scan-assembler-not {\tbr\tx(?!16|17)} } } */ ++/* { dg-final { scan-assembler {\tbr\tx(16|17)} } } */ ++ +diff --git a/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c +new file mode 100644 +index 0000000..88bafff +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/sls-mitigation/sls-miti-blr.c +@@ -0,0 +1,33 @@ ++/* { dg-additional-options "-mharden-sls=blr -save-temps" } */ ++/* Ensure that the SLS hardening of BLR leaves no BLR instructions. ++ We only test that all BLR instructions have been removed, not that the ++ resulting code makes sense. */ ++typedef int (foo) (int, int); ++typedef void (bar) (int, int); ++struct sls_testclass { ++ foo *x; ++ bar *y; ++ int left; ++ int right; ++}; ++ ++/* We test both RTL patterns for a call which returns a value and a call which ++ does not. */ ++int blr_call_value (struct sls_testclass x) ++{ ++ int retval = x.x(x.left, x.right); ++ if (retval % 10) ++ return 100; ++ return 9; ++} ++ ++int blr_call (struct sls_testclass x) ++{ ++ x.y(x.left, x.right); ++ if (x.left % 10) ++ return 100; ++ return 9; ++} ++ ++/* { dg-final { scan-assembler-not {\tblr\t} } } */ ++/* { dg-final { scan-assembler {\tbr\tx[0-9][0-9]?} } } */ +-- +2.7.4 + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch b/poky/meta/recipes-devtools/gcc/gcc/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch new file mode 100644 index 000000000..27237feb5 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0003-gcc-4.3.3-SYSROOT_CFLAGS_FOR_TARGET.patch @@ -0,0 +1,70 @@ +From 6e3395c0bc933bdc3242d1dead4896d0aa4e11a8 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:08:31 +0400 +Subject: [PATCH] gcc-4.3.3: SYSROOT_CFLAGS_FOR_TARGET + +Before committing, I noticed that PR/32161 was marked as a dup of PR/32009, but my previous patch did not fix it. + +This alternative patch is better because it lets you just use CFLAGS_FOR_TARGET to set the compilation flags for libgcc. Since bootstrapped target libraries are never compiled with the native compiler, it makes little sense to use different flags for stage1 and later stages. And it also makes little sense to use a different variable than CFLAGS_FOR_TARGET. + +Other changes I had to do include: + +- moving the creation of default CFLAGS_FOR_TARGET from Makefile.am to configure.ac, because otherwise the BOOT_CFLAGS are substituted into CFLAGS_FOR_TARGET (which is "-O2 -g $(CFLAGS)") via $(CFLAGS). It is also cleaner this way though. + +- passing the right CFLAGS to configure scripts as exported environment variables + +I also stopped passing LIBCFLAGS to configure scripts since they are unused in the whole src tree. 
And I updated the documentation as H-P reminded me to do. + +Bootstrapped/regtested i686-pc-linux-gnu, will commit to 4.4 shortly. Ok for 4.3? + +Signed-off-by: Paolo Bonzini +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + configure | 32 ++++++++++++++++++++++++++++++++ + 1 file changed, 32 insertions(+) + +diff --git a/configure b/configure +index 226a64939d1..b31dc137fc9 100755 +--- a/configure ++++ b/configure +@@ -6971,6 +6971,38 @@ fi + + + ++# During gcc bootstrap, if we use some random cc for stage1 then CFLAGS ++# might be empty or "-g". We don't require a C++ compiler, so CXXFLAGS ++# might also be empty (or "-g", if a non-GCC C++ compiler is in the path). ++# We want to ensure that TARGET libraries (which we know are built with ++# gcc) are built with "-O2 -g", so include those options when setting ++# CFLAGS_FOR_TARGET and CXXFLAGS_FOR_TARGET. ++if test "x$CFLAGS_FOR_TARGET" = x; then ++ CFLAGS_FOR_TARGET=$CFLAGS ++ case " $CFLAGS " in ++ *" -O2 "*) ;; ++ *) CFLAGS_FOR_TARGET="-O2 $CFLAGS" ;; ++ esac ++ case " $CFLAGS " in ++ *" -g "* | *" -g3 "*) ;; ++ *) CFLAGS_FOR_TARGET="-g $CFLAGS" ;; ++ esac ++fi ++ ++ ++if test "x$CXXFLAGS_FOR_TARGET" = x; then ++ CXXFLAGS_FOR_TARGET=$CXXFLAGS ++ case " $CXXFLAGS " in ++ *" -O2 "*) ;; ++ *) CXXFLAGS_FOR_TARGET="-O2 $CXXFLAGS" ;; ++ esac ++ case " $CXXFLAGS " in ++ *" -g "* | *" -g3 "*) ;; ++ *) CXXFLAGS_FOR_TARGET="-g $CXXFLAGS" ;; ++ esac ++fi ++ ++ + # Handle --with-headers=XXX. If the value is not "yes", the contents of + # the named directory are copied to $(tooldir)/sys-include. + if test x"${with_headers}" != x && test x"${with_headers}" != xno ; then diff --git a/poky/meta/recipes-devtools/gcc/gcc/0004-64-bit-multilib-hack.patch b/poky/meta/recipes-devtools/gcc/gcc/0004-64-bit-multilib-hack.patch new file mode 100644 index 000000000..7c751bef6 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0004-64-bit-multilib-hack.patch @@ -0,0 +1,116 @@ +From 85a7c5aeb82ed61e6ef6d8e061b9da9e6a4a652c Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:10:06 +0400 +Subject: [PATCH] 64-bit multilib hack. + +GCC has internal multilib handling code but it assumes a very specific rigid directory +layout. The build system implementation of multilib layout is very generic and allows +complete customisation of the library directories. + +This patch is a partial solution to allow any custom directories to be passed into gcc +and handled correctly. It forces gcc to use the base_libdir (which is the current +directory, "."). We need to do this for each multilib that is configured as we don't +know which compiler options may be being passed into the compiler. Since we have a compiler +per mulitlib at this point that isn't an issue. + +The one problem is the target compiler is only going to work for the default multlilib at +this point. Ideally we'd figure out which multilibs were being enabled with which paths +and be able to patch these entries with a complete set of correct paths but this we +don't have such code at this point. This is something the target gcc recipe should do +and override these platform defaults in its build config. 
+ +Do same for riscv64 and aarch64 + +RP 15/8/11 + +Upstream-Status: Inappropriate[OE-Specific] + +Signed-off-by: Khem Raj +Signed-off-by: Elvis Dowson +Signed-off-by: Mark Hatle +--- + gcc/config/aarch64/t-aarch64-linux | 8 ++++---- + gcc/config/i386/t-linux64 | 6 ++---- + gcc/config/mips/t-linux64 | 10 +++------- + gcc/config/riscv/t-linux | 6 ++++-- + gcc/config/rs6000/t-linux64 | 5 ++--- + 5 files changed, 15 insertions(+), 20 deletions(-) + +diff --git a/gcc/config/aarch64/t-aarch64-linux b/gcc/config/aarch64/t-aarch64-linux +index 83e59e33b85..b1356be1fb4 100644 +--- a/gcc/config/aarch64/t-aarch64-linux ++++ b/gcc/config/aarch64/t-aarch64-linux +@@ -21,8 +21,8 @@ + LIB1ASMSRC = aarch64/lib1funcs.asm + LIB1ASMFUNCS = _aarch64_sync_cache_range + +-AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be) +-MULTILIB_OSDIRNAMES = mabi.lp64=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu) +-MULTIARCH_DIRNAME = $(call if_multiarch,aarch64$(AARCH_BE)-linux-gnu) ++#AARCH_BE = $(if $(findstring TARGET_BIG_ENDIAN_DEFAULT=1, $(tm_defines)),_be) ++#MULTILIB_OSDIRNAMES = mabi.lp64=../lib64$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu) ++#MULTIARCH_DIRNAME = $(call if_multiarch,aarch64$(AARCH_BE)-linux-gnu) + +-MULTILIB_OSDIRNAMES += mabi.ilp32=../libilp32$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu_ilp32) ++#MULTILIB_OSDIRNAMES += mabi.ilp32=../libilp32$(call if_multiarch,:aarch64$(AARCH_BE)-linux-gnu_ilp32) +diff --git a/gcc/config/i386/t-linux64 b/gcc/config/i386/t-linux64 +index 1171e218578..5e057b7e5db 100644 +--- a/gcc/config/i386/t-linux64 ++++ b/gcc/config/i386/t-linux64 +@@ -32,7 +32,5 @@ + # + comma=, + MULTILIB_OPTIONS = $(subst $(comma),/,$(TM_MULTILIB_CONFIG)) +-MULTILIB_DIRNAMES = $(patsubst m%, %, $(subst /, ,$(MULTILIB_OPTIONS))) +-MULTILIB_OSDIRNAMES = m64=../lib64$(call if_multiarch,:x86_64-linux-gnu) +-MULTILIB_OSDIRNAMES+= m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:i386-linux-gnu) +-MULTILIB_OSDIRNAMES+= mx32=../libx32$(call if_multiarch,:x86_64-linux-gnux32) ++MULTILIB_DIRNAMES = . . ++MULTILIB_OSDIRNAMES = ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) +diff --git a/gcc/config/mips/t-linux64 b/gcc/config/mips/t-linux64 +index ceb58d3b5f3..43fe2bf28ab 100644 +--- a/gcc/config/mips/t-linux64 ++++ b/gcc/config/mips/t-linux64 +@@ -17,10 +17,6 @@ + # . + + MULTILIB_OPTIONS = mabi=n32/mabi=32/mabi=64 +-MULTILIB_DIRNAMES = n32 32 64 +-MIPS_EL = $(if $(filter %el, $(firstword $(subst -, ,$(target)))),el) +-MIPS_SOFT = $(if $(strip $(filter MASK_SOFT_FLOAT_ABI, $(target_cpu_default)) $(filter soft, $(with_float))),soft) +-MULTILIB_OSDIRNAMES = \ +- ../lib32$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabin32$(MIPS_SOFT)) \ +- ../lib$(call if_multiarch,:mips$(MIPS_EL)-linux-gnu$(MIPS_SOFT)) \ +- ../lib64$(call if_multiarch,:mips64$(MIPS_EL)-linux-gnuabi64$(MIPS_SOFT)) ++MULTILIB_DIRNAMES = . . . ++MULTILIB_OSDIRNAMES = ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) ++ +diff --git a/gcc/config/riscv/t-linux b/gcc/config/riscv/t-linux +index 216d2776a18..e4d817621fc 100644 +--- a/gcc/config/riscv/t-linux ++++ b/gcc/config/riscv/t-linux +@@ -1,3 +1,5 @@ + # Only XLEN and ABI affect Linux multilib dir names, e.g. 
/lib32/ilp32d/ +-MULTILIB_DIRNAMES := $(patsubst rv32%,lib32,$(patsubst rv64%,lib64,$(MULTILIB_DIRNAMES))) +-MULTILIB_OSDIRNAMES := $(patsubst lib%,../lib%,$(MULTILIB_DIRNAMES)) ++#MULTILIB_DIRNAMES := $(patsubst rv32%,lib32,$(patsubst rv64%,lib64,$(MULTILIB_DIRNAMES))) ++MULTILIB_DIRNAMES := . . ++#MULTILIB_OSDIRNAMES := $(patsubst lib%,../lib%,$(MULTILIB_DIRNAMES)) ++MULTILIB_OSDIRNAMES := ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) +diff --git a/gcc/config/rs6000/t-linux64 b/gcc/config/rs6000/t-linux64 +index 264a7e27524..dc9d440f66b 100644 +--- a/gcc/config/rs6000/t-linux64 ++++ b/gcc/config/rs6000/t-linux64 +@@ -26,10 +26,9 @@ + # MULTILIB_OSDIRNAMES according to what is found on the target. + + MULTILIB_OPTIONS := m64/m32 +-MULTILIB_DIRNAMES := 64 32 ++MULTILIB_DIRNAMES := . . + MULTILIB_EXTRA_OPTS := +-MULTILIB_OSDIRNAMES := m64=../lib64$(call if_multiarch,:powerpc64-linux-gnu) +-MULTILIB_OSDIRNAMES += m32=$(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib)$(call if_multiarch,:powerpc-linux-gnu) ++MULTILIB_OSDIRNAMES := ../$(shell basename $(base_libdir)) ../$(shell basename $(base_libdir)) + + rs6000-linux.o: $(srcdir)/config/rs6000/rs6000-linux.c + $(COMPILE) $< diff --git a/poky/meta/recipes-devtools/gcc/gcc/0005-optional-libstdc.patch b/poky/meta/recipes-devtools/gcc/gcc/0005-optional-libstdc.patch new file mode 100644 index 000000000..4020c9e3c --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0005-optional-libstdc.patch @@ -0,0 +1,122 @@ +From 6ddfb0bfcd1eea71acd37ab06f7a4510b9f1d12b Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:12:56 +0400 +Subject: [PATCH] optional libstdc + +gcc-runtime builds libstdc++ separately from gcc-cross-*. Its configure tests using g++ +will not run correctly since by default the linker will try to link against libstdc++ +which shouldn't exist yet. We need an option to disable -lstdc++ +option whilst leaving -lc, -lgcc and other automatic library dependencies added by gcc +driver. This patch adds such an option which only disables the -lstdc++. + +A "standard" gcc build uses xgcc and hence avoids this. We should ask upstream how to +do this officially, the likely answer is don't build libstdc++ separately. + +RP 29/6/10 + +Signed-off-by: Khem Raj + +Upstream-Status: Inappropriate [embedded specific] +--- + gcc/c-family/c.opt | 4 ++++ + gcc/cp/g++spec.c | 1 + + gcc/doc/invoke.texi | 32 +++++++++++++++++++++++++++++++- + gcc/gcc.c | 1 + + 4 files changed, 37 insertions(+), 1 deletion(-) + +diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt +index c49da99d395..35f712e2c84 100644 +--- a/gcc/c-family/c.opt ++++ b/gcc/c-family/c.opt +@@ -2025,6 +2025,10 @@ nostdinc++ + C++ ObjC++ + Do not search standard system include directories for C++. + ++nostdlib++ ++Driver ++Do not link standard C++ runtime library ++ + o + C ObjC C++ ObjC++ Joined Separate + ; Documented in common.opt +diff --git a/gcc/cp/g++spec.c b/gcc/cp/g++spec.c +index 0ab63bcd211..7b081e9e4f0 100644 +--- a/gcc/cp/g++spec.c ++++ b/gcc/cp/g++spec.c +@@ -137,6 +137,7 @@ lang_specific_driver (struct cl_decoded_option **in_decoded_options, + switch (decoded_options[i].opt_index) + { + case OPT_nostdlib: ++ case OPT_nostdlib__: + case OPT_nodefaultlibs: + library = -1; + break; +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index f12d8d12150..cf6cb428e7d 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -230,6 +230,9 @@ in the following sections. 
+ -fno-weak -nostdinc++ @gol + -fvisibility-inlines-hidden @gol + -fvisibility-ms-compat @gol ++-fvtable-verify=@r{[}std@r{|}preinit@r{|}none@r{]} @gol ++-fvtv-counts -fvtv-debug @gol ++-nostdlib++ @gol + -fext-numeric-literals @gol + -Wabi-tag -Wcatch-value -Wcatch-value=@var{n} @gol + -Wno-class-conversion -Wclass-memaccess @gol +@@ -599,7 +602,7 @@ Objective-C and Objective-C++ Dialects}. + -pie -pthread -r -rdynamic @gol + -s -static -static-pie -static-libgcc -static-libstdc++ @gol + -static-libasan -static-libtsan -static-liblsan -static-libubsan @gol +--shared -shared-libgcc -symbolic @gol ++-shared -shared-libgcc -symbolic -nostdlib++ @gol + -T @var{script} -Wl,@var{option} -Xlinker @var{option} @gol + -u @var{symbol} -z @var{keyword}} + +@@ -14407,6 +14410,33 @@ Specify that the program entry point is @var{entry}. The argument is + interpreted by the linker; the GNU linker accepts either a symbol name + or an address. + ++@item -nostdlib++ ++@opindex nostdlib++ ++Do not use the standard system C++ runtime libraries when linking. ++Only the libraries you specify will be passed to the linker. ++ ++@cindex @option{-lgcc}, use with @option{-nostdlib} ++@cindex @option{-nostdlib} and unresolved references ++@cindex unresolved references and @option{-nostdlib} ++@cindex @option{-lgcc}, use with @option{-nodefaultlibs} ++@cindex @option{-nodefaultlibs} and unresolved references ++@cindex unresolved references and @option{-nodefaultlibs} ++One of the standard libraries bypassed by @option{-nostdlib} and ++@option{-nodefaultlibs} is @file{libgcc.a}, a library of internal subroutines ++which GCC uses to overcome shortcomings of particular machines, or special ++needs for some languages. ++(@xref{Interface,,Interfacing to GCC Output,gccint,GNU Compiler ++Collection (GCC) Internals}, ++for more discussion of @file{libgcc.a}.) ++In most cases, you need @file{libgcc.a} even when you want to avoid ++other standard libraries. In other words, when you specify @option{-nostdlib} ++or @option{-nodefaultlibs} you should usually specify @option{-lgcc} as well. ++This ensures that you have no unresolved references to internal GCC ++library subroutines. ++(An example of such an internal subroutine is @code{__main}, used to ensure C++ ++constructors are called; @pxref{Collect2,,@code{collect2}, gccint, ++GNU Compiler Collection (GCC) Internals}.) ++ + @item -pie + @opindex pie + Produce a dynamically linked position independent executable on targets +diff --git a/gcc/gcc.c b/gcc/gcc.c +index b2200c5185a..f8be58ce0a6 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -1051,6 +1051,7 @@ proper position among the other output files. */ + %(mflib) " STACK_SPLIT_SPEC "\ + %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} " SANITIZER_SPEC " \ + %{!nostdlib:%{!r:%{!nodefaultlibs:%(link_ssp) %(link_gcc_c_sequence)}}}\ ++ %{!nostdlib++:}\ + %{!nostdlib:%{!r:%{!nostartfiles:%E}}} %{T*} \n%(post_link) }}}}}}" + #endif + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0006-COLLECT_GCC_OPTIONS.patch b/poky/meta/recipes-devtools/gcc/gcc/0006-COLLECT_GCC_OPTIONS.patch new file mode 100644 index 000000000..9fbbe8070 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0006-COLLECT_GCC_OPTIONS.patch @@ -0,0 +1,35 @@ +From a6c90d3a9c5010b4aa7cc30467cf81ca7e0f430e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:16:28 +0400 +Subject: [PATCH] COLLECT_GCC_OPTIONS + +This patch adds --sysroot into COLLECT_GCC_OPTIONS which is used to +invoke collect2. 
+ +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + gcc/gcc.c | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/gcc/gcc.c b/gcc/gcc.c +index f8be58ce0a6..48b0f9dde81 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -4806,6 +4806,15 @@ set_collect_gcc_options (void) + sizeof ("COLLECT_GCC_OPTIONS=") - 1); + + first_time = TRUE; ++#ifdef HAVE_LD_SYSROOT ++ if (target_system_root_changed && target_system_root) ++ { ++ obstack_grow (&collect_obstack, "'--sysroot=", sizeof("'--sysroot=")-1); ++ obstack_grow (&collect_obstack, target_system_root,strlen(target_system_root)); ++ obstack_grow (&collect_obstack, "'", 1); ++ first_time = FALSE; ++ } ++#endif + for (i = 0; (int) i < n_switches; i++) + { + const char *const *args; diff --git a/poky/meta/recipes-devtools/gcc/gcc/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch b/poky/meta/recipes-devtools/gcc/gcc/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch new file mode 100644 index 000000000..a764bdd0f --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0007-Use-the-defaults.h-in-B-instead-of-S-and-t-oe-in-B.patch @@ -0,0 +1,92 @@ +From 5670d4489f119d2da661734895ac0be99b606d1b Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:17:25 +0400 +Subject: [PATCH] Use the defaults.h in ${B} instead of ${S}, and t-oe in ${B} + +Use the defaults.h in ${B} instead of ${S}, and t-oe in ${B}, so that +the source can be shared between gcc-cross-initial, +gcc-cross-intermediate, gcc-cross, gcc-runtime, and also the sdk build. + +Signed-off-by: Khem Raj + +Upstream-Status: Pending + +While compiling gcc-crosssdk-initial-x86_64 on some host, there is +occasionally failure that test the existance of default.h doesn't +work, the reason is tm_include_list='** defaults.h' rather than +tm_include_list='** ./defaults.h' + +So we add the test condition for this situation. 
+Signed-off-by: Hongxu Jia +--- + gcc/Makefile.in | 2 +- + gcc/configure | 4 ++-- + gcc/configure.ac | 4 ++-- + gcc/mkconfig.sh | 4 ++-- + 4 files changed, 7 insertions(+), 7 deletions(-) + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 543b477ff18..a67d2cc18d6 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -540,7 +540,7 @@ TARGET_SYSTEM_ROOT = @TARGET_SYSTEM_ROOT@ + TARGET_SYSTEM_ROOT_DEFINE = @TARGET_SYSTEM_ROOT_DEFINE@ + + xmake_file=@xmake_file@ +-tmake_file=@tmake_file@ ++tmake_file=@tmake_file@ ./t-oe + TM_ENDIAN_CONFIG=@TM_ENDIAN_CONFIG@ + TM_MULTILIB_CONFIG=@TM_MULTILIB_CONFIG@ + TM_MULTILIB_EXCEPTIONS_CONFIG=@TM_MULTILIB_EXCEPTIONS_CONFIG@ +diff --git a/gcc/configure b/gcc/configure +index 8de766a942c..b26e8fc7fee 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -12705,8 +12705,8 @@ for f in $tm_file; do + tm_include_list="${tm_include_list} $f" + ;; + defaults.h ) +- tm_file_list="${tm_file_list} \$(srcdir)/$f" +- tm_include_list="${tm_include_list} $f" ++ tm_file_list="${tm_file_list} ./$f" ++ tm_include_list="${tm_include_list} ./$f" + ;; + * ) + tm_file_list="${tm_file_list} \$(srcdir)/config/$f" +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 8bfd6feb780..26fa46802c7 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -2138,8 +2138,8 @@ for f in $tm_file; do + tm_include_list="${tm_include_list} $f" + ;; + defaults.h ) +- tm_file_list="${tm_file_list} \$(srcdir)/$f" +- tm_include_list="${tm_include_list} $f" ++ tm_file_list="${tm_file_list} ./$f" ++ tm_include_list="${tm_include_list} ./$f" + ;; + * ) + tm_file_list="${tm_file_list} \$(srcdir)/config/$f" +diff --git a/gcc/mkconfig.sh b/gcc/mkconfig.sh +index d2c677a4a42..d03852481cb 100644 +--- a/gcc/mkconfig.sh ++++ b/gcc/mkconfig.sh +@@ -77,7 +77,7 @@ if [ -n "$HEADERS" ]; then + if [ $# -ge 1 ]; then + echo '#ifdef IN_GCC' >> ${output}T + for file in "$@"; do +- if test x"$file" = x"defaults.h"; then ++ if test x"$file" = x"./defaults.h" -o x"$file" = x"defaults.h"; then + postpone_defaults_h="yes" + else + echo "# include \"$file\"" >> ${output}T +@@ -106,7 +106,7 @@ esac + + # If we postponed including defaults.h, add the #include now. + if test x"$postpone_defaults_h" = x"yes"; then +- echo "# include \"defaults.h\"" >> ${output}T ++ echo "# include \"./defaults.h\"" >> ${output}T + fi + + # Add multiple inclusion protection guard, part two. diff --git a/poky/meta/recipes-devtools/gcc/gcc/0008-fortran-cross-compile-hack.patch b/poky/meta/recipes-devtools/gcc/gcc/0008-fortran-cross-compile-hack.patch new file mode 100644 index 000000000..714db3bef --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0008-fortran-cross-compile-hack.patch @@ -0,0 +1,43 @@ +From f05062625e7a4751be723595a2f7a4b7fbeff311 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:20:01 +0400 +Subject: [PATCH] fortran cross-compile hack. + +* Fortran would have searched for arm-angstrom-gnueabi-gfortran but would have used +used gfortan. For gcc_4.2.2.bb we want to use the gfortran compiler from our cross +directory. 
+ +Signed-off-by: Khem Raj + +Upstream-Status: Inappropriate [embedded specific] +--- + libgfortran/configure | 2 +- + libgfortran/configure.ac | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/libgfortran/configure b/libgfortran/configure +index b4cf854ddb3..e8e0ac3b1cf 100755 +--- a/libgfortran/configure ++++ b/libgfortran/configure +@@ -13090,7 +13090,7 @@ esac + + # We need gfortran to compile parts of the library + #AC_PROG_FC(gfortran) +-FC="$GFORTRAN" ++#FC="$GFORTRAN" + ac_ext=${ac_fc_srcext-f} + ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' + ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +diff --git a/libgfortran/configure.ac b/libgfortran/configure.ac +index 711dc60ff78..3c9bbfbf47d 100644 +--- a/libgfortran/configure.ac ++++ b/libgfortran/configure.ac +@@ -258,7 +258,7 @@ AC_SUBST(enable_static) + + # We need gfortran to compile parts of the library + #AC_PROG_FC(gfortran) +-FC="$GFORTRAN" ++#FC="$GFORTRAN" + AC_PROG_FC(gfortran) + + # extra LD Flags which are required for targets diff --git a/poky/meta/recipes-devtools/gcc/gcc/0009-cpp-honor-sysroot.patch b/poky/meta/recipes-devtools/gcc/gcc/0009-cpp-honor-sysroot.patch new file mode 100644 index 000000000..8ad6853d8 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0009-cpp-honor-sysroot.patch @@ -0,0 +1,51 @@ +From 1d76de7f1f5c99f1fa1a4b14aedad3d702e4e136 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:22:00 +0400 +Subject: [PATCH] cpp: honor sysroot. + +Currently, if the gcc toolchain is relocated and installed from sstate, then you try and compile +preprocessed source (.i or .ii files), the compiler will try and access the builtin sysroot location +rather than the --sysroot option specified on the commandline. If access to that directory is +permission denied (unreadable), gcc will error. + +This happens when ccache is in use due to the fact it uses preprocessed source files. + +The fix below adds %I to the cpp-output spec macro so the default substitutions for -iprefix, +-isystem, -isysroot happen and the correct sysroot is used. + +[YOCTO #2074] + +RP 2012/04/13 + +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + gcc/cp/lang-specs.h | 2 +- + gcc/gcc.c | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/gcc/cp/lang-specs.h b/gcc/cp/lang-specs.h +index 0ad4a33b93e..16c744f4f90 100644 +--- a/gcc/cp/lang-specs.h ++++ b/gcc/cp/lang-specs.h +@@ -66,5 +66,5 @@ along with GCC; see the file COPYING3. 
If not see + {".ii", "@c++-cpp-output", 0, 0, 0}, + {"@c++-cpp-output", + "%{!E:%{!M:%{!MM:" +- " cc1plus -fpreprocessed %i %(cc1_options) %2" ++ " cc1plus -fpreprocessed %i %I %(cc1_options) %2" + " %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, +diff --git a/gcc/gcc.c b/gcc/gcc.c +index 48b0f9dde81..c87f603955f 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -1348,7 +1348,7 @@ static const struct compiler default_compilers[] = + %W{o*:--output-pch=%*}}%V}}}}}}}", 0, 0, 0}, + {".i", "@cpp-output", 0, 0, 0}, + {"@cpp-output", +- "%{!M:%{!MM:%{!E:cc1 -fpreprocessed %i %(cc1_options) %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, ++ "%{!M:%{!MM:%{!E:cc1 -fpreprocessed %i %I %(cc1_options) %{!fsyntax-only:%(invoke_as)}}}}", 0, 0, 0}, + {".s", "@assembler", 0, 0, 0}, + {"@assembler", + "%{!M:%{!MM:%{!E:%{!S:as %(asm_debug) %(asm_options) %i %A }}}}", 0, 0, 0}, diff --git a/poky/meta/recipes-devtools/gcc/gcc/0010-MIPS64-Default-to-N64-ABI.patch b/poky/meta/recipes-devtools/gcc/gcc/0010-MIPS64-Default-to-N64-ABI.patch new file mode 100644 index 000000000..625e2d870 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0010-MIPS64-Default-to-N64-ABI.patch @@ -0,0 +1,54 @@ +From 4fad4433c96bc9d0d9d124f9674fb3389f6f426e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:23:08 +0400 +Subject: [PATCH] MIPS64: Default to N64 ABI + +MIPS64 defaults to n32 ABI, this patch makes it +so that it defaults to N64 ABI + +Signed-off-by: Khem Raj + +Upstream-Status: Inappropriate [OE config specific] +--- + gcc/config.gcc | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index cf1a87e2efd..37c4221a39f 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -2511,29 +2511,29 @@ mips*-*-linux*) # Linux MIPS, either endian. 
+ default_mips_arch=mips32 + ;; + mips64el-st-linux-gnu) +- default_mips_abi=n32 ++ default_mips_abi=64 + tm_file="${tm_file} mips/st.h" + tmake_file="${tmake_file} mips/t-st" + enable_mips_multilibs="yes" + ;; + mips64octeon*-*-linux*) +- default_mips_abi=n32 ++ default_mips_abi=64 + tm_defines="${tm_defines} MIPS_CPU_STRING_DEFAULT=\\\"octeon\\\"" + target_cpu_default=MASK_SOFT_FLOAT_ABI + enable_mips_multilibs="yes" + ;; + mipsisa64r6*-*-linux*) +- default_mips_abi=n32 ++ default_mips_abi=64 + default_mips_arch=mips64r6 + enable_mips_multilibs="yes" + ;; + mipsisa64r2*-*-linux*) +- default_mips_abi=n32 ++ default_mips_abi=64 + default_mips_arch=mips64r2 + enable_mips_multilibs="yes" + ;; + mips64*-*-linux* | mipsisa64*-*-linux*) +- default_mips_abi=n32 ++ default_mips_abi=64 + enable_mips_multilibs="yes" + ;; + esac diff --git a/poky/meta/recipes-devtools/gcc/gcc/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch b/poky/meta/recipes-devtools/gcc/gcc/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch new file mode 100644 index 000000000..e35797633 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0011-Define-GLIBC_DYNAMIC_LINKER-and-UCLIBC_DYNAMIC_LINKE.patch @@ -0,0 +1,243 @@ +From 8fc016a53c22c19feccbfa13ebdf19090dc67058 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:24:50 +0400 +Subject: [PATCH] Define GLIBC_DYNAMIC_LINKER and UCLIBC_DYNAMIC_LINKER + relative to SYSTEMLIBS_DIR + +This patch defines GLIBC_DYNAMIC_LINKER and UCLIBC_DYNAMIC_LINKER +relative to SYSTEMLIBS_DIR which can be set in generated headers +This breaks the assumption of hardcoded multilib in gcc +Change is only for the supported architectures in OE including +SH, sparc, alpha for possible future support (if any) + +Removes the do_headerfix task in metadata + +Signed-off-by: Khem Raj + +Upstream-Status: Inappropriate [OE configuration] +--- + gcc/config/alpha/linux-elf.h | 4 ++-- + gcc/config/arm/linux-eabi.h | 4 ++-- + gcc/config/arm/linux-elf.h | 2 +- + gcc/config/i386/linux.h | 2 +- + gcc/config/i386/linux64.h | 6 +++--- + gcc/config/linux.h | 8 ++++---- + gcc/config/mips/linux.h | 12 ++++++------ + gcc/config/riscv/linux.h | 2 +- + gcc/config/rs6000/linux64.h | 15 +++++---------- + gcc/config/sh/linux.h | 2 +- + gcc/config/sparc/linux.h | 2 +- + gcc/config/sparc/linux64.h | 4 ++-- + 12 files changed, 29 insertions(+), 34 deletions(-) + +diff --git a/gcc/config/alpha/linux-elf.h b/gcc/config/alpha/linux-elf.h +index e25fcac3c59..01aca0c6542 100644 +--- a/gcc/config/alpha/linux-elf.h ++++ b/gcc/config/alpha/linux-elf.h +@@ -23,8 +23,8 @@ along with GCC; see the file COPYING3. If not see + #define EXTRA_SPECS \ + { "elf_dynamic_linker", ELF_DYNAMIC_LINKER }, + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" +-#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" ++#define UCLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-uClibc.so.0" + #if DEFAULT_LIBC == LIBC_UCLIBC + #define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" + #elif DEFAULT_LIBC == LIBC_GLIBC +diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h +index 5bdcfa0c5d3..0c0332f317f 100644 +--- a/gcc/config/arm/linux-eabi.h ++++ b/gcc/config/arm/linux-eabi.h +@@ -65,8 +65,8 @@ + GLIBC_DYNAMIC_LINKER_DEFAULT and TARGET_DEFAULT_FLOAT_ABI. 
*/ + + #undef GLIBC_DYNAMIC_LINKER +-#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT "/lib/ld-linux.so.3" +-#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT "/lib/ld-linux-armhf.so.3" ++#define GLIBC_DYNAMIC_LINKER_SOFT_FLOAT SYSTEMLIBS_DIR "ld-linux.so.3" ++#define GLIBC_DYNAMIC_LINKER_HARD_FLOAT SYSTEMLIBS_DIR "ld-linux-armhf.so.3" + #define GLIBC_DYNAMIC_LINKER_DEFAULT GLIBC_DYNAMIC_LINKER_SOFT_FLOAT + + #define GLIBC_DYNAMIC_LINKER \ +diff --git a/gcc/config/arm/linux-elf.h b/gcc/config/arm/linux-elf.h +index 0ec3aa53189..abfa9566d74 100644 +--- a/gcc/config/arm/linux-elf.h ++++ b/gcc/config/arm/linux-elf.h +@@ -60,7 +60,7 @@ + + #define LIBGCC_SPEC "%{mfloat-abi=soft*:-lfloat} -lgcc" + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" + + #define LINUX_TARGET_LINK_SPEC "%{h*} \ + %{static:-Bstatic} \ +diff --git a/gcc/config/i386/linux.h b/gcc/config/i386/linux.h +index 9f823f125ed..e0390b7d5e3 100644 +--- a/gcc/config/i386/linux.h ++++ b/gcc/config/i386/linux.h +@@ -20,7 +20,7 @@ along with GCC; see the file COPYING3. If not see + . */ + + #define GNU_USER_LINK_EMULATION "elf_i386" +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" + + #undef MUSL_DYNAMIC_LINKER + #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-i386.so.1" +diff --git a/gcc/config/i386/linux64.h b/gcc/config/i386/linux64.h +index 6cb68d1ccfa..7de09ec857c 100644 +--- a/gcc/config/i386/linux64.h ++++ b/gcc/config/i386/linux64.h +@@ -27,9 +27,9 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + #define GNU_USER_LINK_EMULATION64 "elf_x86_64" + #define GNU_USER_LINK_EMULATIONX32 "elf32_x86_64" + +-#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" +-#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux-x86-64.so.2" +-#define GLIBC_DYNAMIC_LINKERX32 "/libx32/ld-linux-x32.so.2" ++#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-linux-x86-64.so.2" ++#define GLIBC_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ld-linux-x32.so.2" + + #undef MUSL_DYNAMIC_LINKER32 + #define MUSL_DYNAMIC_LINKER32 "/lib/ld-musl-i386.so.1" +diff --git a/gcc/config/linux.h b/gcc/config/linux.h +index 95654bcdb5a..0c1a8118a26 100644 +--- a/gcc/config/linux.h ++++ b/gcc/config/linux.h +@@ -94,10 +94,10 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + GLIBC_DYNAMIC_LINKER must be defined for each target using them, or + GLIBC_DYNAMIC_LINKER32 and GLIBC_DYNAMIC_LINKER64 for targets + supporting both 32-bit and 64-bit compilation. */ +-#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" +-#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0" +-#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0" +-#define UCLIBC_DYNAMIC_LINKERX32 "/lib/ldx32-uClibc.so.0" ++#define UCLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-uClibc.so.0" ++#define UCLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-uClibc.so.0" ++#define UCLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld64-uClibc.so.0" ++#define UCLIBC_DYNAMIC_LINKERX32 SYSTEMLIBS_DIR "ldx32-uClibc.so.0" + #define BIONIC_DYNAMIC_LINKER "/system/bin/linker" + #define BIONIC_DYNAMIC_LINKER32 "/system/bin/linker" + #define BIONIC_DYNAMIC_LINKER64 "/system/bin/linker64" +diff --git a/gcc/config/mips/linux.h b/gcc/config/mips/linux.h +index 54446e58e5f..4786ee304c1 100644 +--- a/gcc/config/mips/linux.h ++++ b/gcc/config/mips/linux.h +@@ -22,20 +22,20 @@ along with GCC; see the file COPYING3. 
If not see + #define GNU_USER_LINK_EMULATIONN32 "elf32%{EB:b}%{EL:l}tsmipn32" + + #define GLIBC_DYNAMIC_LINKER32 \ +- "%{mnan=2008:/lib/ld-linux-mipsn8.so.1;:/lib/ld.so.1}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" + #define GLIBC_DYNAMIC_LINKER64 \ +- "%{mnan=2008:/lib64/ld-linux-mipsn8.so.1;:/lib64/ld.so.1}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" + #define GLIBC_DYNAMIC_LINKERN32 \ +- "%{mnan=2008:/lib32/ld-linux-mipsn8.so.1;:/lib32/ld.so.1}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-linux-mipsn8.so.1;:" SYSTEMLIBS_DIR "ld.so.1}" + + #undef UCLIBC_DYNAMIC_LINKER32 + #define UCLIBC_DYNAMIC_LINKER32 \ +- "%{mnan=2008:/lib/ld-uClibc-mipsn8.so.0;:/lib/ld-uClibc.so.0}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld-uClibc.so.0}" + #undef UCLIBC_DYNAMIC_LINKER64 + #define UCLIBC_DYNAMIC_LINKER64 \ +- "%{mnan=2008:/lib/ld64-uClibc-mipsn8.so.0;:/lib/ld64-uClibc.so.0}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld64-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld64-uClibc.so.0}" + #define UCLIBC_DYNAMIC_LINKERN32 \ +- "%{mnan=2008:/lib32/ld-uClibc-mipsn8.so.0;:/lib32/ld-uClibc.so.0}" ++ "%{mnan=2008:" SYSTEMLIBS_DIR "ld-uClibc-mipsn8.so.0;:" SYSTEMLIBS_DIR "ld-uClibc.so.0}" + + #undef MUSL_DYNAMIC_LINKER32 + #define MUSL_DYNAMIC_LINKER32 \ +diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h +index 4afef7c228c..01997330741 100644 +--- a/gcc/config/riscv/linux.h ++++ b/gcc/config/riscv/linux.h +@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3. If not see + GNU_USER_TARGET_OS_CPP_BUILTINS(); \ + } while (0) + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1" + + #define MUSL_ABI_SUFFIX \ + "%{mabi=ilp32:-sf}" \ +diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h +index 34776c8421e..967c1c43c63 100644 +--- a/gcc/config/rs6000/linux64.h ++++ b/gcc/config/rs6000/linux64.h +@@ -419,24 +419,19 @@ extern int dot_symbols; + #undef LINK_OS_DEFAULT_SPEC + #define LINK_OS_DEFAULT_SPEC "%(link_os_linux)" + +-#define GLIBC_DYNAMIC_LINKER32 "%(dynamic_linker_prefix)/lib/ld.so.1" +- ++#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld.so.1" + #ifdef LINUX64_DEFAULT_ABI_ELFv2 +-#define GLIBC_DYNAMIC_LINKER64 \ +-"%{mabi=elfv1:%(dynamic_linker_prefix)/lib64/ld64.so.1;" \ +-":%(dynamic_linker_prefix)/lib64/ld64.so.2}" ++#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv1:" SYSTEMLIBS_DIR "ld64.so.1;:" SYSTEMLIBS_DIR "ld64.so.2}" + #else +-#define GLIBC_DYNAMIC_LINKER64 \ +-"%{mabi=elfv2:%(dynamic_linker_prefix)/lib64/ld64.so.2;" \ +-":%(dynamic_linker_prefix)/lib64/ld64.so.1}" ++#define GLIBC_DYNAMIC_LINKER64 "%{mabi=elfv2:" SYSTEMLIBS_DIR "ld64.so.2;:" SYSTEMLIBS_DIR "ld64.so.1}" + #endif + + #undef MUSL_DYNAMIC_LINKER32 + #define MUSL_DYNAMIC_LINKER32 \ +- "/lib/ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" ++ SYSTEMLIBS_DIR "ld-musl-powerpc" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" + #undef MUSL_DYNAMIC_LINKER64 + #define MUSL_DYNAMIC_LINKER64 \ +- "/lib/ld-musl-powerpc64" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" ++ SYSTEMLIBS_DIR "ld-musl-powerpc64" MUSL_DYNAMIC_LINKER_E "%{msoft-float:-sf}.so.1" + + #undef DEFAULT_ASM_ENDIAN + #if (TARGET_DEFAULT & MASK_LITTLE_ENDIAN) +diff --git a/gcc/config/sh/linux.h b/gcc/config/sh/linux.h +index c1d0441d488..81373eb8336 100644 +--- a/gcc/config/sh/linux.h ++++ b/gcc/config/sh/linux.h +@@ 
-64,7 +64,7 @@ along with GCC; see the file COPYING3. If not see + "/lib/ld-musl-sh" MUSL_DYNAMIC_LINKER_E MUSL_DYNAMIC_LINKER_FP \ + "%{mfdpic:-fdpic}.so.1" + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" + + #undef SUBTARGET_LINK_EMUL_SUFFIX + #define SUBTARGET_LINK_EMUL_SUFFIX "%{mfdpic:_fd;:_linux}" +diff --git a/gcc/config/sparc/linux.h b/gcc/config/sparc/linux.h +index 81201e67a2f..8b6fc577594 100644 +--- a/gcc/config/sparc/linux.h ++++ b/gcc/config/sparc/linux.h +@@ -84,7 +84,7 @@ extern const char *host_detect_local_cpu (int argc, const char **argv); + When the -shared link option is used a final link is not being + done. */ + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux.so.2" + + #undef LINK_SPEC + #define LINK_SPEC "-m elf32_sparc %{shared:-shared} \ +diff --git a/gcc/config/sparc/linux64.h b/gcc/config/sparc/linux64.h +index a1a0efd8f28..85d1084afc2 100644 +--- a/gcc/config/sparc/linux64.h ++++ b/gcc/config/sparc/linux64.h +@@ -84,8 +84,8 @@ along with GCC; see the file COPYING3. If not see + When the -shared link option is used a final link is not being + done. */ + +-#define GLIBC_DYNAMIC_LINKER32 "/lib/ld-linux.so.2" +-#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER32 SYSTEMLIBS_DIR "ld-linux.so.2" ++#define GLIBC_DYNAMIC_LINKER64 SYSTEMLIBS_DIR "ld-linux.so.2" + + #ifdef SPARC_BI_ARCH + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0012-gcc-Fix-argument-list-too-long-error.patch b/poky/meta/recipes-devtools/gcc/gcc/0012-gcc-Fix-argument-list-too-long-error.patch new file mode 100644 index 000000000..88e1715b5 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0012-gcc-Fix-argument-list-too-long-error.patch @@ -0,0 +1,41 @@ +From a22d1264049d29b90663cf5667049ae6f9b7a5ce Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:26:37 +0400 +Subject: [PATCH] gcc: Fix argument list too long error. + +There would be an "Argument list too long" error when the +build directory is longer than 200, this is caused by: + +headers=`echo $(PLUGIN_HEADERS) | tr ' ' '\012' | sort -u` + +The PLUGIN_HEADERS is too long before sort, so the "echo" can't handle +it, use the $(sort list) of GNU make which can handle the too long list +would fix the problem, the header would be short enough after sorted. +The "tr ' ' '\012'" was used for translating the space to "\n", the +$(sort list) doesn't need this. + +Signed-off-by: Robert Yang +Signed-off-by: Khem Raj + +RP: gcc then added *.h and *.def additions to this list, breaking the original +fix. Add the sort to the original gcc code, leaving the tr+sort to fix the original +issue but include the new files too as reported by Zhuang + +Upstream-Status: Pending +--- + gcc/Makefile.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index a67d2cc18d6..480c9366418 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -3606,7 +3606,7 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype + # We keep the directory structure for files in config or c-family and .def + # files. All other files are flattened to a single directory. 
+ $(mkinstalldirs) $(DESTDIR)$(plugin_includedir) +- headers=`echo $(PLUGIN_HEADERS) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \ ++ headers=`echo $(sort $(PLUGIN_HEADERS)) $$(cd $(srcdir); echo *.h *.def) | tr ' ' '\012' | sort -u`; \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`; \ + for file in $$headers; do \ + if [ -f $$file ] ; then \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0013-Disable-sdt.patch b/poky/meta/recipes-devtools/gcc/gcc/0013-Disable-sdt.patch new file mode 100644 index 000000000..207cdb57a --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0013-Disable-sdt.patch @@ -0,0 +1,110 @@ +From fa47586935a18ecfc2ad5586802e326e21741b7b Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:28:10 +0400 +Subject: [PATCH] Disable sdt. + +We don't list dtrace in DEPENDS so we shouldn't be depending on this header. +It may or may not exist from preivous builds though. To be determinstic, disable +sdt.h usage always. This avoids build failures if the header is removed after configure +but before libgcc is compiled for example. + +RP 2012/8/7 + +Signed-off-by: Khem Raj + +Disable sdt for libstdc++-v3. + +Signed-off-by: Robert Yang + +Upstream-Status: Inappropriate [hack] +--- + gcc/configure | 12 ++++++------ + gcc/configure.ac | 18 +++++++++--------- + libstdc++-v3/configure | 6 +++--- + libstdc++-v3/configure.ac | 2 +- + 4 files changed, 19 insertions(+), 19 deletions(-) + +diff --git a/gcc/configure b/gcc/configure +index b26e8fc7fee..6080f86145e 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -29789,12 +29789,12 @@ fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking sys/sdt.h in the target C library" >&5 + $as_echo_n "checking sys/sdt.h in the target C library... " >&6; } + have_sys_sdt_h=no +-if test -f $target_header_dir/sys/sdt.h; then +- have_sys_sdt_h=yes +- +-$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h +- +-fi ++#if test -f $target_header_dir/sys/sdt.h; then ++# have_sys_sdt_h=yes ++# ++#$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h ++# ++#fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_sys_sdt_h" >&5 + $as_echo "$have_sys_sdt_h" >&6; } + +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 26fa46802c7..42be5252778 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -6190,15 +6190,15 @@ fi + AC_SUBST([enable_default_ssp]) + + # Test for on the target. +-GCC_TARGET_TEMPLATE([HAVE_SYS_SDT_H]) +-AC_MSG_CHECKING(sys/sdt.h in the target C library) +-have_sys_sdt_h=no +-if test -f $target_header_dir/sys/sdt.h; then +- have_sys_sdt_h=yes +- AC_DEFINE(HAVE_SYS_SDT_H, 1, +- [Define if your target C library provides sys/sdt.h]) +-fi +-AC_MSG_RESULT($have_sys_sdt_h) ++#GCC_TARGET_TEMPLATE([HAVE_SYS_SDT_H]) ++#AC_MSG_CHECKING(sys/sdt.h in the target C library) ++#have_sys_sdt_h=no ++#if test -f $target_header_dir/sys/sdt.h; then ++# have_sys_sdt_h=yes ++# AC_DEFINE(HAVE_SYS_SDT_H, 1, ++# [Define if your target C library provides sys/sdt.h]) ++#fi ++#AC_MSG_RESULT($have_sys_sdt_h) + + # Check if TFmode long double should be used by default or not. 
+ # Some glibc targets used DFmode long double, but with glibc 2.4 +diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure +index 9f9c5a2419a..71ed13b815b 100755 +--- a/libstdc++-v3/configure ++++ b/libstdc++-v3/configure +@@ -22615,11 +22615,11 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' + ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' + ac_compiler_gnu=$ac_cv_c_compiler_gnu + +- if test $glibcxx_cv_sys_sdt_h = yes; then ++# if test $glibcxx_cv_sys_sdt_h = yes; then + +-$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h ++#$as_echo "#define HAVE_SYS_SDT_H 1" >>confdefs.h + +- fi ++# fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $glibcxx_cv_sys_sdt_h" >&5 + $as_echo "$glibcxx_cv_sys_sdt_h" >&6; } + +diff --git a/libstdc++-v3/configure.ac b/libstdc++-v3/configure.ac +index 699e55fd829..5c7a7bda439 100644 +--- a/libstdc++-v3/configure.ac ++++ b/libstdc++-v3/configure.ac +@@ -241,7 +241,7 @@ GLIBCXX_CHECK_SC_NPROCESSORS_ONLN + GLIBCXX_CHECK_SC_NPROC_ONLN + GLIBCXX_CHECK_PTHREADS_NUM_PROCESSORS_NP + GLIBCXX_CHECK_SYSCTL_HW_NCPU +-GLIBCXX_CHECK_SDT_H ++#GLIBCXX_CHECK_SDT_H + + # Check for available headers. + AC_CHECK_HEADERS([endian.h execinfo.h float.h fp.h ieeefp.h inttypes.h \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0014-libtool.patch b/poky/meta/recipes-devtools/gcc/gcc/0014-libtool.patch new file mode 100644 index 000000000..f4e70c3b1 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0014-libtool.patch @@ -0,0 +1,39 @@ +From 6ecd478881468934444ff85611fd43f7033b1e81 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:29:11 +0400 +Subject: [PATCH] libtool + +libstdc++ from gcc-runtime gets created with -rpath=/usr/lib/../lib for qemux86-64 +when running on am x86_64 build host. + +This patch stops this speading to libdir in the libstdc++.la file within libtool. +Arguably, it shouldn't be passing this into libtool in the first place but +for now this resolves the nastiest problems this causes. + +func_normal_abspath would resolve an empty path to `pwd` so we need +to filter the zero case. + +RP 2012/8/24 + +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + ltmain.sh | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/ltmain.sh b/ltmain.sh +index 70990740b6c..ee938056bef 100644 +--- a/ltmain.sh ++++ b/ltmain.sh +@@ -6359,6 +6359,10 @@ func_mode_link () + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" ++ if test -n "$install_libdir"; then ++ func_normal_abspath "$install_libdir" ++ install_libdir=$func_normal_abspath_result ++ fi + + oldlibs= + if test -z "$rpath"; then diff --git a/poky/meta/recipes-devtools/gcc/gcc/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch b/poky/meta/recipes-devtools/gcc/gcc/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch new file mode 100644 index 000000000..bc2674abc --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0015-gcc-armv4-pass-fix-v4bx-to-linker-to-support-EABI.patch @@ -0,0 +1,40 @@ +From de4427fa49c07dc651ee6ceaf5c5078700ca3b08 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:30:32 +0400 +Subject: [PATCH] gcc: armv4: pass fix-v4bx to linker to support EABI. + +The LINK_SPEC for linux gets overwritten by linux-eabi.h which +means the value of TARGET_FIX_V4BX_SPEC gets lost and as a result +the option is not passed to linker when chosing march=armv4 +This patch redefines this in linux-eabi.h and reinserts it +for eabi defaulting toolchains. 
+ +We might want to send it upstream. + +Signed-off-by: Khem Raj + +Upstream-Status: Pending +--- + gcc/config/arm/linux-eabi.h | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +diff --git a/gcc/config/arm/linux-eabi.h b/gcc/config/arm/linux-eabi.h +index 0c0332f317f..7b3769e8459 100644 +--- a/gcc/config/arm/linux-eabi.h ++++ b/gcc/config/arm/linux-eabi.h +@@ -91,10 +91,14 @@ + #define MUSL_DYNAMIC_LINKER \ + "/lib/ld-musl-arm" MUSL_DYNAMIC_LINKER_E "%{mfloat-abi=hard:hf}%{mfdpic:-fdpic}.so.1" + ++/* For armv4 we pass --fix-v4bx to linker to support EABI */ ++#undef TARGET_FIX_V4BX_SPEC ++#define TARGET_FIX_V4BX_SPEC "%{mcpu=arm8|mcpu=arm810|mcpu=strongarm*|march=armv4: --fix-v4bx}" ++ + /* At this point, bpabi.h will have clobbered LINK_SPEC. We want to + use the GNU/Linux version, not the generic BPABI version. */ + #undef LINK_SPEC +-#define LINK_SPEC EABI_LINK_SPEC \ ++#define LINK_SPEC TARGET_FIX_V4BX_SPEC EABI_LINK_SPEC \ + LINUX_OR_ANDROID_LD (LINUX_TARGET_LINK_SPEC, \ + LINUX_TARGET_LINK_SPEC " " ANDROID_LINK_SPEC) + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch b/poky/meta/recipes-devtools/gcc/gcc/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch new file mode 100644 index 000000000..1dc4bb859 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0016-Use-the-multilib-config-files-from-B-instead-of-usin.patch @@ -0,0 +1,99 @@ +From 6b363c2c1c089ee900efa6013aefba1003840a37 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 29 Mar 2013 09:33:04 +0400 +Subject: [PATCH] Use the multilib config files from ${B} instead of using the + ones from ${S} + +Use the multilib config files from ${B} instead of using the ones from ${S} +so that the source can be shared between gcc-cross-initial, +gcc-cross-intermediate, gcc-cross, gcc-runtime, and also the sdk build. 
+ +Signed-off-by: Khem Raj +Signed-off-by: Constantin Musca + +Upstream-Status: Inappropriate [configuration] +--- + gcc/configure | 22 ++++++++++++++++++---- + gcc/configure.ac | 22 ++++++++++++++++++---- + 2 files changed, 36 insertions(+), 8 deletions(-) + +diff --git a/gcc/configure b/gcc/configure +index 6080f86145e..825a9652329 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -12685,10 +12685,20 @@ done + tmake_file_= + for f in ${tmake_file} + do +- if test -f ${srcdir}/config/$f +- then +- tmake_file_="${tmake_file_} \$(srcdir)/config/$f" +- fi ++ case $f in ++ */t-linux64 ) ++ if test -f ./config/$f ++ then ++ tmake_file_="${tmake_file_} ./config/$f" ++ fi ++ ;; ++ * ) ++ if test -f ${srcdir}/config/$f ++ then ++ tmake_file_="${tmake_file_} \$(srcdir)/config/$f" ++ fi ++ ;; ++ esac + done + tmake_file="${tmake_file_}${omp_device_property_tmake_file}" + +@@ -12699,6 +12709,10 @@ tm_file_list="options.h" + tm_include_list="options.h insn-constants.h" + for f in $tm_file; do + case $f in ++ */linux64.h ) ++ tm_file_list="${tm_file_list} ./config/$f" ++ tm_include_list="${tm_include_list} ./config/$f" ++ ;; + ./* ) + f=`echo $f | sed 's/^..//'` + tm_file_list="${tm_file_list} $f" +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 42be5252778..6099eb3251f 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -2118,10 +2118,20 @@ done + tmake_file_= + for f in ${tmake_file} + do +- if test -f ${srcdir}/config/$f +- then +- tmake_file_="${tmake_file_} \$(srcdir)/config/$f" +- fi ++ case $f in ++ */t-linux64 ) ++ if test -f ./config/$f ++ then ++ tmake_file_="${tmake_file_} ./config/$f" ++ fi ++ ;; ++ * ) ++ if test -f ${srcdir}/config/$f ++ then ++ tmake_file_="${tmake_file_} \$(srcdir)/config/$f" ++ fi ++ ;; ++ esac + done + tmake_file="${tmake_file_}${omp_device_property_tmake_file}" + +@@ -2132,6 +2142,10 @@ tm_file_list="options.h" + tm_include_list="options.h insn-constants.h" + for f in $tm_file; do + case $f in ++ */linux64.h ) ++ tm_file_list="${tm_file_list} ./config/$f" ++ tm_include_list="${tm_include_list} ./config/$f" ++ ;; + ./* ) + f=`echo $f | sed 's/^..//'` + tm_file_list="${tm_file_list} $f" diff --git a/poky/meta/recipes-devtools/gcc/gcc/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch b/poky/meta/recipes-devtools/gcc/gcc/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch new file mode 100644 index 000000000..05f12847e --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0017-Avoid-using-libdir-from-.la-which-usually-points-to-.patch @@ -0,0 +1,28 @@ +From 08752c2f1d21553301bee5757c453c6a36cbe03c Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 20 Feb 2015 09:39:38 +0000 +Subject: [PATCH] Avoid using libdir from .la which usually points to a host + path + +Upstream-Status: Inappropriate [embedded specific] + +Signed-off-by: Jonathan Liu +Signed-off-by: Khem Raj +--- + ltmain.sh | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/ltmain.sh b/ltmain.sh +index ee938056bef..9ebc7e3d1e0 100644 +--- a/ltmain.sh ++++ b/ltmain.sh +@@ -5628,6 +5628,9 @@ func_mode_link () + absdir="$abs_ladir" + libdir="$abs_ladir" + else ++ # Instead of using libdir from .la which usually points to a host path, ++ # use the path the .la is contained in. 
++ libdir="$abs_ladir" + dir="$libdir" + absdir="$libdir" + fi diff --git a/poky/meta/recipes-devtools/gcc/gcc/0018-export-CPP.patch b/poky/meta/recipes-devtools/gcc/gcc/0018-export-CPP.patch new file mode 100644 index 000000000..886a1221d --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0018-export-CPP.patch @@ -0,0 +1,50 @@ +From 5c3d66378c7ff60ca11a875aa4aa6f8a8529d43a Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 20 Feb 2015 09:40:59 +0000 +Subject: [PATCH] export CPP + +The OE environment sets and exports CPP as being the target gcc. When +building gcc-cross-canadian for a mingw targetted sdk, the following can be found +in build.x86_64-pokysdk-mingw32.i586-poky-linux/build-x86_64-linux/libiberty/config.log: + +configure:3641: checking for _FILE_OFFSET_BITS value needed for large files +configure:3666: gcc -c -isystem/media/build1/poky/build/tmp/sysroots/x86_64-linux/usr/include -O2 -pipe conftest.c >&5 +configure:3666: $? = 0 +configure:3698: result: no +configure:3786: checking how to run the C preprocessor +configure:3856: result: x86_64-pokysdk-mingw32-gcc -E --sysroot=/media/build1/poky/build/tmp/sysroots/x86_64-nativesdk-mingw32-pokysdk-mingw32 +configure:3876: x86_64-pokysdk-mingw32-gcc -E --sysroot=/media/build1/poky/build/tmp/sysroots/x86_64-nativesdk-mingw32-pokysdk-mingw32 conftest.c +configure:3876: $? = 0 + +Note this is a *build* target (in build-x86_64-linux) so it should be +using the host "gcc", not x86_64-pokysdk-mingw32-gcc. Since the mingw32 +headers are very different, using the wrong cpp is a real problem. It is leaking +into configure through the CPP variable. Ultimately this leads to build +failures related to not being able to include a process.h file for pem-unix.c. + +The fix is to ensure we export a sane CPP value into the build +environment when using build targets. We could define a CPP_FOR_BUILD value which may be +the version which needs to be upstreamed but for now, this fix is good enough to +avoid the problem. + +RP 22/08/2013 + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + Makefile.in | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/Makefile.in b/Makefile.in +index 36e369df6e7..c717903bb13 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -149,6 +149,7 @@ BUILD_EXPORTS = \ + AR="$(AR_FOR_BUILD)"; export AR; \ + AS="$(AS_FOR_BUILD)"; export AS; \ + CC="$(CC_FOR_BUILD)"; export CC; \ ++ CPP="$(CC_FOR_BUILD) -E"; export CPP; \ + CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ + CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ + CXX="$(CXX_FOR_BUILD)"; export CXX; \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0019-Ensure-target-gcc-headers-can-be-included.patch b/poky/meta/recipes-devtools/gcc/gcc/0019-Ensure-target-gcc-headers-can-be-included.patch new file mode 100644 index 000000000..2797b2c22 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0019-Ensure-target-gcc-headers-can-be-included.patch @@ -0,0 +1,57 @@ +From 378b752c5d9a3dba4e58cdadf8b4b4f34ea99a76 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 20 Feb 2015 10:25:11 +0000 +Subject: [PATCH] Ensure target gcc headers can be included + +There are a few headers installed as part of the OpenEmbedded +gcc-runtime target (omp.h, ssp/*.h). Being installed from a recipe +built for the target architecture, these are within the target +sysroot and not cross/nativesdk; thus they weren't able to be +found by gcc with the existing search paths. Add support for +picking up these headers under the sysroot supplied on the gcc +command line in order to resolve this. 
+ +Upstream-Status: Pending + +Signed-off-by: Paul Eggleton +Signed-off-by: Khem Raj +--- + gcc/Makefile.in | 2 ++ + gcc/cppdefault.c | 4 ++++ + 2 files changed, 6 insertions(+) + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 480c9366418..011c7ac2db6 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -618,6 +618,7 @@ libexecdir = @libexecdir@ + + # Directory in which the compiler finds libraries etc. + libsubdir = $(libdir)/gcc/$(real_target_noncanonical)/$(version)$(accel_dir_suffix) ++libsubdir_target = $(target_noncanonical)/$(version) + # Directory in which the compiler finds executables + libexecsubdir = $(libexecdir)/gcc/$(real_target_noncanonical)/$(version)$(accel_dir_suffix) + # Directory in which all plugin resources are installed +@@ -2946,6 +2947,7 @@ CFLAGS-intl.o += -DLOCALEDIR=\"$(localedir)\" + + PREPROCESSOR_DEFINES = \ + -DGCC_INCLUDE_DIR=\"$(libsubdir)/include\" \ ++ -DGCC_INCLUDE_SUBDIR_TARGET=\"$(libsubdir_target)/include\" \ + -DFIXED_INCLUDE_DIR=\"$(libsubdir)/include-fixed\" \ + -DGPLUSPLUS_INCLUDE_DIR=\"$(gcc_gxx_include_dir)\" \ + -DGPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT=$(gcc_gxx_include_dir_add_sysroot) \ +diff --git a/gcc/cppdefault.c b/gcc/cppdefault.c +index af38cc494ea..2f43b88a0c3 100644 +--- a/gcc/cppdefault.c ++++ b/gcc/cppdefault.c +@@ -59,6 +59,10 @@ const struct default_include cpp_include_defaults[] + /* This is the dir for gcc's private headers. */ + { GCC_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, + #endif ++#ifdef GCC_INCLUDE_SUBDIR_TARGET ++ /* This is the dir for gcc's private headers under the specified sysroot. */ ++ { STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET, "GCC", 0, 0, 1, 0 }, ++#endif + #ifdef LOCAL_INCLUDE_DIR + /* /usr/local/include comes before the fixincluded header files. */ + { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 }, diff --git a/poky/meta/recipes-devtools/gcc/gcc/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch b/poky/meta/recipes-devtools/gcc/gcc/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch new file mode 100644 index 000000000..c3baf8b45 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0020-Don-t-search-host-directory-during-relink-if-inst_pr.patch @@ -0,0 +1,35 @@ +From 870e805d705d99d9b9d7dbd09727f9c1d2ad9c1d Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 3 Mar 2015 08:21:19 +0000 +Subject: [PATCH] Don't search host directory during "relink" if $inst_prefix + is provided + +http://lists.gnu.org/archive/html/libtool-patches/2011-01/msg00026.html + +Upstream-Status: Submitted + +Signed-off-by: Khem Raj +--- + ltmain.sh | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/ltmain.sh b/ltmain.sh +index 9ebc7e3d1e0..7ea79fa8be6 100644 +--- a/ltmain.sh ++++ b/ltmain.sh +@@ -6004,12 +6004,13 @@ func_mode_link () + fi + else + # We cannot seem to hardcode it, guess we'll fake it. ++ # Default if $libdir is not relative to the prefix: + add_dir="-L$libdir" +- # Try looking first in the location we're being installed to. 
++ + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) +- add_dir="$add_dir -L$inst_prefix_dir$libdir" ++ add_dir="-L$inst_prefix_dir$libdir" + ;; + esac + fi diff --git a/poky/meta/recipes-devtools/gcc/gcc/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch b/poky/meta/recipes-devtools/gcc/gcc/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch new file mode 100644 index 000000000..abee48669 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0021-Use-SYSTEMLIBS_DIR-replacement-instead-of-hardcoding.patch @@ -0,0 +1,26 @@ +From aba42de763a619355471efd1573561b0cbf51162 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 28 Apr 2015 23:15:27 -0700 +Subject: [PATCH] Use SYSTEMLIBS_DIR replacement instead of hardcoding + base_libdir + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + gcc/config/aarch64/aarch64-linux.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h +index e587e2e9ad6..ddc62895693 100644 +--- a/gcc/config/aarch64/aarch64-linux.h ++++ b/gcc/config/aarch64/aarch64-linux.h +@@ -21,7 +21,7 @@ + #ifndef GCC_AARCH64_LINUX_H + #define GCC_AARCH64_LINUX_H + +-#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" ++#define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" + + #undef MUSL_DYNAMIC_LINKER + #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" diff --git a/poky/meta/recipes-devtools/gcc/gcc/0022-aarch64-Add-support-for-musl-ldso.patch b/poky/meta/recipes-devtools/gcc/gcc/0022-aarch64-Add-support-for-musl-ldso.patch new file mode 100644 index 000000000..c55b66d4b --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0022-aarch64-Add-support-for-musl-ldso.patch @@ -0,0 +1,25 @@ +From d63820a78d92f302410358293546f01c7ad17bd8 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 28 Apr 2015 23:18:39 -0700 +Subject: [PATCH] aarch64: Add support for musl ldso + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + gcc/config/aarch64/aarch64-linux.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h +index ddc62895693..b301825313a 100644 +--- a/gcc/config/aarch64/aarch64-linux.h ++++ b/gcc/config/aarch64/aarch64-linux.h +@@ -24,7 +24,7 @@ + #define GLIBC_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-linux-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" + + #undef MUSL_DYNAMIC_LINKER +-#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" ++#define MUSL_DYNAMIC_LINKER SYSTEMLIBS_DIR "ld-musl-aarch64%{mbig-endian:_be}%{mabi=ilp32:_ilp32}.so.1" + + #undef ASAN_CC1_SPEC + #define ASAN_CC1_SPEC "%{%:sanitize(address):-funwind-tables}" diff --git a/poky/meta/recipes-devtools/gcc/gcc/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch b/poky/meta/recipes-devtools/gcc/gcc/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch new file mode 100644 index 000000000..80c4d2292 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0023-libcc1-fix-libcc1-s-install-path-and-rpath.patch @@ -0,0 +1,51 @@ +From 3474e16ad4ea8cf4e0e330568e3bc9039e723dce Mon Sep 17 00:00:00 2001 +From: Robert Yang +Date: Sun, 5 Jul 2015 20:25:18 -0700 +Subject: [PATCH] libcc1: fix libcc1's install path and rpath + +* Install libcc1.so and libcc1plugin.so into + 
$(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version), as what we + had done to lto-plugin. +* Fix bad RPATH iussue: + gcc-5.2.0: package gcc-plugins contains bad RPATH /patht/to/tmp/sysroots/qemux86-64/usr/lib64/../lib64 in file + /path/to/gcc/5.2.0-r0/packages-split/gcc-plugins/usr/lib64/gcc/x86_64-poky-linux/5.2.0/plugin/libcc1plugin.so.0.0.0 + [rpaths] + +Upstream-Status: Inappropriate [OE configuration] + +Signed-off-by: Robert Yang +--- + libcc1/Makefile.am | 4 ++-- + libcc1/Makefile.in | 4 ++-- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/libcc1/Makefile.am b/libcc1/Makefile.am +index c005b0dad4a..ec31d35b7b9 100644 +--- a/libcc1/Makefile.am ++++ b/libcc1/Makefile.am +@@ -37,8 +37,8 @@ libiberty = $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ + $(Wc)$(libiberty_normal))) + libiberty_dep = $(patsubst $(Wc)%,%,$(libiberty)) + +-plugindir = $(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/plugin +-cc1libdir = $(libdir)/$(libsuffix) ++cc1libdir = $(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version) ++plugindir = $(cc1libdir) + + if ENABLE_PLUGIN + plugin_LTLIBRARIES = libcc1plugin.la libcp1plugin.la +diff --git a/libcc1/Makefile.in b/libcc1/Makefile.in +index 7104b649026..2103c477468 100644 +--- a/libcc1/Makefile.in ++++ b/libcc1/Makefile.in +@@ -393,8 +393,8 @@ libiberty = $(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \ + $(Wc)$(libiberty_normal))) + + libiberty_dep = $(patsubst $(Wc)%,%,$(libiberty)) +-plugindir = $(libdir)/gcc/$(target_noncanonical)/$(gcc_version)/plugin +-cc1libdir = $(libdir)/$(libsuffix) ++cc1libdir = $(libexecdir)/gcc/$(target_noncanonical)/$(gcc_version) ++plugindir = $(cc1libdir) + @ENABLE_PLUGIN_TRUE@plugin_LTLIBRARIES = libcc1plugin.la libcp1plugin.la + @ENABLE_PLUGIN_TRUE@cc1lib_LTLIBRARIES = libcc1.la + shared_source = callbacks.cc callbacks.hh connection.cc connection.hh \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0024-handle-sysroot-support-for-nativesdk-gcc.patch b/poky/meta/recipes-devtools/gcc/gcc/0024-handle-sysroot-support-for-nativesdk-gcc.patch new file mode 100644 index 000000000..1a65ece7b --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0024-handle-sysroot-support-for-nativesdk-gcc.patch @@ -0,0 +1,346 @@ +From bb1f359e34649516e61305e9748534cce7e0ee70 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 7 Dec 2015 23:39:54 +0000 +Subject: [PATCH] handle sysroot support for nativesdk-gcc + +Being able to build a nativesdk gcc is useful, particularly in cases +where the host compiler may be of an incompatible version (or a 32 +bit compiler is needed). + +Sadly, building nativesdk-gcc is not straight forward. We install +nativesdk-gcc into a relocatable location and this means that its +library locations can change. "Normal" sysroot support doesn't help +in this case since the values of paths like "libdir" change, not just +base root directory of the system. + +In order to handle this we do two things: + +a) Add %r into spec file markup which can be used for injected paths + such as SYSTEMLIBS_DIR (see gcc_multilib_setup()). +b) Add other paths which need relocation into a .gccrelocprefix section + which the relocation code will notice and adjust automatically. + +Upstream-Status: Inappropriate +RP 2015/7/28 + +Signed-off-by: Khem Raj + +Added PREFIXVAR and EXEC_PREFIXVAR to support runtime relocation. Without +these as part of the gccrelocprefix the system can't do runtime relocation +if the executable is moved. (These paths were missed in the original +implementation.) 
+ +Signed-off-by: Mark Hatle +--- + gcc/c-family/c-opts.c | 4 +-- + gcc/cppdefault.c | 63 ++++++++++++++++++++++++++----------------- + gcc/cppdefault.h | 13 ++++----- + gcc/gcc.c | 20 +++++++++----- + gcc/incpath.c | 12 ++++----- + gcc/prefix.c | 6 +++-- + 6 files changed, 70 insertions(+), 48 deletions(-) + +diff --git a/gcc/c-family/c-opts.c b/gcc/c-family/c-opts.c +index 58ba0948e79..806bbcfb7a5 100644 +--- a/gcc/c-family/c-opts.c ++++ b/gcc/c-family/c-opts.c +@@ -1409,8 +1409,8 @@ add_prefixed_path (const char *suffix, incpath_kind chain) + size_t prefix_len, suffix_len; + + suffix_len = strlen (suffix); +- prefix = iprefix ? iprefix : cpp_GCC_INCLUDE_DIR; +- prefix_len = iprefix ? strlen (iprefix) : cpp_GCC_INCLUDE_DIR_len; ++ prefix = iprefix ? iprefix : GCC_INCLUDE_DIRVAR; ++ prefix_len = iprefix ? strlen (iprefix) : strlen(GCC_INCLUDE_DIRVAR) - 7; + + path = (char *) xmalloc (prefix_len + suffix_len + 1); + memcpy (path, prefix, prefix_len); +diff --git a/gcc/cppdefault.c b/gcc/cppdefault.c +index 2f43b88a0c3..6b6be04686c 100644 +--- a/gcc/cppdefault.c ++++ b/gcc/cppdefault.c +@@ -35,6 +35,30 @@ + # undef CROSS_INCLUDE_DIR + #endif + ++static char GPLUSPLUS_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_INCLUDE_DIR; ++char GCC_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GCC_INCLUDE_DIR; ++static char GPLUSPLUS_TOOL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_TOOL_INCLUDE_DIR; ++static char GPLUSPLUS_BACKWARD_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = GPLUSPLUS_BACKWARD_INCLUDE_DIR; ++static char STANDARD_STARTFILE_PREFIX_2VAR[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET; ++#ifdef LOCAL_INCLUDE_DIR ++static char LOCAL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = LOCAL_INCLUDE_DIR; ++#endif ++#ifdef PREFIX_INCLUDE_DIR ++static char PREFIX_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX_INCLUDE_DIR; ++#endif ++#ifdef FIXED_INCLUDE_DIR ++static char FIXED_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = FIXED_INCLUDE_DIR; ++#endif ++#ifdef CROSS_INCLUDE_DIR ++static char CROSS_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = CROSS_INCLUDE_DIR; ++#endif ++#ifdef TOOL_INCLUDE_DIR ++static char TOOL_INCLUDE_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = TOOL_INCLUDE_DIR; ++#endif ++#ifdef NATIVE_SYSTEM_HEADER_DIR ++static char NATIVE_SYSTEM_HEADER_DIRVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = NATIVE_SYSTEM_HEADER_DIR; ++#endif ++ + const struct default_include cpp_include_defaults[] + #ifdef INCLUDE_DEFAULTS + = INCLUDE_DEFAULTS; +@@ -42,38 +66,38 @@ const struct default_include cpp_include_defaults[] + = { + #ifdef GPLUSPLUS_INCLUDE_DIR + /* Pick up GNU C++ generic include files. */ +- { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1, ++ { GPLUSPLUS_INCLUDE_DIRVAR, "G++", 1, 1, + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, + #endif + #ifdef GPLUSPLUS_TOOL_INCLUDE_DIR + /* Pick up GNU C++ target-dependent include files. */ +- { GPLUSPLUS_TOOL_INCLUDE_DIR, "G++", 1, 1, ++ { GPLUSPLUS_TOOL_INCLUDE_DIRVAR, "G++", 1, 1, + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 1 }, + #endif + #ifdef GPLUSPLUS_BACKWARD_INCLUDE_DIR + /* Pick up GNU C++ backward and deprecated include files. 
*/ +- { GPLUSPLUS_BACKWARD_INCLUDE_DIR, "G++", 1, 1, ++ { GPLUSPLUS_BACKWARD_INCLUDE_DIRVAR, "G++", 1, 1, + GPLUSPLUS_INCLUDE_DIR_ADD_SYSROOT, 0 }, + #endif + #ifdef GCC_INCLUDE_DIR + /* This is the dir for gcc's private headers. */ +- { GCC_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, ++ { GCC_INCLUDE_DIRVAR, "GCC", 0, 0, 0, 0 }, + #endif + #ifdef GCC_INCLUDE_SUBDIR_TARGET + /* This is the dir for gcc's private headers under the specified sysroot. */ +- { STANDARD_STARTFILE_PREFIX_2 GCC_INCLUDE_SUBDIR_TARGET, "GCC", 0, 0, 1, 0 }, ++ { STANDARD_STARTFILE_PREFIX_2VAR, "GCC", 0, 0, 1, 0 }, + #endif + #ifdef LOCAL_INCLUDE_DIR + /* /usr/local/include comes before the fixincluded header files. */ +- { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 2 }, +- { LOCAL_INCLUDE_DIR, 0, 0, 1, 1, 0 }, ++ { LOCAL_INCLUDE_DIRVAR, 0, 0, 1, 1, 2 }, ++ { LOCAL_INCLUDE_DIRVAR, 0, 0, 1, 1, 0 }, + #endif + #ifdef PREFIX_INCLUDE_DIR +- { PREFIX_INCLUDE_DIR, 0, 0, 1, 0, 0 }, ++ { PREFIX_INCLUDE_DIRVAR, 0, 0, 1, 0, 0 }, + #endif + #ifdef FIXED_INCLUDE_DIR + /* This is the dir for fixincludes. */ +- { FIXED_INCLUDE_DIR, "GCC", 0, 0, 0, ++ { FIXED_INCLUDE_DIRVAR, "GCC", 0, 0, 0, + /* A multilib suffix needs adding if different multilibs use + different headers. */ + #ifdef SYSROOT_HEADERS_SUFFIX_SPEC +@@ -85,33 +109,24 @@ const struct default_include cpp_include_defaults[] + #endif + #ifdef CROSS_INCLUDE_DIR + /* One place the target system's headers might be. */ +- { CROSS_INCLUDE_DIR, "GCC", 0, 0, 0, 0 }, ++ { CROSS_INCLUDE_DIRVAR, "GCC", 0, 0, 0, 0 }, + #endif + #ifdef TOOL_INCLUDE_DIR + /* Another place the target system's headers might be. */ +- { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1, 0, 0 }, ++ { TOOL_INCLUDE_DIRVAR, "BINUTILS", 0, 1, 0, 0 }, + #endif + #ifdef NATIVE_SYSTEM_HEADER_DIR + /* /usr/include comes dead last. */ +- { NATIVE_SYSTEM_HEADER_DIR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 2 }, +- { NATIVE_SYSTEM_HEADER_DIR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 0 }, ++ { NATIVE_SYSTEM_HEADER_DIRVAR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 2 }, ++ { NATIVE_SYSTEM_HEADER_DIRVAR, NATIVE_SYSTEM_HEADER_COMPONENT, 0, 0, 1, 0 }, + #endif + { 0, 0, 0, 0, 0, 0 } + }; + #endif /* no INCLUDE_DEFAULTS */ + +-#ifdef GCC_INCLUDE_DIR +-const char cpp_GCC_INCLUDE_DIR[] = GCC_INCLUDE_DIR; +-const size_t cpp_GCC_INCLUDE_DIR_len = sizeof GCC_INCLUDE_DIR - 8; +-#else +-const char cpp_GCC_INCLUDE_DIR[] = ""; +-const size_t cpp_GCC_INCLUDE_DIR_len = 0; +-#endif +- + /* The configured prefix. */ +-const char cpp_PREFIX[] = PREFIX; +-const size_t cpp_PREFIX_len = sizeof PREFIX - 1; +-const char cpp_EXEC_PREFIX[] = STANDARD_EXEC_PREFIX; ++char PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX; ++char EXEC_PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_EXEC_PREFIX; + + /* This value is set by cpp_relocated at runtime */ + const char *gcc_exec_prefix; +diff --git a/gcc/cppdefault.h b/gcc/cppdefault.h +index a681264f75e..5e10a2fa140 100644 +--- a/gcc/cppdefault.h ++++ b/gcc/cppdefault.h +@@ -33,7 +33,8 @@ + + struct default_include + { +- const char *const fname; /* The name of the directory. */ ++ const char *fname; /* The name of the directory. */ ++ + const char *const component; /* The component containing the directory + (see update_path in prefix.c) */ + const char cplusplus; /* Only look here if we're compiling C++. 
*/ +@@ -50,17 +51,13 @@ struct default_include + }; + + extern const struct default_include cpp_include_defaults[]; +-extern const char cpp_GCC_INCLUDE_DIR[]; +-extern const size_t cpp_GCC_INCLUDE_DIR_len; ++extern char GCC_INCLUDE_DIRVAR[] __attribute__ ((section (".gccrelocprefix"))); + + /* The configure-time prefix, i.e., the value supplied as the argument + to --prefix=. */ +-extern const char cpp_PREFIX[]; ++extern char PREFIXVAR[] __attribute__ ((section (".gccrelocprefix"))); + /* The length of the configure-time prefix. */ +-extern const size_t cpp_PREFIX_len; +-/* The configure-time execution prefix. This is typically the lib/gcc +- subdirectory of cpp_PREFIX. */ +-extern const char cpp_EXEC_PREFIX[]; ++extern char EXEC_PREFIXVAR[] __attribute__ ((section (".gccrelocprefix"))); + /* The run-time execution prefix. This is typically the lib/gcc + subdirectory of the actual installation. */ + extern const char *gcc_exec_prefix; +diff --git a/gcc/gcc.c b/gcc/gcc.c +index c87f603955f..535d5c3bb65 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -252,6 +252,8 @@ FILE *report_times_to_file = NULL; + #endif + static const char *target_system_root = DEFAULT_TARGET_SYSTEM_ROOT; + ++static char target_relocatable_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = SYSTEMLIBS_DIR; ++ + /* Nonzero means pass the updated target_system_root to the compiler. */ + + static int target_system_root_changed; +@@ -526,6 +528,7 @@ or with constant text in a single argument. + %G process LIBGCC_SPEC as a spec. + %R Output the concatenation of target_system_root and + target_sysroot_suffix. ++ %r Output the base path target_relocatable_prefix + %S process STARTFILE_SPEC as a spec. A capital S is actually used here. + %E process ENDFILE_SPEC as a spec. A capital E is actually used here. + %C process CPP_SPEC as a spec. +@@ -1499,10 +1502,10 @@ static const char *gcc_libexec_prefix; + gcc_exec_prefix is set because, in that case, we know where the + compiler has been installed, and use paths relative to that + location instead. */ +-static const char *const standard_exec_prefix = STANDARD_EXEC_PREFIX; +-static const char *const standard_libexec_prefix = STANDARD_LIBEXEC_PREFIX; +-static const char *const standard_bindir_prefix = STANDARD_BINDIR_PREFIX; +-static const char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; ++static char standard_exec_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_EXEC_PREFIX; ++static char standard_libexec_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_LIBEXEC_PREFIX; ++static char standard_bindir_prefix[4096] __attribute__ ((section (".gccrelocprefix"))) = STANDARD_BINDIR_PREFIX; ++static char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; + + /* For native compilers, these are well-known paths containing + components that may be provided by the system. 
For cross +@@ -1510,9 +1513,9 @@ static const char *const standard_startfile_prefix = STANDARD_STARTFILE_PREFIX; + static const char *md_exec_prefix = MD_EXEC_PREFIX; + static const char *md_startfile_prefix = MD_STARTFILE_PREFIX; + static const char *md_startfile_prefix_1 = MD_STARTFILE_PREFIX_1; +-static const char *const standard_startfile_prefix_1 ++static char standard_startfile_prefix_1[4096] __attribute__ ((section (".gccrelocprefix"))) + = STANDARD_STARTFILE_PREFIX_1; +-static const char *const standard_startfile_prefix_2 ++static char standard_startfile_prefix_2[4096] __attribute__ ((section (".gccrelocprefix"))) + = STANDARD_STARTFILE_PREFIX_2; + + /* A relative path to be used in finding the location of tools +@@ -5952,6 +5955,11 @@ do_spec_1 (const char *spec, int inswitch, const char *soft_matched_part) + } + break; + ++ case 'r': ++ obstack_grow (&obstack, target_relocatable_prefix, ++ strlen (target_relocatable_prefix)); ++ break; ++ + case 'S': + value = do_spec_1 (startfile_spec, 0, NULL); + if (value != 0) +diff --git a/gcc/incpath.c b/gcc/incpath.c +index 9098ab044ab..bfad4ebe382 100644 +--- a/gcc/incpath.c ++++ b/gcc/incpath.c +@@ -131,7 +131,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, + int relocated = cpp_relocated (); + size_t len; + +- if (iprefix && (len = cpp_GCC_INCLUDE_DIR_len) != 0) ++ if (iprefix && (len = strlen(GCC_INCLUDE_DIRVAR) - 7) != 0) + { + /* Look for directories that start with the standard prefix. + "Translate" them, i.e. replace /usr/local/lib/gcc... with +@@ -145,7 +145,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, + now. */ + if (sysroot && p->add_sysroot) + continue; +- if (!filename_ncmp (p->fname, cpp_GCC_INCLUDE_DIR, len)) ++ if (!filename_ncmp (p->fname, GCC_INCLUDE_DIRVAR, len)) + { + char *str = concat (iprefix, p->fname + len, NULL); + if (p->multilib == 1 && imultilib) +@@ -185,7 +185,7 @@ add_standard_paths (const char *sysroot, const char *iprefix, + free (sysroot_no_trailing_dir_separator); + } + else if (!p->add_sysroot && relocated +- && !filename_ncmp (p->fname, cpp_PREFIX, cpp_PREFIX_len)) ++ && !filename_ncmp (p->fname, PREFIXVAR, strlen(PREFIXVAR))) + { + static const char *relocated_prefix; + char *ostr; +@@ -202,12 +202,12 @@ add_standard_paths (const char *sysroot, const char *iprefix, + dummy = concat (gcc_exec_prefix, "dummy", NULL); + relocated_prefix + = make_relative_prefix (dummy, +- cpp_EXEC_PREFIX, +- cpp_PREFIX); ++ EXEC_PREFIXVAR, ++ PREFIXVAR); + free (dummy); + } + ostr = concat (relocated_prefix, +- p->fname + cpp_PREFIX_len, ++ p->fname + strlen(PREFIXVAR), + NULL); + str = update_path (ostr, p->component); + free (ostr); +diff --git a/gcc/prefix.c b/gcc/prefix.c +index 1a403e535bd..3257ed3cd3e 100644 +--- a/gcc/prefix.c ++++ b/gcc/prefix.c +@@ -72,7 +72,9 @@ License along with GCC; see the file COPYING3. 
If not see + #include "prefix.h" + #include "common/common-target.h" + +-static const char *std_prefix = PREFIX; ++static const char PREFIXVAR[4096] __attribute__ ((section (".gccrelocprefix"))) = PREFIX; ++ ++static const char *std_prefix = PREFIXVAR; + + static const char *get_key_value (char *); + static char *translate_name (char *); +@@ -212,7 +214,7 @@ translate_name (char *name) + prefix = getenv (key); + + if (prefix == 0) +- prefix = PREFIX; ++ prefix = PREFIXVAR; + + /* We used to strip trailing DIR_SEPARATORs here, but that can + sometimes yield a result with no separator when one was coded diff --git a/poky/meta/recipes-devtools/gcc/gcc/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch b/poky/meta/recipes-devtools/gcc/gcc/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch new file mode 100644 index 000000000..abf1f8491 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0025-Search-target-sysroot-gcc-version-specific-dirs-with.patch @@ -0,0 +1,99 @@ +From 9c0c73ee48dbee2aad57f4dcdad1b7b74e77b944 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 7 Dec 2015 23:41:45 +0000 +Subject: [PATCH] Search target sysroot gcc version specific dirs with + multilib. + +We install the gcc libraries (such as crtbegin.p) into +//5.2.0/ +which is a default search path for GCC (aka multi_suffix in the +code below). is 'machine' in gcc's terminology. We use +these directories so that multiple gcc versions could in theory +co-exist on target. + +We only want to build one gcc-cross-canadian per arch and have this work +for all multilibs. can be handled by mapping the multilib + to the one used by gcc-cross-canadian, e.g. +mips64-polkmllib32-linux +is symlinked to by mips64-poky-linux. + +The default gcc search path in the target sysroot for a "lib64" mutlilib +is: + +/lib32/mips64-poky-linux/5.2.0/ +/lib32/../lib64/ +/usr/lib32/mips64-poky-linux/5.2.0/ +/usr/lib32/../lib64/ +/lib32/ +/usr/lib32/ + +which means that the lib32 crtbegin.o will be found and the lib64 ones +will not which leads to compiler failures. + +This patch injects a multilib version of that path first so the lib64 +binaries can be found first. With this change the search path becomes: + +/lib32/../lib64/mips64-poky-linux/5.2.0/ +/lib32/mips64-poky-linux/5.2.0/ +/lib32/../lib64/ +/usr/lib32/../lib64/mips64-poky-linux/5.2.0/ +/usr/lib32/mips64-poky-linux/5.2.0/ +/usr/lib32/../lib64/ +/lib32/ +/usr/lib32/ + +Upstream-Status: Pending +RP 2015/7/31 + +Signed-off-by: Khem Raj +--- + gcc/gcc.c | 29 ++++++++++++++++++++++++++++- + 1 file changed, 28 insertions(+), 1 deletion(-) + +diff --git a/gcc/gcc.c b/gcc/gcc.c +index 535d5c3bb65..04647ae812d 100644 +--- a/gcc/gcc.c ++++ b/gcc/gcc.c +@@ -2616,7 +2616,7 @@ for_each_path (const struct path_prefix *paths, + if (path == NULL) + { + len = paths->max_len + extra_space + 1; +- len += MAX (MAX (suffix_len, multi_os_dir_len), multiarch_len); ++ len += MAX ((suffix_len + multi_os_dir_len), multiarch_len); + path = XNEWVEC (char, len); + } + +@@ -2628,6 +2628,33 @@ for_each_path (const struct path_prefix *paths, + /* Look first in MACHINE/VERSION subdirectory. */ + if (!skip_multi_dir) + { ++ if (!(pl->os_multilib ? 
skip_multi_os_dir : skip_multi_dir)) ++ { ++ const char *this_multi; ++ size_t this_multi_len; ++ ++ if (pl->os_multilib) ++ { ++ this_multi = multi_os_dir; ++ this_multi_len = multi_os_dir_len; ++ } ++ else ++ { ++ this_multi = multi_dir; ++ this_multi_len = multi_dir_len; ++ } ++ ++ /* Look in multilib MACHINE/VERSION subdirectory first */ ++ if (this_multi_len) ++ { ++ memcpy (path + len, this_multi, this_multi_len + 1); ++ memcpy (path + len + this_multi_len, multi_suffix, suffix_len + 1); ++ ret = callback (path, callback_info); ++ if (ret) ++ break; ++ } ++ } ++ + memcpy (path + len, multi_suffix, suffix_len + 1); + ret = callback (path, callback_info); + if (ret) diff --git a/poky/meta/recipes-devtools/gcc/gcc/0026-Fix-various-_FOR_BUILD-and-related-variables.patch b/poky/meta/recipes-devtools/gcc/gcc/0026-Fix-various-_FOR_BUILD-and-related-variables.patch new file mode 100644 index 000000000..97bf2f3a7 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0026-Fix-various-_FOR_BUILD-and-related-variables.patch @@ -0,0 +1,134 @@ +From 3a003af8804dda90fdf4862eca5f66cb12faaf02 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 7 Dec 2015 23:42:45 +0000 +Subject: [PATCH] Fix various _FOR_BUILD and related variables + +When doing a FOR_BUILD thing, you have to override CFLAGS with +CFLAGS_FOR_BUILD. And if you use C++, you also have to override +CXXFLAGS with CXXFLAGS_FOR_BUILD. +Without this, when building for mingw, you end up trying to use +the mingw headers for a host build. + +The same goes for other variables as well, such as CPPFLAGS, +CPP, and GMPINC. + +Upstream-Status: Pending + +Signed-off-by: Peter Seebach +Signed-off-by: Mark Hatle +Signed-off-by: Khem Raj +--- + Makefile.in | 6 ++++++ + Makefile.tpl | 5 +++++ + gcc/Makefile.in | 2 +- + gcc/configure | 2 +- + gcc/configure.ac | 2 +- + 5 files changed, 14 insertions(+), 3 deletions(-) + +diff --git a/Makefile.in b/Makefile.in +index c717903bb13..5abc649868d 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -152,6 +152,7 @@ BUILD_EXPORTS = \ + CPP="$(CC_FOR_BUILD) -E"; export CPP; \ + CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ + CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ ++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \ + CXX="$(CXX_FOR_BUILD)"; export CXX; \ + CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \ + GFORTRAN="$(GFORTRAN_FOR_BUILD)"; export GFORTRAN; \ +@@ -171,6 +172,9 @@ BUILD_EXPORTS = \ + # built for the build system to override those in BASE_FLAGS_TO_PASS. + EXTRA_BUILD_FLAGS = \ + CFLAGS="$(CFLAGS_FOR_BUILD)" \ ++ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \ ++ CPP="$(CC_FOR_BUILD) -E" \ ++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \ + LDFLAGS="$(LDFLAGS_FOR_BUILD)" + + # This is the list of directories to built for the host system. 
+@@ -188,6 +192,7 @@ HOST_SUBDIR = @host_subdir@ + HOST_EXPORTS = \ + $(BASE_EXPORTS) \ + CC="$(CC)"; export CC; \ ++ CPP="$(CC) -E"; export CPP; \ + ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \ + CFLAGS="$(CFLAGS)"; export CFLAGS; \ + CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ +@@ -776,6 +781,7 @@ BASE_FLAGS_TO_PASS = \ + "CC_FOR_BUILD=$(CC_FOR_BUILD)" \ + "CFLAGS_FOR_BUILD=$(CFLAGS_FOR_BUILD)" \ + "CXX_FOR_BUILD=$(CXX_FOR_BUILD)" \ ++ "CXXFLAGS_FOR_BUILD=$(CXXFLAGS_FOR_BUILD)" \ + "EXPECT=$(EXPECT)" \ + "FLEX=$(FLEX)" \ + "INSTALL=$(INSTALL)" \ +diff --git a/Makefile.tpl b/Makefile.tpl +index efed1511750..778beb705b4 100644 +--- a/Makefile.tpl ++++ b/Makefile.tpl +@@ -154,6 +154,7 @@ BUILD_EXPORTS = \ + CC="$(CC_FOR_BUILD)"; export CC; \ + CFLAGS="$(CFLAGS_FOR_BUILD)"; export CFLAGS; \ + CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ ++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)"; export CPPFLAGS; \ + CXX="$(CXX_FOR_BUILD)"; export CXX; \ + CXXFLAGS="$(CXXFLAGS_FOR_BUILD)"; export CXXFLAGS; \ + GFORTRAN="$(GFORTRAN_FOR_BUILD)"; export GFORTRAN; \ +@@ -173,6 +174,9 @@ BUILD_EXPORTS = \ + # built for the build system to override those in BASE_FLAGS_TO_PASS. + EXTRA_BUILD_FLAGS = \ + CFLAGS="$(CFLAGS_FOR_BUILD)" \ ++ CXXFLAGS="$(CXXFLAGS_FOR_BUILD)" \ ++ CPP="$(CC_FOR_BUILD) -E" \ ++ CPPFLAGS="$(CPPFLAGS_FOR_BUILD)" \ + LDFLAGS="$(LDFLAGS_FOR_BUILD)" + + # This is the list of directories to built for the host system. +@@ -190,6 +194,7 @@ HOST_SUBDIR = @host_subdir@ + HOST_EXPORTS = \ + $(BASE_EXPORTS) \ + CC="$(CC)"; export CC; \ ++ CPP="$(CC) -E"; export CPP; \ + ADA_CFLAGS="$(ADA_CFLAGS)"; export ADA_CFLAGS; \ + CFLAGS="$(CFLAGS)"; export CFLAGS; \ + CONFIG_SHELL="$(SHELL)"; export CONFIG_SHELL; \ +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 011c7ac2db6..2f1165f7b5e 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -805,7 +805,7 @@ BUILD_LDFLAGS=@BUILD_LDFLAGS@ + BUILD_NO_PIE_FLAG = @BUILD_NO_PIE_FLAG@ + BUILD_LDFLAGS += $(BUILD_NO_PIE_FLAG) + BUILD_CPPFLAGS= -I. -I$(@D) -I$(srcdir) -I$(srcdir)/$(@D) \ +- -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS) ++ -I$(srcdir)/../include @INCINTL@ $(CPPINC) $(CPPFLAGS_FOR_BUILD) + + # Actual name to use when installing a native compiler. 
+ GCC_INSTALL_NAME := $(shell echo gcc|sed '$(program_transform_name)') +diff --git a/gcc/configure b/gcc/configure +index 825a9652329..ff46cf58960 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -12314,7 +12314,7 @@ else + CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ + CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \ + LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \ +- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \ ++ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \ + ${realsrcdir}/configure \ + --enable-languages=${enable_languages-all} \ + ${enable_obsolete+--enable-obsolete="$enable_obsolete"} \ +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 6099eb3251f..b3c345b61dc 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -1898,7 +1898,7 @@ else + CC="${CC_FOR_BUILD}" CFLAGS="${CFLAGS_FOR_BUILD}" \ + CXX="${CXX_FOR_BUILD}" CXXFLAGS="${CXXFLAGS_FOR_BUILD}" \ + LD="${LD_FOR_BUILD}" LDFLAGS="${LDFLAGS_FOR_BUILD}" \ +- GMPINC="" CPPFLAGS="${CPPFLAGS} -DGENERATOR_FILE" \ ++ GMPINC="" CPPFLAGS="${CPPFLAGS_FOR_BUILD} -DGENERATOR_FILE" \ + ${realsrcdir}/configure \ + --enable-languages=${enable_languages-all} \ + ${enable_obsolete+--enable-obsolete="$enable_obsolete"} \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch b/poky/meta/recipes-devtools/gcc/gcc/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch new file mode 100644 index 000000000..3cd75b718 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0027-nios2-Define-MUSL_DYNAMIC_LINKER.patch @@ -0,0 +1,25 @@ +From 4e53d0ae70af85af0e112a48a3e4dfe4c39f4a8d Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 2 Feb 2016 10:26:10 -0800 +Subject: [PATCH] nios2: Define MUSL_DYNAMIC_LINKER + +Upstream-Status: Pending + +Signed-off-by: Marek Vasut +Signed-off-by: Khem Raj +--- + gcc/config/nios2/linux.h | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/gcc/config/nios2/linux.h b/gcc/config/nios2/linux.h +index 4bdcdcca1f0..e7943a9d640 100644 +--- a/gcc/config/nios2/linux.h ++++ b/gcc/config/nios2/linux.h +@@ -30,6 +30,7 @@ + #define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" + + #define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-nios2.so.1" ++#define MUSL_DYNAMIC_LINKER "/lib/ld-musl-nios2.so.1" + + #undef LINK_SPEC + #define LINK_SPEC LINK_SPEC_ENDIAN \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch b/poky/meta/recipes-devtools/gcc/gcc/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch new file mode 100644 index 000000000..2a6769a82 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0028-Add-ssp_nonshared-to-link-commandline-for-musl-targe.patch @@ -0,0 +1,84 @@ +From 5db0404eb770ac477fd99d444226bcf021067584 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 27 Jun 2017 18:10:54 -0700 +Subject: [PATCH] Add ssp_nonshared to link commandline for musl targets + +when -fstack-protector options are enabled we need to +link with ssp_shared on musl since it does not provide +the __stack_chk_fail_local() so essentially it provides +libssp but not libssp_nonshared something like +TARGET_LIBC_PROVIDES_SSP_BUT_NOT_SSP_NONSHARED + where-as for glibc the needed symbols +are already present in libc_nonshared library therefore +we do not need any library helper on glibc based systems +but musl needs the libssp_noshared from gcc + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + gcc/config/linux.h | 7 +++++++ + gcc/config/rs6000/linux.h | 10 ++++++++++ + 
gcc/config/rs6000/linux64.h | 10 ++++++++++ + 3 files changed, 27 insertions(+) + +diff --git a/gcc/config/linux.h b/gcc/config/linux.h +index 0c1a8118a26..bdc2a2d0659 100644 +--- a/gcc/config/linux.h ++++ b/gcc/config/linux.h +@@ -195,6 +195,13 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see + { GCC_INCLUDE_DIR, "GCC", 0, 1, 0, 0 }, \ + { 0, 0, 0, 0, 0, 0 } \ + } ++#ifdef TARGET_LIBC_PROVIDES_SSP ++#undef LINK_SSP_SPEC ++#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ ++ "|fstack-protector-strong|fstack-protector-explicit" \ ++ ":-lssp_nonshared}" ++#endif ++ + #endif + + #if (DEFAULT_LIBC == LIBC_UCLIBC) && defined (SINGLE_LIBC) /* uClinux */ +diff --git a/gcc/config/rs6000/linux.h b/gcc/config/rs6000/linux.h +index b7026fcbee7..dd54d6c393e 100644 +--- a/gcc/config/rs6000/linux.h ++++ b/gcc/config/rs6000/linux.h +@@ -94,6 +94,16 @@ + " -m elf32ppclinux") + #endif + ++/* link libssp_nonshared.a with musl */ ++#if DEFAULT_LIBC == LIBC_MUSL ++#ifdef TARGET_LIBC_PROVIDES_SSP ++#undef LINK_SSP_SPEC ++#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ ++ "|fstack-protector-strong|fstack-protector-explicit" \ ++ ":-lssp_nonshared}" ++#endif ++#endif ++ + #undef LINK_OS_LINUX_SPEC + #define LINK_OS_LINUX_SPEC LINK_OS_LINUX_EMUL " %{!shared: %{!static: \ + %{!static-pie: \ +diff --git a/gcc/config/rs6000/linux64.h b/gcc/config/rs6000/linux64.h +index 967c1c43c63..dc5e4d97975 100644 +--- a/gcc/config/rs6000/linux64.h ++++ b/gcc/config/rs6000/linux64.h +@@ -452,6 +452,16 @@ extern int dot_symbols; + " -m elf64ppc") + #endif + ++/* link libssp_nonshared.a with musl */ ++#if DEFAULT_LIBC == LIBC_MUSL ++#ifdef TARGET_LIBC_PROVIDES_SSP ++#undef LINK_SSP_SPEC ++#define LINK_SSP_SPEC "%{fstack-protector|fstack-protector-all" \ ++ "|fstack-protector-strong|fstack-protector-explicit" \ ++ ":-lssp_nonshared}" ++#endif ++#endif ++ + #define LINK_OS_LINUX_SPEC32 LINK_OS_LINUX_EMUL32 " %{!shared: %{!static: \ + %{!static-pie: \ + %{rdynamic:-export-dynamic} \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch b/poky/meta/recipes-devtools/gcc/gcc/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch new file mode 100644 index 000000000..767cba038 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0029-Link-libgcc-using-LDFLAGS-not-just-SHLIB_LDFLAGS.patch @@ -0,0 +1,26 @@ +From fbc926dbf6a47fa623b9c94cd9b09a0e90448fdc Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 4 May 2016 21:11:34 -0700 +Subject: [PATCH] Link libgcc using LDFLAGS, not just SHLIB_LDFLAGS + +Upstream-Status: Pending + +Signed-off-by: Christopher Larson +Signed-off-by: Khem Raj +--- + libgcc/config/t-slibgcc | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/libgcc/config/t-slibgcc b/libgcc/config/t-slibgcc +index c997553447c..330352c2c81 100644 +--- a/libgcc/config/t-slibgcc ++++ b/libgcc/config/t-slibgcc +@@ -32,7 +32,7 @@ SHLIB_INSTALL_SOLINK = $(LN_S) $(SHLIB_SONAME) \ + $(DESTDIR)$(slibdir)$(SHLIB_SLIBDIR_QUAL)/$(SHLIB_SOLINK) + + SHLIB_LINK = $(CC) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \ +- $(SHLIB_LDFLAGS) \ ++ $(LDFLAGS) $(SHLIB_LDFLAGS) \ + -o $(SHLIB_DIR)/$(SHLIB_SONAME).tmp @multilib_flags@ \ + $(SHLIB_OBJS) $(SHLIB_LC) && \ + rm -f $(SHLIB_DIR)/$(SHLIB_SOLINK) && \ diff --git a/poky/meta/recipes-devtools/gcc/gcc/0030-sync-gcc-stddef.h-with-musl.patch b/poky/meta/recipes-devtools/gcc/gcc/0030-sync-gcc-stddef.h-with-musl.patch new file mode 100644 index 000000000..4f18907a1 --- 
/dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0030-sync-gcc-stddef.h-with-musl.patch @@ -0,0 +1,88 @@ +From 24dc04dc8d69e3bf61322615b3ef18e02ccd311e Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Fri, 3 Feb 2017 12:56:00 -0800 +Subject: [PATCH] sync gcc stddef.h with musl + +musl defines ptrdiff_t size_t and wchar_t +so dont define them here if musl is definining them + +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + gcc/ginclude/stddef.h | 9 +++++++++ + 1 file changed, 9 insertions(+) + +diff --git a/gcc/ginclude/stddef.h b/gcc/ginclude/stddef.h +index 9d67eac4947..6cb5edbedb4 100644 +--- a/gcc/ginclude/stddef.h ++++ b/gcc/ginclude/stddef.h +@@ -128,6 +128,7 @@ _TYPE_wchar_t; + #ifndef ___int_ptrdiff_t_h + #ifndef _GCC_PTRDIFF_T + #ifndef _PTRDIFF_T_DECLARED /* DragonFly */ ++#ifndef __DEFINED_ptrdiff_t /* musl */ + #define _PTRDIFF_T + #define _T_PTRDIFF_ + #define _T_PTRDIFF +@@ -137,10 +138,12 @@ _TYPE_wchar_t; + #define ___int_ptrdiff_t_h + #define _GCC_PTRDIFF_T + #define _PTRDIFF_T_DECLARED ++#define __DEFINED_ptrdiff_t /* musl */ + #ifndef __PTRDIFF_TYPE__ + #define __PTRDIFF_TYPE__ long int + #endif + typedef __PTRDIFF_TYPE__ ptrdiff_t; ++#endif /* __DEFINED_ptrdiff_t */ + #endif /* _PTRDIFF_T_DECLARED */ + #endif /* _GCC_PTRDIFF_T */ + #endif /* ___int_ptrdiff_t_h */ +@@ -178,6 +181,7 @@ typedef __PTRDIFF_TYPE__ ptrdiff_t; + #ifndef _GCC_SIZE_T + #ifndef _SIZET_ + #ifndef __size_t ++#ifndef __DEFINED_size_t /* musl */ + #define __size_t__ /* BeOS */ + #define __SIZE_T__ /* Cray Unicos/Mk */ + #define _SIZE_T +@@ -194,6 +198,7 @@ typedef __PTRDIFF_TYPE__ ptrdiff_t; + #define ___int_size_t_h + #define _GCC_SIZE_T + #define _SIZET_ ++#define __DEFINED_size_t /* musl */ + #if defined (__FreeBSD__) \ + || defined(__DragonFly__) \ + || defined(__FreeBSD_kernel__) \ +@@ -228,6 +233,7 @@ typedef long ssize_t; + #endif /* _SIZE_T */ + #endif /* __SIZE_T__ */ + #endif /* __size_t__ */ ++#endif /* __DEFINED_size_t */ + #undef __need_size_t + #endif /* _STDDEF_H or __need_size_t. */ + +@@ -257,6 +263,7 @@ typedef long ssize_t; + #ifndef ___int_wchar_t_h + #ifndef __INT_WCHAR_T_H + #ifndef _GCC_WCHAR_T ++#ifndef __DEFINED_wchar_t /* musl */ + #define __wchar_t__ /* BeOS */ + #define __WCHAR_T__ /* Cray Unicos/Mk */ + #define _WCHAR_T +@@ -272,6 +279,7 @@ typedef long ssize_t; + #define __INT_WCHAR_T_H + #define _GCC_WCHAR_T + #define _WCHAR_T_DECLARED ++#define __DEFINED_wchar_t /* musl */ + + /* On BSD/386 1.1, at least, machine/ansi.h defines _BSD_WCHAR_T_ + instead of _WCHAR_T_, and _BSD_RUNE_T_ (which, unlike the other +@@ -337,6 +345,7 @@ typedef __WCHAR_TYPE__ wchar_t; + #endif + #endif /* __WCHAR_T__ */ + #endif /* __wchar_t__ */ ++#endif /* __DEFINED_wchar_t musl */ + #undef __need_wchar_t + #endif /* _STDDEF_H or __need_wchar_t. 
*/ + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0031-fix-segmentation-fault-in-precompiled-header-generat.patch b/poky/meta/recipes-devtools/gcc/gcc/0031-fix-segmentation-fault-in-precompiled-header-generat.patch new file mode 100644 index 000000000..702279af0 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0031-fix-segmentation-fault-in-precompiled-header-generat.patch @@ -0,0 +1,57 @@ +From 27b8ba5555ada2dab076988529bfb84d00a4b901 Mon Sep 17 00:00:00 2001 +From: Juro Bystricky +Date: Mon, 19 Mar 2018 22:31:20 -0700 +Subject: [PATCH] fix segmentation fault in precompiled header generation + +Prevent a segmentation fault which occurs when using incorrect +structure trying to access name of some named operators, such as +CPP_NOT, CPP_AND etc. "token->val.node.spelling" cannot be used in +those cases, as is may not be initialized at all. + +[YOCTO #11738] + +Upstream-Status: Pending + +Signed-off-by: Juro Bystricky +Signed-off-by: Khem Raj +--- + libcpp/lex.c | 26 +++++++++++++++++++++----- + 1 file changed, 21 insertions(+), 5 deletions(-) + +diff --git a/libcpp/lex.c b/libcpp/lex.c +index 56ac3a1dd73..73a951148b3 100644 +--- a/libcpp/lex.c ++++ b/libcpp/lex.c +@@ -3311,11 +3311,27 @@ cpp_spell_token (cpp_reader *pfile, const cpp_token *token, + spell_ident: + case SPELL_IDENT: + if (forstring) +- { +- memcpy (buffer, NODE_NAME (token->val.node.spelling), +- NODE_LEN (token->val.node.spelling)); +- buffer += NODE_LEN (token->val.node.spelling); +- } ++ { ++ if (token->type == CPP_NAME) ++ { ++ memcpy (buffer, NODE_NAME (token->val.node.spelling), ++ NODE_LEN (token->val.node.spelling)); ++ buffer += NODE_LEN (token->val.node.spelling); ++ break; ++ } ++ /* NAMED_OP, cannot use node.spelling */ ++ if (token->flags & NAMED_OP) ++ { ++ const char *str = cpp_named_operator2name (token->type); ++ if (str) ++ { ++ size_t len = strlen(str); ++ memcpy(buffer, str, len); ++ buffer += len; ++ } ++ break; ++ } ++ } + else + buffer = _cpp_spell_ident_ucns (buffer, token->val.node.node); + break; diff --git a/poky/meta/recipes-devtools/gcc/gcc/0032-Fix-for-testsuite-failure.patch b/poky/meta/recipes-devtools/gcc/gcc/0032-Fix-for-testsuite-failure.patch new file mode 100644 index 000000000..0a0767b44 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0032-Fix-for-testsuite-failure.patch @@ -0,0 +1,255 @@ +From 2512aacf023c679d86d8f40caff4f6ff412b32ff Mon Sep 17 00:00:00 2001 +From: RAGHUNATH LOLUR +Date: Wed, 6 Dec 2017 22:52:26 -0800 +Subject: [PATCH] Fix for testsuite failure + +2017-11-16 Raghunath Lolur + + * gcc.dg/pr56275.c: If SSE is disabled, ensure that + "-mfpmath" is not set to use SSE. Set "-mfpmath=387". + * gcc.dg/pr68306.c: Likewise + * gcc.dg/pr68306-2.c: Likewise + * gcc.dg/pr68306-3.c: Likewise + * gcc.dg/pr69634.c: Likewise + * gcc.target/i386/amd64-abi-1.c: Likewise + * gcc.target/i386/funcspec-6.c: Likewise + * gcc.target/i386/interrupt-387-err-1.c: Likewise + * gcc.target/i386/isa-14.c: Likewise + * gcc.target/i386/pr44948-2b.c: Likewise + * gcc.target/i386/pr53425-1.c: Likewise + * gcc.target/i386/pr53425-2.c: Likewise + * gcc.target/i386/pr55247.c: Likewise + * gcc.target/i386/pr59644.c: Likewise + * gcc.target/i386/pr62120.c: Likewise + * gcc.target/i386/pr70467-1.c: Likewise + * gcc.target/i386/warn-vect-op-1.c: Likewise + +If -Wall, -Werror are used during compilation various test cases fail +to compile. + +If SSE is disabled, be sure to -mfpmath=387 to resolve this. + +This patch removes the changes to Changelog from the original patch. 
+This will help us avoid conflicts. + +Upstream-Status: Pending + +Signed-off-by: Mark Hatle +--- + gcc/testsuite/gcc.dg/pr56275.c | 2 +- + gcc/testsuite/gcc.dg/pr68306-2.c | 2 +- + gcc/testsuite/gcc.dg/pr68306-3.c | 2 +- + gcc/testsuite/gcc.dg/pr68306.c | 2 +- + gcc/testsuite/gcc.dg/pr69634.c | 2 +- + gcc/testsuite/gcc.target/i386/amd64-abi-1.c | 2 +- + gcc/testsuite/gcc.target/i386/funcspec-6.c | 1 + + gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c | 2 +- + gcc/testsuite/gcc.target/i386/isa-14.c | 2 +- + gcc/testsuite/gcc.target/i386/pr44948-2b.c | 2 +- + gcc/testsuite/gcc.target/i386/pr53425-1.c | 2 +- + gcc/testsuite/gcc.target/i386/pr53425-2.c | 2 +- + gcc/testsuite/gcc.target/i386/pr55247.c | 2 +- + gcc/testsuite/gcc.target/i386/pr59644.c | 2 +- + gcc/testsuite/gcc.target/i386/pr62120.c | 2 +- + gcc/testsuite/gcc.target/i386/pr70467-1.c | 2 +- + gcc/testsuite/gcc.target/i386/warn-vect-op-1.c | 2 +- + 17 files changed, 17 insertions(+), 16 deletions(-) + +diff --git a/gcc/testsuite/gcc.dg/pr56275.c b/gcc/testsuite/gcc.dg/pr56275.c +index b901bb2b199..a4f6c95e1a1 100644 +--- a/gcc/testsuite/gcc.dg/pr56275.c ++++ b/gcc/testsuite/gcc.dg/pr56275.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O2" } */ +-/* { dg-additional-options "-mno-sse" { target { i?86-*-* x86_64-*-* } } } */ ++/* { dg-additional-options "-mno-sse -mfpmath=387" { target { i?86-*-* x86_64-*-* } } } */ + + typedef long long v2tw __attribute__ ((vector_size (2 * sizeof (long long)))); + +diff --git a/gcc/testsuite/gcc.dg/pr68306-2.c b/gcc/testsuite/gcc.dg/pr68306-2.c +index 4672ebe7987..2a368c484b6 100644 +--- a/gcc/testsuite/gcc.dg/pr68306-2.c ++++ b/gcc/testsuite/gcc.dg/pr68306-2.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O3" } */ +-/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ ++/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ + + struct { + int tz_minuteswest; +diff --git a/gcc/testsuite/gcc.dg/pr68306-3.c b/gcc/testsuite/gcc.dg/pr68306-3.c +index f5a8c102cf8..df3390c64c2 100644 +--- a/gcc/testsuite/gcc.dg/pr68306-3.c ++++ b/gcc/testsuite/gcc.dg/pr68306-3.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O3" } */ +-/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ ++/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ + /* { dg-additional-options "-mno-altivec -mno-vsx" { target powerpc*-*-* } } */ + + extern void fn2(); +diff --git a/gcc/testsuite/gcc.dg/pr68306.c b/gcc/testsuite/gcc.dg/pr68306.c +index 54e5b40f221..0813389e2c1 100644 +--- a/gcc/testsuite/gcc.dg/pr68306.c ++++ b/gcc/testsuite/gcc.dg/pr68306.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O3" } */ +-/* { dg-additional-options "-mno-sse -mno-mmx" { target i?86-*-* x86_64-*-* } } */ ++/* { dg-additional-options "-mno-sse -mno-mmx -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ + + enum powerpc_pmc_type { PPC_PMC_IBM }; + struct { +diff --git a/gcc/testsuite/gcc.dg/pr69634.c b/gcc/testsuite/gcc.dg/pr69634.c +index 60a56149463..bcc23f9ccd6 100644 +--- a/gcc/testsuite/gcc.dg/pr69634.c ++++ b/gcc/testsuite/gcc.dg/pr69634.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O2 -fno-dce -fschedule-insns -fno-tree-vrp -fcompare-debug -Wno-psabi" } */ +-/* { dg-additional-options "-mno-sse" { target i?86-*-* x86_64-*-* } } */ ++/* { dg-additional-options "-mno-sse -mfpmath=387" { target i?86-*-* x86_64-*-* } } */ + 
/* { dg-require-effective-target scheduling } */ + + typedef unsigned short u16; +diff --git a/gcc/testsuite/gcc.target/i386/amd64-abi-1.c b/gcc/testsuite/gcc.target/i386/amd64-abi-1.c +index 69fde57bf06..7f1f1c03edf 100644 +--- a/gcc/testsuite/gcc.target/i386/amd64-abi-1.c ++++ b/gcc/testsuite/gcc.target/i386/amd64-abi-1.c +@@ -1,5 +1,5 @@ + /* { dg-do compile { target { ! ia32 } } } */ +-/* { dg-options "-mno-sse" } */ ++/* { dg-options "-mno-sse -mfpmath=387" } */ + /* { dg-additional-options "-mabi=sysv" { target *-*-mingw* } } */ + + double foo(void) { return 0; } /* { dg-error "SSE disabled" } */ +diff --git a/gcc/testsuite/gcc.target/i386/funcspec-6.c b/gcc/testsuite/gcc.target/i386/funcspec-6.c +index ea896b7ebfd..bf15569b826 100644 +--- a/gcc/testsuite/gcc.target/i386/funcspec-6.c ++++ b/gcc/testsuite/gcc.target/i386/funcspec-6.c +@@ -1,6 +1,7 @@ + /* Test whether all of the 64-bit function specific options are accepted + without error. */ + /* { dg-do compile { target { ! ia32 } } } */ ++/* { dg-additional-options "-mfpmath=387" } */ + + #include "funcspec-56.inc" + +diff --git a/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c b/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c +index 8561a3c26d6..6377f814645 100644 +--- a/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c ++++ b/gcc/testsuite/gcc.target/i386/interrupt-387-err-1.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-O2 -mgeneral-regs-only -mno-cld -mno-iamcu -m80387" } */ ++/* { dg-options "-O2 -mgeneral-regs-only -mno-cld -mno-iamcu -m80387 -mfpmath=387" } */ + + typedef unsigned int uword_t __attribute__ ((mode (__word__))); + +diff --git a/gcc/testsuite/gcc.target/i386/isa-14.c b/gcc/testsuite/gcc.target/i386/isa-14.c +index 5d49e6e77fe..1de2db92bdd 100644 +--- a/gcc/testsuite/gcc.target/i386/isa-14.c ++++ b/gcc/testsuite/gcc.target/i386/isa-14.c +@@ -1,5 +1,5 @@ + /* { dg-do run } */ +-/* { dg-options "-march=x86-64 -msse4a -mfma4 -mno-sse" } */ ++/* { dg-options "-march=x86-64 -msse4a -mfma4 -mno-sse -mfpmath=387" } */ + + extern void abort (void); + +diff --git a/gcc/testsuite/gcc.target/i386/pr44948-2b.c b/gcc/testsuite/gcc.target/i386/pr44948-2b.c +index fa1769b62fb..f79fb12726f 100644 +--- a/gcc/testsuite/gcc.target/i386/pr44948-2b.c ++++ b/gcc/testsuite/gcc.target/i386/pr44948-2b.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-O -mno-sse -Wno-psabi -mtune=generic" } */ ++/* { dg-options "-O -mno-sse -Wno-psabi -mtune=generic -mfpmath=387" } */ + + struct A + { +diff --git a/gcc/testsuite/gcc.target/i386/pr53425-1.c b/gcc/testsuite/gcc.target/i386/pr53425-1.c +index 2e89ff7d81d..6339bf6b736 100644 +--- a/gcc/testsuite/gcc.target/i386/pr53425-1.c ++++ b/gcc/testsuite/gcc.target/i386/pr53425-1.c +@@ -1,6 +1,6 @@ + /* PR target/53425 */ + /* { dg-do compile { target { ! ia32 } } } */ +-/* { dg-options "-O2 -mno-sse" } */ ++/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ + /* { dg-skip-if "no SSE vector" { x86_64-*-mingw* } } */ + + typedef double __v2df __attribute__ ((__vector_size__ (16))); +diff --git a/gcc/testsuite/gcc.target/i386/pr53425-2.c b/gcc/testsuite/gcc.target/i386/pr53425-2.c +index 61f6283dbe9..2c5a55f0ac3 100644 +--- a/gcc/testsuite/gcc.target/i386/pr53425-2.c ++++ b/gcc/testsuite/gcc.target/i386/pr53425-2.c +@@ -1,6 +1,6 @@ + /* PR target/53425 */ + /* { dg-do compile { target { ! 
ia32 } } } */ +-/* { dg-options "-O2 -mno-sse" } */ ++/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ + /* { dg-skip-if "no SSE vector" { x86_64-*-mingw* } } */ + + typedef float __v2sf __attribute__ ((__vector_size__ (8))); +diff --git a/gcc/testsuite/gcc.target/i386/pr55247.c b/gcc/testsuite/gcc.target/i386/pr55247.c +index 23366d0909d..9810e3abb76 100644 +--- a/gcc/testsuite/gcc.target/i386/pr55247.c ++++ b/gcc/testsuite/gcc.target/i386/pr55247.c +@@ -1,6 +1,6 @@ + /* { dg-do compile { target { ! ia32 } } } */ + /* { dg-require-effective-target maybe_x32 } */ +-/* { dg-options "-O2 -mno-sse -mno-mmx -mx32 -maddress-mode=long" } */ ++/* { dg-options "-O2 -mno-sse -mno-mmx -mx32 -maddress-mode=long -mfpmath=387" } */ + + typedef unsigned int uint32_t; + typedef uint32_t Elf32_Word; +diff --git a/gcc/testsuite/gcc.target/i386/pr59644.c b/gcc/testsuite/gcc.target/i386/pr59644.c +index 96006b3e338..4287e4538bf 100644 +--- a/gcc/testsuite/gcc.target/i386/pr59644.c ++++ b/gcc/testsuite/gcc.target/i386/pr59644.c +@@ -1,6 +1,6 @@ + /* PR target/59644 */ + /* { dg-do run { target lp64 } } */ +-/* { dg-options "-O2 -ffreestanding -mno-sse -mpreferred-stack-boundary=3 -maccumulate-outgoing-args -mno-red-zone" } */ ++/* { dg-options "-O2 -ffreestanding -mno-sse -mpreferred-stack-boundary=3 -maccumulate-outgoing-args -mno-red-zone -mfpmath=387" } */ + + /* This test uses __builtin_trap () instead of e.g. abort, + because due to -mpreferred-stack-boundary=3 it should not call +diff --git a/gcc/testsuite/gcc.target/i386/pr62120.c b/gcc/testsuite/gcc.target/i386/pr62120.c +index 28d85d37712..c93266bd4bc 100644 +--- a/gcc/testsuite/gcc.target/i386/pr62120.c ++++ b/gcc/testsuite/gcc.target/i386/pr62120.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-mno-sse" } */ ++/* { dg-options "-mno-sse -mfpmath=387" } */ + + void foo () + { +diff --git a/gcc/testsuite/gcc.target/i386/pr70467-1.c b/gcc/testsuite/gcc.target/i386/pr70467-1.c +index 4e112c88d07..bcfb396a68d 100644 +--- a/gcc/testsuite/gcc.target/i386/pr70467-1.c ++++ b/gcc/testsuite/gcc.target/i386/pr70467-1.c +@@ -1,6 +1,6 @@ + /* PR rtl-optimization/70467 */ + /* { dg-do compile } */ +-/* { dg-options "-O2 -mno-sse" } */ ++/* { dg-options "-O2 -mno-sse -mfpmath=387" } */ + + void foo (unsigned long long *); + +diff --git a/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c b/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c +index 6cda1534311..26e37f5b8ba 100644 +--- a/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c ++++ b/gcc/testsuite/gcc.target/i386/warn-vect-op-1.c +@@ -1,5 +1,5 @@ + /* { dg-do compile { target { ! 
ia32 } } } */ +-/* { dg-options "-mno-sse -Wvector-operation-performance" } */ ++/* { dg-options "-mno-sse -Wvector-operation-performance -mfpmath=387" } */ + #define vector(elcount, type) \ + __attribute__((vector_size((elcount)*sizeof(type)))) type + diff --git a/poky/meta/recipes-devtools/gcc/gcc/0033-Re-introduce-spe-commandline-options.patch b/poky/meta/recipes-devtools/gcc/gcc/0033-Re-introduce-spe-commandline-options.patch new file mode 100644 index 000000000..ba7c2b8fd --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0033-Re-introduce-spe-commandline-options.patch @@ -0,0 +1,38 @@ +From 3fc06241ce37e2e4b3ed21ace28d347eb511448d Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 6 Jun 2018 12:10:22 -0700 +Subject: [PATCH] Re-introduce spe commandline options + +This should ensure that we keep accepting +spe options + +Upstream-Status: Inappropriate [SPE port is removed from rs600 port] + +Signed-off-by: Khem Raj +--- + gcc/config/rs6000/rs6000.opt | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +diff --git a/gcc/config/rs6000/rs6000.opt b/gcc/config/rs6000/rs6000.opt +index f95b8279270..0e52d51409d 100644 +--- a/gcc/config/rs6000/rs6000.opt ++++ b/gcc/config/rs6000/rs6000.opt +@@ -344,6 +344,18 @@ mdebug= + Target RejectNegative Joined + -mdebug= Enable debug output. + ++mspe ++Target Var(rs6000_spe) Save ++Generate SPE SIMD instructions on E500. ++ ++mabi=spe ++Target RejectNegative Var(rs6000_spe_abi) Save ++Use the SPE ABI extensions. ++ ++mabi=no-spe ++Target RejectNegative Var(rs6000_spe_abi, 0) ++Do not use the SPE ABI extensions. ++ + mabi=altivec + Target RejectNegative Var(rs6000_altivec_abi) Save + Use the AltiVec ABI extensions. diff --git a/poky/meta/recipes-devtools/gcc/gcc/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch b/poky/meta/recipes-devtools/gcc/gcc/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch new file mode 100644 index 000000000..4ce9dc6de --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0034-libgcc_s-Use-alias-for-__cpu_indicator_init-instead-.patch @@ -0,0 +1,83 @@ +From b42ff59c3fe2967d37815c8db72a47b9b7f585b4 Mon Sep 17 00:00:00 2001 +From: Szabolcs Nagy +Date: Sat, 24 Oct 2015 20:09:53 +0000 +Subject: [PATCH] libgcc_s: Use alias for __cpu_indicator_init instead of + symver + +Adapter from + +https://gcc.gnu.org/ml/gcc-patches/2015-05/msg00899.html + +This fix was debated but hasnt been applied gcc upstream since +they expect musl to support '@' in symbol versioning which is +a sun/gnu versioning extention. This patch however avoids the +need for the '@' symbols at all + +libgcc/Changelog: + +2015-05-11 Szabolcs Nagy + + * config/i386/cpuinfo.c (__cpu_indicator_init_local): Add. + (__cpu_indicator_init@GCC_4.8.0, __cpu_model@GCC_4.8.0): Remove. + + * config/i386/t-linux (HOST_LIBGCC2_CFLAGS): Remove -DUSE_ELF_SYMVER. + +gcc/Changelog: + +2015-05-11 Szabolcs Nagy + + * config/i386/i386-expand.c (ix86_expand_builtin): Make __builtin_cpu_init + call __cpu_indicator_init_local instead of __cpu_indicator_init. 
+ +Upstream-Status: Pending + +Signed-off-by: Khem Raj +--- + gcc/config/i386/i386-expand.c | 4 ++-- + libgcc/config/i386/cpuinfo.c | 6 +++--- + libgcc/config/i386/t-linux | 2 +- + 3 files changed, 6 insertions(+), 6 deletions(-) + +diff --git a/gcc/config/i386/i386-expand.c b/gcc/config/i386/i386-expand.c +index 48f00c5fcfc..468f5f71fac 100644 +--- a/gcc/config/i386/i386-expand.c ++++ b/gcc/config/i386/i386-expand.c +@@ -10941,10 +10941,10 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget, + { + case IX86_BUILTIN_CPU_INIT: + { +- /* Make it call __cpu_indicator_init in libgcc. */ ++ /* Make it call __cpu_indicator_init_local in libgcc.a. */ + tree call_expr, fndecl, type; + type = build_function_type_list (integer_type_node, NULL_TREE); +- fndecl = build_fn_decl ("__cpu_indicator_init", type); ++ fndecl = build_fn_decl ("__cpu_indicator_init_local", type); + call_expr = build_call_expr (fndecl, 0); + return expand_expr (call_expr, target, mode, EXPAND_NORMAL); + } +diff --git a/libgcc/config/i386/cpuinfo.c b/libgcc/config/i386/cpuinfo.c +index 00322c58622..f42bbb8af98 100644 +--- a/libgcc/config/i386/cpuinfo.c ++++ b/libgcc/config/i386/cpuinfo.c +@@ -508,7 +508,7 @@ __cpu_indicator_init (void) + return 0; + } + +-#if defined SHARED && defined USE_ELF_SYMVER +-__asm__ (".symver __cpu_indicator_init, __cpu_indicator_init@GCC_4.8.0"); +-__asm__ (".symver __cpu_model, __cpu_model@GCC_4.8.0"); ++#ifndef SHARED ++int __cpu_indicator_init_local (void) ++ __attribute__ ((weak, alias ("__cpu_indicator_init"))); + #endif +diff --git a/libgcc/config/i386/t-linux b/libgcc/config/i386/t-linux +index 8506a635790..564296f788e 100644 +--- a/libgcc/config/i386/t-linux ++++ b/libgcc/config/i386/t-linux +@@ -3,5 +3,5 @@ + # t-slibgcc-elf-ver and t-linux + SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/i386/libgcc-glibc.ver + +-HOST_LIBGCC2_CFLAGS += -mlong-double-80 -DUSE_ELF_SYMVER $(CET_FLAGS) ++HOST_LIBGCC2_CFLAGS += -mlong-double-80 $(CET_FLAGS) + CRTSTUFF_T_CFLAGS += $(CET_FLAGS) diff --git a/poky/meta/recipes-devtools/gcc/gcc/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch b/poky/meta/recipes-devtools/gcc/gcc/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch new file mode 100644 index 000000000..dd1bf6ded --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0035-gentypes-genmodes-Do-not-use-__LINE__-for-maintainin.patch @@ -0,0 +1,182 @@ +From 0395060a7dcf98c5f5a65103f6aaa71d6b862259 Mon Sep 17 00:00:00 2001 +From: Richard Purdie +Date: Tue, 10 Mar 2020 08:26:53 -0700 +Subject: [PATCH] gentypes/genmodes: Do not use __LINE__ for maintaining + reproducibility + +Inserting line numbers into generated code means its not always reproducible wth +differing versions of host gcc. Void the issue by not adding these. + +Upstream-Status: Inappropriate [OE Reproducibility specific] + +Signed-off-by: Richard Purdie +Signed-off-by: Khem Raj +--- + gcc/gengtype.c | 6 +++--- + gcc/genmodes.c | 32 ++++++++++++++++---------------- + 2 files changed, 19 insertions(+), 19 deletions(-) + +diff --git a/gcc/gengtype.c b/gcc/gengtype.c +index 981577481af..d5700fff401 100644 +--- a/gcc/gengtype.c ++++ b/gcc/gengtype.c +@@ -991,7 +991,7 @@ create_field_at (pair_p next, type_p type, const char *name, options_p opt, + /* Create a fake field with the given type and name. NEXT is the next + field in the chain. 
*/ + #define create_field(next,type,name) \ +- create_field_all (next,type,name, 0, this_file, __LINE__) ++ create_field_all (next,type,name, 0, this_file, 0) + + /* Like create_field, but the field is only valid when condition COND + is true. */ +@@ -1024,7 +1024,7 @@ create_optional_field_ (pair_p next, type_p type, const char *name, + } + + #define create_optional_field(next,type,name,cond) \ +- create_optional_field_(next,type,name,cond,__LINE__) ++ create_optional_field_(next,type,name,cond,0) + + /* Reverse a linked list of 'struct pair's in place. */ + pair_p +@@ -5187,7 +5187,7 @@ main (int argc, char **argv) + /* These types are set up with #define or else outside of where + we can see them. We should initialize them before calling + read_input_list. */ +-#define POS_HERE(Call) do { pos.file = this_file; pos.line = __LINE__; \ ++#define POS_HERE(Call) do { pos.file = this_file; pos.line = 0; \ + Call;} while (0) + POS_HERE (do_scalar_typedef ("CUMULATIVE_ARGS", &pos)); + POS_HERE (do_scalar_typedef ("REAL_VALUE_TYPE", &pos)); +diff --git a/gcc/genmodes.c b/gcc/genmodes.c +index bd78310ea24..dbd02c51a4c 100644 +--- a/gcc/genmodes.c ++++ b/gcc/genmodes.c +@@ -430,7 +430,7 @@ complete_all_modes (void) + } + + /* For each mode in class CLASS, construct a corresponding complex mode. */ +-#define COMPLEX_MODES(C) make_complex_modes (MODE_##C, __FILE__, __LINE__) ++#define COMPLEX_MODES(C) make_complex_modes (MODE_##C, __FILE__, 0) + static void + make_complex_modes (enum mode_class cl, + const char *file, unsigned int line) +@@ -489,7 +489,7 @@ make_complex_modes (enum mode_class cl, + having as many components as necessary. ORDER is the sorting order + of the mode, with smaller numbers indicating a higher priority. */ + #define VECTOR_MODES_WITH_PREFIX(PREFIX, C, W, ORDER) \ +- make_vector_modes (MODE_##C, #PREFIX, W, ORDER, __FILE__, __LINE__) ++ make_vector_modes (MODE_##C, #PREFIX, W, ORDER, __FILE__, 0) + #define VECTOR_MODES(C, W) VECTOR_MODES_WITH_PREFIX (V, C, W, 0) + static void ATTRIBUTE_UNUSED + make_vector_modes (enum mode_class cl, const char *prefix, unsigned int width, +@@ -541,7 +541,7 @@ make_vector_modes (enum mode_class cl, const char *prefix, unsigned int width, + /* Create a vector of booleans called NAME with COUNT elements and + BYTESIZE bytes in total. */ + #define VECTOR_BOOL_MODE(NAME, COUNT, BYTESIZE) \ +- make_vector_bool_mode (#NAME, COUNT, BYTESIZE, __FILE__, __LINE__) ++ make_vector_bool_mode (#NAME, COUNT, BYTESIZE, __FILE__, 0) + static void ATTRIBUTE_UNUSED + make_vector_bool_mode (const char *name, unsigned int count, + unsigned int bytesize, const char *file, +@@ -563,7 +563,7 @@ make_vector_bool_mode (const char *name, unsigned int count, + /* Input. 
*/ + + #define _SPECIAL_MODE(C, N) \ +- make_special_mode (MODE_##C, #N, __FILE__, __LINE__) ++ make_special_mode (MODE_##C, #N, __FILE__, 0) + #define RANDOM_MODE(N) _SPECIAL_MODE (RANDOM, N) + #define CC_MODE(N) _SPECIAL_MODE (CC, N) + +@@ -576,7 +576,7 @@ make_special_mode (enum mode_class cl, const char *name, + + #define INT_MODE(N, Y) FRACTIONAL_INT_MODE (N, -1U, Y) + #define FRACTIONAL_INT_MODE(N, B, Y) \ +- make_int_mode (#N, B, Y, __FILE__, __LINE__) ++ make_int_mode (#N, B, Y, __FILE__, 0) + + static void + make_int_mode (const char *name, +@@ -589,16 +589,16 @@ make_int_mode (const char *name, + } + + #define FRACT_MODE(N, Y, F) \ +- make_fixed_point_mode (MODE_FRACT, #N, Y, 0, F, __FILE__, __LINE__) ++ make_fixed_point_mode (MODE_FRACT, #N, Y, 0, F, __FILE__, 0) + + #define UFRACT_MODE(N, Y, F) \ +- make_fixed_point_mode (MODE_UFRACT, #N, Y, 0, F, __FILE__, __LINE__) ++ make_fixed_point_mode (MODE_UFRACT, #N, Y, 0, F, __FILE__, 0) + + #define ACCUM_MODE(N, Y, I, F) \ +- make_fixed_point_mode (MODE_ACCUM, #N, Y, I, F, __FILE__, __LINE__) ++ make_fixed_point_mode (MODE_ACCUM, #N, Y, I, F, __FILE__, 0) + + #define UACCUM_MODE(N, Y, I, F) \ +- make_fixed_point_mode (MODE_UACCUM, #N, Y, I, F, __FILE__, __LINE__) ++ make_fixed_point_mode (MODE_UACCUM, #N, Y, I, F, __FILE__, 0) + + /* Create a fixed-point mode by setting CL, NAME, BYTESIZE, IBIT, FBIT, + FILE, and LINE. */ +@@ -619,7 +619,7 @@ make_fixed_point_mode (enum mode_class cl, + + #define FLOAT_MODE(N, Y, F) FRACTIONAL_FLOAT_MODE (N, -1U, Y, F) + #define FRACTIONAL_FLOAT_MODE(N, B, Y, F) \ +- make_float_mode (#N, B, Y, #F, __FILE__, __LINE__) ++ make_float_mode (#N, B, Y, #F, __FILE__, 0) + + static void + make_float_mode (const char *name, +@@ -636,7 +636,7 @@ make_float_mode (const char *name, + #define DECIMAL_FLOAT_MODE(N, Y, F) \ + FRACTIONAL_DECIMAL_FLOAT_MODE (N, -1U, Y, F) + #define FRACTIONAL_DECIMAL_FLOAT_MODE(N, B, Y, F) \ +- make_decimal_float_mode (#N, B, Y, #F, __FILE__, __LINE__) ++ make_decimal_float_mode (#N, B, Y, #F, __FILE__, 0) + + static void + make_decimal_float_mode (const char *name, +@@ -651,7 +651,7 @@ make_decimal_float_mode (const char *name, + } + + #define RESET_FLOAT_FORMAT(N, F) \ +- reset_float_format (#N, #F, __FILE__, __LINE__) ++ reset_float_format (#N, #F, __FILE__, 0) + static void ATTRIBUTE_UNUSED + reset_float_format (const char *name, const char *format, + const char *file, unsigned int line) +@@ -672,7 +672,7 @@ reset_float_format (const char *name, const char *format, + + /* __intN support. */ + #define INT_N(M,PREC) \ +- make_int_n (#M, PREC, __FILE__, __LINE__) ++ make_int_n (#M, PREC, __FILE__, 0) + static void ATTRIBUTE_UNUSED + make_int_n (const char *m, int bitsize, + const char *file, unsigned int line) +@@ -701,7 +701,7 @@ make_int_n (const char *m, int bitsize, + /* Partial integer modes are specified by relation to a full integer + mode. */ + #define PARTIAL_INT_MODE(M,PREC,NAME) \ +- make_partial_integer_mode (#M, #NAME, PREC, __FILE__, __LINE__) ++ make_partial_integer_mode (#M, #NAME, PREC, __FILE__, 0) + static void ATTRIBUTE_UNUSED + make_partial_integer_mode (const char *base, const char *name, + unsigned int precision, +@@ -728,7 +728,7 @@ make_partial_integer_mode (const char *base, const char *name, + /* A single vector mode can be specified by naming its component + mode and the number of components. 
*/ + #define VECTOR_MODE(C, M, N) \ +- make_vector_mode (MODE_##C, #M, N, __FILE__, __LINE__); ++ make_vector_mode (MODE_##C, #M, N, __FILE__, 0); + static void ATTRIBUTE_UNUSED + make_vector_mode (enum mode_class bclass, + const char *base, +@@ -771,7 +771,7 @@ make_vector_mode (enum mode_class bclass, + + /* Adjustability. */ + #define _ADD_ADJUST(A, M, X, C1, C2) \ +- new_adjust (#M, &adj_##A, #A, #X, MODE_##C1, MODE_##C2, __FILE__, __LINE__) ++ new_adjust (#M, &adj_##A, #A, #X, MODE_##C1, MODE_##C2, __FILE__, 0) + + #define ADJUST_NUNITS(M, X) _ADD_ADJUST (nunits, M, X, RANDOM, RANDOM) + #define ADJUST_BYTESIZE(M, X) _ADD_ADJUST (bytesize, M, X, RANDOM, RANDOM) diff --git a/poky/meta/recipes-devtools/gcc/gcc/0036-mingw32-Enable-operation_not_supported.patch b/poky/meta/recipes-devtools/gcc/gcc/0036-mingw32-Enable-operation_not_supported.patch new file mode 100644 index 000000000..de82a3a53 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0036-mingw32-Enable-operation_not_supported.patch @@ -0,0 +1,26 @@ +From 6f87a095f0e696bec07a50df789c9db8bdbca43d Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 12 May 2020 10:39:09 -0700 +Subject: [PATCH] mingw32: Enable operation_not_supported + +Fixes nativesdk build errors on mingw32 gcc-runtime + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + libstdc++-v3/config/os/mingw32/error_constants.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/libstdc++-v3/config/os/mingw32/error_constants.h b/libstdc++-v3/config/os/mingw32/error_constants.h +index 68ac72a78fb..71cd5815b81 100644 +--- a/libstdc++-v3/config/os/mingw32/error_constants.h ++++ b/libstdc++-v3/config/os/mingw32/error_constants.h +@@ -107,7 +107,7 @@ _GLIBCXX_BEGIN_NAMESPACE_VERSION + #ifdef _GLIBCXX_HAVE_EPERM + operation_not_permitted = EPERM, + #endif +-// operation_not_supported = EOPNOTSUPP, ++ operation_not_supported = EOPNOTSUPP, + #ifdef _GLIBCXX_HAVE_EWOULDBLOCK + operation_would_block = EWOULDBLOCK, + #endif diff --git a/poky/meta/recipes-devtools/gcc/gcc/0037-libatomic-Do-not-enforce-march-on-aarch64.patch b/poky/meta/recipes-devtools/gcc/gcc/0037-libatomic-Do-not-enforce-march-on-aarch64.patch new file mode 100644 index 000000000..3946acea1 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc/0037-libatomic-Do-not-enforce-march-on-aarch64.patch @@ -0,0 +1,42 @@ +From 38d262bfe7c0c894c364dc6e4dc7971e78a73974 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 13 May 2020 15:10:38 -0700 +Subject: [PATCH] libatomic: Do not enforce march on aarch64 + +OE passes the right options via gcc compiler cmdline via TUNE_CCARGS +this can conflict between -mcpu settings and -march setting here, since +-mcpu will translate into an appropriate -march, lets depend on that +instead of setting it explicitly + +Upstream-Status: Inappropriate [OE-Specific] + +Signed-off-by: Khem Raj +--- + libatomic/Makefile.am | 1 - + libatomic/Makefile.in | 1 - + 2 files changed, 2 deletions(-) + +diff --git a/libatomic/Makefile.am b/libatomic/Makefile.am +index 133fbbca77e..ac1ca645876 100644 +--- a/libatomic/Makefile.am ++++ b/libatomic/Makefile.am +@@ -125,7 +125,6 @@ libatomic_la_LIBADD = $(foreach s,$(SIZES),$(addsuffix _$(s)_.lo,$(SIZEOBJS))) + ## On a target-specific basis, include alternates to be selected by IFUNC. 
+ if HAVE_IFUNC + if ARCH_AARCH64_LINUX +-IFUNC_OPTIONS = -march=armv8-a+lse + libatomic_la_LIBADD += $(foreach s,$(SIZES),$(addsuffix _$(s)_1_.lo,$(SIZEOBJS))) + endif + if ARCH_ARM_LINUX +diff --git a/libatomic/Makefile.in b/libatomic/Makefile.in +index a51807e95c9..97df2d7ff03 100644 +--- a/libatomic/Makefile.in ++++ b/libatomic/Makefile.in +@@ -431,7 +431,6 @@ M_SRC = $(firstword $(filter %/$(M_FILE), $(all_c_files))) + libatomic_la_LIBADD = $(foreach s,$(SIZES),$(addsuffix \ + _$(s)_.lo,$(SIZEOBJS))) $(am__append_1) $(am__append_2) \ + $(am__append_3) $(am__append_4) +-@ARCH_AARCH64_LINUX_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=armv8-a+lse + @ARCH_ARM_LINUX_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=armv7-a+fp -DHAVE_KERNEL64 + @ARCH_I386_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -march=i586 + @ARCH_X86_64_TRUE@@HAVE_IFUNC_TRUE@IFUNC_OPTIONS = -mcx16 diff --git a/poky/meta/recipes-devtools/gcc/gcc_10.1.bb b/poky/meta/recipes-devtools/gcc/gcc_10.1.bb deleted file mode 100644 index 7d9359058..000000000 --- a/poky/meta/recipes-devtools/gcc/gcc_10.1.bb +++ /dev/null @@ -1,14 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require gcc-target.inc - -# Building with thumb enabled on armv4t armv5t fails with -# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7438:(.text.unlikely+0x2fa): relocation truncated to fit: R_ARM_THM_CALL against symbol `fancy_abort(char const*, int, char const*)' defined in .glue_7 section in linker stubs -# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7442:(.text.unlikely+0x318): additional relocation overflows omitted from the output -ARM_INSTRUCTION_SET_armv4 = "arm" -ARM_INSTRUCTION_SET_armv5 = "arm" - -ARMFPARCHEXT_armv6 = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" -ARMFPARCHEXT_armv7a = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" -ARMFPARCHEXT_armv7ve = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" - -BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-devtools/gcc/gcc_10.2.bb b/poky/meta/recipes-devtools/gcc/gcc_10.2.bb new file mode 100644 index 000000000..7d9359058 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/gcc_10.2.bb @@ -0,0 +1,14 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require gcc-target.inc + +# Building with thumb enabled on armv4t armv5t fails with +# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7438:(.text.unlikely+0x2fa): relocation truncated to fit: R_ARM_THM_CALL against symbol `fancy_abort(char const*, int, char const*)' defined in .glue_7 section in linker stubs +# | gcc-4.8.1-r0/gcc-4.8.1/gcc/cp/decl.c:7442:(.text.unlikely+0x318): additional relocation overflows omitted from the output +ARM_INSTRUCTION_SET_armv4 = "arm" +ARM_INSTRUCTION_SET_armv5 = "arm" + +ARMFPARCHEXT_armv6 = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" +ARMFPARCHEXT_armv7a = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" +ARMFPARCHEXT_armv7ve = "${@'+fp' if d.getVar('TARGET_FPU') == 'hard' else ''}" + +BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-devtools/gcc/libgcc-initial_10.1.bb b/poky/meta/recipes-devtools/gcc/libgcc-initial_10.1.bb deleted file mode 100644 index 0c698c26e..000000000 --- a/poky/meta/recipes-devtools/gcc/libgcc-initial_10.1.bb +++ /dev/null @@ -1,5 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require libgcc-initial.inc - -# Building with thumb enabled on armv6t fails -ARM_INSTRUCTION_SET_armv6 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/libgcc-initial_10.2.bb b/poky/meta/recipes-devtools/gcc/libgcc-initial_10.2.bb new file mode 100644 index 000000000..0c698c26e 
--- /dev/null +++ b/poky/meta/recipes-devtools/gcc/libgcc-initial_10.2.bb @@ -0,0 +1,5 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require libgcc-initial.inc + +# Building with thumb enabled on armv6t fails +ARM_INSTRUCTION_SET_armv6 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/libgcc_10.1.bb b/poky/meta/recipes-devtools/gcc/libgcc_10.1.bb deleted file mode 100644 index ea210a113..000000000 --- a/poky/meta/recipes-devtools/gcc/libgcc_10.1.bb +++ /dev/null @@ -1,5 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require libgcc.inc - -# Building with thumb enabled on armv6t fails -ARM_INSTRUCTION_SET_armv6 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/libgcc_10.2.bb b/poky/meta/recipes-devtools/gcc/libgcc_10.2.bb new file mode 100644 index 000000000..ea210a113 --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/libgcc_10.2.bb @@ -0,0 +1,5 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require libgcc.inc + +# Building with thumb enabled on armv6t fails +ARM_INSTRUCTION_SET_armv6 = "arm" diff --git a/poky/meta/recipes-devtools/gcc/libgfortran_10.1.bb b/poky/meta/recipes-devtools/gcc/libgfortran_10.1.bb deleted file mode 100644 index 71dd8b4bd..000000000 --- a/poky/meta/recipes-devtools/gcc/libgfortran_10.1.bb +++ /dev/null @@ -1,3 +0,0 @@ -require recipes-devtools/gcc/gcc-${PV}.inc -require libgfortran.inc - diff --git a/poky/meta/recipes-devtools/gcc/libgfortran_10.2.bb b/poky/meta/recipes-devtools/gcc/libgfortran_10.2.bb new file mode 100644 index 000000000..71dd8b4bd --- /dev/null +++ b/poky/meta/recipes-devtools/gcc/libgfortran_10.2.bb @@ -0,0 +1,3 @@ +require recipes-devtools/gcc/gcc-${PV}.inc +require libgfortran.inc + diff --git a/poky/meta/recipes-devtools/git/git_2.27.0.bb b/poky/meta/recipes-devtools/git/git_2.27.0.bb deleted file mode 100644 index 8022659ad..000000000 --- a/poky/meta/recipes-devtools/git/git_2.27.0.bb +++ /dev/null @@ -1,10 +0,0 @@ -require git.inc - -EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \ - ac_cv_fread_reads_directories=${ac_cv_fread_reads_directories=yes} \ - " -EXTRA_OEMAKE += "NO_GETTEXT=1" - -SRC_URI[tarball.sha256sum] = "77ded85cbe42b1ffdc2578b460a1ef5d23bcbc6683eabcafbb0d394dffe2e787" -SRC_URI[manpages.sha256sum] = "414e4b17133e54d846f6bfa2479f9757c50e16c013eb76167a492ae5409b8947" - diff --git a/poky/meta/recipes-devtools/git/git_2.28.0.bb b/poky/meta/recipes-devtools/git/git_2.28.0.bb new file mode 100644 index 000000000..e90787d06 --- /dev/null +++ b/poky/meta/recipes-devtools/git/git_2.28.0.bb @@ -0,0 +1,9 @@ +require git.inc + +EXTRA_OECONF += "ac_cv_snprintf_returns_bogus=no \ + ac_cv_fread_reads_directories=${ac_cv_fread_reads_directories=yes} \ + " +EXTRA_OEMAKE += "NO_GETTEXT=1" + +SRC_URI[tarball.sha256sum] = "f914c60a874d466c1e18467c864a910dd4ea22281ba6d4d58077cb0c3f115170" +SRC_URI[manpages.sha256sum] = "3cfca28a88d5b8112ea42322b797a500a14d0acddea391aed0462aff1ab11bf7" diff --git a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb index 54eb5e48a..200b1f185 100644 --- a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb +++ b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb @@ -8,8 +8,8 @@ DEPENDS_class-native = "hostperl-runtime-native" INHIBIT_DEFAULT_DEPS = "1" -SRCREV = "696cd4a4eab1ee9fefbb7e38dbab291d741d0c5a" -PV = "20200621+git${SRCPV}" +SRCREV = "2593751ef276497e312d7c4ce7fd049614c7bf80" +PV = "20200721+git${SRCPV}" SRC_URI = "git://git.savannah.gnu.org/config.git \ file://gnu-configize.in" diff --git 
a/poky/meta/recipes-devtools/go/go-1.14.inc b/poky/meta/recipes-devtools/go/go-1.14.inc index 105011695..8f8ed89de 100644 --- a/poky/meta/recipes-devtools/go/go-1.14.inc +++ b/poky/meta/recipes-devtools/go/go-1.14.inc @@ -1,7 +1,7 @@ require go-common.inc GO_BASEVERSION = "1.14" -GO_MINOR = ".4" +GO_MINOR = ".7" PV .= "${GO_MINOR}" FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:" @@ -18,4 +18,4 @@ SRC_URI += "\ file://0008-use-GOBUILDMODE-to-set-buildmode.patch \ " SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch" -SRC_URI[main.sha256sum] = "7011af3bbc2ac108d1b82ea8abb87b2e63f78844f0259be20cde4d42c5c40584" +SRC_URI[main.sha256sum] = "064392433563660c73186991c0a315787688e7c38a561e26647686f89b6c30e3" diff --git a/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb b/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb deleted file mode 100644 index 72c988944..000000000 --- a/poky/meta/recipes-devtools/go/go-binary-native_1.14.4.bb +++ /dev/null @@ -1,46 +0,0 @@ -# This recipe is for bootstrapping our go-cross from a prebuilt binary of Go from golang.org. - -SUMMARY = "Go programming language compiler (upstream binary for bootstrap)" -HOMEPAGE = " http://golang.org/" -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" - -PROVIDES = "go-native" - -SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz;name=go_${BUILD_GOTUPLE}" -SRC_URI[go_linux_amd64.sha256sum] = "aed845e4185a0b2a3c3d5e1d0a35491702c55889192bb9c30e67a3de6849c067" -SRC_URI[go_linux_arm64.sha256sum] = "05dc46ada4e23a1f58e72349f7c366aae2e9c7a7f1e7653095538bc5bba5e077" - -UPSTREAM_CHECK_URI = "https://golang.org/dl/" -UPSTREAM_CHECK_REGEX = "go(?P\d+(\.\d+)+)\.linux" - -S = "${WORKDIR}/go" - -inherit goarch native - -do_compile() { - : -} - -make_wrapper() { - rm -f ${D}${bindir}/$1 - cat <${D}${bindir}/$1 -#!/bin/bash -here=\`dirname \$0\` -export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" -\$here/../lib/go/bin/$1 "\$@" -END - chmod +x ${D}${bindir}/$1 -} - -do_install() { - find ${S} -depth -type d -name testdata -exec rm -rf {} + - - install -d ${D}${bindir} ${D}${libdir}/go - cp --preserve=mode,timestamps -R ${S}/ ${D}${libdir}/ - - for f in ${S}/bin/* - do - make_wrapper `basename $f` - done -} diff --git a/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb b/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb new file mode 100644 index 000000000..3452ca46d --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb @@ -0,0 +1,46 @@ +# This recipe is for bootstrapping our go-cross from a prebuilt binary of Go from golang.org. 
+ +SUMMARY = "Go programming language compiler (upstream binary for bootstrap)" +HOMEPAGE = " http://golang.org/" +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" + +PROVIDES = "go-native" + +SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz;name=go_${BUILD_GOTUPLE}" +SRC_URI[go_linux_amd64.sha256sum] = "4a7fa60f323ee1416a4b1425aefc37ea359e9d64df19c326a58953a97ad41ea5" +SRC_URI[go_linux_arm64.sha256sum] = "fe5b6f6e441f3cb7b53ebf1a010bbebcb720ac98124984cfe2e51d72b8a58c71" + +UPSTREAM_CHECK_URI = "https://golang.org/dl/" +UPSTREAM_CHECK_REGEX = "go(?P\d+(\.\d+)+)\.linux" + +S = "${WORKDIR}/go" + +inherit goarch native + +do_compile() { + : +} + +make_wrapper() { + rm -f ${D}${bindir}/$1 + cat <${D}${bindir}/$1 +#!/bin/bash +here=\`dirname \$0\` +export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" +\$here/../lib/go/bin/$1 "\$@" +END + chmod +x ${D}${bindir}/$1 +} + +do_install() { + find ${S} -depth -type d -name testdata -exec rm -rf {} + + + install -d ${D}${bindir} ${D}${libdir}/go + cp --preserve=mode,timestamps -R ${S}/ ${D}${libdir}/ + + for f in ${S}/bin/* + do + make_wrapper `basename $f` + done +} diff --git a/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch b/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch deleted file mode 100644 index a45cfb61b..000000000 --- a/poky/meta/recipes-devtools/json-c/json-c/CVE-2020-12762.patch +++ /dev/null @@ -1,160 +0,0 @@ -From 099016b7e8d70a6d5dd814e788bba08d33d48426 Mon Sep 17 00:00:00 2001 -From: Tobias Stoeckmann -Date: Mon, 4 May 2020 19:41:16 +0200 -Subject: [PATCH 1/3] Protect array_list_del_idx against size_t overflow. - -If the assignment of stop overflows due to idx and count being -larger than SIZE_T_MAX in sum, out of boundary access could happen. - -It takes invalid usage of this function for this to happen, but -I decided to add this check so array_list_del_idx is as safe against -bad usage as the other arraylist functions. - -Upstream-Status: Backport [https://github.com/json-c/json-c/commit/31243e4d1204ef78be34b0fcae73221eee6b83be] -CVE: CVE-2020-12762 -Signed-off-by: Chee Yang Lee - ---- - arraylist.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/arraylist.c b/arraylist.c -index 12ad8af6d3..e5524aca75 100644 ---- a/arraylist.c -+++ b/arraylist.c -@@ -136,6 +136,9 @@ int array_list_del_idx(struct array_list *arr, size_t idx, size_t count) - { - size_t i, stop; - -+ /* Avoid overflow in calculation with large indices. */ -+ if (idx > SIZE_T_MAX - count) -+ return -1; - stop = idx + count; - if (idx >= arr->length || stop > arr->length) - return -1; - -From 77d935b7ae7871a1940cd827e850e6063044ec45 Mon Sep 17 00:00:00 2001 -From: Tobias Stoeckmann -Date: Mon, 4 May 2020 19:46:45 +0200 -Subject: [PATCH 2/3] Prevent division by zero in linkhash. - -If a linkhash with a size of zero is created, then modulo operations -are prone to division by zero operations. - -Purely protective measure against bad usage. ---- - linkhash.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/linkhash.c b/linkhash.c -index 7ea58c0abf..f05cc38030 100644 ---- a/linkhash.c -+++ b/linkhash.c -@@ -12,6 +12,7 @@ - - #include "config.h" - -+#include - #include - #include - #include -@@ -499,6 +500,8 @@ struct lh_table *lh_table_new(int size, lh_entry_free_fn *free_fn, lh_hash_fn *h - int i; - struct lh_table *t; - -+ /* Allocate space for elements to avoid divisions by zero. 
*/ -+ assert(size > 0); - t = (struct lh_table *)calloc(1, sizeof(struct lh_table)); - if (!t) - return NULL; - -From d07b91014986900a3a75f306d302e13e005e9d67 Mon Sep 17 00:00:00 2001 -From: Tobias Stoeckmann -Date: Mon, 4 May 2020 19:47:25 +0200 -Subject: [PATCH 3/3] Fix integer overflows. - -The data structures linkhash and printbuf are limited to 2 GB in size -due to a signed integer being used to track their current size. - -If too much data is added, then size variable can overflow, which is -an undefined behaviour in C programming language. - -Assuming that a signed int overflow just leads to a negative value, -like it happens on many sytems (Linux i686/amd64 with gcc), then -printbuf is vulnerable to an out of boundary write on 64 bit systems. ---- - linkhash.c | 7 +++++-- - printbuf.c | 19 ++++++++++++++++--- - 2 files changed, 21 insertions(+), 5 deletions(-) - -diff --git a/linkhash.c b/linkhash.c -index f05cc38030..51e90b13a2 100644 ---- a/linkhash.c -+++ b/linkhash.c -@@ -580,9 +580,12 @@ int lh_table_insert_w_hash(struct lh_table *t, const void *k, const void *v, con - { - unsigned long n; - -- if (t->count >= t->size * LH_LOAD_FACTOR) -- if (lh_table_resize(t, t->size * 2) != 0) -+ if (t->count >= t->size * LH_LOAD_FACTOR) { -+ /* Avoid signed integer overflow with large tables. */ -+ int new_size = INT_MAX / 2 < t->size ? t->size * 2 : INT_MAX; -+ if (t->size == INT_MAX || lh_table_resize(t, new_size) != 0) - return -1; -+ } - - n = h % t->size; - -diff --git a/printbuf.c b/printbuf.c -index 976c12dde5..00822fac4f 100644 ---- a/printbuf.c -+++ b/printbuf.c -@@ -15,6 +15,7 @@ - - #include "config.h" - -+#include - #include - #include - #include -@@ -65,10 +66,16 @@ static int printbuf_extend(struct printbuf *p, int min_size) - - if (p->size >= min_size) - return 0; -- -- new_size = p->size * 2; -- if (new_size < min_size + 8) -+ /* Prevent signed integer overflows with large buffers. */ -+ if (min_size > INT_MAX - 8) -+ return -1; -+ if (p->size > INT_MAX / 2) - new_size = min_size + 8; -+ else { -+ new_size = p->size * 2; -+ if (new_size < min_size + 8) -+ new_size = min_size + 8; -+ } - #ifdef PRINTBUF_DEBUG - MC_DEBUG("printbuf_memappend: realloc " - "bpos=%d min_size=%d old_size=%d new_size=%d\n", -@@ -83,6 +90,9 @@ static int printbuf_extend(struct printbuf *p, int min_size) - - int printbuf_memappend(struct printbuf *p, const char *buf, int size) - { -+ /* Prevent signed integer overflows with large buffers. */ -+ if (size > INT_MAX - p->bpos - 1) -+ return -1; - if (p->size <= p->bpos + size + 1) - { - if (printbuf_extend(p, p->bpos + size + 1) < 0) -@@ -100,6 +110,9 @@ int printbuf_memset(struct printbuf *pb, int offset, int charvalue, int len) - - if (offset == -1) - offset = pb->bpos; -+ /* Prevent signed integer overflows with large buffers. */ -+ if (len > INT_MAX - offset) -+ return -1; - size_needed = offset + len; - if (pb->size < size_needed) - { diff --git a/poky/meta/recipes-devtools/json-c/json-c_0.14.bb b/poky/meta/recipes-devtools/json-c/json-c_0.14.bb deleted file mode 100644 index 1d501d129..000000000 --- a/poky/meta/recipes-devtools/json-c/json-c_0.14.bb +++ /dev/null @@ -1,20 +0,0 @@ -SUMMARY = "C bindings for apps which will manipulate JSON data" -DESCRIPTION = "JSON-C implements a reference counting object model that allows you to easily construct JSON objects in C." 
-HOMEPAGE = "https://github.com/json-c/json-c/wiki" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=de54b60fbbc35123ba193fea8ee216f2" - -SRC_URI = "https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz \ - file://CVE-2020-12762.patch \ -" - -SRC_URI[sha256sum] = "b377de08c9b23ca3b37d9a9828107dff1de5ce208ff4ebb35005a794f30c6870" - -UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" -UPSTREAM_CHECK_REGEX = "json-c-(?P\d+(\.\d+)+)-\d+" - -RPROVIDES_${PN} = "libjson" - -inherit cmake - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/json-c/json-c_0.15.bb b/poky/meta/recipes-devtools/json-c/json-c_0.15.bb new file mode 100644 index 000000000..2968590dd --- /dev/null +++ b/poky/meta/recipes-devtools/json-c/json-c_0.15.bb @@ -0,0 +1,18 @@ +SUMMARY = "C bindings for apps which will manipulate JSON data" +DESCRIPTION = "JSON-C implements a reference counting object model that allows you to easily construct JSON objects in C." +HOMEPAGE = "https://github.com/json-c/json-c/wiki" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=de54b60fbbc35123ba193fea8ee216f2" + +SRC_URI = "https://s3.amazonaws.com/json-c_releases/releases/${BP}.tar.gz" + +SRC_URI[sha256sum] = "b8d80a1ddb718b3ba7492916237bbf86609e9709fb007e7f7d4322f02341a4c6" + +UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" +UPSTREAM_CHECK_REGEX = "json-c-(?P\d+(\.\d+)+)-\d+" + +RPROVIDES_${PN} = "libjson" + +inherit cmake + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/libdnf/libdnf/enable_test_data_dir_set.patch b/poky/meta/recipes-devtools/libdnf/libdnf/enable_test_data_dir_set.patch new file mode 100644 index 000000000..e3784cc9e --- /dev/null +++ b/poky/meta/recipes-devtools/libdnf/libdnf/enable_test_data_dir_set.patch @@ -0,0 +1,26 @@ +libdnf: allow reproducible binary builds + +Use a dummy directory for test data if not built WITH_TESTS. Allow for overriding +TESTDATADIR, since the default is guaranteed to be wrong for target builds. 
+ +Upstream-Status: Pending + +Signed-off-by: Joe Slater + + +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -133,7 +133,12 @@ add_definitions(-DG_LOG_DOMAIN=\\"libdnf + add_definitions(-D_FILE_OFFSET_BITS=64) + + # tests +-add_definitions(-DTESTDATADIR=\\"${CMAKE_SOURCE_DIR}/data/tests\\") ++if(NOT WITH_TESTS) ++ set(TEST_DATA_DIR "/notests") ++elseif(NOT DEFINED TEST_DATA_DIR) ++ set(TEST_DATA_DIR "${CMAKE_SOURCE_DIR}/data/tests") ++endif() ++add_definitions(-DTESTDATADIR=\\"${TEST_DATA_DIR}\\") + + # librhsm + if(ENABLE_RHSM_SUPPORT) diff --git a/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb b/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb index 947b2f234..37991e6d8 100644 --- a/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb +++ b/poky/meta/recipes-devtools/libdnf/libdnf_0.48.0.bb @@ -8,6 +8,7 @@ SRC_URI = "git://github.com/rpm-software-management/libdnf;branch=dnf-4-master \ file://0001-Get-parameters-for-both-libsolv-and-libsolvext-libdn.patch \ file://0001-Add-WITH_TESTS-option.patch \ file://0001-Look-fo-sphinx-only-if-documentation-is-actually-ena.patch \ + file://enable_test_data_dir_set.patch \ " SRCREV = "46a28d0cf09277fffc11392e5e362a2eda0d53a8" diff --git a/poky/meta/recipes-devtools/llvm/llvm_git.bb b/poky/meta/recipes-devtools/llvm/llvm_git.bb index d24ed761b..4c2d49031 100644 --- a/poky/meta/recipes-devtools/llvm/llvm_git.bb +++ b/poky/meta/recipes-devtools/llvm/llvm_git.bb @@ -19,7 +19,7 @@ inherit cmake pkgconfig PROVIDES += "llvm${PV}" -MAJOR_VERSION = "9" +MAJOR_VERSION = "10" MINOR_VERSION = "0" PATCH_VERSION = "1" @@ -29,7 +29,7 @@ LLVM_RELEASE = "${PV}" LLVM_DIR = "llvm${LLVM_RELEASE}" BRANCH = "release/${MAJOR_VERSION}.x" -SRCREV = "c1a0a213378a458fbea1a5c77b315c7dce08fd05" +SRCREV = "ef32c611aa214dea855364efd7ba451ec5ec3f74" SRC_URI = "git://github.com/llvm/llvm-project.git;branch=${BRANCH} \ file://0006-llvm-TargetLibraryInfo-Undefine-libc-functions-if-th.patch;striplevel=2 \ file://0007-llvm-allow-env-override-of-exe-path.patch;striplevel=2 \ diff --git a/poky/meta/recipes-devtools/meson/meson.inc b/poky/meta/recipes-devtools/meson/meson.inc index ffa17b306..607093a15 100644 --- a/poky/meta/recipes-devtools/meson/meson.inc +++ b/poky/meta/recipes-devtools/meson/meson.inc @@ -16,7 +16,7 @@ SRC_URI = "https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${P file://0001-modules-python.py-do-not-substitute-python-s-install.patch \ file://0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch \ " -SRC_URI[sha256sum] = "f2bdf4cf0694e696b48261cdd14380fb1d0fe33d24744d8b2df0c12f33ebb662" +SRC_URI[sha256sum] = "0a1ae2bfe2ae14ac47593537f93290fb79e9b775c55b4c53c282bc3ca3745b35" SRC_URI_append_class-native = " \ file://0001-Make-CPU-family-warnings-fatal.patch \ diff --git a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch index 39b1af52e..623e32957 100644 --- a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch +++ b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch @@ -1,7 +1,7 @@ -From 62c415eedb62905de76e2e0bbd156a947705cab2 Mon Sep 17 00:00:00 2001 +From 5624b5835af747b601780ad14646f9c1fb854931 Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Tue, 3 Jul 2018 13:59:09 +0100 -Subject: [PATCH] Make CPU family warnings fatal +Subject: [PATCH 1/2] Make CPU family warnings fatal Upstream-Status: Inappropriate [OE specific] Signed-off-by: Ross Burton @@ -12,23 +12,23 @@ 
Signed-off-by: Ross Burton 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/mesonbuild/envconfig.py b/mesonbuild/envconfig.py -index 17058df..18bbf86 100644 +index 219b62e..d1be65b 100644 --- a/mesonbuild/envconfig.py +++ b/mesonbuild/envconfig.py -@@ -225,7 +225,7 @@ class MachineInfo: +@@ -199,7 +199,7 @@ class MachineInfo: cpu_family = literal['cpu_family'] if cpu_family not in known_cpu_families: -- mlog.warning('Unknown CPU family %s, please report this at https://github.com/mesonbuild/meson/issues/new' % cpu_family) -+ raise EnvironmentException('Unknown CPU family %s, see https://wiki.yoctoproject.org/wiki/Meson/UnknownCPU for directions.' % cpu_family) +- mlog.warning('Unknown CPU family {}, please report this at https://github.com/mesonbuild/meson/issues/new'.format(cpu_family)) ++ raise EnvironmentException('Unknown CPU family {}, see https://wiki.yoctoproject.org/wiki/Meson/UnknownCPU for directions.'.format(cpu_family)) endian = literal['endian'] if endian not in ('little', 'big'): diff --git a/mesonbuild/environment.py b/mesonbuild/environment.py -index a2f78a4..59fcb07 100644 +index bf09a88..8eabe78 100644 --- a/mesonbuild/environment.py +++ b/mesonbuild/environment.py -@@ -364,9 +364,7 @@ def detect_cpu_family(compilers: CompilersDict) -> str: +@@ -375,9 +375,7 @@ def detect_cpu_family(compilers: CompilersDict) -> str: trial = 'parisc' if trial not in known_cpu_families: @@ -39,3 +39,6 @@ index a2f78a4..59fcb07 100644 return trial +-- +2.24.0 + diff --git a/poky/meta/recipes-devtools/meson/meson/0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch b/poky/meta/recipes-devtools/meson/meson/0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch index 35c18f078..a1f8422d4 100644 --- a/poky/meta/recipes-devtools/meson/meson/0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch +++ b/poky/meta/recipes-devtools/meson/meson/0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch @@ -1,4 +1,4 @@ -From 7be634fa9705d0367f48a91305f9acb642ff0a11 Mon Sep 17 00:00:00 2001 +From 64aa6718c290e150dafd8da83f31cb08af00af0e Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Wed, 27 May 2020 16:43:05 +0000 Subject: [PATCH] gnome.py: prefix g-i paths with PKG_CONFIG_SYSROOT_DIR @@ -9,33 +9,29 @@ determine when a custom variable is a path) Upstream-Status: Pending Signed-off-by: Alexander Kanavin + --- - mesonbuild/modules/gnome.py | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) + mesonbuild/modules/gnome.py | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mesonbuild/modules/gnome.py b/mesonbuild/modules/gnome.py -index 89d5d5d..d75f2e5 100644 +index 52016f4..2b72ee4 100644 --- a/mesonbuild/modules/gnome.py +++ b/mesonbuild/modules/gnome.py -@@ -739,17 +739,17 @@ class GnomeModule(ExtensionModule): - if giscanner.found(): - giscanner_path = giscanner.get_command()[0] - if not any(x in giscanner_path for x in gi_util_dirs_check): -- giscanner = self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {}) -+ giscanner = os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {}) - else: -- giscanner = self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {}) -+ giscanner = os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {}) - - gicompiler = self.interpreter.find_program_impl('g-ir-compiler') - if gicompiler.found(): - gicompiler_path = gicompiler.get_command()[0] - if not any(x in gicompiler_path for x in gi_util_dirs_check): -- gicompiler 
= self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {}) -+ gicompiler = os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {}) - else: -- gicompiler = self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {}) -+ gicompiler = os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {}) - - ns = kwargs.pop('namespace') - nsversion = kwargs.pop('nsversion') +@@ -410,14 +410,14 @@ class GnomeModule(ExtensionModule): + if giscanner is not None: + self.giscanner = ExternalProgram.from_entry('g-ir-scanner', giscanner) + elif self.gir_dep.type_name == 'pkgconfig': +- self.giscanner = ExternalProgram('g_ir_scanner', self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {})) ++ self.giscanner = ExternalProgram('g_ir_scanner', os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_scanner', {})) + else: + self.giscanner = self.interpreter.find_program_impl('g-ir-scanner') + gicompiler = state.environment.lookup_binary_entry(MachineChoice.HOST, 'g-ir-compiler') + if gicompiler is not None: + self.gicompiler = ExternalProgram.from_entry('g-ir-compiler', gicompiler) + elif self.gir_dep.type_name == 'pkgconfig': +- self.gicompiler = ExternalProgram('g_ir_compiler', self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {})) ++ self.gicompiler = ExternalProgram('g_ir_compiler', os.environ['PKG_CONFIG_SYSROOT_DIR'] + self.gir_dep.get_pkgconfig_variable('g_ir_compiler', {})) + else: + self.gicompiler = self.interpreter.find_program_impl('g-ir-compiler') + return self.gir_dep, self.giscanner, self.gicompiler diff --git a/poky/meta/recipes-devtools/meson/meson/0001-modules-python.py-do-not-substitute-python-s-install.patch b/poky/meta/recipes-devtools/meson/meson/0001-modules-python.py-do-not-substitute-python-s-install.patch index a25c39227..c0ad01e9d 100644 --- a/poky/meta/recipes-devtools/meson/meson/0001-modules-python.py-do-not-substitute-python-s-install.patch +++ b/poky/meta/recipes-devtools/meson/meson/0001-modules-python.py-do-not-substitute-python-s-install.patch @@ -1,4 +1,4 @@ -From 1d178fb2928d325e339b15972890ceced863d3ec Mon Sep 17 00:00:00 2001 +From 214e559d394491b1376e4cc370f75151117a3f83 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Thu, 18 Apr 2019 17:36:11 +0200 Subject: [PATCH] modules/python.py: do not substitute python's install prefix @@ -10,15 +10,16 @@ b) shouldn't be necessary as Python's prefix ought to be correct in the first pl Upstream-Status: Pending Signed-off-by: Alexander Kanavin + --- mesonbuild/modules/python.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/mesonbuild/modules/python.py b/mesonbuild/modules/python.py -index 6e2c63b..f5a37ac 100644 +index 2f0c644..d2aa565 100644 --- a/mesonbuild/modules/python.py +++ b/mesonbuild/modules/python.py -@@ -254,7 +254,7 @@ import sysconfig +@@ -251,7 +251,7 @@ INTROSPECT_COMMAND = '''import sysconfig import json import sys @@ -27,8 +28,8 @@ index 6e2c63b..f5a37ac 100644 def links_against_libpython(): from distutils.core import Distribution, Extension -@@ -279,12 +279,11 @@ class PythonInstallation(ExternalProgramHolder): - ExternalProgramHolder.__init__(self, python) +@@ -276,12 +276,11 @@ class PythonInstallation(ExternalProgramHolder): + ExternalProgramHolder.__init__(self, python, interpreter.subproject) self.interpreter = interpreter self.subproject = self.interpreter.subproject - prefix = self.interpreter.environment.coredata.get_builtin_option('prefix') diff --git 
a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch index bb06d9924..dce463e5b 100644 --- a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch +++ b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch @@ -1,4 +1,4 @@ -From d976d5a8abd6d42edf794d2a4c211fc6697fb14c Mon Sep 17 00:00:00 2001 +From 4b283d545152092fec12b9d80219161d11002c72 Mon Sep 17 00:00:00 2001 From: Peter Kjellerstedt Date: Thu, 26 Jul 2018 16:32:49 +0200 Subject: [PATCH] Support building allarch recipes again @@ -13,7 +13,7 @@ Signed-off-by: Peter Kjellerstedt 1 file changed, 1 insertion(+) diff --git a/mesonbuild/envconfig.py b/mesonbuild/envconfig.py -index 18bbf86..e76315e 100644 +index dc20616..f54adcd 100644 --- a/mesonbuild/envconfig.py +++ b/mesonbuild/envconfig.py @@ -36,6 +36,7 @@ _T = T.TypeVar('_T') diff --git a/poky/meta/recipes-devtools/meson/meson/disable-rpath-handling.patch b/poky/meta/recipes-devtools/meson/meson/disable-rpath-handling.patch index 38e50d72f..4653a72a2 100644 --- a/poky/meta/recipes-devtools/meson/meson/disable-rpath-handling.patch +++ b/poky/meta/recipes-devtools/meson/meson/disable-rpath-handling.patch @@ -1,4 +1,4 @@ -From 3af10fa8cd4e97181288d72227dea712290fd5e6 Mon Sep 17 00:00:00 2001 +From 9e3fcf192c1ca068d310c648c311f9d850214421 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Fri, 23 Nov 2018 15:28:28 +0000 Subject: [PATCH] meson: Disable rpath stripping at install time @@ -16,17 +16,17 @@ Upstream-Status: Submitted [https://github.com/mesonbuild/meson/issues/2567] 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/mesonbuild/minstall.py b/mesonbuild/minstall.py -index ace0569..85bd8df 100644 +index 0be01fe..5406cab 100644 --- a/mesonbuild/minstall.py +++ b/mesonbuild/minstall.py -@@ -508,8 +508,11 @@ class Installer: +@@ -512,8 +512,11 @@ class Installer: if file_copied: self.did_install_something = True try: -- depfixer.fix_rpath(outname, install_rpath, final_path, +- depfixer.fix_rpath(outname, t.rpath_dirs_to_remove, install_rpath, final_path, - install_name_mappings, verbose=False) + if install_rpath: -+ depfixer.fix_rpath(outname, install_rpath, final_path, ++ depfixer.fix_rpath(outname, t.rpath_dirs_to_remove, install_rpath, final_path, + install_name_mappings, verbose=False) + else: + print("RPATH changes at install time disabled") diff --git a/poky/meta/recipes-devtools/meson/meson_0.54.3.bb b/poky/meta/recipes-devtools/meson/meson_0.54.3.bb deleted file mode 100644 index de9b905c1..000000000 --- a/poky/meta/recipes-devtools/meson/meson_0.54.3.bb +++ /dev/null @@ -1,4 +0,0 @@ -include meson.inc - -BBCLASSEXTEND = "native" - diff --git a/poky/meta/recipes-devtools/meson/meson_0.55.0.bb b/poky/meta/recipes-devtools/meson/meson_0.55.0.bb new file mode 100644 index 000000000..de9b905c1 --- /dev/null +++ b/poky/meta/recipes-devtools/meson/meson_0.55.0.bb @@ -0,0 +1,4 @@ +include meson.inc + +BBCLASSEXTEND = "native" + diff --git a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.3.bb b/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.3.bb deleted file mode 100644 index 67add2c25..000000000 --- a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.54.3.bb +++ /dev/null @@ -1,65 +0,0 @@ -include meson.inc - -inherit nativesdk -inherit siteinfo - -SRC_URI += "file://meson-setup.py \ - file://meson-wrapper" - -def meson_endian(prefix, d): - arch, os = 
d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS") - sitedata = siteinfo_data_for_machine(arch, os, d) - if "endian-little" in sitedata: - return "little" - elif "endian-big" in sitedata: - return "big" - else: - bb.fatal("Cannot determine endianism for %s-%s" % (arch, os)) - -# The cross file logic is similar but not identical to that in meson.bbclass, -# since it's generating for an SDK rather than a cross-compile. Important -# differences are: -# - We can't set vars like CC, CXX, etc. yet because they will be filled in with -# real paths by meson-setup.sh when the SDK is extracted. -# - Some overrides aren't needed, since the SDK injects paths that take care of -# them. -do_install_append() { - install -d ${D}${datadir}/meson - cat >${D}${datadir}/meson/meson.cross.template <${D}${datadir}/meson/meson.cross.template < - -From 287964d54b64bed833adba307e1d920f8fcf0cbc Mon Sep 17 00:00:00 2001 -From: Ozkan Sezer -Date: Wed, 7 Aug 2019 15:50:50 +0300 -Subject: [PATCH] fix pa_add_headers.m4 for development versions of autoconf - (bug 3392471) - -Signed-off-by: H. Peter Anvin (Intel) ---- - autoconf/m4/pa_add_headers.m4 | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/aclocal.m4 b/aclocal.m4 -index 1af2dd5d..6ce3e6c1 100644 ---- a/aclocal.m4 -+++ b/aclocal.m4 -@@ -169,7 +169,8 @@ dnl -------------------------------------------------------------------------- - AC_DEFUN(_PA_ADD_HEADER, - [AC_CHECK_HEADERS([$1],[ac_includes_default="$ac_includes_default - #include <$1>" --])]) -+]) -+]) - - AC_DEFUN(PA_ADD_HEADERS, - [m4_map_args_w([$1],[_PA_ADD_HEADER(],[)])]) diff --git a/poky/meta/recipes-devtools/nasm/nasm/0001-stdlib-Add-strlcat.patch b/poky/meta/recipes-devtools/nasm/nasm/0001-stdlib-Add-strlcat.patch index d94fd3290..0ede8a832 100644 --- a/poky/meta/recipes-devtools/nasm/nasm/0001-stdlib-Add-strlcat.patch +++ b/poky/meta/recipes-devtools/nasm/nasm/0001-stdlib-Add-strlcat.patch @@ -1,12 +1,13 @@ -From 8a204171004fa0d7d21389530c744d215e99efb0 Mon Sep 17 00:00:00 2001 +From 1c5023002bad3a5b0bbc181fdb324160beace733 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Tue, 19 Nov 2019 12:47:30 -0600 -Subject: [PATCH 1/2] stdlib: Add strlcat +Subject: [PATCH] stdlib: Add strlcat Adds strlcat which can be used to safely concatenate strings Upstream-Status: Submitted [https://bugzilla.nasm.us/show_bug.cgi?id=3392635] Signed-off-by: Joshua Watt + --- Makefile.in | 2 +- configure.ac | 2 ++ @@ -16,10 +17,10 @@ Signed-off-by: Joshua Watt create mode 100644 stdlib/strlcat.c diff --git a/Makefile.in b/Makefile.in -index 32ef3d91..ff7eb447 100644 +index bfae1f8..156dc4c 100644 --- a/Makefile.in +++ b/Makefile.in -@@ -93,7 +93,7 @@ NASM = asm/nasm.$(O) +@@ -101,7 +101,7 @@ NASM = asm/nasm.$(O) NDISASM = disasm/ndisasm.$(O) LIBOBJ = stdlib/snprintf.$(O) stdlib/vsnprintf.$(O) stdlib/strlcpy.$(O) \ @@ -27,20 +28,20 @@ index 32ef3d91..ff7eb447 100644 + stdlib/strnlen.$(O) stdlib/strrchrnul.$(O) stdlib/strlcat.$(O) \ \ nasmlib/ver.$(O) \ - nasmlib/crc64.$(O) nasmlib/malloc.$(O) nasmlib/errfile.$(O) \ + nasmlib/alloc.$(O) nasmlib/asprintf.$(O) nasmlib/errfile.$(O) \ diff --git a/configure.ac b/configure.ac -index 38b3b596..b4e88778 100644 +index 7b72769..14fd033 100644 --- a/configure.ac +++ b/configure.ac -@@ -152,6 +152,7 @@ AC_CHECK_FUNCS([vsnprintf _vsnprintf]) - AC_CHECK_FUNCS([snprintf _snprintf]) +@@ -234,6 +234,7 @@ PA_FUNC_SNPRINTF + PA_FUNC_VSNPRINTF AC_CHECK_FUNCS([strlcpy]) AC_CHECK_FUNCS([strrchrnul]) +AC_CHECK_FUNCS([strlcat]) dnl These types are POSIX-specific, and Windows 
does it differently... AC_CHECK_TYPES([struct _stati64]) -@@ -170,6 +171,7 @@ AC_CHECK_DECLS(strsep) +@@ -253,6 +254,7 @@ AC_CHECK_DECLS(strsep) AC_CHECK_DECLS(strlcpy) AC_CHECK_DECLS(strnlen) AC_CHECK_DECLS(strrchrnul) @@ -49,10 +50,10 @@ index 38b3b596..b4e88778 100644 dnl Check for missing types AC_TYPE_UINTPTR_T diff --git a/include/compiler.h b/include/compiler.h -index 4178c98e..8153d297 100644 +index b4fd3a8..7fb4821 100644 --- a/include/compiler.h +++ b/include/compiler.h -@@ -159,6 +159,10 @@ size_t strlcpy(char *, const char *, size_t); +@@ -169,6 +169,10 @@ size_t strlcpy(char *, const char *, size_t); char *strrchrnul(const char *, int); #endif @@ -65,7 +66,7 @@ index 4178c98e..8153d297 100644 # include diff --git a/stdlib/strlcat.c b/stdlib/strlcat.c new file mode 100644 -index 00000000..7084d460 +index 0000000..7084d46 --- /dev/null +++ b/stdlib/strlcat.c @@ -0,0 +1,43 @@ @@ -112,6 +113,3 @@ index 00000000..7084d460 + +#endif + --- -2.23.0 - diff --git a/poky/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch b/poky/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch index bbfae2e8a..f788e0fd4 100644 --- a/poky/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch +++ b/poky/meta/recipes-devtools/nasm/nasm/0002-Add-debug-prefix-map-option.patch @@ -1,7 +1,7 @@ -From fa677c1caf6b8192971920cf5c1aa8cb33c74605 Mon Sep 17 00:00:00 2001 +From bb4e42ad3a0cdd23a1d1797e6299c76b474867c0 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Tue, 19 Nov 2019 13:12:17 -0600 -Subject: [PATCH 2/2] Add --debug-prefix-map option +Subject: [PATCH] Add --debug-prefix-map option Adds an option to remap file prefixes in output object files. This is analogous to the "-fdebug-prefix-map" option in GCC, and allows files to @@ -9,47 +9,48 @@ be built in a reproducible manner regardless of the build directory. 
Upstream-Status: Submitted [https://bugzilla.nasm.us/show_bug.cgi?id=3392635] Signed-off-by: Joshua Watt + --- - asm/nasm.c | 28 ++++++++++++++++++++++++++-- + asm/nasm.c | 26 +++++++++++++++++++++++++- include/nasmlib.h | 9 +++++++++ nasm.txt | 4 ++++ nasmlib/filename.c | 20 ++++++++++++++++++++ output/outas86.c | 4 +++- output/outcoff.c | 4 ++-- - output/outelf.c | 8 ++++---- + output/outelf.c | 2 +- output/outieee.c | 2 +- output/outobj.c | 2 +- stdlib/strlcat.c | 2 +- test/elfdebugprefix.asm | 6 ++++++ test/performtest.pl | 12 ++++++++++-- - 12 files changed, 87 insertions(+), 14 deletions(-) + 12 files changed, 83 insertions(+), 10 deletions(-) create mode 100644 test/elfdebugprefix.asm diff --git a/asm/nasm.c b/asm/nasm.c -index 1c5a5fc5..5d45103c 100644 +index a0e1719..fc6c62e 100644 --- a/asm/nasm.c +++ b/asm/nasm.c -@@ -841,7 +841,8 @@ enum text_options { - OPT_BEFORE, +@@ -938,7 +938,8 @@ enum text_options { OPT_LIMIT, OPT_KEEP_ALL, -- OPT_NO_LINE -+ OPT_NO_LINE, + OPT_NO_LINE, +- OPT_DEBUG ++ OPT_DEBUG, + OPT_DEBUG_PREFIX_MAP }; - struct textargs { - const char *label; -@@ -866,6 +867,7 @@ static const struct textargs textopts[] = { - {"limit-", OPT_LIMIT, true, 0}, - {"keep-all", OPT_KEEP_ALL, false, 0}, - {"no-line", OPT_NO_LINE, false, 0}, + enum need_arg { + ARG_NO, +@@ -970,6 +971,7 @@ static const struct textargs textopts[] = { + {"keep-all", OPT_KEEP_ALL, ARG_NO, 0}, + {"no-line", OPT_NO_LINE, ARG_NO, 0}, + {"debug", OPT_DEBUG, ARG_MAYBE, 0}, + {"debug-prefix-map", OPT_DEBUG_PREFIX_MAP, true, 0}, - {NULL, OPT_BOGUS, false, 0} + {NULL, OPT_BOGUS, ARG_NO, 0} }; -@@ -1217,6 +1219,26 @@ static bool process_arg(char *p, char *q, int pass) - case OPT_NO_LINE: - pp_noline = true; +@@ -1332,6 +1334,26 @@ static bool process_arg(char *p, char *q, int pass) + case OPT_DEBUG: + debug_nasm = param ? strtoul(param, NULL, 10) : debug_nasm+1; break; + case OPT_DEBUG_PREFIX_MAP: { + struct debug_prefix_list *d; @@ -72,24 +73,22 @@ index 1c5a5fc5..5d45103c 100644 + } + break; case OPT_HELP: - help(0); + help(stdout); exit(0); -@@ -2010,7 +2032,9 @@ static void help(const char xopt) - " --lpostfix str append the given string to all other symbols\n" - " --keep-all output files will not be removed even if an error happens\n" - " --no-line ignore %%line directives in input\n" -- " --limit-X val set execution limit X\n"); -+ " --limit-X val set execution limit X\n" -+ " --debug-prefix-map base=dest\n" -+ " remap paths starting with 'base' to 'dest' in output files\n"); +@@ -2297,6 +2319,8 @@ static void help(FILE *out) + " -w-x disable warning x (also -Wno-x)\n" + " -w[+-]error promote all warnings to errors (also -Werror)\n" + " -w[+-]error=x promote warning x to errors (also -Werror=x)\n" ++ " --debug-prefix-map base=dest\n" ++ " remap paths starting with 'base' to 'dest' in output files\n" + , out); - for (i = 0; i <= LIMIT_MAX; i++) { - printf(" %-15s %s (default ", + fprintf(out, " %-20s %s\n", diff --git a/include/nasmlib.h b/include/nasmlib.h -index e57d0e6d..cf921547 100644 +index e9bfbcc..98fc653 100644 --- a/include/nasmlib.h +++ b/include/nasmlib.h -@@ -195,10 +195,19 @@ int64_t readstrnum(char *str, int length, bool *warn); +@@ -250,10 +250,19 @@ int64_t readstrnum(char *str, int length, bool *warn); */ int32_t seg_alloc(void); @@ -110,7 +109,7 @@ index e57d0e6d..cf921547 100644 /* * Utility macros... 
diff --git a/nasm.txt b/nasm.txt -index a28202f9..443c06b2 100644 +index cc7fa27..d3485c9 100644 --- a/nasm.txt +++ b/nasm.txt @@ -147,6 +147,10 @@ OPTIONS @@ -125,7 +124,7 @@ index a28202f9..443c06b2 100644 ------ This man page does not fully describe the syntax of *nasm*'s assembly language, diff --git a/nasmlib/filename.c b/nasmlib/filename.c -index 172ae0bc..fda2be41 100644 +index 172ae0b..fda2be4 100644 --- a/nasmlib/filename.c +++ b/nasmlib/filename.c @@ -39,6 +39,8 @@ @@ -160,10 +159,10 @@ index 172ae0bc..fda2be41 100644 + return dest; +} diff --git a/output/outas86.c b/output/outas86.c -index 3f9867b9..d5f4f966 100644 +index 54b22f8..c4a412c 100644 --- a/output/outas86.c +++ b/output/outas86.c -@@ -113,6 +113,8 @@ static void as86_sect_write(struct Section *, const uint8_t *, +@@ -110,6 +110,8 @@ static void as86_sect_write(struct Section *, const uint8_t *, static void as86_init(void) { @@ -172,7 +171,7 @@ index 3f9867b9..d5f4f966 100644 stext.data = saa_init(1L); stext.datalen = 0L; stext.head = stext.last = NULL; -@@ -134,7 +136,7 @@ static void as86_init(void) +@@ -131,7 +133,7 @@ static void as86_init(void) strslen = 0; /* as86 module name = input file minus extension */ @@ -182,10 +181,10 @@ index 3f9867b9..d5f4f966 100644 static void as86_cleanup(void) diff --git a/output/outcoff.c b/output/outcoff.c -index a2fd302c..bcf576fb 100644 +index bcd9ff3..15bfcf3 100644 --- a/output/outcoff.c +++ b/output/outcoff.c -@@ -1070,14 +1070,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value, +@@ -1095,14 +1095,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value, static void coff_write_symbols(void) { @@ -203,48 +202,23 @@ index a2fd302c..bcf576fb 100644 /* diff --git a/output/outelf.c b/output/outelf.c -index de99d076..203b5dc0 100644 +index 61af020..1292958 100644 --- a/output/outelf.c +++ b/output/outelf.c -@@ -1,5 +1,5 @@ - /* ----------------------------------------------------------------------- * -- * -+ * - * Copyright 1996-2017 The NASM Authors - All Rights Reserved - * See the file AUTHORS included with the NASM distribution for - * the specific copyright holders. -@@ -14,7 +14,7 @@ - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. -- * -+ * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND - * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, - * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -@@ -315,7 +315,7 @@ elf_directive(enum directive directive, char *value, int pass) +@@ -553,7 +553,7 @@ static void elf_init(void) + }; + const char * const *p; - static void elf_init(void) - { - strlcpy(elf_module, inname, sizeof(elf_module)); + filename_debug_remap(elf_module, inname, sizeof(elf_module)); sects = NULL; nsects = sectlen = 0; syms = saa_init((int32_t)sizeof(struct elf_symbol)); -@@ -868,7 +868,7 @@ static void elf32_out(int32_t segto, const void *data, - " segment base references"); - } else { - if (wrt == NO_SEG) { -- /* -+ /* - * The if() is a hack to deal with compilers which - * don't handle switch() statements with 64-bit - * expressions. 
diff --git a/output/outieee.c b/output/outieee.c -index 3a28942d..f61824e4 100644 +index 4cc0f0f..2468724 100644 --- a/output/outieee.c +++ b/output/outieee.c -@@ -209,7 +209,7 @@ static void ieee_unqualified_name(char *, char *); +@@ -207,7 +207,7 @@ static void ieee_unqualified_name(char *, char *); */ static void ieee_init(void) { @@ -254,10 +228,10 @@ index 3a28942d..f61824e4 100644 fpubhead = NULL; fpubtail = &fpubhead; diff --git a/output/outobj.c b/output/outobj.c -index b4f2c499..55bba4a1 100644 +index 0d4d311..d8dd6a0 100644 --- a/output/outobj.c +++ b/output/outobj.c -@@ -640,7 +640,7 @@ static enum directive_result obj_directive(enum directive, char *, int); +@@ -638,7 +638,7 @@ static enum directive_result obj_directive(enum directive, char *); static void obj_init(void) { @@ -267,7 +241,7 @@ index b4f2c499..55bba4a1 100644 any_segs = false; fpubhead = NULL; diff --git a/stdlib/strlcat.c b/stdlib/strlcat.c -index 7084d460..ee93dea3 100644 +index 7084d46..ee93dea 100644 --- a/stdlib/strlcat.c +++ b/stdlib/strlcat.c @@ -29,7 +29,7 @@ size_t strlcat(char *dest, const char *src, size_t size) @@ -281,7 +255,7 @@ index 7084d460..ee93dea3 100644 /* destination was not NULL terminated. Return the initial size */ diff --git a/test/elfdebugprefix.asm b/test/elfdebugprefix.asm new file mode 100644 -index 00000000..a67ba29c +index 0000000..a67ba29 --- /dev/null +++ b/test/elfdebugprefix.asm @@ -0,0 +1,6 @@ @@ -292,7 +266,7 @@ index 00000000..a67ba29c + ret + diff --git a/test/performtest.pl b/test/performtest.pl -index f7865b39..096f9604 100755 +index f7865b3..096f960 100755 --- a/test/performtest.pl +++ b/test/performtest.pl @@ -42,14 +42,22 @@ sub perform { @@ -320,6 +294,3 @@ index f7865b39..096f9604 100755 #Move the output to the test dir mkpath("$outputdir/$testname/$subname"); foreach(split / /,$files) { --- -2.23.0 - diff --git a/poky/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch b/poky/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch deleted file mode 100644 index 6e3f909d0..000000000 --- a/poky/meta/recipes-devtools/nasm/nasm/CVE-2018-19755.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 3079f7966dbed4497e36d5067cbfd896a90358cb Mon Sep 17 00:00:00 2001 -From: Cyrill Gorcunov -Date: Wed, 14 Nov 2018 10:03:42 +0300 -Subject: [PATCH] preproc: Fix malformed parameter count - -readnum returns 64bit number which may become -a negative integer upon conversion which in -turn lead to out of bound array access. 
- -Fix it by explicit conversion with bounds check - - | POC6:2: error: parameter count `2222222222' is out of bounds [0; 2147483647] - -https://bugzilla.nasm.us/show_bug.cgi?id=3392528 - -Signed-off-by: Cyrill Gorcunov - -Upstream-Status: Backport -CVE: CVE-2018-19755 -Signed-off-by: Anuj Mittal ---- - asm/preproc.c | 43 +++++++++++++++++++++---------------------- - 1 file changed, 21 insertions(+), 22 deletions(-) - -diff --git a/asm/preproc.c b/asm/preproc.c -index b6afee3..e5ad05a 100644 ---- a/asm/preproc.c -+++ b/asm/preproc.c -@@ -1650,6 +1650,23 @@ smacro_defined(Context * ctx, const char *name, int nparam, SMacro ** defn, - return false; - } - -+/* param should be a natural number [0; INT_MAX] */ -+static int read_param_count(const char *str) -+{ -+ int result; -+ bool err; -+ -+ result = readnum(str, &err); -+ if (result < 0 || result > INT_MAX) { -+ result = 0; -+ nasm_error(ERR_NONFATAL, "parameter count `%s' is out of bounds [%d; %d]", -+ str, 0, INT_MAX); -+ } else if (err) { -+ nasm_error(ERR_NONFATAL, "unable to parse parameter count `%s'", str); -+ } -+ return result; -+} -+ - /* - * Count and mark off the parameters in a multi-line macro call. - * This is called both from within the multi-line macro expansion -@@ -1871,11 +1888,7 @@ static bool if_condition(Token * tline, enum preproc_token ct) - pp_directives[ct]); - } else { - searching.nparam_min = searching.nparam_max = -- readnum(tline->text, &j); -- if (j) -- nasm_error(ERR_NONFATAL, -- "unable to parse parameter count `%s'", -- tline->text); -+ read_param_count(tline->text); - } - if (tline && tok_is_(tline->next, "-")) { - tline = tline->next->next; -@@ -1886,11 +1899,7 @@ static bool if_condition(Token * tline, enum preproc_token ct) - "`%s' expects a parameter count after `-'", - pp_directives[ct]); - else { -- searching.nparam_max = readnum(tline->text, &j); -- if (j) -- nasm_error(ERR_NONFATAL, -- "unable to parse parameter count `%s'", -- tline->text); -+ searching.nparam_max = read_param_count(tline->text); - if (searching.nparam_min > searching.nparam_max) { - nasm_error(ERR_NONFATAL, - "minimum parameter count exceeds maximum"); -@@ -2079,8 +2088,6 @@ static void undef_smacro(Context *ctx, const char *mname) - */ - static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive) - { -- bool err; -- - tline = tline->next; - skip_white_(tline); - tline = expand_id(tline); -@@ -2103,11 +2110,7 @@ static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive) - if (!tok_type_(tline, TOK_NUMBER)) { - nasm_error(ERR_NONFATAL, "`%s' expects a parameter count", directive); - } else { -- def->nparam_min = def->nparam_max = -- readnum(tline->text, &err); -- if (err) -- nasm_error(ERR_NONFATAL, -- "unable to parse parameter count `%s'", tline->text); -+ def->nparam_min = def->nparam_max = read_param_count(tline->text); - } - if (tline && tok_is_(tline->next, "-")) { - tline = tline->next->next; -@@ -2117,11 +2120,7 @@ static bool parse_mmacro_spec(Token *tline, MMacro *def, const char *directive) - nasm_error(ERR_NONFATAL, - "`%s' expects a parameter count after `-'", directive); - } else { -- def->nparam_max = readnum(tline->text, &err); -- if (err) { -- nasm_error(ERR_NONFATAL, "unable to parse parameter count `%s'", -- tline->text); -- } -+ def->nparam_max = read_param_count(tline->text); - if (def->nparam_min > def->nparam_max) { - nasm_error(ERR_NONFATAL, "minimum parameter count exceeds maximum"); - def->nparam_max = def->nparam_min; --- -2.10.5.GIT - diff --git 
a/poky/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch b/poky/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch deleted file mode 100644 index d45d2cb46..000000000 --- a/poky/meta/recipes-devtools/nasm/nasm/CVE-2019-14248.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 93d41d82963b2cfd0b24c906f5a8daf53281b559 Mon Sep 17 00:00:00 2001 -From: "H. Peter Anvin (Intel)" -Date: Fri, 16 Aug 2019 01:12:54 -0700 -Subject: [PATCH] BR 3392576: don't segfault on a bad %pragma limit - -Don't segfault on a bad %pragma limit. Instead treat a NULL pointer as -an empty string. - -Reported-by: Ren Kimura -Signed-off-by: H. Peter Anvin (Intel) - -CVE: CVE-2019-14248 -Upstream-Status: Backport [https://repo.or.cz/nasm.git/commit/93d41d82963b2cfd0b24c906f5a8daf53281b559] -Signed-off-by: Anuj Mittal ---- - asm/nasm.c | 7 ++++++- - 1 file changed, 6 insertions(+), 1 deletion(-) - -diff --git a/asm/nasm.c b/asm/nasm.c -index c84d675..65116ab 100644 ---- a/asm/nasm.c -+++ b/asm/nasm.c -@@ -212,6 +212,11 @@ nasm_set_limit(const char *limit, const char *valstr) - bool rn_error; - int errlevel; - -+ if (!limit) -+ limit = ""; -+ if (!valstr) -+ valstr = ""; -+ - for (i = 0; i <= LIMIT_MAX; i++) { - if (!nasm_stricmp(limit, limit_info[i].name)) - break; -@@ -204,7 +209,7 @@ nasm_set_limit(const char *limit, const char *valstr) - errlevel = ERR_WARNING|ERR_NOFILE|ERR_USAGE; - else - errlevel = ERR_WARNING|ERR_PASS1|WARN_UNKNOWN_PRAGMA; -- nasm_error(errlevel, "unknown limit: `%s'", limit); -+ nasm_error(errlevel, "invalid limit value: `%s'", valstr); - return DIRR_ERROR; - } - diff --git a/poky/meta/recipes-devtools/nasm/nasm_2.14.02.bb b/poky/meta/recipes-devtools/nasm/nasm_2.14.02.bb deleted file mode 100644 index c40be8fbb..000000000 --- a/poky/meta/recipes-devtools/nasm/nasm_2.14.02.bb +++ /dev/null @@ -1,26 +0,0 @@ -SUMMARY = "General-purpose x86 assembler" -SECTION = "devel" -LICENSE = "BSD-2-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe" - -SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \ - file://CVE-2018-19755.patch \ - file://CVE-2019-14248.patch \ - file://0001-stdlib-Add-strlcat.patch \ - file://0002-Add-debug-prefix-map-option.patch \ - file://0001-fix-pa_add_headers.m4-for-development-versions-of-au.patch \ - " - -SRC_URI[md5sum] = "3f489aa48ad2aa1f967dc5e293bbd06f" -SRC_URI[sha256sum] = "34fd26c70a277a9fdd54cb5ecf389badedaf48047b269d1008fbc819b24e80bc" - -# brokensep since this uses autoconf but not automake -inherit autotools-brokensep - -EXTRA_AUTORECONF += "--exclude=aclocal" - -BBCLASSEXTEND = "native" - -DEPENDS = "groff-native" - -CVE_PRODUCT = "netwide_assembler" diff --git a/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb b/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb new file mode 100644 index 000000000..5c4e28de0 --- /dev/null +++ b/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb @@ -0,0 +1,21 @@ +SUMMARY = "General-purpose x86 assembler" +SECTION = "devel" +LICENSE = "BSD-2-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe" + +SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \ + file://0001-stdlib-Add-strlcat.patch \ + file://0002-Add-debug-prefix-map-option.patch \ + " + +SRC_URI[sha256sum] = "04e7343d9bf112bffa9fda86f6c7c8b120c2ccd700b882e2db9f57484b1bd778" + +EXTRA_AUTORECONF_append = " -I autoconf/m4" + +inherit autotools + +BBCLASSEXTEND = "native" + +DEPENDS = "groff-native" + +CVE_PRODUCT = "netwide_assembler" diff --git 
a/poky/meta/recipes-devtools/perl/files/perl-cross-makefile.patch b/poky/meta/recipes-devtools/perl/files/perl-cross-makefile.patch new file mode 100644 index 000000000..5d3f99820 --- /dev/null +++ b/poky/meta/recipes-devtools/perl/files/perl-cross-makefile.patch @@ -0,0 +1,29 @@ +Makefile: Avoid continual rebuilds of miniperl and associated races + +In the Yocto Project, when we run "make install" we notice miniperl +rebuilding multiple times. Usually this is harmless however sometimes +race issues occur such as miniperl not being executable. + +The issue is that crosspatch is a phony target so it always rebuilds. +Adding this as a dependency of miniperl means miniperl always rebuilds +too. + +Avoid this by injecting a direct dependency avoiding the phony target. +miniperl is then only rebuilt when its input changes as desired. + +Signed-off-by: Richard Purdie richard.purdie@linuxfoundation.org +Upstream-Status: Submitted [https://github.com/arsv/perl-cross/pull/95] + +Index: perl-5.32.0/Makefile +=================================================================== +--- perl-5.32.0.orig/Makefile ++++ perl-5.32.0/Makefile +@@ -56,7 +56,7 @@ crosspatch: $(CROSSPATCHED) + + # A minor fix for buildroot, force crosspatching when running "make perl modules" + # instead of "make all". +-miniperlmain$O: crosspatch ++miniperlmain$O: $(CROSSPATCHED) + + # Original versions are not saved anymore; patch generally takes care of this, + # and if that fails, reaching for the source tarball is the safest option. diff --git a/poky/meta/recipes-devtools/perl/perl_5.32.0.bb b/poky/meta/recipes-devtools/perl/perl_5.32.0.bb index 1adfd90e7..bba8263b9 100644 --- a/poky/meta/recipes-devtools/perl/perl_5.32.0.bb +++ b/poky/meta/recipes-devtools/perl/perl_5.32.0.bb @@ -20,6 +20,7 @@ SRC_URI = "https://www.cpan.org/src/5.0/perl-${PV}.tar.gz;name=perl \ file://0001-configure_path.sh-do-not-hardcode-prefix-lib-as-libr.patch \ file://0002-Constant-Fix-up-shebang.patch \ file://determinism.patch \ + file://perl-cross-makefile.patch \ " SRC_URI_append_class-native = " \ file://perl-configpm-switch.patch \ @@ -138,6 +139,11 @@ do_install() { # Fix up shared library rm ${D}/${libdir}/perl5/${PV}/*/CORE/libperl.so ln -sf ../../../../libperl.so.${PERL_LIB_VER} $(echo ${D}/${libdir}/perl5/${PV}/*/CORE)/libperl.so + + # Try to catch Bug #13946 + if [ -e ${D}/${libdir}/perl5/${PV}/Storable.pm ]; then + bbfatal 'non-arch specific Storable.pm found! See https://bugzilla.yoctoproject.org/show_bug.cgi?id=13946' + fi } do_install_append_class-target() { diff --git a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch index 201e3570d..86c9363d6 100644 --- a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch +++ b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch @@ -1,4 +1,4 @@ -From 234c3da52da09b28db5b2c4d33ebe9c800c461ac Mon Sep 17 00:00:00 2001 +From 768e1f2f14c9f1b3f9bd0e017c3f6183b45616e8 Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Tue, 17 Jul 2018 10:13:38 +0800 Subject: [PATCH] conditionally do not fetch code by easy_install @@ -9,16 +9,15 @@ internet by easy_install. 
Upstream-Status: Inappropriate [oe specific] Signed-off-by: Hongxu Jia - --- setuptools/command/easy_install.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py -index 5a9576f..f5961cb 100644 +index bcbd4f58..6455afda 100644 --- a/setuptools/command/easy_install.py +++ b/setuptools/command/easy_install.py -@@ -656,6 +656,11 @@ class easy_install(Command): +@@ -653,6 +653,11 @@ class easy_install(Command): os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir)) def easy_install(self, spec, deps=False): @@ -27,6 +26,9 @@ index 5a9576f..f5961cb 100644 + "Please add its native recipe to DEPENDS." % spec) + return None + - if not self.editable: - self.install_site_py() - + with self._tmpdir() as tmpdir: + if not isinstance(spec, Requirement): + if URL_SCHEME(spec): +-- +2.25.1 + diff --git a/poky/meta/recipes-devtools/python/python-setuptools.inc b/poky/meta/recipes-devtools/python/python-setuptools.inc deleted file mode 100644 index ecf120575..000000000 --- a/poky/meta/recipes-devtools/python/python-setuptools.inc +++ /dev/null @@ -1,60 +0,0 @@ -SUMMARY = "Download, build, install, upgrade, and uninstall Python packages" -HOMEPAGE = "https://pypi.org/project/setuptools" -SECTION = "devel/python" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=19;md5=9a33897f1bca1160d7aad3835152e158" - -PYPI_PACKAGE_EXT = "zip" - -inherit pypi - -SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch" - -SRC_URI += "file://0001-change-shebang-to-python3.patch" - -SRC_URI[sha256sum] = "843037738d1e34e8b326b5e061f474aca6ef9d7ece41329afbc8aac6195a3920" - -DEPENDS += "${PYTHON_PN}" - -RDEPENDS_${PN} = "\ - ${PYTHON_PN}-2to3 \ - ${PYTHON_PN}-compile \ - ${PYTHON_PN}-compression \ - ${PYTHON_PN}-ctypes \ - ${PYTHON_PN}-distutils \ - ${PYTHON_PN}-email \ - ${PYTHON_PN}-html \ - ${PYTHON_PN}-json \ - ${PYTHON_PN}-netserver \ - ${PYTHON_PN}-numbers \ - ${PYTHON_PN}-pickle \ - ${PYTHON_PN}-pkgutil \ - ${PYTHON_PN}-plistlib \ - ${PYTHON_PN}-shell \ - ${PYTHON_PN}-stringold \ - ${PYTHON_PN}-threading \ - ${PYTHON_PN}-unittest \ - ${PYTHON_PN}-xml \ -" -do_install_prepend() { - install -d ${D}${PYTHON_SITEPACKAGES_DIR} -} - -BBCLASSEXTEND = "native nativesdk" - -# The pkg-resources module can be used by itself, without the package downloader -# and easy_install. Ship it in a separate package so that it can be used by -# minimal distributions. -PACKAGES =+ "${PYTHON_PN}-pkg-resources " -FILES_${PYTHON_PN}-pkg-resources = "${PYTHON_SITEPACKAGES_DIR}/pkg_resources/*" -RDEPENDS_${PYTHON_PN}-pkg-resources = "\ - ${PYTHON_PN}-compression \ - ${PYTHON_PN}-email \ - ${PYTHON_PN}-plistlib \ - ${PYTHON_PN}-pprint \ -" -# Due to the way OE-Core implemented native recipes, the native class cannot -# have a dependency on something that is not a recipe name. Work around that by -# manually setting RPROVIDES. 
-RDEPENDS_${PN}_append = " ${PYTHON_PN}-pkg-resources" -RPROVIDES_append_class-native = " ${PYTHON_PN}-pkg-resources-native" diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb b/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb deleted file mode 100644 index 0dc1ed862..000000000 --- a/poky/meta/recipes-devtools/python/python3-setuptools_47.3.1.bb +++ /dev/null @@ -1,6 +0,0 @@ -require python-setuptools.inc -inherit setuptools3 - -do_install_append() { - mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install -} diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb b/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb new file mode 100644 index 000000000..1c500e468 --- /dev/null +++ b/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb @@ -0,0 +1,65 @@ +SUMMARY = "Download, build, install, upgrade, and uninstall Python packages" +HOMEPAGE = "https://pypi.org/project/setuptools" +SECTION = "devel/python" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=19;md5=9a33897f1bca1160d7aad3835152e158" + +PYPI_PACKAGE_EXT = "zip" + +inherit pypi setuptools3 + +SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch" + +SRC_URI += "file://0001-change-shebang-to-python3.patch" + +SRC_URI[sha256sum] = "1c7b51fba5d83160d540d18b2bf08fd546357488adf9ddbca08cc1e997bd5c18" + +DEPENDS += "${PYTHON_PN}" + +RDEPENDS_${PN} = "\ + ${PYTHON_PN}-2to3 \ + ${PYTHON_PN}-compile \ + ${PYTHON_PN}-compression \ + ${PYTHON_PN}-ctypes \ + ${PYTHON_PN}-distutils \ + ${PYTHON_PN}-email \ + ${PYTHON_PN}-html \ + ${PYTHON_PN}-json \ + ${PYTHON_PN}-netserver \ + ${PYTHON_PN}-numbers \ + ${PYTHON_PN}-pickle \ + ${PYTHON_PN}-pkgutil \ + ${PYTHON_PN}-plistlib \ + ${PYTHON_PN}-shell \ + ${PYTHON_PN}-stringold \ + ${PYTHON_PN}-threading \ + ${PYTHON_PN}-unittest \ + ${PYTHON_PN}-xml \ +" + +do_install_prepend() { + install -d ${D}${PYTHON_SITEPACKAGES_DIR} +} + +do_install_append() { + mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install +} + +BBCLASSEXTEND = "native nativesdk" + +# The pkg-resources module can be used by itself, without the package downloader +# and easy_install. Ship it in a separate package so that it can be used by +# minimal distributions. +PACKAGES =+ "${PYTHON_PN}-pkg-resources " +FILES_${PYTHON_PN}-pkg-resources = "${PYTHON_SITEPACKAGES_DIR}/pkg_resources/*" +RDEPENDS_${PYTHON_PN}-pkg-resources = "\ + ${PYTHON_PN}-compression \ + ${PYTHON_PN}-email \ + ${PYTHON_PN}-plistlib \ + ${PYTHON_PN}-pprint \ +" +# Due to the way OE-Core implemented native recipes, the native class cannot +# have a dependency on something that is not a recipe name. Work around that by +# manually setting RPROVIDES. 
+RDEPENDS_${PN}_append = " ${PYTHON_PN}-pkg-resources" +RPROVIDES_append_class-native = " ${PYTHON_PN}-pkg-resources-native" diff --git a/poky/meta/recipes-devtools/python/python3/0001-configure.ac-define-a-path-for-profile-data.patch b/poky/meta/recipes-devtools/python/python3/0001-configure.ac-define-a-path-for-profile-data.patch deleted file mode 100644 index ee3d64552..000000000 --- a/poky/meta/recipes-devtools/python/python3/0001-configure.ac-define-a-path-for-profile-data.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 5e94e315119117898ce1a2798641848f61d796b1 Mon Sep 17 00:00:00 2001 -From: Mingli Yu -Date: Thu, 9 Jul 2020 07:58:44 +0000 -Subject: [PATCH] configure.ac: define a path for profile data - -There comes below error when use ccache 3.7.10 to compile python3 -and check [1] for more details. - | Python-3.8.3/Modules/_contextvarsmodule.c:43:1: error: source locations for function 'PyInit__contextvars' have changed, the profile data may be out of date [-Werror=coverage-mismatch] - -That's because the logic for profile directory changes a little in -[2] after ccache upgrades to 3.7.10. - -So define a profile directory path accordingly to fix the above error. - -[1] https://github.com/ccache/ccache/issues/615 -[2] https://github.com/ccache/ccache/commit/91a2954eb47b4a106e2be6cf611917b895108e35 - -Upstream-Status: Submitted [https://github.com/python/cpython/pull/21408] - -Signed-off-by: Mingli Yu ---- - configure.ac | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/configure.ac b/configure.ac -index 08fe397..bb15bda 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -1332,8 +1332,8 @@ case $cc_basename in - fi - ;; - *) -- PGO_PROF_GEN_FLAG="-fprofile-generate" -- PGO_PROF_USE_FLAG="-fprofile-use -fprofile-correction" -+ PGO_PROF_GEN_FLAG="-fprofile-generate=$(pwd)" -+ PGO_PROF_USE_FLAG="-fprofile-use=$(pwd) -fprofile-correction" - LLVM_PROF_MERGER="true" - LLVM_PROF_FILE="" - ;; --- -2.26.2 - diff --git a/poky/meta/recipes-devtools/python/python3/get_module_deps3.py b/poky/meta/recipes-devtools/python/python3/get_module_deps3.py index fd12baad8..6806f2317 100644 --- a/poky/meta/recipes-devtools/python/python3/get_module_deps3.py +++ b/poky/meta/recipes-devtools/python/python3/get_module_deps3.py @@ -9,6 +9,7 @@ debug=False import sys +import os # We can get a list of the modules which are currently required to run python # so we run python-core and get its modules, we then import what we need @@ -48,8 +49,19 @@ current_module = str(sys.argv[1]).rstrip() if(debug==True): log = open('log_%s' % current_module,'w') log.write('Module %s generated the following dependencies:\n' % current_module) -try: - importlib.import_module('%s' % current_module) +try: + m = importlib.import_module(current_module) + # handle python packages which may not include all modules in the __init__ + if os.path.basename(m.__file__) == "__init__.py": + modulepath = os.path.dirname(m.__file__) + for i in os.listdir(modulepath): + if i.startswith("_") or not(i.endswith(".py")): + continue + submodule = "{}.{}".format(current_module, i[:-3]) + try: + importlib.import_module(submodule) + except: + pass # ignore all import or other exceptions raised during import except ImportError as e: if (debug==True): log.write('Module was not found') @@ -107,6 +119,8 @@ for item in dif: dep_path = dep_path.replace(soabi,'*') print (dep_path) continue + if "_sysconfigdata" in dep_path: + dep_path = dep_path.replace(sysconfig._get_sysconfigdata_name(), "_sysconfigdata*") if (debug==True): 
log.write(dep_path+'\n') @@ -140,6 +154,8 @@ for item in dif: log.write(cached) cached = fix_path(cached) cached = cached.replace(cpython_tag,'*') + if "_sysconfigdata" in cached: + cached = cached.replace(sysconfig._get_sysconfigdata_name(), "_sysconfigdata*") print (cached) if debug==True: diff --git a/poky/meta/recipes-devtools/python/python3/python3-manifest.json b/poky/meta/recipes-devtools/python/python3/python3-manifest.json index 3bcc9b866..69aecb700 100644 --- a/poky/meta/recipes-devtools/python/python3/python3-manifest.json +++ b/poky/meta/recipes-devtools/python/python3/python3-manifest.json @@ -285,7 +285,7 @@ "${libdir}/python${PYTHON_MAJMIN}/operator.py", "${libdir}/python${PYTHON_MAJMIN}/optparse.py", "${libdir}/python${PYTHON_MAJMIN}/os.py", - "${libdir}/python${PYTHON_MAJMIN}/pathlib.py", + "${libdir}/python${PYTHON_MAJMIN}/pathlib.py", "${libdir}/python${PYTHON_MAJMIN}/pkgutil.py", "${libdir}/python${PYTHON_MAJMIN}/platform.py", "${libdir}/python${PYTHON_MAJMIN}/posixpath.py", @@ -313,6 +313,8 @@ "${libdir}/python${PYTHON_MAJMIN}/tokenize.py", "${libdir}/python${PYTHON_MAJMIN}/traceback.py", "${libdir}/python${PYTHON_MAJMIN}/types.py", + "${libdir}/python${PYTHON_MAJMIN}/urllib", + "${libdir}/python${PYTHON_MAJMIN}/urllib/parse.py", "${libdir}/python${PYTHON_MAJMIN}/warnings.py", "${libdir}/python${PYTHON_MAJMIN}/weakref.py", "${prefix}/lib/python${PYTHON_MAJMIN}/config*/*[!.a]" @@ -324,7 +326,7 @@ "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_compression.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_markupbase.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_sitebuiltins.*.pyc", - "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_sysconfigdata.*.pyc", + "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_sysconfigdata*.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/_weakrefset.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/abc.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/argparse.*.pyc", @@ -359,7 +361,7 @@ "${libdir}/python${PYTHON_MAJMIN}/__pycache__/operator.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/optparse.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/os.*.pyc", - "${libdir}/python${PYTHON_MAJMIN}/__pycache__/pathlib.*.pyc", + "${libdir}/python${PYTHON_MAJMIN}/__pycache__/pathlib.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/pkgutil.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/platform.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/posixpath.*.pyc", @@ -397,7 +399,9 @@ "${libdir}/python${PYTHON_MAJMIN}/importlib/__pycache__", "${libdir}/python${PYTHON_MAJMIN}/importlib/__pycache__/abc.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/importlib/__pycache__/machinery.*.pyc", - "${libdir}/python${PYTHON_MAJMIN}/importlib/__pycache__/util.*.pyc" + "${libdir}/python${PYTHON_MAJMIN}/importlib/__pycache__/util.*.pyc", + "${libdir}/python${PYTHON_MAJMIN}/urllib/__pycache__", + "${libdir}/python${PYTHON_MAJMIN}/urllib/__pycache__/parse.*.pyc" ] }, "crypt": { @@ -427,7 +431,10 @@ "ctypes": { "summary": "Python C types support", "rdepends": [ - "core" + "core", + "crypt", + "io", + "math" ], "files": [ "${libdir}/python${PYTHON_MAJMIN}/ctypes", @@ -537,7 +544,10 @@ "distutils": { "summary": "Python Distribution Utilities", "rdepends": [ - "core" + "compression", + "core", + "email", + "stringold" ], "files": [ "${libdir}/python${PYTHON_MAJMIN}/distutils" @@ -548,7 +558,6 @@ "summary": "Python framework for running examples in docstrings", "rdepends": [ "asyncio", - "compression", "core", "debugger", "difflib", 
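
[Editorial aside, not part of the patch] The get_module_deps3.py hunks above teach the dependency scanner to step into a package's directory and import each public submodule, since a package's __init__.py may not import everything the package ships; they also wildcard the _sysconfigdata module name. A minimal standalone sketch of the submodule-import idea, assuming nothing beyond the standard library (the real script additionally rewrites SOABI tags and cached .pyc paths):

    # Sketch only: import a module and, if it is a package (its __file__ is an
    # __init__.py), also try importing each non-private .py submodule so that
    # sys.modules captures dependencies the __init__.py does not pull in itself.
    import importlib
    import os
    import sys

    def import_with_submodules(name):
        mod = importlib.import_module(name)
        if os.path.basename(getattr(mod, "__file__", "") or "") == "__init__.py":
            pkgdir = os.path.dirname(mod.__file__)
            for entry in os.listdir(pkgdir):
                if entry.startswith("_") or not entry.endswith(".py"):
                    continue
                try:
                    importlib.import_module("{}.{}".format(name, entry[:-3]))
                except Exception:
                    pass  # best effort: optional submodules may fail to import
        return sorted(m for m in sys.modules if m == name or m.startswith(name + "."))

    if __name__ == "__main__":
        print(import_with_submodules("json"))
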
@@ -577,7 +586,9 @@ "datetime", "io", "math", - "netclient" + "mime", + "netclient", + "stringold" ], "files": [ "${libdir}/python${PYTHON_MAJMIN}/email", @@ -648,7 +659,6 @@ "io": { "summary": "Python low-level I/O", "rdepends": [ - "compression", "core", "crypt", "math", @@ -690,7 +700,11 @@ "summary": "Python logging support", "rdepends": [ "core", - "stringold" + "io", + "netserver", + "pickle", + "stringold", + "threading" ], "files": [ "${libdir}/python${PYTHON_MAJMIN}/logging" @@ -824,11 +838,18 @@ "summary": "Python multiprocessing support", "rdepends": [ "core", + "crypt", + "ctypes", "io", - "pickle" + "math", + "mmap", + "netclient", + "pickle", + "threading" ], "files": [ "${libdir}/python${PYTHON_MAJMIN}/lib-dynload/_multiprocessing.*.so", + "${libdir}/python${PYTHON_MAJMIN}/lib-dynload/_posixshmem.*.so", "${libdir}/python${PYTHON_MAJMIN}/multiprocessing" ], "cached": [] @@ -855,10 +876,9 @@ "${libdir}/python${PYTHON_MAJMIN}/mimetypes.py", "${libdir}/python${PYTHON_MAJMIN}/nntplib.py", "${libdir}/python${PYTHON_MAJMIN}/poplib.py", + "${libdir}/python${PYTHON_MAJMIN}/secrets.py", "${libdir}/python${PYTHON_MAJMIN}/smtplib.py", "${libdir}/python${PYTHON_MAJMIN}/telnetlib.py", - "${libdir}/python${PYTHON_MAJMIN}/urllib", - "${libdir}/python${PYTHON_MAJMIN}/urllib/__pycache__", "${libdir}/python${PYTHON_MAJMIN}/uuid.py" ], "cached": [ @@ -868,6 +888,7 @@ "${libdir}/python${PYTHON_MAJMIN}/__pycache__/mimetypes.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/nntplib.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/poplib.*.pyc", + "${libdir}/python${PYTHON_MAJMIN}/__pycache__/secrets.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/smtplib.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/telnetlib.*.pyc", "${libdir}/python${PYTHON_MAJMIN}/__pycache__/uuid.*.pyc" @@ -876,7 +897,6 @@ "netserver": { "summary": "Python Internet Protocol servers", "rdepends": [ - "compression", "core", "crypt", "datetime", @@ -992,8 +1012,7 @@ "pydoc": { "summary": "Python interactive help support", "rdepends": [ - "core", - "netclient" + "core" ], "files": [ "${bindir}/pydoc*", @@ -1017,7 +1036,6 @@ "shell": { "summary": "Python shell-like functionality", "rdepends": [ - "compression", "core", "stringold" ], @@ -1150,7 +1168,6 @@ "summary": "Python unit testing framework", "rdepends": [ "asyncio", - "compression", "core", "difflib", "io", @@ -1185,7 +1202,6 @@ "venv": { "summary": "Provides support for creating lightweight virtual environments with their own site directories, optionally isolated from system site directories.", "rdepends": [ - "compression", "core", "logging", "stringold" @@ -1211,7 +1227,21 @@ "xmlrpc": { "summary": "Python XML-RPC support", "rdepends": [ + "compression", "core", + "crypt", + "datetime", + "email", + "fcntl", + "html", + "io", + "math", + "mime", + "netclient", + "netserver", + "numbers", + "pydoc", + "stringold", "xml" ], "files": [ diff --git a/poky/meta/recipes-devtools/python/python3_3.8.3.bb b/poky/meta/recipes-devtools/python/python3_3.8.3.bb deleted file mode 100644 index 7e0f35ce4..000000000 --- a/poky/meta/recipes-devtools/python/python3_3.8.3.bb +++ /dev/null @@ -1,362 +0,0 @@ -SUMMARY = "The Python Programming Language" -HOMEPAGE = "http://www.python.org" -LICENSE = "PSFv2" -SECTION = "devel/python" - -LIC_FILES_CHKSUM = "file://LICENSE;md5=203a6dbc802ee896020a47161e759642" - -SRC_URI = "http://www.python.org/ftp/python/${PV}/Python-${PV}.tar.xz \ - file://run-ptest \ - file://create_manifest3.py \ - file://get_module_deps3.py \ - 
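
[Editorial aside, not part of the patch] The python3-manifest.json edits above adjust each subpackage's rdepends list (for example, xmlrpc now also pulls in compression, email, netserver, pydoc and others). As a hedged illustration of how that data could be inspected — this helper is hypothetical and is not shipped by any recipe — a few lines of Python can compute the transitive rdepends closure of one subpackage:

    # Sketch only: each manifest entry carries "summary", "rdepends", "files"
    # and "cached" keys; follow "rdepends" links to get the full closure.
    import json

    def rdepends_closure(manifest_path, package):
        with open(manifest_path) as f:
            manifest = json.load(f)
        seen, todo = set(), [package]
        while todo:
            pkg = todo.pop()
            if pkg in seen or pkg not in manifest:
                continue
            seen.add(pkg)
            todo.extend(manifest[pkg].get("rdepends", []))
        return sorted(seen)

    # Example usage (path assumes a poky checkout layout):
    # print(rdepends_closure("poky/meta/recipes-devtools/python/python3/python3-manifest.json", "xmlrpc"))
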
file://python3-manifest.json \ - file://check_build_completeness.py \ - file://cgi_py.patch \ - file://0001-Do-not-add-usr-lib-termcap-to-linker-flags-to-avoid-.patch \ - ${@bb.utils.contains('PACKAGECONFIG', 'tk', '', 'file://avoid_warning_about_tkinter.patch', d)} \ - file://0001-Do-not-use-the-shell-version-of-python-config-that-w.patch \ - file://python-config.patch \ - file://0001-Makefile.pre-use-qemu-wrapper-when-gathering-profile.patch \ - file://0001-Do-not-hardcode-lib-as-location-for-site-packages-an.patch \ - file://0001-python3-use-cc_basename-to-replace-CC-for-checking-c.patch \ - file://0001-Lib-sysconfig.py-fix-another-place-where-lib-is-hard.patch \ - file://0001-Makefile-fix-Issue36464-parallel-build-race-problem.patch \ - file://0001-bpo-36852-proper-detection-of-mips-architecture-for-.patch \ - file://crosspythonpath.patch \ - file://reformat_sysconfig.py \ - file://0001-Use-FLAG_REF-always-for-interned-strings.patch \ - file://0001-test_locale.py-correct-the-test-output-format.patch \ - file://0017-setup.py-do-not-report-missing-dependencies-for-disa.patch \ - file://0001-setup.py-pass-missing-libraries-to-Extension-for-mul.patch \ - file://0001-Makefile-do-not-compile-.pyc-in-parallel.patch \ - file://0001-configure.ac-fix-LIBPL.patch \ - file://0001-python3-Do-not-hardcode-lib-for-distutils.patch \ - file://0020-configure.ac-setup.py-do-not-add-a-curses-include-pa.patch \ - file://0001-configure.ac-define-a-path-for-profile-data.patch \ - " - -SRC_URI_append_class-native = " \ - file://0001-distutils-sysconfig-append-STAGING_LIBDIR-python-sys.patch \ - file://12-distutils-prefix-is-inside-staging-area.patch \ - file://0001-Don-t-search-system-for-headers-libraries.patch \ - " - -SRC_URI[md5sum] = "3000cf50aaa413052aef82fd2122ca78" -SRC_URI[sha256sum] = "dfab5ec723c218082fe3d5d7ae17ecbdebffa9a1aea4d64aa3a2ecdd2e795864" - -# exclude pre-releases for both python 2.x and 3.x -UPSTREAM_CHECK_REGEX = "[Pp]ython-(?P\d+(\.\d+)+).tar" - -CVE_PRODUCT = "python" - -# This is not exploitable when glibc has CVE-2016-10739 fixed. -CVE_CHECK_WHITELIST += "CVE-2019-18348" - -PYTHON_MAJMIN = "3.8" - -S = "${WORKDIR}/Python-${PV}" - -BBCLASSEXTEND = "native nativesdk" - -inherit autotools pkgconfig qemu ptest multilib_header update-alternatives - -MULTILIB_SUFFIX = "${@d.getVar('base_libdir',1).split('/')[-1]}" - -ALTERNATIVE_${PN}-dev = "python3-config" -ALTERNATIVE_LINK_NAME[python3-config] = "${bindir}/python${PYTHON_MAJMIN}-config" -ALTERNATIVE_TARGET[python3-config] = "${bindir}/python${PYTHON_MAJMIN}-config-${MULTILIB_SUFFIX}" - - -DEPENDS = "bzip2-replacement-native libffi bzip2 openssl sqlite3 zlib virtual/libintl xz virtual/crypt util-linux libtirpc libnsl2" -DEPENDS_append_class-target = " python3-native" -DEPENDS_append_class-nativesdk = " python3-native" - -EXTRA_OECONF = " --without-ensurepip --enable-shared" -EXTRA_OECONF_append_class-native = " --bindir=${bindir}/${PN}" - -export CROSSPYTHONPATH="${STAGING_LIBDIR_NATIVE}/python${PYTHON_MAJMIN}/lib-dynload/" - -EXTRANATIVEPATH += "python3-native" - -CACHED_CONFIGUREVARS = " \ - ac_cv_file__dev_ptmx=yes \ - ac_cv_file__dev_ptc=no \ - ac_cv_working_tzset=yes \ -" - -def possibly_include_pgo(d): - # PGO currently causes builds to not be reproducible, so disable it for - # now. 
See YOCTO #13407 - if bb.utils.contains('MACHINE_FEATURES', 'qemu-usermode', True, False, d) and d.getVar('BUILD_REPRODUCIBLE_BINARIES') != '1': - return 'pgo' - - return '' - -PACKAGECONFIG_class-target ??= "readline ${@possibly_include_pgo(d)} gdbm" -PACKAGECONFIG_class-native ??= "readline gdbm" -PACKAGECONFIG_class-nativesdk ??= "readline gdbm" -PACKAGECONFIG[readline] = ",,readline" -# Use profile guided optimisation by running PyBench inside qemu-user -PACKAGECONFIG[pgo] = "--enable-optimizations,,qemu-native" -PACKAGECONFIG[tk] = ",,tk" -PACKAGECONFIG[gdbm] = ",,gdbm" - -do_configure_prepend () { - mkdir -p ${B}/Modules - cat > ${B}/Modules/Setup.local << EOF -*disabled* -${@bb.utils.contains('PACKAGECONFIG', 'gdbm', '', '_gdbm _dbm', d)} -${@bb.utils.contains('PACKAGECONFIG', 'readline', '', 'readline', d)} -EOF -} - -CPPFLAGS_append = " -I${STAGING_INCDIR}/ncursesw -I${STAGING_INCDIR}/uuid" - -EXTRA_OEMAKE = '\ - STAGING_LIBDIR=${STAGING_LIBDIR} \ - STAGING_INCDIR=${STAGING_INCDIR} \ - LIB=${baselib} \ -' - -do_compile_prepend_class-target() { - if ${@bb.utils.contains('PACKAGECONFIG', 'pgo', 'true', 'false', d)}; then - qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_TARGET}', ['${B}', '${STAGING_DIR_TARGET}/${base_libdir}'])}" - cat >pgo-wrapper < ${B}/Modules/Setup.local << EOF +*disabled* +${@bb.utils.contains('PACKAGECONFIG', 'gdbm', '', '_gdbm _dbm', d)} +${@bb.utils.contains('PACKAGECONFIG', 'readline', '', 'readline', d)} +EOF +} + +CPPFLAGS_append = " -I${STAGING_INCDIR}/ncursesw -I${STAGING_INCDIR}/uuid" + +EXTRA_OEMAKE = '\ + STAGING_LIBDIR=${STAGING_LIBDIR} \ + STAGING_INCDIR=${STAGING_INCDIR} \ + LIB=${baselib} \ +' + +do_compile_prepend_class-target() { + if ${@bb.utils.contains('PACKAGECONFIG', 'pgo', 'true', 'false', d)}; then + qemu_binary="${@qemu_wrapper_cmdline(d, '${STAGING_DIR_TARGET}', ['${B}', '${STAGING_DIR_TARGET}/${base_libdir}'])}" + cat >pgo-wrapper < configure | 4 ++++ 1 file changed, 4 insertions(+) -diff --git a/configure b/configure -index 36646e7b..48912a94 100755 ---- a/configure -+++ b/configure -@@ -1601,6 +1601,10 @@ for opt do +Index: qemu-5.1.0/configure +=================================================================== +--- qemu-5.1.0.orig/configure ++++ qemu-5.1.0/configure +@@ -1640,6 +1640,10 @@ for opt do ;; - --gdb=*) gdb_bin="$optarg" + --disable-libdaxctl) libdaxctl=no ;; + --enable-libudev) libudev="yes" + ;; @@ -27,6 +27,3 @@ index 36646e7b..48912a94 100755 *) echo "ERROR: unknown option $opt" echo "Try '$0 --help' for more information" --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Add-missing-wacom-HID-descriptor.patch b/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Add-missing-wacom-HID-descriptor.patch index ae89ae09d..46c9da08a 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Add-missing-wacom-HID-descriptor.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Add-missing-wacom-HID-descriptor.patch @@ -20,11 +20,11 @@ Signed-off-by: Sakib Sajal hw/usb/dev-wacom.c | 94 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 93 insertions(+), 1 deletion(-) -diff --git a/hw/usb/dev-wacom.c b/hw/usb/dev-wacom.c -index 8ed57b3b..1502928b 100644 ---- a/hw/usb/dev-wacom.c -+++ b/hw/usb/dev-wacom.c -@@ -74,6 +74,89 @@ static const USBDescStrings desc_strings = { +Index: qemu-5.1.0/hw/usb/dev-wacom.c +=================================================================== +--- qemu-5.1.0.orig/hw/usb/dev-wacom.c ++++ qemu-5.1.0/hw/usb/dev-wacom.c +@@ -74,6 +74,89 @@ static const 
USBDescStrings desc_strings [STR_SERIALNUMBER] = "1", }; @@ -114,7 +114,7 @@ index 8ed57b3b..1502928b 100644 static const USBDescIface desc_iface_wacom = { .bInterfaceNumber = 0, .bNumEndpoints = 1, -@@ -91,7 +174,7 @@ static const USBDescIface desc_iface_wacom = { +@@ -91,7 +174,7 @@ static const USBDescIface desc_iface_wac 0x00, /* u8 country_code */ 0x01, /* u8 num_descriptors */ 0x22, /* u8 type: Report */ @@ -123,7 +123,7 @@ index 8ed57b3b..1502928b 100644 }, }, }, -@@ -271,6 +354,15 @@ static void usb_wacom_handle_control(USBDevice *dev, USBPacket *p, +@@ -271,6 +354,15 @@ static void usb_wacom_handle_control(USB } switch (request) { @@ -139,6 +139,3 @@ index 8ed57b3b..1502928b 100644 case WACOM_SET_REPORT: if (s->mouse_grabbed) { qemu_remove_mouse_event_handler(s->eh_entry); --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Do-not-include-file-if-not-exists.patch b/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Do-not-include-file-if-not-exists.patch index 6e38d814c..678e05946 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Do-not-include-file-if-not-exists.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0001-qemu-Do-not-include-file-if-not-exists.patch @@ -15,10 +15,10 @@ Signed-off-by: Sakib Sajal linux-user/syscall.c | 2 ++ 1 file changed, 2 insertions(+) -diff --git a/linux-user/syscall.c b/linux-user/syscall.c -index d6f8cc97..a61420e7 100644 ---- a/linux-user/syscall.c -+++ b/linux-user/syscall.c +Index: qemu-5.1.0/linux-user/syscall.c +=================================================================== +--- qemu-5.1.0.orig/linux-user/syscall.c ++++ qemu-5.1.0/linux-user/syscall.c @@ -109,7 +109,9 @@ #include #include @@ -28,7 +28,4 @@ index d6f8cc97..a61420e7 100644 +#endif #include #include - #include "linux_loop.h" --- -2.24.0 - + #ifdef HAVE_DRM_H diff --git a/poky/meta/recipes-devtools/qemu/qemu/0002-Add-subpackage-ptest-which-runs-all-unit-test-cases-.patch b/poky/meta/recipes-devtools/qemu/qemu/0002-Add-subpackage-ptest-which-runs-all-unit-test-cases-.patch index 3d268870f..f379948f1 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0002-Add-subpackage-ptest-which-runs-all-unit-test-cases-.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0002-Add-subpackage-ptest-which-runs-all-unit-test-cases-.patch @@ -16,11 +16,11 @@ Signed-off-by: Sakib Sajal tests/Makefile.include | 8 ++++++++ 1 file changed, 8 insertions(+) -diff --git a/tests/Makefile.include b/tests/Makefile.include -index 51de6762..1ea4d322 100644 ---- a/tests/Makefile.include -+++ b/tests/Makefile.include -@@ -941,4 +941,12 @@ all: $(QEMU_IOTESTS_HELPERS-y) +Index: qemu-5.1.0/tests/Makefile.include +=================================================================== +--- qemu-5.1.0.orig/tests/Makefile.include ++++ qemu-5.1.0/tests/Makefile.include +@@ -982,4 +982,12 @@ all: $(QEMU_IOTESTS_HELPERS-y) -include $(wildcard tests/qtest/*.d) -include $(wildcard tests/qtest/libqos/*.d) @@ -33,6 +33,3 @@ index 51de6762..1ea4d322 100644 + done + endif --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/0003-qemu-Add-addition-environment-space-to-boot-loader-q.patch b/poky/meta/recipes-devtools/qemu/qemu/0003-qemu-Add-addition-environment-space-to-boot-loader-q.patch index 012d60d8f..33cef4221 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0003-qemu-Add-addition-environment-space-to-boot-loader-q.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0003-qemu-Add-addition-environment-space-to-boot-loader-q.patch @@ -15,13 +15,13 @@ Signed-off-by: Jason Wessel Signed-off-by: 
Roy Li --- - hw/mips/mips_malta.c | 2 +- + hw/mips/malta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/hw/mips/mips_malta.c b/hw/mips/mips_malta.c -index 92e9ca5b..3a7f3954 100644 ---- a/hw/mips/mips_malta.c -+++ b/hw/mips/mips_malta.c +Index: qemu-5.1.0/hw/mips/malta.c +=================================================================== +--- qemu-5.1.0.orig/hw/mips/malta.c ++++ qemu-5.1.0/hw/mips/malta.c @@ -59,7 +59,7 @@ #define ENVP_ADDR 0x80002000l diff --git a/poky/meta/recipes-devtools/qemu/qemu/0004-qemu-disable-Valgrind.patch b/poky/meta/recipes-devtools/qemu/qemu/0004-qemu-disable-Valgrind.patch index bc30397e8..71f537f9b 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0004-qemu-disable-Valgrind.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0004-qemu-disable-Valgrind.patch @@ -12,11 +12,11 @@ Signed-off-by: Ross Burton configure | 9 --------- 1 file changed, 9 deletions(-) -diff --git a/configure b/configure -index 6099be1d..a766017b 100755 ---- a/configure -+++ b/configure -@@ -5390,15 +5390,6 @@ fi +Index: qemu-5.1.0/configure +=================================================================== +--- qemu-5.1.0.orig/configure ++++ qemu-5.1.0/configure +@@ -5751,15 +5751,6 @@ fi # check if we have valgrind/valgrind.h valgrind_h=no diff --git a/poky/meta/recipes-devtools/qemu/qemu/0005-qemu-native-set-ld.bfd-fix-cflags-and-set-some-envir.patch b/poky/meta/recipes-devtools/qemu/qemu/0005-qemu-native-set-ld.bfd-fix-cflags-and-set-some-envir.patch index 2c5b241e4..02ebbee1a 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0005-qemu-native-set-ld.bfd-fix-cflags-and-set-some-envir.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0005-qemu-native-set-ld.bfd-fix-cflags-and-set-some-envir.patch @@ -11,11 +11,11 @@ Signed-off-by: Sakib Sajal configure | 4 ---- 1 file changed, 4 deletions(-) -diff --git a/configure b/configure -index 83c65439..6bdf488c 100755 ---- a/configure -+++ b/configure -@@ -6251,10 +6251,6 @@ write_c_skeleton +Index: qemu-5.1.0/configure +=================================================================== +--- qemu-5.1.0.orig/configure ++++ qemu-5.1.0/configure +@@ -6515,10 +6515,6 @@ write_c_skeleton if test "$gcov" = "yes" ; then QEMU_CFLAGS="-fprofile-arcs -ftest-coverage -g $QEMU_CFLAGS" QEMU_LDFLAGS="-fprofile-arcs -ftest-coverage $QEMU_LDFLAGS" @@ -26,6 +26,3 @@ index 83c65439..6bdf488c 100755 fi if test "$have_asan" = "yes"; then --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/0006-chardev-connect-socket-to-a-spawned-command.patch b/poky/meta/recipes-devtools/qemu/qemu/0006-chardev-connect-socket-to-a-spawned-command.patch index 0810ae84c..98fd5e913 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0006-chardev-connect-socket-to-a-spawned-command.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0006-chardev-connect-socket-to-a-spawned-command.patch @@ -51,11 +51,11 @@ Signed-off-by: Patrick Ohly qapi/char.json | 5 +++ 3 files changed, 109 insertions(+) -diff --git a/chardev/char-socket.c b/chardev/char-socket.c -index 185fe38d..54fa4234 100644 ---- a/chardev/char-socket.c -+++ b/chardev/char-socket.c -@@ -1288,6 +1288,67 @@ static bool qmp_chardev_validate_socket(ChardevSocket *sock, +Index: qemu-5.1.0/chardev/char-socket.c +=================================================================== +--- qemu-5.1.0.orig/chardev/char-socket.c ++++ qemu-5.1.0/chardev/char-socket.c +@@ -1292,6 +1292,67 @@ static bool qmp_chardev_validate_socket( return true; } @@ -123,7 +123,7 @@ index 185fe38d..54fa4234 100644 static void 
qmp_chardev_open_socket(Chardev *chr, ChardevBackend *backend, -@@ -1296,6 +1357,9 @@ static void qmp_chardev_open_socket(Chardev *chr, +@@ -1300,6 +1361,9 @@ static void qmp_chardev_open_socket(Char { SocketChardev *s = SOCKET_CHARDEV(chr); ChardevSocket *sock = backend->u.socket.data; @@ -133,7 +133,7 @@ index 185fe38d..54fa4234 100644 bool do_nodelay = sock->has_nodelay ? sock->nodelay : false; bool is_listen = sock->has_server ? sock->server : true; bool is_telnet = sock->has_telnet ? sock->telnet : false; -@@ -1361,6 +1425,14 @@ static void qmp_chardev_open_socket(Chardev *chr, +@@ -1365,6 +1429,14 @@ static void qmp_chardev_open_socket(Char update_disconnected_filename(s); @@ -148,13 +148,15 @@ index 185fe38d..54fa4234 100644 if (s->is_listen) { if (qmp_chardev_open_socket_server(chr, is_telnet || is_tn3270, is_waitconnect, errp) < 0) { -@@ -1380,9 +1452,26 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, +@@ -1384,11 +1456,27 @@ static void qemu_chr_parse_socket(QemuOp const char *host = qemu_opt_get(opts, "host"); const char *port = qemu_opt_get(opts, "port"); const char *fd = qemu_opt_get(opts, "fd"); +#ifndef _WIN32 + const char *cmd = qemu_opt_get(opts, "cmd"); +#endif + bool tight = qemu_opt_get_bool(opts, "tight", true); + bool abstract = qemu_opt_get_bool(opts, "abstract", false); SocketAddressLegacy *addr; ChardevSocket *sock; @@ -171,19 +173,19 @@ index 185fe38d..54fa4234 100644 + } + } else +#endif -+ if ((!!path + !!fd + !!host) != 1) { error_setg(errp, "Exactly one of 'path', 'fd' or 'host' required"); -@@ -1425,12 +1514,24 @@ static void qemu_chr_parse_socket(QemuOpts *opts, ChardevBackend *backend, +@@ -1431,12 +1519,24 @@ static void qemu_chr_parse_socket(QemuOp sock->has_tls_authz = qemu_opt_get(opts, "tls-authz"); sock->tls_authz = g_strdup(qemu_opt_get(opts, "tls-authz")); +- addr = g_new0(SocketAddressLegacy, 1); +#ifndef _WIN32 + sock->cmd = g_strdup(cmd); +#endif + - addr = g_new0(SocketAddressLegacy, 1); ++ addr = g_new0(SocketAddressLegacy, 1); +#ifndef _WIN32 + if (path || cmd) { +#else @@ -197,28 +199,28 @@ index 185fe38d..54fa4234 100644 +#else q_unix->path = g_strdup(path); +#endif + q_unix->tight = tight; + q_unix->abstract = abstract; } else if (host) { - addr->type = SOCKET_ADDRESS_LEGACY_KIND_INET; - addr->u.inet.data = g_new(InetSocketAddress, 1); -diff --git a/chardev/char.c b/chardev/char.c -index 7b6b2cb1..0c2ca64b 100644 ---- a/chardev/char.c -+++ b/chardev/char.c -@@ -837,6 +837,9 @@ QemuOptsList qemu_chardev_opts = { - },{ +Index: qemu-5.1.0/chardev/char.c +=================================================================== +--- qemu-5.1.0.orig/chardev/char.c ++++ qemu-5.1.0/chardev/char.c +@@ -826,6 +826,9 @@ QemuOptsList qemu_chardev_opts = { .name = "path", .type = QEMU_OPT_STRING, -+ },{ + },{ + .name = "cmd", + .type = QEMU_OPT_STRING, - },{ ++ },{ .name = "host", .type = QEMU_OPT_STRING, -diff --git a/qapi/char.json b/qapi/char.json -index a6e81ac7..517962c6 100644 ---- a/qapi/char.json -+++ b/qapi/char.json -@@ -247,6 +247,10 @@ + },{ +Index: qemu-5.1.0/qapi/char.json +=================================================================== +--- qemu-5.1.0.orig/qapi/char.json ++++ qemu-5.1.0/qapi/char.json +@@ -250,6 +250,10 @@ # # @addr: socket address to listen on (server=true) # or connect to (server=false) @@ -229,7 +231,7 @@ index a6e81ac7..517962c6 100644 # @tls-creds: the ID of the TLS credentials object (since 2.6) # @tls-authz: the ID of the QAuthZ authorization object against which # the client's x509 
distinguished name will be validated. This -@@ -272,6 +276,7 @@ +@@ -276,6 +280,7 @@ ## { 'struct': 'ChardevSocket', 'data': { 'addr': 'SocketAddressLegacy', diff --git a/poky/meta/recipes-devtools/qemu/qemu/0007-apic-fixup-fallthrough-to-PIC.patch b/poky/meta/recipes-devtools/qemu/qemu/0007-apic-fixup-fallthrough-to-PIC.patch index 89baad9b7..034ac5782 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0007-apic-fixup-fallthrough-to-PIC.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0007-apic-fixup-fallthrough-to-PIC.patch @@ -29,11 +29,11 @@ Signed-off-by: He Zhe hw/intc/apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/hw/intc/apic.c b/hw/intc/apic.c -index 2a74f7b4..4d5da365 100644 ---- a/hw/intc/apic.c -+++ b/hw/intc/apic.c -@@ -603,7 +603,7 @@ int apic_accept_pic_intr(DeviceState *dev) +Index: qemu-5.1.0/hw/intc/apic.c +=================================================================== +--- qemu-5.1.0.orig/hw/intc/apic.c ++++ qemu-5.1.0/hw/intc/apic.c +@@ -603,7 +603,7 @@ int apic_accept_pic_intr(DeviceState *de APICCommonState *s = APIC(dev); uint32_t lvt0; diff --git a/poky/meta/recipes-devtools/qemu/qemu/0008-linux-user-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch b/poky/meta/recipes-devtools/qemu/qemu/0008-linux-user-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch index 30bb4ddf2..d20f04ee5 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0008-linux-user-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0008-linux-user-Fix-webkitgtk-hangs-on-32-bit-x86-target.patch @@ -18,11 +18,11 @@ Signed-off-by: Alistair Francis linux-user/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/linux-user/main.c b/linux-user/main.c -index 6ff7851e..ebff0485 100644 ---- a/linux-user/main.c -+++ b/linux-user/main.c -@@ -78,7 +78,7 @@ int have_guest_base; +Index: qemu-5.1.0/linux-user/main.c +=================================================================== +--- qemu-5.1.0.orig/linux-user/main.c ++++ qemu-5.1.0/linux-user/main.c +@@ -92,7 +92,7 @@ static int last_log_mask; (TARGET_LONG_BITS == 32 || defined(TARGET_ABI32)) /* There are a number of places where we assign reserved_va to a variable of type abi_ulong and expect it to fit. Avoid the last page. */ diff --git a/poky/meta/recipes-devtools/qemu/qemu/0009-Fix-webkitgtk-builds.patch b/poky/meta/recipes-devtools/qemu/qemu/0009-Fix-webkitgtk-builds.patch index eef3f3f97..f2a44986b 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0009-Fix-webkitgtk-builds.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0009-Fix-webkitgtk-builds.patch @@ -28,29 +28,29 @@ Signed-off-by: Sakib Sajal linux-user/syscall.c | 5 +---- 4 files changed, 10 insertions(+), 23 deletions(-) -diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h -index 49384bb6..93b12519 100644 ---- a/include/exec/cpu-all.h -+++ b/include/exec/cpu-all.h -@@ -162,12 +162,8 @@ extern unsigned long guest_base; - extern int have_guest_base; - extern unsigned long reserved_va; - --#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS --#define GUEST_ADDR_MAX (~0ul) --#else --#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \ +Index: qemu-5.1.0/include/exec/cpu-all.h +=================================================================== +--- qemu-5.1.0.orig/include/exec/cpu-all.h ++++ qemu-5.1.0/include/exec/cpu-all.h +@@ -176,11 +176,8 @@ extern unsigned long reserved_va; + * avoid setting bits at the top of guest addresses that might need + * to be used for tags. 
+ */ +-#define GUEST_ADDR_MAX_ \ +- ((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \ +- UINT32_MAX : ~0ul) +-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_) +- +#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \ - (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) --#endif ++ (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) #else #include "exec/hwaddr.h" -diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h -index 53de1975..cf19ed2e 100644 ---- a/include/exec/cpu_ldst.h -+++ b/include/exec/cpu_ldst.h -@@ -70,7 +70,10 @@ typedef uint64_t abi_ptr; +Index: qemu-5.1.0/include/exec/cpu_ldst.h +=================================================================== +--- qemu-5.1.0.orig/include/exec/cpu_ldst.h ++++ qemu-5.1.0/include/exec/cpu_ldst.h +@@ -75,7 +75,10 @@ typedef uint64_t abi_ptr; #if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS #define guest_addr_valid(x) (1) #else @@ -62,11 +62,11 @@ index 53de1975..cf19ed2e 100644 #endif #define h2g_valid(x) guest_addr_valid((unsigned long)(x) - guest_base) -diff --git a/linux-user/mmap.c b/linux-user/mmap.c -index e3780337..1d4aba95 100644 ---- a/linux-user/mmap.c -+++ b/linux-user/mmap.c -@@ -71,7 +71,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) +Index: qemu-5.1.0/linux-user/mmap.c +=================================================================== +--- qemu-5.1.0.orig/linux-user/mmap.c ++++ qemu-5.1.0/linux-user/mmap.c +@@ -71,7 +71,7 @@ int target_mprotect(abi_ulong start, abi return -TARGET_EINVAL; len = TARGET_PAGE_ALIGN(len); end = start + len; @@ -75,18 +75,18 @@ index e3780337..1d4aba95 100644 return -TARGET_ENOMEM; } prot &= PROT_READ | PROT_WRITE | PROT_EXEC; -@@ -467,8 +467,8 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, +@@ -467,8 +467,8 @@ abi_long target_mmap(abi_ulong start, ab * It can fail only on 64-bit host with 32-bit target. * On any other target/host host mmap() handles this error correctly. 
*/ -- if (!guest_range_valid(start, len)) { +- if (end < start || !guest_range_valid(start, len)) { - errno = ENOMEM; -+ if ((unsigned long)start + len - 1 > (abi_ulong) -1) { ++ if (end < start || ((unsigned long)start + len - 1 > (abi_ulong) -1)) { + errno = EINVAL; goto fail; } -@@ -604,10 +604,8 @@ int target_munmap(abi_ulong start, abi_ulong len) +@@ -604,10 +604,8 @@ int target_munmap(abi_ulong start, abi_u if (start & ~TARGET_PAGE_MASK) return -TARGET_EINVAL; len = TARGET_PAGE_ALIGN(len); @@ -98,7 +98,7 @@ index e3780337..1d4aba95 100644 mmap_lock(); end = start + len; real_start = start & qemu_host_page_mask; -@@ -662,13 +660,6 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, +@@ -662,13 +660,6 @@ abi_long target_mremap(abi_ulong old_add int prot; void *host_addr; @@ -112,11 +112,11 @@ index e3780337..1d4aba95 100644 mmap_lock(); if (flags & MREMAP_FIXED) { -diff --git a/linux-user/syscall.c b/linux-user/syscall.c -index 05f03919..d6f8cc97 100644 ---- a/linux-user/syscall.c -+++ b/linux-user/syscall.c -@@ -4287,9 +4287,6 @@ static inline abi_ulong do_shmat(CPUArchState *cpu_env, +Index: qemu-5.1.0/linux-user/syscall.c +=================================================================== +--- qemu-5.1.0.orig/linux-user/syscall.c ++++ qemu-5.1.0/linux-user/syscall.c +@@ -4336,9 +4336,6 @@ static inline abi_ulong do_shmat(CPUArch return -TARGET_EINVAL; } } @@ -126,7 +126,7 @@ index 05f03919..d6f8cc97 100644 mmap_lock(); -@@ -7247,7 +7244,7 @@ static int open_self_maps(void *cpu_env, int fd) +@@ -7376,7 +7373,7 @@ static int open_self_maps(void *cpu_env, const char *path; max = h2g_valid(max - 1) ? @@ -135,6 +135,3 @@ index 05f03919..d6f8cc97 100644 if (page_check_range(h2g(min), max - min, flags) == -1) { continue; --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/0010-configure-Add-pkg-config-handling-for-libgcrypt.patch b/poky/meta/recipes-devtools/qemu/qemu/0010-configure-Add-pkg-config-handling-for-libgcrypt.patch index 34df78b7f..d7e3fffdd 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/0010-configure-Add-pkg-config-handling-for-libgcrypt.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/0010-configure-Add-pkg-config-handling-for-libgcrypt.patch @@ -14,11 +14,11 @@ Signed-off-by: He Zhe configure | 48 ++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 8 deletions(-) -diff --git a/configure b/configure -index 72f11aca..cac271ce 100755 ---- a/configure -+++ b/configure -@@ -2875,6 +2875,30 @@ has_libgcrypt() { +Index: qemu-5.1.0/configure +=================================================================== +--- qemu-5.1.0.orig/configure ++++ qemu-5.1.0/configure +@@ -3084,6 +3084,30 @@ has_libgcrypt() { return 0 } @@ -49,7 +49,7 @@ index 72f11aca..cac271ce 100755 if test "$nettle" != "no"; then pass="no" -@@ -2915,7 +2939,14 @@ fi +@@ -3124,7 +3148,14 @@ fi if test "$gcrypt" != "no"; then pass="no" @@ -65,7 +65,7 @@ index 72f11aca..cac271ce 100755 gcrypt_cflags=$(libgcrypt-config --cflags) gcrypt_libs=$(libgcrypt-config --libs) # Debian has removed -lgpg-error from libgcrypt-config -@@ -2925,15 +2956,16 @@ if test "$gcrypt" != "no"; then +@@ -3134,15 +3165,16 @@ if test "$gcrypt" != "no"; then then gcrypt_libs="$gcrypt_libs -lgpg-error" fi diff --git a/poky/meta/recipes-devtools/qemu/qemu/0013-cpus.c-Add-error-messages-when-qemi_cpu_kick_thread-.patch b/poky/meta/recipes-devtools/qemu/qemu/0013-cpus.c-Add-error-messages-when-qemi_cpu_kick_thread-.patch deleted file mode 100644 index e5ebfc126..000000000 --- 
a/poky/meta/recipes-devtools/qemu/qemu/0013-cpus.c-Add-error-messages-when-qemi_cpu_kick_thread-.patch +++ /dev/null @@ -1,74 +0,0 @@ -From 0a53e906510cce1f32bc04a11e81ea40f834dac4 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?An=C3=ADbal=20Lim=C3=B3n?= -Date: Wed, 12 Aug 2015 15:11:30 -0500 -Subject: [PATCH] cpus.c: Add error messages when qemi_cpu_kick_thread fails. -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -Add custom_debug.h with function for print backtrace information. -When pthread_kill fails in qemu_cpu_kick_thread display backtrace and -current cpu information. - -Upstream-Status: Inappropriate -Signed-off-by: Aníbal Limón - ---- - cpus.c | 5 +++++ - custom_debug.h | 24 ++++++++++++++++++++++++ - 2 files changed, 29 insertions(+) - create mode 100644 custom_debug.h - -diff --git a/cpus.c b/cpus.c -index e83f72b4..e6e2576e 100644 ---- a/cpus.c -+++ b/cpus.c -@@ -1769,6 +1769,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg) - return NULL; - } - -+#include "custom_debug.h" -+ - static void qemu_cpu_kick_thread(CPUState *cpu) - { - #ifndef _WIN32 -@@ -1781,6 +1783,9 @@ static void qemu_cpu_kick_thread(CPUState *cpu) - err = pthread_kill(cpu->thread->thread, SIG_IPI); - if (err && err != ESRCH) { - fprintf(stderr, "qemu:%s: %s", __func__, strerror(err)); -+ fprintf(stderr, "CPU #%d:\n", cpu->cpu_index); -+ cpu_dump_state(cpu, stderr, 0); -+ backtrace_print(); - exit(1); - } - #else /* _WIN32 */ -diff --git a/custom_debug.h b/custom_debug.h -new file mode 100644 -index 00000000..f029e455 ---- /dev/null -+++ b/custom_debug.h -@@ -0,0 +1,24 @@ -+#include -+#include -+#define BACKTRACE_MAX 128 -+static void backtrace_print(void) -+{ -+ int nfuncs = 0; -+ void *buf[BACKTRACE_MAX]; -+ char **symbols; -+ int i; -+ -+ nfuncs = backtrace(buf, BACKTRACE_MAX); -+ -+ symbols = backtrace_symbols(buf, nfuncs); -+ if (symbols == NULL) { -+ fprintf(stderr, "backtrace_print failed to get symbols"); -+ return; -+ } -+ -+ fprintf(stderr, "Backtrace ...\n"); -+ for (i = 0; i < nfuncs; i++) -+ fprintf(stderr, "%s\n", symbols[i]); -+ -+ free(symbols); -+} diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch deleted file mode 100644 index 19f26ae5b..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-10761.patch +++ /dev/null @@ -1,151 +0,0 @@ -From 5c4fe018c025740fef4a0a4421e8162db0c3eefd Mon Sep 17 00:00:00 2001 -From: Eric Blake -Date: Mon, 8 Jun 2020 13:26:37 -0500 -Subject: [PATCH] nbd/server: Avoid long error message assertions - CVE-2020-10761 - -Ever since commit 36683283 (v2.8), the server code asserts that error -strings sent to the client are well-formed per the protocol by not -exceeding the maximum string length of 4096. At the time the server -first started sending error messages, the assertion could not be -triggered, because messages were completely under our control. 
-However, over the years, we have added latent scenarios where a client -could trigger the server to attempt an error message that would -include the client's information if it passed other checks first: - -- requesting NBD_OPT_INFO/GO on an export name that is not present - (commit 0cfae925 in v2.12 echoes the name) - -- requesting NBD_OPT_LIST/SET_META_CONTEXT on an export name that is - not present (commit e7b1948d in v2.12 echoes the name) - -At the time, those were still safe because we flagged names larger -than 256 bytes with a different message; but that changed in commit -93676c88 (v4.2) when we raised the name limit to 4096 to match the NBD -string limit. (That commit also failed to change the magic number -4096 in nbd_negotiate_send_rep_err to the just-introduced named -constant.) So with that commit, long client names appended to server -text can now trigger the assertion, and thus be used as a denial of -service attack against a server. As a mitigating factor, if the -server requires TLS, the client cannot trigger the problematic paths -unless it first supplies TLS credentials, and such trusted clients are -less likely to try to intentionally crash the server. - -We may later want to further sanitize the user-supplied strings we -place into our error messages, such as scrubbing out control -characters, but that is less important to the CVE fix, so it can be a -later patch to the new nbd_sanitize_name. - -Consideration was given to changing the assertion in -nbd_negotiate_send_rep_verr to instead merely log a server error and -truncate the message, to avoid leaving a latent path that could -trigger a future CVE DoS on any new error message. However, this -merely complicates the code for something that is already (correctly) -flagging coding errors, and now that we are aware of the long message -pitfall, we are less likely to introduce such errors in the future, -which would make such error handling dead code. - -Reported-by: Xueqiang Wei -CC: qemu-stable@nongnu.org -Fixes: https://bugzilla.redhat.com/1843684 CVE-2020-10761 -Fixes: 93676c88d7 -Signed-off-by: Eric Blake -Message-Id: <20200610163741.3745251-2-eblake@redhat.com> -Reviewed-by: Vladimir Sementsov-Ogievskiy - -Upstream-Status: Backport [https://github.com/qemu/qemu/commit/5c4fe018c025740fef4a0a4421e8162db0c3eefd] -CVE: CVE-2020-10761 -Signed-off-by: Chee Yang Lee - ---- - nbd/server.c | 23 ++++++++++++++++++++--- - tests/qemu-iotests/143 | 4 ++++ - tests/qemu-iotests/143.out | 2 ++ - 3 files changed, 26 insertions(+), 3 deletions(-) - -diff --git a/nbd/server.c b/nbd/server.c -index 02b1ed08014..20754e9ebc3 100644 ---- a/nbd/server.c -+++ b/nbd/server.c -@@ -217,7 +217,7 @@ nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, - - msg = g_strdup_vprintf(fmt, va); - len = strlen(msg); -- assert(len < 4096); -+ assert(len < NBD_MAX_STRING_SIZE); - trace_nbd_negotiate_send_rep_err(msg); - ret = nbd_negotiate_send_rep_len(client, type, len, errp); - if (ret < 0) { -@@ -231,6 +231,19 @@ nbd_negotiate_send_rep_verr(NBDClient *client, uint32_t type, - return 0; - } - -+/* -+ * Return a malloc'd copy of @name suitable for use in an error reply. -+ */ -+static char * -+nbd_sanitize_name(const char *name) -+{ -+ if (strnlen(name, 80) < 80) { -+ return g_strdup(name); -+ } -+ /* XXX Should we also try to sanitize any control characters? */ -+ return g_strdup_printf("%.80s...", name); -+} -+ - /* Send an error reply. - * Return -errno on error, 0 on success. 
*/ - static int GCC_FMT_ATTR(4, 5) -@@ -595,9 +608,11 @@ static int nbd_negotiate_handle_info(NBDClient *client, Error **errp) - - exp = nbd_export_find(name); - if (!exp) { -+ g_autofree char *sane_name = nbd_sanitize_name(name); -+ - return nbd_negotiate_send_rep_err(client, NBD_REP_ERR_UNKNOWN, - errp, "export '%s' not present", -- name); -+ sane_name); - } - - /* Don't bother sending NBD_INFO_NAME unless client requested it */ -@@ -995,8 +1010,10 @@ static int nbd_negotiate_meta_queries(NBDClient *client, - - meta->exp = nbd_export_find(export_name); - if (meta->exp == NULL) { -+ g_autofree char *sane_name = nbd_sanitize_name(export_name); -+ - return nbd_opt_drop(client, NBD_REP_ERR_UNKNOWN, errp, -- "export '%s' not present", export_name); -+ "export '%s' not present", sane_name); - } - - ret = nbd_opt_read(client, &nb_queries, sizeof(nb_queries), errp); -diff --git a/tests/qemu-iotests/143 b/tests/qemu-iotests/143 -index f649b361950..d2349903b1b 100755 ---- a/tests/qemu-iotests/143 -+++ b/tests/qemu-iotests/143 -@@ -58,6 +58,10 @@ _send_qemu_cmd $QEMU_HANDLE \ - $QEMU_IO_PROG -f raw -c quit \ - "nbd+unix:///no_such_export?socket=$SOCK_DIR/nbd" 2>&1 \ - | _filter_qemu_io | _filter_nbd -+# Likewise, with longest possible name permitted in NBD protocol -+$QEMU_IO_PROG -f raw -c quit \ -+ "nbd+unix:///$(printf %4096d 1 | tr ' ' a)?socket=$SOCK_DIR/nbd" 2>&1 \ -+ | _filter_qemu_io | _filter_nbd | sed 's/aaaa*aa/aa--aa/' - - _send_qemu_cmd $QEMU_HANDLE \ - "{ 'execute': 'quit' }" \ -diff --git a/tests/qemu-iotests/143.out b/tests/qemu-iotests/143.out -index 1f4001c6013..fc9c0a761fa 100644 ---- a/tests/qemu-iotests/143.out -+++ b/tests/qemu-iotests/143.out -@@ -5,6 +5,8 @@ QA output created by 143 - {"return": {}} - qemu-io: can't open device nbd+unix:///no_such_export?socket=SOCK_DIR/nbd: Requested export not available - server reported: export 'no_such_export' not present -+qemu-io: can't open device nbd+unix:///aa--aa1?socket=SOCK_DIR/nbd: Requested export not available -+server reported: export 'aa--aa...' not present - { 'execute': 'quit' } - {"return": {}} - {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "SHUTDOWN", "data": {"guest": false, "reason": "host-qmp-quit"}} diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13361.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13361.patch deleted file mode 100644 index e0acc70f3..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13361.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 369ff955a8497988d079c4e3fa1e93c2570c1c69 Mon Sep 17 00:00:00 2001 -From: Prasad J Pandit -Date: Fri, 15 May 2020 01:36:08 +0530 -Subject: [PATCH] es1370: check total frame count against current frame - -A guest user may set channel frame count via es1370_write() -such that, in es1370_transfer_audio(), total frame count -'size' is lesser than the number of frames that are processed -'cnt'. - - int cnt = d->frame_cnt >> 16; - int size = d->frame_cnt & 0xffff; - -if (size < cnt), it results in incorrect calculations leading -to OOB access issue(s). Add check to avoid it. 
- -Reported-by: Ren Ding -Reported-by: Hanqing Zhao -Signed-off-by: Prasad J Pandit -Message-id: 20200514200608.1744203-1-ppandit@redhat.com -Signed-off-by: Gerd Hoffmann - -Upstream-Status: Backport [https://lists.gnu.org/archive/html/qemu-devel/2020-05/msg03983.html] -CVE: CVE-2020-13361 -Signed-off-by: Chee Yang Lee ---- - hw/audio/es1370.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - -diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c -index 89c4dabcd44..5f8a83ff562 100644 ---- a/hw/audio/es1370.c -+++ b/hw/audio/es1370.c -@@ -643,6 +643,9 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, - int csc_bytes = (csc + 1) << d->shift; - int cnt = d->frame_cnt >> 16; - int size = d->frame_cnt & 0xffff; -+ if (size < cnt) { -+ return; -+ } - int left = ((size - cnt + 1) << 2) + d->leftover; - int transferred = 0; - int temp = MIN (max, MIN (left, csc_bytes)); -@@ -651,7 +654,7 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, - addr += (cnt << 2) + d->leftover; - - if (index == ADC_CHANNEL) { -- while (temp) { -+ while (temp > 0) { - int acquired, to_copy; - - to_copy = MIN ((size_t) temp, sizeof (tmpbuf)); -@@ -669,7 +672,7 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, - else { - SWVoiceOut *voice = s->dac_voice[index]; - -- while (temp) { -+ while (temp > 0) { - int copied, to_copy; - - to_copy = MIN ((size_t) temp, sizeof (tmpbuf)); diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13362.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13362.patch deleted file mode 100644 index af8d4ba8f..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13362.patch +++ /dev/null @@ -1,55 +0,0 @@ -From f50ab86a2620bd7e8507af865b164655ee921661 Mon Sep 17 00:00:00 2001 -From: Prasad J Pandit -Date: Thu, 14 May 2020 00:55:38 +0530 -Subject: [PATCH] megasas: use unsigned type for reply_queue_head and check - index - -A guest user may set 'reply_queue_head' field of MegasasState to -a negative value. Later in 'megasas_lookup_frame' it is used to -index into s->frames[] array. Use unsigned type to avoid OOB -access issue. - -Also check that 'index' value stays within s->frames[] bounds -through the while() loop in 'megasas_lookup_frame' to avoid OOB -access. 
- -Reported-by: Ren Ding -Reported-by: Hanqing Zhao -Reported-by: Alexander Bulekov -Signed-off-by: Prasad J Pandit -Acked-by: Alexander Bulekov -Message-Id: <20200513192540.1583887-2-ppandit@redhat.com> -Signed-off-by: Paolo Bonzini - -Upstream-Status: Backport [f50ab86a2620bd7e8507af865b164655ee921661] -CVE: CVE-2020-13362 -Signed-off-by: Sakib Sajal ---- - hw/scsi/megasas.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c -index af18c88b65..6ce598cd69 100644 ---- a/hw/scsi/megasas.c -+++ b/hw/scsi/megasas.c -@@ -112,7 +112,7 @@ typedef struct MegasasState { - uint64_t reply_queue_pa; - void *reply_queue; - int reply_queue_len; -- int reply_queue_head; -+ uint16_t reply_queue_head; - int reply_queue_tail; - uint64_t consumer_pa; - uint64_t producer_pa; -@@ -445,7 +445,7 @@ static MegasasCmd *megasas_lookup_frame(MegasasState *s, - - index = s->reply_queue_head; - -- while (num < s->fw_cmds) { -+ while (num < s->fw_cmds && index < MEGASAS_MAX_FRAMES) { - if (s->frames[index].pa && s->frames[index].pa == frame) { - cmd = &s->frames[index]; - break; --- -2.20.1 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13659.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13659.patch deleted file mode 100644 index 4d12ae8f1..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13659.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 77f55eac6c433e23e82a1b88b2d74f385c4c7d82 Mon Sep 17 00:00:00 2001 -From: Prasad J Pandit -Date: Tue, 26 May 2020 16:47:43 +0530 -Subject: [PATCH] exec: set map length to zero when returning NULL -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -When mapping physical memory into host's virtual address space, -'address_space_map' may return NULL if BounceBuffer is in_use. -Set and return '*plen = 0' to avoid later NULL pointer dereference. - -Reported-by: Alexander Bulekov -Fixes: https://bugs.launchpad.net/qemu/+bug/1878259 -Suggested-by: Paolo Bonzini -Suggested-by: Peter Maydell -Signed-off-by: Prasad J Pandit -Message-Id: <20200526111743.428367-1-ppandit@redhat.com> -Reviewed-by: Philippe Mathieu-Daudé -Signed-off-by: Paolo Bonzini - -Upstream-Status: Backport [77f55eac6c433e23e82a1b88b2d74f385c4c7d82] -CVE: CVE-2020-13659 -Signed-off-by: Sakib Sajal ---- - exec.c | 1 + - include/exec/memory.h | 3 ++- - 2 files changed, 3 insertions(+), 1 deletion(-) - -diff --git a/exec.c b/exec.c -index 9cbde85d8c..778263f1c6 100644 ---- a/exec.c -+++ b/exec.c -@@ -3540,6 +3540,7 @@ void *address_space_map(AddressSpace *as, - - if (!memory_access_is_direct(mr, is_write)) { - if (atomic_xchg(&bounce.in_use, true)) { -+ *plen = 0; - return NULL; - } - /* Avoid unbounded allocations */ -diff --git a/include/exec/memory.h b/include/exec/memory.h -index bd7fdd6081..af8ca7824e 100644 ---- a/include/exec/memory.h -+++ b/include/exec/memory.h -@@ -2314,7 +2314,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, - /* address_space_map: map a physical memory region into a host virtual address - * - * May map a subset of the requested range, given by and returned in @plen. -- * May return %NULL if resources needed to perform the mapping are exhausted. -+ * May return %NULL and set *@plen to zero(0), if resources needed to perform -+ * the mapping are exhausted. - * Use only for reads OR writes - not for read-modify-write operations. - * Use cpu_register_map_client() to know when retrying the map operation is - * likely to succeed. 
--- -2.20.1 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch deleted file mode 100644 index 049dab914..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13791.patch +++ /dev/null @@ -1,53 +0,0 @@ -From f7d6a635fa3b7797f9d072e280f065bf3cfcd24d Mon Sep 17 00:00:00 2001 -From: Prasad J Pandit -Date: Thu, 4 Jun 2020 17:05:25 +0530 -Subject: [PATCH] pci: assert configuration access is within bounds -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -While accessing PCI configuration bytes, assert that -'address + len' is within PCI configuration space. - -Generally it is within bounds. This is more of a defensive -assert, in case a buggy device was to send 'address' which -may go out of bounds. - -Suggested-by: Philippe Mathieu-Daudé -Signed-off-by: Prasad J Pandit -Message-Id: <20200604113525.58898-1-ppandit@redhat.com> -Reviewed-by: Michael S. Tsirkin -Signed-off-by: Michael S. Tsirkin - -Upstream-Status: Backport [f7d6a635fa3b7797f9d072e280f065bf3cfcd24d] -CVE: CVE-2020-13791 -Signed-off-by: Sakib Sajal ---- - hw/pci/pci.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/hw/pci/pci.c b/hw/pci/pci.c -index 70c66965f5..7bf2ae6d92 100644 ---- a/hw/pci/pci.c -+++ b/hw/pci/pci.c -@@ -1381,6 +1381,8 @@ uint32_t pci_default_read_config(PCIDevice *d, - { - uint32_t val = 0; - -+ assert(address + len <= pci_config_size(d)); -+ - if (pci_is_express_downstream_port(d) && - ranges_overlap(address, len, d->exp.exp_cap + PCI_EXP_LNKSTA, 2)) { - pcie_sync_bridge_lnk(d); -@@ -1394,6 +1396,8 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int - int i, was_irq_disabled = pci_irq_disabled(d); - uint32_t val = val_in; - -+ assert(addr + l <= pci_config_size(d)); -+ - for (i = 0; i < l; val >>= 8, ++i) { - uint8_t wmask = d->wmask[addr + i]; - uint8_t w1cmask = d->w1cmask[addr + i]; --- -2.20.1 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13800.patch b/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13800.patch deleted file mode 100644 index 52bfafbba..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu/CVE-2020-13800.patch +++ /dev/null @@ -1,63 +0,0 @@ -From a98610c429d52db0937c1e48659428929835c455 Mon Sep 17 00:00:00 2001 -From: Prasad J Pandit -Date: Thu, 4 Jun 2020 14:38:30 +0530 -Subject: [PATCH] ati-vga: check mm_index before recursive call - (CVE-2020-13800) -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -While accessing VGA registers via ati_mm_read/write routines, -a guest may set 's->regs.mm_index' such that it leads to infinite -recursion. Check mm_index value to avoid such recursion. Log an -error message for wrong values. 
- -Reported-by: Ren Ding -Reported-by: Hanqing Zhao -Reported-by: Yi Ren -Message-id: 20200604090830.33885-1-ppandit@redhat.com -Suggested-by: BALATON Zoltan -Suggested-by: Philippe Mathieu-Daudé -Signed-off-by: Prasad J Pandit -Signed-off-by: Gerd Hoffmann - -Upstream-Status: Backport [a98610c429d52db0937c1e48659428929835c455] -CVE: CVE-2020-13800 -Signed-off-by: Sakib Sajal ---- - hw/display/ati.c | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/hw/display/ati.c b/hw/display/ati.c -index 065f197678..67604e68de 100644 ---- a/hw/display/ati.c -+++ b/hw/display/ati.c -@@ -285,8 +285,11 @@ static uint64_t ati_mm_read(void *opaque, hwaddr addr, unsigned int size) - if (idx <= s->vga.vram_size - size) { - val = ldn_le_p(s->vga.vram_ptr + idx, size); - } -- } else { -+ } else if (s->regs.mm_index > MM_DATA + 3) { - val = ati_mm_read(s, s->regs.mm_index + addr - MM_DATA, size); -+ } else { -+ qemu_log_mask(LOG_GUEST_ERROR, -+ "ati_mm_read: mm_index too small: %u\n", s->regs.mm_index); - } - break; - case BIOS_0_SCRATCH ... BUS_CNTL - 1: -@@ -520,8 +523,11 @@ static void ati_mm_write(void *opaque, hwaddr addr, - if (idx <= s->vga.vram_size - size) { - stn_le_p(s->vga.vram_ptr + idx, size, data); - } -- } else { -+ } else if (s->regs.mm_index > MM_DATA + 3) { - ati_mm_write(s, s->regs.mm_index + addr - MM_DATA, data, size); -+ } else { -+ qemu_log_mask(LOG_GUEST_ERROR, -+ "ati_mm_write: mm_index too small: %u\n", s->regs.mm_index); - } - break; - case BIOS_0_SCRATCH ... BUS_CNTL - 1: --- -2.20.1 - diff --git a/poky/meta/recipes-devtools/qemu/qemu/find_datadir.patch b/poky/meta/recipes-devtools/qemu/qemu/find_datadir.patch index 74e9ba56c..9a4c11267 100644 --- a/poky/meta/recipes-devtools/qemu/qemu/find_datadir.patch +++ b/poky/meta/recipes-devtools/qemu/qemu/find_datadir.patch @@ -9,8 +9,10 @@ Upstream-Status: Submitted [qemu-devel@nongnu.org] Signed-off-by: Joe Slater ---- a/os-posix.c -+++ b/os-posix.c +Index: qemu-5.1.0/os-posix.c +=================================================================== +--- qemu-5.1.0.orig/os-posix.c ++++ qemu-5.1.0/os-posix.c @@ -82,8 +82,9 @@ void os_setup_signal_handling(void) /* @@ -19,10 +21,10 @@ Signed-off-by: Joe Slater * When running from the build tree this will be "$bindir/../pc-bios". - * Otherwise, this is CONFIG_QEMU_DATADIR. + * Otherwise, this is CONFIG_QEMU_DATADIR as constructed by configure. - */ - char *os_find_datadir(void) - { -@@ -93,6 +94,12 @@ char *os_find_datadir(void) + * + * The caller must use g_free() to free the returned data when it is + * no longer required. +@@ -96,6 +97,12 @@ char *os_find_datadir(void) exec_dir = qemu_get_exec_dir(); g_return_val_if_fail(exec_dir != NULL, NULL); diff --git a/poky/meta/recipes-devtools/qemu/qemu_5.0.0.bb b/poky/meta/recipes-devtools/qemu/qemu_5.0.0.bb deleted file mode 100644 index 9b0949026..000000000 --- a/poky/meta/recipes-devtools/qemu/qemu_5.0.0.bb +++ /dev/null @@ -1,33 +0,0 @@ -BBCLASSEXTEND = "nativesdk" - -require qemu.inc - -# error: a parameter list without types is only allowed in a function definition -# void (*_function)(sigval_t); -COMPATIBLE_HOST_libc-musl = 'null' - -DEPENDS = "glib-2.0 zlib pixman bison-native" - -RDEPENDS_${PN}_class-target += "bash" - -# Does not compile for -Og because that level does not clean up dead-code. -# See lockable.h. 
-# -DEBUG_BUILD = "0" - -EXTRA_OECONF_append_class-target = " --target-list=${@get_qemu_target_list(d)}" -EXTRA_OECONF_append_class-target_mipsarcho32 = "${@bb.utils.contains('BBEXTENDCURR', 'multilib', ' --disable-capstone', '', d)}" -EXTRA_OECONF_append_class-nativesdk = " --target-list=${@get_qemu_target_list(d)}" - -do_install_append_class-nativesdk() { - ${@bb.utils.contains('PACKAGECONFIG', 'gtk+', 'make_qemu_wrapper', '', d)} -} - -PACKAGECONFIG ??= " \ - fdt sdl kvm \ - ${@bb.utils.filter('DISTRO_FEATURES', 'alsa xen', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer glx', '' ,d)} \ -" -PACKAGECONFIG_class-nativesdk ??= "fdt sdl kvm \ - ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer glx', '' ,d)} \ -" diff --git a/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb b/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb new file mode 100644 index 000000000..9b0949026 --- /dev/null +++ b/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb @@ -0,0 +1,33 @@ +BBCLASSEXTEND = "nativesdk" + +require qemu.inc + +# error: a parameter list without types is only allowed in a function definition +# void (*_function)(sigval_t); +COMPATIBLE_HOST_libc-musl = 'null' + +DEPENDS = "glib-2.0 zlib pixman bison-native" + +RDEPENDS_${PN}_class-target += "bash" + +# Does not compile for -Og because that level does not clean up dead-code. +# See lockable.h. +# +DEBUG_BUILD = "0" + +EXTRA_OECONF_append_class-target = " --target-list=${@get_qemu_target_list(d)}" +EXTRA_OECONF_append_class-target_mipsarcho32 = "${@bb.utils.contains('BBEXTENDCURR', 'multilib', ' --disable-capstone', '', d)}" +EXTRA_OECONF_append_class-nativesdk = " --target-list=${@get_qemu_target_list(d)}" + +do_install_append_class-nativesdk() { + ${@bb.utils.contains('PACKAGECONFIG', 'gtk+', 'make_qemu_wrapper', '', d)} +} + +PACKAGECONFIG ??= " \ + fdt sdl kvm \ + ${@bb.utils.filter('DISTRO_FEATURES', 'alsa xen', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer glx', '' ,d)} \ +" +PACKAGECONFIG_class-nativesdk ??= "fdt sdl kvm \ + ${@bb.utils.contains('DISTRO_FEATURES', 'opengl', 'virglrenderer glx', '' ,d)} \ +" diff --git a/poky/meta/recipes-devtools/rpm/files/environment.d-rpm.sh b/poky/meta/recipes-devtools/rpm/files/environment.d-rpm.sh new file mode 100644 index 000000000..9b669a18d --- /dev/null +++ b/poky/meta/recipes-devtools/rpm/files/environment.d-rpm.sh @@ -0,0 +1 @@ +export RPM_CONFIGDIR="$OECORE_NATIVE_SYSROOT/usr/lib/rpm" diff --git a/poky/meta/recipes-devtools/rpm/rpm_4.15.1.bb b/poky/meta/recipes-devtools/rpm/rpm_4.15.1.bb index b5a0ac938..c9258632d 100644 --- a/poky/meta/recipes-devtools/rpm/rpm_4.15.1.bb +++ b/poky/meta/recipes-devtools/rpm/rpm_4.15.1.bb @@ -25,6 +25,7 @@ LICENSE = "GPL-2.0" LIC_FILES_CHKSUM = "file://COPYING;md5=c0bf017c0fd1920e6158a333acabfd4a" SRC_URI = "git://github.com/rpm-software-management/rpm;branch=rpm-4.15.x \ + file://environment.d-rpm.sh \ file://0001-Do-not-add-an-unsatisfiable-dependency-when-building.patch \ file://0001-Do-not-read-config-files-from-HOME.patch \ file://0001-When-cross-installing-execute-package-scriptlets-wit.patch \ @@ -112,6 +113,9 @@ do_install_append_class-nativesdk() { done rm -rf ${D}/var + + mkdir -p ${D}${SDKPATHNATIVE}/environment-setup.d + install -m 644 ${WORKDIR}/environment.d-rpm.sh ${D}${SDKPATHNATIVE}/environment-setup.d/rpm.sh } # Rpm's make install creates var/tmp which clashes with base-files packaging @@ -129,6 +133,7 @@ do_install_append () { FILES_${PN} += "${libdir}/rpm-plugins/*.so \ " 
+FILES_${PN}_append_class-nativesdk = " ${SDKPATHNATIVE}/environment-setup.d/rpm.sh" FILES_${PN}-dev += "${libdir}/rpm-plugins/*.la \ " diff --git a/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch b/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch index 038a67209..4ba766528 100644 --- a/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch +++ b/poky/meta/recipes-devtools/rsync/files/makefile-no-rebuild.patch @@ -1,4 +1,4 @@ -From 5ae38baadd40a996da3d19a147f37e7f1f3355bf Mon Sep 17 00:00:00 2001 +From 1f29584e57f5fda09970c66f3b94f4720e09c1bb Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Tue, 12 Apr 2016 15:51:54 +0100 Subject: [PATCH] rsync: remove upstream's rebuild logic @@ -10,14 +10,14 @@ Upstream-Status: Inappropriate Signed-off-by: Ross Burton --- - Makefile.in | 50 -------------------------------------------------- - 1 file changed, 50 deletions(-) + Makefile.in | 54 ----------------------------------------------------- + 1 file changed, 54 deletions(-) diff --git a/Makefile.in b/Makefile.in -index 31ddc43..41c9a93 100644 +index 672fcc4..c12d8d4 100644 --- a/Makefile.in +++ b/Makefile.in -@@ -167,56 +167,6 @@ gen: conf proto.h man +@@ -168,60 +168,6 @@ gen: conf proto.h man gensend: gen rsync -aic $(GENFILES) $${SAMBA_HOST-samba.org}:/home/ftp/pub/rsync/generated-files/ @@ -56,6 +56,10 @@ index 31ddc43..41c9a93 100644 - ./config.status --recheck - ./config.status - +-.PHONY: restatus +-restatus: +- ./config.status +- -Makefile: Makefile.in config.status configure.sh config.h.in - @if test -f Makefile; then cp -p Makefile Makefile.old; else touch Makefile.old; fi - @./config.status diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb b/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb deleted file mode 100644 index ea6b1ce38..000000000 --- a/poky/meta/recipes-devtools/rsync/rsync_3.2.1.bb +++ /dev/null @@ -1,58 +0,0 @@ -SUMMARY = "File synchronization tool" -HOMEPAGE = "http://rsync.samba.org/" -BUGTRACKER = "http://rsync.samba.org/bugzilla.html" -SECTION = "console/network" -# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) -# Includes opennsh and xxhash dynamic link exception -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26" - -DEPENDS = "popt" - -SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ - file://rsyncd.conf \ - file://makefile-no-rebuild.patch \ - " - -SRC_URI[sha256sum] = "95f2dd62979b500a99b34c1a6453a0787ada0330e4bec7fcffad37b9062d58d3" - -# -16548 required for v3.1.3pre1. Already in v3.1.3. -CVE_CHECK_WHITELIST += " CVE-2017-16548 " - -inherit autotools-brokensep - -PACKAGECONFIG ??= "acl attr \ - ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ -" - -PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," -PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," -PACKAGECONFIG[lz4] = "--enable-lz4,--disable-lz4,lz4" -PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" -PACKAGECONFIG[xxhash] = "--enable-xxhash,--disable-xxhash,xxhash" -PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd" - -# By default, if crosscompiling, rsync disables a number of -# capabilities, hardlinking symlinks and special files (i.e. 
devices) -CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" - -EXTRA_OEMAKE = 'STRIP=""' -EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm" - -# rsync 3.0 uses configure.sh instead of configure, and -# makefile checks the existence of configure.sh -do_configure_prepend () { - rm -f ${S}/configure ${S}/configure.sh -} - -do_configure_append () { - cp -f ${S}/configure ${S}/configure.sh -} - -do_install_append() { - install -d ${D}${sysconfdir} - install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} -} - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb b/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb new file mode 100644 index 000000000..182628f4b --- /dev/null +++ b/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb @@ -0,0 +1,58 @@ +SUMMARY = "File synchronization tool" +HOMEPAGE = "http://rsync.samba.org/" +BUGTRACKER = "http://rsync.samba.org/bugzilla.html" +SECTION = "console/network" +# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) +# Includes opennsh and xxhash dynamic link exception +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26" + +DEPENDS = "popt" + +SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ + file://rsyncd.conf \ + file://makefile-no-rebuild.patch \ + " + +SRC_URI[sha256sum] = "644bd3841779507665211fd7db8359c8a10670c57e305b4aab61b4e40037afa8" + +# -16548 required for v3.1.3pre1. Already in v3.1.3. +CVE_CHECK_WHITELIST += " CVE-2017-16548 " + +inherit autotools-brokensep + +PACKAGECONFIG ??= "acl attr \ + ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ +" + +PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," +PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +PACKAGECONFIG[lz4] = "--enable-lz4,--disable-lz4,lz4" +PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" +PACKAGECONFIG[xxhash] = "--enable-xxhash,--disable-xxhash,xxhash" +PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd" + +# By default, if crosscompiling, rsync disables a number of +# capabilities, hardlinking symlinks and special files (i.e. 
devices) +CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" + +EXTRA_OEMAKE = 'STRIP=""' +EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm" + +# rsync 3.0 uses configure.sh instead of configure, and +# makefile checks the existence of configure.sh +do_configure_prepend () { + rm -f ${S}/configure ${S}/configure.sh +} + +do_configure_append () { + cp -f ${S}/configure ${S}/configure.sh +} + +do_install_append() { + install -d ${D}${sysconfdir} + install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} +} + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/strace/strace_5.7.bb b/poky/meta/recipes-devtools/strace/strace_5.7.bb deleted file mode 100644 index c184d035a..000000000 --- a/poky/meta/recipes-devtools/strace/strace_5.7.bb +++ /dev/null @@ -1,55 +0,0 @@ -SUMMARY = "System call tracing tool" -HOMEPAGE = "http://strace.io" -SECTION = "console/utils" -LICENSE = "LGPL-2.1+ & GPL-2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=c756d9d5dabc27663df64f0bf492166c" - -SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz \ - file://disable-git-version-gen.patch \ - file://update-gawk-paths.patch \ - file://Makefile-ptest.patch \ - file://run-ptest \ - file://mips-SIGEMT.patch \ - file://0001-caps-abbrev.awk-fix-gawk-s-path.patch \ - file://ptest-spacesave.patch \ - file://uintptr_t.patch \ - file://0001-strace-fix-reproducibilty-issues.patch \ - " -SRC_URI[sha256sum] = "b284b59f9bcd95b9728cea5bd5c0edc5ebe360af73dc76fbf6334f11c777ccd8" - -inherit autotools ptest - -PACKAGECONFIG_class-target ??= "\ - ${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', 'bluez', '', d)} \ -" - -PACKAGECONFIG[bluez] = "ac_cv_header_bluetooth_bluetooth_h=yes,ac_cv_header_bluetooth_bluetooth_h=no,bluez5" -PACKAGECONFIG[libunwind] = "--with-libunwind,--without-libunwind,libunwind" - -EXTRA_OECONF += "--enable-mpers=no --disable-gcc-Werror" - -CFLAGS_append_libc-musl = " -Dsigcontext_struct=sigcontext" - -TESTDIR = "tests" -PTEST_BUILD_HOST_PATTERN = "^(DEB_CHANGELOGTIME|RPM_CHANGELOGTIME|WARN_CFLAGS_FOR_BUILD|LDFLAGS_FOR_BUILD)" - -do_install_append() { - # We don't ship strace-graph here because it needs perl - rm ${D}${bindir}/strace-graph -} - -do_compile_ptest() { - oe_runmake -C ${TESTDIR} buildtest-TESTS -} - -do_install_ptest() { - oe_runmake -C ${TESTDIR} install-ptest BUILDDIR=${B} DESTDIR=${D}${PTEST_PATH} TESTDIR=${TESTDIR} - install -m 755 ${S}/test-driver ${D}${PTEST_PATH} - install -m 644 ${B}/config.h ${D}${PTEST_PATH} - sed -i -e '/^src/s/strace.*[1-9]/ptest/' ${D}/${PTEST_PATH}/${TESTDIR}/Makefile -} - -RDEPENDS_${PN}-ptest += "make coreutils grep gawk sed" - -BBCLASSEXTEND = "native" -TOOLCHAIN = "gcc" diff --git a/poky/meta/recipes-devtools/strace/strace_5.8.bb b/poky/meta/recipes-devtools/strace/strace_5.8.bb new file mode 100644 index 000000000..70d5940f6 --- /dev/null +++ b/poky/meta/recipes-devtools/strace/strace_5.8.bb @@ -0,0 +1,55 @@ +SUMMARY = "System call tracing tool" +HOMEPAGE = "http://strace.io" +SECTION = "console/utils" +LICENSE = "LGPL-2.1+ & GPL-2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=c756d9d5dabc27663df64f0bf492166c" + +SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz \ + file://disable-git-version-gen.patch \ + file://update-gawk-paths.patch \ + file://Makefile-ptest.patch \ + file://run-ptest \ + file://mips-SIGEMT.patch \ + file://0001-caps-abbrev.awk-fix-gawk-s-path.patch \ + file://ptest-spacesave.patch \ + file://uintptr_t.patch \ + file://0001-strace-fix-reproducibilty-issues.patch \ 
+ " +SRC_URI[sha256sum] = "df4a669f7fff9cc302784085bd4b72fab216a426a3f72c892b28a537b71e7aa9" + +inherit autotools ptest + +PACKAGECONFIG_class-target ??= "\ + ${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', 'bluez', '', d)} \ +" + +PACKAGECONFIG[bluez] = "ac_cv_header_bluetooth_bluetooth_h=yes,ac_cv_header_bluetooth_bluetooth_h=no,bluez5" +PACKAGECONFIG[libunwind] = "--with-libunwind,--without-libunwind,libunwind" + +EXTRA_OECONF += "--enable-mpers=no --disable-gcc-Werror" + +CFLAGS_append_libc-musl = " -Dsigcontext_struct=sigcontext" + +TESTDIR = "tests" +PTEST_BUILD_HOST_PATTERN = "^(DEB_CHANGELOGTIME|RPM_CHANGELOGTIME|WARN_CFLAGS_FOR_BUILD|LDFLAGS_FOR_BUILD)" + +do_install_append() { + # We don't ship strace-graph here because it needs perl + rm ${D}${bindir}/strace-graph +} + +do_compile_ptest() { + oe_runmake -C ${TESTDIR} buildtest-TESTS +} + +do_install_ptest() { + oe_runmake -C ${TESTDIR} install-ptest BUILDDIR=${B} DESTDIR=${D}${PTEST_PATH} TESTDIR=${TESTDIR} + install -m 755 ${S}/test-driver ${D}${PTEST_PATH} + install -m 644 ${B}/config.h ${D}${PTEST_PATH} + sed -i -e '/^src/s/strace.*[1-9]/ptest/' ${D}/${PTEST_PATH}/${TESTDIR}/Makefile +} + +RDEPENDS_${PN}-ptest += "make coreutils grep gawk sed" + +BBCLASSEXTEND = "native" +TOOLCHAIN = "gcc" diff --git a/poky/meta/recipes-devtools/vala/vala/0001-git-version-gen-don-t-append-dirty-if-we-re-not-in-g.patch b/poky/meta/recipes-devtools/vala/vala/0001-git-version-gen-don-t-append-dirty-if-we-re-not-in-g.patch deleted file mode 100644 index ac9dc8c83..000000000 --- a/poky/meta/recipes-devtools/vala/vala/0001-git-version-gen-don-t-append-dirty-if-we-re-not-in-g.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 2460d7b79f7e90dcfeebde5e9c53d9b6798a1f3c Mon Sep 17 00:00:00 2001 -From: Martin Jansa -Date: Tue, 21 Feb 2012 17:12:50 +0100 -Subject: [PATCH] git-version-gen: don't append -dirty if we're not in git - repo - -* for example if we have some dirty directory and we unpack clean vala tarball in it, then it will append -dirty - -Signed-off-by: Martin Jansa - -Upstream-Status: Inappropriate [OE-Specific] ---- - build-aux/git-version-gen | 25 ++++++++++++++----------- - 1 files changed, 14 insertions(+), 11 deletions(-) - -diff --git a/build-aux/git-version-gen b/build-aux/git-version-gen -index 0fa6faa..1d1f9d2 100755 ---- a/build-aux/git-version-gen -+++ b/build-aux/git-version-gen -@@ -126,18 +126,21 @@ fi - - v=`echo "$v" |sed 's/^v//'` - --# Don't declare a version "dirty" merely because a time stamp has changed. --git status > /dev/null 2>&1 -+if test -d .git -+then -+ # Don't declare a version "dirty" merely because a time stamp has changed. -+ git status > /dev/null 2>&1 - --dirty=`sh -c 'git diff-index --name-only HEAD' 2>/dev/null` || dirty= --case "$dirty" in -- '') ;; -- *) # Append the suffix only if there isn't one already. -- case $v in -- *-dirty) ;; -- *) v="$v-dirty" ;; -- esac ;; --esac -+ dirty=`sh -c 'git diff-index --name-only HEAD' 2>/dev/null` || dirty= -+ case "$dirty" in -+ '') ;; -+ *) # Append the suffix only if there isn't one already. -+ case $v in -+ *-dirty) ;; -+ *) v="$v-dirty" ;; -+ esac ;; -+ esac -+fi - - # Omit the trailing newline, so that m4_esyscmd can use the result directly. 
- echo "$v" | tr -d '\012' --- -1.7.8.4 - diff --git a/poky/meta/recipes-devtools/vala/vala_0.48.6.bb b/poky/meta/recipes-devtools/vala/vala_0.48.6.bb deleted file mode 100644 index 9dace1348..000000000 --- a/poky/meta/recipes-devtools/vala/vala_0.48.6.bb +++ /dev/null @@ -1,7 +0,0 @@ -require ${BPN}.inc - -SRC_URI += "file://0001-git-version-gen-don-t-append-dirty-if-we-re-not-in-g.patch \ - file://0001-vapigen.m4-use-PKG_CONFIG_SYSROOT_DIR.patch \ - " - -SRC_URI[sha256sum] = "d18d08ed030ce0e0f044f4c15c9df3c25b15beaf8700e45e43b736a6debf9707" diff --git a/poky/meta/recipes-devtools/vala/vala_0.48.7.bb b/poky/meta/recipes-devtools/vala/vala_0.48.7.bb new file mode 100644 index 000000000..2e61db105 --- /dev/null +++ b/poky/meta/recipes-devtools/vala/vala_0.48.7.bb @@ -0,0 +1,5 @@ +require ${BPN}.inc + +SRC_URI += " file://0001-vapigen.m4-use-PKG_CONFIG_SYSROOT_DIR.patch" + +SRC_URI[sha256sum] = "28de33e28da24500cc1675c3a6ced1301c9a6a5e6dd06193569001f9ce9a5c53" diff --git a/poky/meta/recipes-extended/ethtool/ethtool/avoid_parallel_tests.patch b/poky/meta/recipes-extended/ethtool/ethtool/avoid_parallel_tests.patch index 8ecfde141..75a3841f2 100644 --- a/poky/meta/recipes-extended/ethtool/ethtool/avoid_parallel_tests.patch +++ b/poky/meta/recipes-extended/ethtool/ethtool/avoid_parallel_tests.patch @@ -1,4 +1,4 @@ -From 1c6c017e56ec536f5cceacdb02092777d390debb Mon Sep 17 00:00:00 2001 +From afecc8791f904dc1893858d68a642b491356c74b Mon Sep 17 00:00:00 2001 From: Tudor Florea Date: Wed, 28 May 2014 18:59:54 +0200 Subject: [PATCH] ethtool: use serial-tests config needed by ptest. @@ -15,11 +15,11 @@ Upstream-Status: Inappropriate 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac -index 1169b7f..ed82524 100644 +index 19223f7..8a58d15 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ dnl Process this file with autoconf to produce a configure script. - AC_INIT(ethtool, 5.7, netdev@vger.kernel.org) + AC_INIT(ethtool, 5.8, netdev@vger.kernel.org) AC_PREREQ(2.52) AC_CONFIG_SRCDIR([ethtool.c]) -AM_INIT_AUTOMAKE([gnu subdir-objects]) diff --git a/poky/meta/recipes-extended/ethtool/ethtool_5.7.bb b/poky/meta/recipes-extended/ethtool/ethtool_5.7.bb deleted file mode 100644 index d9fc30cb5..000000000 --- a/poky/meta/recipes-extended/ethtool/ethtool_5.7.bb +++ /dev/null @@ -1,35 +0,0 @@ -SUMMARY = "Display or change ethernet card settings" -DESCRIPTION = "A small utility for examining and tuning the settings of your ethernet-based network interfaces." 
-HOMEPAGE = "http://www.kernel.org/pub/software/network/ethtool/" -SECTION = "console/network" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://ethtool.c;beginline=4;endline=17;md5=c19b30548c582577fc6b443626fc1216" - -SRC_URI = "${KERNELORG_MIRROR}/software/network/ethtool/ethtool-${PV}.tar.gz \ - file://run-ptest \ - file://avoid_parallel_tests.patch \ - " - -SRC_URI[sha256sum] = "c2e16f8bf510c5822f8e25061c2533972308a421767c54abcacb1038b08bb782" - -UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/software/network/ethtool/" - -inherit autotools ptest bash-completion pkgconfig - -RDEPENDS_${PN}-ptest += "make" - -PACKAGECONFIG ?= "netlink" -PACKAGECONFIG[netlink] = "--enable-netlink,--disable-netlink,libmnl," - -do_compile_ptest() { - oe_runmake buildtest-TESTS -} - -do_install_ptest () { - cp ${B}/Makefile ${D}${PTEST_PATH} - install ${B}/test-cmdline ${D}${PTEST_PATH} - install ${B}/test-features ${D}${PTEST_PATH} - install ${B}/ethtool ${D}${PTEST_PATH}/ethtool - sed -i 's/^Makefile/_Makefile/' ${D}${PTEST_PATH}/Makefile -} diff --git a/poky/meta/recipes-extended/ethtool/ethtool_5.8.bb b/poky/meta/recipes-extended/ethtool/ethtool_5.8.bb new file mode 100644 index 000000000..0403f0e3a --- /dev/null +++ b/poky/meta/recipes-extended/ethtool/ethtool_5.8.bb @@ -0,0 +1,37 @@ +SUMMARY = "Display or change ethernet card settings" +DESCRIPTION = "A small utility for examining and tuning the settings of your ethernet-based network interfaces." +HOMEPAGE = "http://www.kernel.org/pub/software/network/ethtool/" +SECTION = "console/network" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://ethtool.c;beginline=4;endline=17;md5=c19b30548c582577fc6b443626fc1216" + +SRC_URI = "${KERNELORG_MIRROR}/software/network/ethtool/ethtool-${PV}.tar.gz \ + file://run-ptest \ + file://avoid_parallel_tests.patch \ + " + +SRC_URI[sha256sum] = "91e8bbda48a7fd5d374efacca542364ceb3a6c1f286f024b64ec40ccc799e125" + +UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/software/network/ethtool/" + +inherit autotools ptest bash-completion pkgconfig + +RDEPENDS_${PN}-ptest += "make" + +PACKAGECONFIG ?= "netlink" +PACKAGECONFIG[netlink] = "--enable-netlink,--disable-netlink,libmnl," + +do_compile_ptest() { + oe_runmake buildtest-TESTS +} + +do_install_ptest () { + cp ${B}/Makefile ${D}${PTEST_PATH} + install ${B}/test-cmdline ${D}${PTEST_PATH} + if ${@bb.utils.contains('PACKAGECONFIG', 'netlink', 'false', 'true', d)}; then + install ${B}/test-features ${D}${PTEST_PATH} + fi + install ${B}/ethtool ${D}${PTEST_PATH}/ethtool + sed -i 's/^Makefile/_Makefile/' ${D}${PTEST_PATH}/Makefile +} diff --git a/poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-15900.patch b/poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-15900.patch new file mode 100644 index 000000000..d7c5f034e --- /dev/null +++ b/poky/meta/recipes-extended/ghostscript/ghostscript/CVE-2020-15900.patch @@ -0,0 +1,54 @@ +From 5d499272b95a6b890a1397e11d20937de000d31b Mon Sep 17 00:00:00 2001 +From: Ray Johnston +Date: Wed, 22 Jul 2020 09:57:54 -0700 +Subject: [PATCH] Bug 702582, CVE 2020-15900 Memory Corruption in Ghostscript + 9.52 + +Fix the 'rsearch' calculation for the 'post' size to give the correct +size. Previous calculation would result in a size that was too large, +and could underflow to max uint32_t. Also fix 'rsearch' to return the +correct 'pre' string with empty string match. 
+ +A future change may 'undefine' this undocumented, non-standard operator +during initialization as we do with the many other non-standard internal +PostScript operators and procedures. + +Upstream-Status: Backport [https://git.ghostscript.com/?p=ghostpdl.git;a=commitdiff;h=5d499272b95a6b890a1397e11d20937de000d31b] +CVE: CVE-2020-15900 +Signed-off-by: Chee Yang Lee +--- + psi/zstring.c | 17 +++++++++++------ + 1 file changed, 11 insertions(+), 6 deletions(-) + +diff --git a/psi/zstring.c b/psi/zstring.c +index 33662dafa..58e1af2b3 100644 +--- a/psi/zstring.c ++++ b/psi/zstring.c +@@ -142,13 +142,18 @@ search_impl(i_ctx_t *i_ctx_p, bool forward) + return 0; + found: + op->tas.type_attrs = op1->tas.type_attrs; +- op->value.bytes = ptr; +- r_set_size(op, size); ++ op->value.bytes = ptr; /* match */ ++ op->tas.rsize = size; /* match */ + push(2); +- op[-1] = *op1; +- r_set_size(op - 1, ptr - op[-1].value.bytes); +- op1->value.bytes = ptr + size; +- r_set_size(op1, count + (!forward ? (size - 1) : 0)); ++ op[-1] = *op1; /* pre */ ++ op[-3].value.bytes = ptr + size; /* post */ ++ if (forward) { ++ op[-1].tas.rsize = ptr - op[-1].value.bytes; /* pre */ ++ op[-3].tas.rsize = count; /* post */ ++ } else { ++ op[-1].tas.rsize = count; /* pre */ ++ op[-3].tas.rsize -= count + size; /* post */ ++ } + make_true(op); + return 0; + } +-- +2.17.1 + diff --git a/poky/meta/recipes-extended/ghostscript/ghostscript_9.52.bb b/poky/meta/recipes-extended/ghostscript/ghostscript_9.52.bb index 4cdb6e00d..65135f582 100644 --- a/poky/meta/recipes-extended/ghostscript/ghostscript_9.52.bb +++ b/poky/meta/recipes-extended/ghostscript/ghostscript_9.52.bb @@ -34,6 +34,7 @@ SRC_URI_BASE = "https://github.com/ArtifexSoftware/ghostpdl-downloads/releases/d SRC_URI = "${SRC_URI_BASE} \ file://ghostscript-9.21-prevent_recompiling.patch \ file://cups-no-gcrypt.patch \ + file://CVE-2020-15900.patch \ " SRC_URI_class-native = "${SRC_URI_BASE} \ diff --git a/poky/meta/recipes-extended/logrotate/logrotate/act-as-mv-when-rotate.patch b/poky/meta/recipes-extended/logrotate/logrotate/act-as-mv-when-rotate.patch index acef5ccbe..671fce4ac 100644 --- a/poky/meta/recipes-extended/logrotate/logrotate/act-as-mv-when-rotate.patch +++ b/poky/meta/recipes-extended/logrotate/logrotate/act-as-mv-when-rotate.patch @@ -1,4 +1,4 @@ -From ef1ea905831c5bcd63e04149571c10d75ff8f028 Mon Sep 17 00:00:00 2001 +From 17d57a2a923a4af53c8910a9999aebeab3f5d83a Mon Sep 17 00:00:00 2001 From: Robert Yang Date: Tue, 17 Feb 2015 21:08:07 -0800 Subject: [PATCH] Act as the "mv" command when rotate log @@ -15,10 +15,10 @@ Signed-off-by: Robert Yang 1 file changed, 59 insertions(+), 12 deletions(-) diff --git a/logrotate.c b/logrotate.c -index 25902bc..afa1a90 100644 +index 45b3eb6..231371a 100644 --- a/logrotate.c +++ b/logrotate.c -@@ -1434,6 +1434,53 @@ static int findNeedRotating(struct logInfo *log, int logNum, int force) +@@ -1463,6 +1463,53 @@ static int findNeedRotating(const struct logInfo *log, unsigned logNum, int forc return 0; } @@ -72,7 +72,7 @@ index 25902bc..afa1a90 100644 /* find the rotated file with the highest index */ static int findLastRotated(const struct logNames *rotNames, const char *fileext, const char *compext) -@@ -1911,15 +1958,15 @@ static int prerotateSingleLog(struct logInfo *log, int logNum, +@@ -1958,15 +2005,15 @@ static int prerotateSingleLog(const struct logInfo *log, unsigned logNum, } message(MESS_DEBUG, @@ -91,7 +91,7 @@ index 25902bc..afa1a90 100644 oldName, newName, strerror(errno)); hasErrors = 1; } -@@ -2002,10 +2049,10 
@@ static int rotateSingleLog(struct logInfo *log, int logNum, +@@ -2051,10 +2098,10 @@ static int rotateSingleLog(const struct logInfo *log, unsigned logNum, return 1; } @@ -105,7 +105,7 @@ index 25902bc..afa1a90 100644 log->files[logNum], tmpFilename, strerror(errno)); hasErrors = 1; -@@ -2014,11 +2061,11 @@ static int rotateSingleLog(struct logInfo *log, int logNum, +@@ -2063,11 +2110,11 @@ static int rotateSingleLog(const struct logInfo *log, unsigned logNum, free(tmpFilename); } else { @@ -120,7 +120,7 @@ index 25902bc..afa1a90 100644 log->files[logNum], rotNames->finalName, strerror(errno)); hasErrors = 1; -@@ -2424,7 +2471,7 @@ static int rotateLogSet(struct logInfo *log, int force) +@@ -2480,7 +2527,7 @@ static int rotateLogSet(const struct logInfo *log, int force) return hasErrors; } @@ -129,16 +129,16 @@ index 25902bc..afa1a90 100644 { struct logState *p; FILE *f; -@@ -2629,7 +2676,7 @@ static int writeState(const char *stateFilename) +@@ -2659,7 +2706,7 @@ static int writeState(const char *stateFilename) fclose(f); if (error == 0) { - if (rename(tmpFilename, stateFilename)) { + if (mvFile(tmpFilename, stateFilename, log, prev_acl)) { + message(MESS_ERROR, "error renaming temp state file %s to %s: %s\n", + tmpFilename, stateFilename, strerror(errno)); unlink(tmpFilename); - error = 1; - message(MESS_ERROR, "error renaming temp state file %s to %s\n", -@@ -2987,7 +3034,7 @@ int main(int argc, const char **argv) +@@ -3073,7 +3120,7 @@ int main(int argc, const char **argv) rc |= rotateLogSet(log, force); if (!debug) diff --git a/poky/meta/recipes-extended/logrotate/logrotate/disable-check-different-filesystems.patch b/poky/meta/recipes-extended/logrotate/logrotate/disable-check-different-filesystems.patch index 9ba531f81..d7f9a02cc 100644 --- a/poky/meta/recipes-extended/logrotate/logrotate/disable-check-different-filesystems.patch +++ b/poky/meta/recipes-extended/logrotate/logrotate/disable-check-different-filesystems.patch @@ -1,4 +1,4 @@ -From a3fdf3dbdd6ecc0f2550a765dcb9bb274bce8ea8 Mon Sep 17 00:00:00 2001 +From 16c1833ade4c036b30b8761d2c4a5bd85cc65c44 Mon Sep 17 00:00:00 2001 From: Robert Yang Date: Tue, 8 Jan 2019 06:27:06 +0000 Subject: [PATCH] Disable the check for different filesystems @@ -15,10 +15,10 @@ Signed-off-by: Robert Yang 1 file changed, 9 deletions(-) diff --git a/config.c b/config.c -index f027c7e..026136c 100644 +index d2488f1..1de3745 100644 --- a/config.c +++ b/config.c -@@ -1873,15 +1873,6 @@ duperror: +@@ -1902,15 +1902,6 @@ duperror: } free(ld); @@ -28,7 +28,7 @@ index f027c7e..026136c 100644 - message(MESS_ERROR, - "%s:%d olddir %s and log file %s " - "are on different devices\n", configFile, -- lineNum, newlog->oldDir, newlog->files[i]); +- lineNum, newlog->oldDir, newlog->files[j]); - goto error; - } } diff --git a/poky/meta/recipes-extended/logrotate/logrotate_3.16.0.bb b/poky/meta/recipes-extended/logrotate/logrotate_3.16.0.bb deleted file mode 100644 index 1c9df5a3c..000000000 --- a/poky/meta/recipes-extended/logrotate/logrotate_3.16.0.bb +++ /dev/null @@ -1,92 +0,0 @@ -SUMMARY = "Rotates, compresses, removes and mails system log files" -SECTION = "console/utils" -HOMEPAGE = "https://github.com/logrotate/logrotate/issues" -LICENSE = "GPLv2" - -# TODO: Document coreutils dependency. Why not RDEPENDS? Why not busybox? 
- -DEPENDS="coreutils popt" - -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" - -UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" -UPSTREAM_CHECK_REGEX = "logrotate-(?P\d+(\.\d+)+).tar" - -SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BP}.tar.xz \ - file://act-as-mv-when-rotate.patch \ - file://0001-Update-the-manual.patch \ - file://disable-check-different-filesystems.patch \ - " - -SRC_URI[md5sum] = "faf729e0e24bfaafaa677bc6deb46ed8" -SRC_URI[sha256sum] = "442f6fdf61c349eeae5f76799878b88fe45a11c8863a38b618bac6988f4a7ce5" - -PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'acl selinux', d)}" - -PACKAGECONFIG[acl] = ",,acl" -PACKAGECONFIG[selinux] = ",,libselinux" - -CONFFILES_${PN} += "${localstatedir}/lib/logrotate.status \ - ${sysconfdir}/logrotate.conf \ - ${sysconfdir}/logrotate.d/btmp \ - ${sysconfdir}/logrotate.d/wtmp" - -# If RPM_OPT_FLAGS is unset, it adds -g itself rather than obeying our -# optimization variables, so use it rather than EXTRA_CFLAGS. -EXTRA_OEMAKE = "\ - LFS= \ - OS_NAME='${OS_NAME}' \ - 'CC=${CC}' \ - 'RPM_OPT_FLAGS=${CFLAGS}' \ - 'EXTRA_LDFLAGS=${LDFLAGS}' \ - ${@bb.utils.contains('PACKAGECONFIG', 'acl', 'WITH_ACL=yes', '', d)} \ - ${@bb.utils.contains('PACKAGECONFIG', 'selinux', 'WITH_SELINUX=yes', '', d)} \ -" - -# OS_NAME in the makefile defaults to `uname -s`. The behavior for -# freebsd/netbsd is questionable, so leave it as Linux, which only sets -# INSTALL=install and BASEDIR=/usr. -OS_NAME = "Linux" - -inherit autotools systemd - -SYSTEMD_SERVICE_${PN} = "\ - ${BPN}.service \ - ${BPN}.timer \ -" - -LOGROTATE_OPTIONS ?= "" - -LOGROTATE_SYSTEMD_TIMER_BASIS ?= "daily" -LOGROTATE_SYSTEMD_TIMER_ACCURACY ?= "12h" -LOGROTATE_SYSTEMD_TIMER_PERSISTENT ?= "true" - -do_install(){ - oe_runmake install DESTDIR=${D} PREFIX=${D} MANDIR=${mandir} - mkdir -p ${D}${sysconfdir}/logrotate.d - mkdir -p ${D}${localstatedir}/lib - install -p -m 644 ${S}/examples/logrotate.conf ${D}${sysconfdir}/logrotate.conf - install -p -m 644 ${S}/examples/btmp ${D}${sysconfdir}/logrotate.d/btmp - install -p -m 644 ${S}/examples/wtmp ${D}${sysconfdir}/logrotate.d/wtmp - touch ${D}${localstatedir}/lib/logrotate.status - - if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then - install -d ${D}${systemd_system_unitdir} - install -m 0644 ${S}/examples/logrotate.service ${D}${systemd_system_unitdir}/logrotate.service - install -m 0644 ${S}/examples/logrotate.timer ${D}${systemd_system_unitdir}/logrotate.timer - [ -z "${LOGROTATE_OPTIONS}" ] || - sed -ri \ - -e 's|(ExecStart=.*/logrotate.*)$|\1 ${LOGROTATE_OPTIONS}|g' \ - ${D}${systemd_system_unitdir}/logrotate.service - sed -ri \ - -e 's|(OnCalendar=).*$|\1${LOGROTATE_SYSTEMD_TIMER_BASIS}|g' \ - -e 's|(AccuracySec=).*$|\1${LOGROTATE_SYSTEMD_TIMER_ACCURACY}|g' \ - -e 's|(Persistent=).*$|\1${LOGROTATE_SYSTEMD_TIMER_PERSISTENT}|g' \ - ${D}${systemd_system_unitdir}/logrotate.timer - fi - - if ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then - mkdir -p ${D}${sysconfdir}/cron.daily - install -p -m 0755 ${S}/examples/logrotate.cron ${D}${sysconfdir}/cron.daily/logrotate - fi -} diff --git a/poky/meta/recipes-extended/logrotate/logrotate_3.17.0.bb b/poky/meta/recipes-extended/logrotate/logrotate_3.17.0.bb new file mode 100644 index 000000000..08001f38e --- /dev/null +++ b/poky/meta/recipes-extended/logrotate/logrotate_3.17.0.bb @@ -0,0 +1,91 @@ +SUMMARY = "Rotates, compresses, removes and mails system log files" +SECTION = 
"console/utils" +HOMEPAGE = "https://github.com/logrotate/logrotate/issues" +LICENSE = "GPLv2" + +# TODO: Document coreutils dependency. Why not RDEPENDS? Why not busybox? + +DEPENDS="coreutils popt" + +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" + +UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" +UPSTREAM_CHECK_REGEX = "logrotate-(?P\d+(\.\d+)+).tar" + +SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BP}.tar.xz \ + file://act-as-mv-when-rotate.patch \ + file://0001-Update-the-manual.patch \ + file://disable-check-different-filesystems.patch \ + " + +SRC_URI[sha256sum] = "58cc2178ff57faa3c0490181cce041345aeca6cff18dba1c5cd1398bf1c19294" + +PACKAGECONFIG ?= "${@bb.utils.filter('DISTRO_FEATURES', 'acl selinux', d)}" + +PACKAGECONFIG[acl] = ",,acl" +PACKAGECONFIG[selinux] = ",,libselinux" + +CONFFILES_${PN} += "${localstatedir}/lib/logrotate.status \ + ${sysconfdir}/logrotate.conf \ + ${sysconfdir}/logrotate.d/btmp \ + ${sysconfdir}/logrotate.d/wtmp" + +# If RPM_OPT_FLAGS is unset, it adds -g itself rather than obeying our +# optimization variables, so use it rather than EXTRA_CFLAGS. +EXTRA_OEMAKE = "\ + LFS= \ + OS_NAME='${OS_NAME}' \ + 'CC=${CC}' \ + 'RPM_OPT_FLAGS=${CFLAGS}' \ + 'EXTRA_LDFLAGS=${LDFLAGS}' \ + ${@bb.utils.contains('PACKAGECONFIG', 'acl', 'WITH_ACL=yes', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'selinux', 'WITH_SELINUX=yes', '', d)} \ +" + +# OS_NAME in the makefile defaults to `uname -s`. The behavior for +# freebsd/netbsd is questionable, so leave it as Linux, which only sets +# INSTALL=install and BASEDIR=/usr. +OS_NAME = "Linux" + +inherit autotools systemd + +SYSTEMD_SERVICE_${PN} = "\ + ${BPN}.service \ + ${BPN}.timer \ +" + +LOGROTATE_OPTIONS ?= "" + +LOGROTATE_SYSTEMD_TIMER_BASIS ?= "daily" +LOGROTATE_SYSTEMD_TIMER_ACCURACY ?= "12h" +LOGROTATE_SYSTEMD_TIMER_PERSISTENT ?= "true" + +do_install(){ + oe_runmake install DESTDIR=${D} PREFIX=${D} MANDIR=${mandir} + mkdir -p ${D}${sysconfdir}/logrotate.d + mkdir -p ${D}${localstatedir}/lib + install -p -m 644 ${S}/examples/logrotate.conf ${D}${sysconfdir}/logrotate.conf + install -p -m 644 ${S}/examples/btmp ${D}${sysconfdir}/logrotate.d/btmp + install -p -m 644 ${S}/examples/wtmp ${D}${sysconfdir}/logrotate.d/wtmp + touch ${D}${localstatedir}/lib/logrotate.status + + if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then + install -d ${D}${systemd_system_unitdir} + install -m 0644 ${S}/examples/logrotate.service ${D}${systemd_system_unitdir}/logrotate.service + install -m 0644 ${S}/examples/logrotate.timer ${D}${systemd_system_unitdir}/logrotate.timer + [ -z "${LOGROTATE_OPTIONS}" ] || + sed -ri \ + -e 's|(ExecStart=.*/logrotate.*)$|\1 ${LOGROTATE_OPTIONS}|g' \ + ${D}${systemd_system_unitdir}/logrotate.service + sed -ri \ + -e 's|(OnCalendar=).*$|\1${LOGROTATE_SYSTEMD_TIMER_BASIS}|g' \ + -e 's|(AccuracySec=).*$|\1${LOGROTATE_SYSTEMD_TIMER_ACCURACY}|g' \ + -e 's|(Persistent=).*$|\1${LOGROTATE_SYSTEMD_TIMER_PERSISTENT}|g' \ + ${D}${systemd_system_unitdir}/logrotate.timer + fi + + if ${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'true', 'false', d)}; then + mkdir -p ${D}${sysconfdir}/cron.daily + install -p -m 0755 ${S}/examples/logrotate.cron ${D}${sysconfdir}/cron.daily/logrotate + fi +} diff --git a/poky/meta/recipes-extended/ltp/ltp/0001-Remove-OOM-tests-from-runtest-mm.patch b/poky/meta/recipes-extended/ltp/ltp/0001-Remove-OOM-tests-from-runtest-mm.patch new file mode 100644 index 000000000..6b665030c --- 
/dev/null +++ b/poky/meta/recipes-extended/ltp/ltp/0001-Remove-OOM-tests-from-runtest-mm.patch @@ -0,0 +1,34 @@ +From 13ef88cdccfe3f58c53d57806866b91e310eb272 Mon Sep 17 00:00:00 2001 +From: "Mingde (Matthew) Zeng" +Date: Wed, 29 Jul 2020 08:47:09 -0400 +Subject: [PATCH] Remove OOM tests from runtest/mm + +Disable OOM tests, as they might cause oeqa ssh connection lost + +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Mingde (Matthew) Zeng + +--- + runtest/mm | 6 ------ + 1 file changed, 6 deletions(-) + +diff --git a/runtest/mm b/runtest/mm +index a09f39c1e..76fa82754 100644 +--- a/runtest/mm ++++ b/runtest/mm +@@ -73,12 +73,6 @@ ksm06 ksm06 + ksm06_1 ksm06 -n 10 + ksm06_2 ksm06 -n 10000 + +-oom01 oom01 +-oom02 oom02 +-oom03 oom03 +-oom04 oom04 +-oom05 oom05 +- + swapping01 swapping01 -i 5 + + thp01 thp01 -I 120 +-- +2.27.0 + diff --git a/poky/meta/recipes-extended/ltp/ltp_20200515.bb b/poky/meta/recipes-extended/ltp/ltp_20200515.bb index b283add12..0c7044d04 100644 --- a/poky/meta/recipes-extended/ltp/ltp_20200515.bb +++ b/poky/meta/recipes-extended/ltp/ltp_20200515.bb @@ -37,6 +37,7 @@ SRC_URI = "git://github.com/linux-test-project/ltp.git \ file://0001-ptrace01-Fix-missing-format-string.patch \ file://0001-sigwaitinfo-Do-not-run-invalid-undefined-test-cases.patch \ file://0001-syscalls-copy_file_range02-Expect-EFBIG-in-subcase-m.patch \ + file://0001-Remove-OOM-tests-from-runtest-mm.patch \ " S = "${WORKDIR}/git" @@ -50,7 +51,7 @@ export exec_prefix = "/opt/${PN}" PACKAGECONFIG[numa] = "--with-numa, --without-numa, numactl," EXTRA_AUTORECONF += "-I ${S}/testcases/realtime/m4" -EXTRA_OECONF = " --with-power-management-testsuite --with-realtime-testsuite --with-open-posix-testsuite " +EXTRA_OECONF = " --with-realtime-testsuite --with-open-posix-testsuite " # ltp network/rpc test cases ftbfs when libtirpc is found EXTRA_OECONF += " --without-tirpc " diff --git a/poky/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch b/poky/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch new file mode 100644 index 000000000..4c6e24931 --- /dev/null +++ b/poky/meta/recipes-extended/minicom/minicom/0001-Drop-superfluous-global-variable-definitions.patch @@ -0,0 +1,35 @@ +From b65152ebc03832972115e6d98e50cb6190d01793 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= +Date: Mon, 3 Feb 2020 13:18:13 +0100 +Subject: [PATCH 1/3] Drop superfluous global variable definitions + +The file minicom.c, by including the minicom.h header, already defines +the global variables 'dial_user' and 'dial_pass'. The object file +minicom.o is always linked to dial.o. Thus the definitions in dial.c +can be dropped. + +This fixes linking with gcc 10 which uses -fno-common by default, +disallowing multiple global variable definitions. + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + src/dial.c | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/src/dial.c b/src/dial.c +index eada5ee..d9d481f 100644 +--- a/src/dial.c ++++ b/src/dial.c +@@ -146,8 +146,6 @@ static int newtype; + /* Access to ".dialdir" denied? */ + static int dendd = 0; + static char *tagged; +-char *dial_user; +-char *dial_pass; + + /* Change the baud rate. 
Treat all characters in the given array as if + * they were key presses within the comm parameters dialog (C-A P) and +-- +2.24.1 + diff --git a/poky/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch b/poky/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch new file mode 100644 index 000000000..1740051e0 --- /dev/null +++ b/poky/meta/recipes-extended/minicom/minicom/0002-Drop-superfluous-global-variable-definitions.patch @@ -0,0 +1,37 @@ +From 924bd2da3a00e030e29d82b74ef82900bd50b475 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= +Date: Mon, 3 Feb 2020 13:18:33 +0100 +Subject: [PATCH 2/3] Drop superfluous global variable definitions + +The only place where the EXTERN macro mechanism is used to define the +global variables 'vt_outmap' and 'vt_inmap' is minicom.c (by defining +an empty EXTERN macro and including the minicom.h header). The file +vt100.c already defines these variables. The vt100.o object file is +always linked to minicom.o. Thus it is safe not to define the +variables in minicom.c and only declare them in the minicom.h header. + +This fixes linking with gcc 10 which uses -fno-common by default, +disallowing multiple global variable definitions. + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + src/minicom.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/minicom.h b/src/minicom.h +index 061c013..0f9693b 100644 +--- a/src/minicom.h ++++ b/src/minicom.h +@@ -141,7 +141,7 @@ EXTERN int sbcolor; /* Status Bar Background Color */ + EXTERN int st_attr; /* Status Bar attributes. */ + + /* jl 04.09.97 conversion tables */ +-EXTERN unsigned char vt_outmap[256], vt_inmap[256]; ++extern unsigned char vt_outmap[256], vt_inmap[256]; + + /* MARK updated 02/17/95 - history buffer */ + EXTERN int num_hist_lines; /* History buffer size */ +-- +2.24.1 + diff --git a/poky/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch b/poky/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch new file mode 100644 index 000000000..58cd58eda --- /dev/null +++ b/poky/meta/recipes-extended/minicom/minicom/0003-Drop-superfluous-global-variable-definitions.patch @@ -0,0 +1,42 @@ +From a4fc603b3641d2efe31479116eb7ba66932901c7 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Ond=C5=99ej=20Lyson=C4=9Bk?= +Date: Mon, 3 Feb 2020 13:21:41 +0100 +Subject: [PATCH 3/3] Drop superfluous global variable definitions + +The only place where the EXTERN macro mechanism is used to define the +global variables 'portfd_is_socket', 'portfd_is_connected' and +'portfd_sock_addr' is minicom.c (by defining an empty EXTERN macro and +including the minicom.h header). The source file sysdep1_s.c already +defines these variables. The sysdep1_s.o object file is always linked +to minicom.o. Thus it is safe to drop the definitions from minicom.c +and only declare the variables in the minicom.h header. + +This fixes linking with gcc 10 which uses -fno-common by default, +disallowing multiple global variable definitions. 
+ +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + src/minicom.h | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/src/minicom.h b/src/minicom.h +index 0f9693b..1e7cb8c 100644 +--- a/src/minicom.h ++++ b/src/minicom.h +@@ -113,9 +113,9 @@ EXTERN char *dial_user; /* Our username there */ + EXTERN char *dial_pass; /* Our password */ + + #ifdef USE_SOCKET +-EXTERN int portfd_is_socket; /* File descriptor is a unix socket */ +-EXTERN int portfd_is_connected; /* 1 if the socket is connected */ +-EXTERN struct sockaddr_un portfd_sock_addr; /* the unix socket address */ ++extern int portfd_is_socket; /* File descriptor is a unix socket */ ++extern int portfd_is_connected; /* 1 if the socket is connected */ ++extern struct sockaddr_un portfd_sock_addr; /* the unix socket address */ + #define portfd_connected ((portfd_is_socket && !portfd_is_connected) \ + ? -1 : portfd) + #else +-- +2.24.1 + diff --git a/poky/meta/recipes-extended/minicom/minicom_2.7.1.bb b/poky/meta/recipes-extended/minicom/minicom_2.7.1.bb index 1e6f1317e..03034864c 100644 --- a/poky/meta/recipes-extended/minicom/minicom_2.7.1.bb +++ b/poky/meta/recipes-extended/minicom/minicom_2.7.1.bb @@ -11,6 +11,9 @@ SRC_URI = "${DEBIAN_MIRROR}/main/m/${BPN}/${BPN}_${PV}.orig.tar.gz \ file://allow.to.disable.lockdev.patch \ file://0001-fix-minicom-h-v-return-value-is-not-0.patch \ file://0001-Fix-build-issus-surfaced-due-to-musl.patch \ + file://0001-Drop-superfluous-global-variable-definitions.patch \ + file://0002-Drop-superfluous-global-variable-definitions.patch \ + file://0003-Drop-superfluous-global-variable-definitions.patch \ " SRC_URI[md5sum] = "9021cb8c5445f6e6e74b2acc39962d62" diff --git a/poky/meta/recipes-extended/net-tools/net-tools/0001-added-ull-prefix-to-unsigned-long-long-constants-to-.patch b/poky/meta/recipes-extended/net-tools/net-tools/0001-added-ull-prefix-to-unsigned-long-long-constants-to-.patch deleted file mode 100644 index 523d434b7..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/0001-added-ull-prefix-to-unsigned-long-long-constants-to-.patch +++ /dev/null @@ -1,381 +0,0 @@ -From eb04ef31571f6c707eacaba6846feeebfab518e6 Mon Sep 17 00:00:00 2001 -From: Bernd Eckenfels -Date: Thu, 29 May 2003 02:09:14 +0000 -Subject: [PATCH] added 'ull' prefix to unsigned long long constants to make - gcc 3.3 happy - -Signed-off-by: Martin Jansa -Upstream-Status: Backport [https://sourceforge.net/p/net-tools/code/ci/eb04ef31571f6c707eacaba6846feeebfab518e6] ---- - lib/interface.c | 299 ++++++++++++++++++++++++++---------------------- - 1 file changed, 161 insertions(+), 138 deletions(-) - -diff -uNr net-tools-1.60.orig/lib/interface.c net-tools-1.60/lib/interface.c ---- net-tools-1.60.orig/lib/interface.c 2020-06-16 10:04:16.308411879 +0000 -+++ net-tools-1.60/lib/interface.c 2020-06-16 10:05:15.697264291 +0000 -@@ -23,7 +23,6 @@ - #include - #include - #include --#include - - #if HAVE_AFIPX - #if (__GLIBC__ > 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1) -@@ -654,6 +653,8 @@ - const char *Rext = "B"; - const char *Text = "B"; - -+ static char flags[200]; -+ - #if HAVE_AFIPX - static struct aftype *ipxtype = NULL; - #endif -@@ -685,32 +686,68 @@ - if (hw == NULL) - hw = get_hwntype(-1); - -- printf(_("%-9s Link encap:%s "), ptr->name, hw->title); -- /* For some hardware types (eg Ash, ATM) we don't print the -- hardware address if it's null. */ -- if (hw->print != NULL && (! 
(hw_null_address(hw, ptr->hwaddr) && -- hw->suppress_null_addr))) -- printf(_("HWaddr %s "), hw->print(ptr->hwaddr)); --#ifdef IFF_PORTSEL -- if (ptr->flags & IFF_PORTSEL) { -- printf(_("Media:%s"), if_port_text[ptr->map.port][0]); -- if (ptr->flags & IFF_AUTOMEDIA) -- printf(_("(auto)")); -- } -+ sprintf(flags, "flags=%d<", ptr->flags); -+ /* DONT FORGET TO ADD THE FLAGS IN ife_print_short, too */ -+ if (ptr->flags == 0) -+ strcat(flags,">"); -+ if (ptr->flags & IFF_UP) -+ strcat(flags,_("UP,")); -+ if (ptr->flags & IFF_BROADCAST) -+ strcat(flags,_("BROADCAST,")); -+ if (ptr->flags & IFF_DEBUG) -+ strcat(flags,_("DEBUG,")); -+ if (ptr->flags & IFF_LOOPBACK) -+ strcat(flags,_("LOOPBACK,")); -+ if (ptr->flags & IFF_POINTOPOINT) -+ strcat(flags,_("POINTOPOINT,")); -+ if (ptr->flags & IFF_NOTRAILERS) -+ strcat(flags,_("NOTRAILERS,")); -+ if (ptr->flags & IFF_RUNNING) -+ strcat(flags,_("RUNNING,")); -+ if (ptr->flags & IFF_NOARP) -+ strcat(flags,_("NOARP,")); -+ if (ptr->flags & IFF_PROMISC) -+ strcat(flags,_("PROMISC,")); -+ if (ptr->flags & IFF_ALLMULTI) -+ strcat(flags,_("ALLMULTI,")); -+ if (ptr->flags & IFF_SLAVE) -+ strcat(flags,_("SLAVE,")); -+ if (ptr->flags & IFF_MASTER) -+ strcat(flags,_("MASTER,")); -+ if (ptr->flags & IFF_MULTICAST) -+ strcat(flags,_("MULTICAST,")); -+#ifdef HAVE_DYNAMIC -+ if (ptr->flags & IFF_DYNAMIC) -+ strcat(flags,_("DYNAMIC,")); -+#endif -+ /* DONT FORGET TO ADD THE FLAGS IN ife_print_short */ -+ if (flags[strlen(flags)-1] == ',') -+ flags[strlen(flags)-1] = '>'; -+ else -+ flags[strlen(flags)-1] = 0; -+ -+ -+ printf(_("%s: %s mtu %d metric %d"), -+ ptr->name, flags, ptr->mtu, ptr->metric ? ptr->metric : 1); -+#ifdef SIOCSKEEPALIVE -+ if (ptr->outfill || ptr->keepalive) -+ printf(_(" outfill %d keepalive %d"), -+ ptr->outfill, ptr->keepalive); - #endif - printf("\n"); - - #if HAVE_AFINET - if (ptr->has_ip) { -- printf(_(" %s addr:%s "), ap->name, -+ printf(_(" %s %s"), ap->name, - ap->sprint(&ptr->addr, 1)); -- if (ptr->flags & IFF_POINTOPOINT) { -- printf(_(" P-t-P:%s "), ap->sprint(&ptr->dstaddr, 1)); -- } -+ printf(_(" netmask %s"), ap->sprint(&ptr->netmask, 1)); - if (ptr->flags & IFF_BROADCAST) { -- printf(_(" Bcast:%s "), ap->sprint(&ptr->broadaddr, 1)); -+ printf(_(" broadcast %s"), ap->sprint(&ptr->broadaddr, 1)); - } -- printf(_(" Mask:%s\n"), ap->sprint(&ptr->netmask, 1)); -+ if (ptr->flags & IFF_POINTOPOINT) { -+ printf(_(" destination %s"), ap->sprint(&ptr->dstaddr, 1)); -+ } -+ printf("\n"); - } - #endif - -@@ -727,29 +764,30 @@ - addr6p[0], addr6p[1], addr6p[2], addr6p[3], - addr6p[4], addr6p[5], addr6p[6], addr6p[7]); - inet6_aftype.input(1, addr6, (struct sockaddr *) &sap); -- printf(_(" inet6 addr: %s/%d"), -- inet6_aftype.sprint((struct sockaddr *) &sap, 1), plen); -- printf(_(" Scope:")); -- switch (scope) { -- case 0: -- printf(_("Global")); -- break; -- case IPV6_ADDR_LINKLOCAL: -- printf(_("Link")); -- break; -- case IPV6_ADDR_SITELOCAL: -- printf(_("Site")); -- break; -- case IPV6_ADDR_COMPATv4: -- printf(_("Compat")); -- break; -- case IPV6_ADDR_LOOPBACK: -- printf(_("Host")); -- break; -- default: -- printf(_("Unknown")); -+ printf(_(" %s %s prefixlen %d"), -+ inet6_aftype.name, -+ inet6_aftype.sprint((struct sockaddr *) &sap, 1), -+ plen); -+ printf(_(" scopeid 0x%x"), scope); -+ -+ flags[0] = '<'; flags[1] = 0; -+ if (scope & IPV6_ADDR_COMPATv4) { -+ strcat(flags, _("compat,")); -+ scope -= IPV6_ADDR_COMPATv4; - } -- printf("\n"); -+ if (scope == 0) -+ strcat(flags, _("global,")); -+ if (scope & IPV6_ADDR_LINKLOCAL) -+ strcat(flags, 
_("link,")); -+ if (scope & IPV6_ADDR_SITELOCAL) -+ strcat(flags, _("site,")); -+ if (scope & IPV6_ADDR_LOOPBACK) -+ strcat(flags, _("host,")); -+ if (flags[strlen(flags)-1] == ',') -+ flags[strlen(flags)-1] = '>'; -+ else -+ flags[strlen(flags)-1] = 0; -+ printf("%s\n", flags); - } - } - fclose(f); -@@ -762,17 +800,17 @@ - - if (ipxtype != NULL) { - if (ptr->has_ipx_bb) -- printf(_(" IPX/Ethernet II addr:%s\n"), -- ipxtype->sprint(&ptr->ipxaddr_bb, 1)); -+ printf(_(" %s Ethernet-II %s\n"), -+ ipxtype->name, ipxtype->sprint(&ptr->ipxaddr_bb, 1)); - if (ptr->has_ipx_sn) -- printf(_(" IPX/Ethernet SNAP addr:%s\n"), -- ipxtype->sprint(&ptr->ipxaddr_sn, 1)); -+ printf(_(" %s Ethernet-SNAP %s\n"), -+ ipxtype->name, ipxtype->sprint(&ptr->ipxaddr_sn, 1)); - if (ptr->has_ipx_e2) -- printf(_(" IPX/Ethernet 802.2 addr:%s\n"), -- ipxtype->sprint(&ptr->ipxaddr_e2, 1)); -+ printf(_(" %s Ethernet802.2 %s\n"), -+ ipxtype->name, ipxtype->sprint(&ptr->ipxaddr_e2, 1)); - if (ptr->has_ipx_e3) -- printf(_(" IPX/Ethernet 802.3 addr:%s\n"), -- ipxtype->sprint(&ptr->ipxaddr_e3, 1)); -+ printf(_(" %s Ethernet802.3 %s\n"), -+ ipxtype->name, ipxtype->sprint(&ptr->ipxaddr_e3, 1)); - } - #endif - -@@ -781,7 +819,7 @@ - ddptype = get_afntype(AF_APPLETALK); - if (ddptype != NULL) { - if (ptr->has_ddp) -- printf(_(" EtherTalk Phase 2 addr:%s\n"), ddptype->sprint(&ptr->ddpaddr, 1)); -+ printf(_(" %s %s\n"), ddptype->name, ddptype->sprint(&ptr->ddpaddr, 1)); - } - #endif - -@@ -790,53 +828,30 @@ - ectype = get_afntype(AF_ECONET); - if (ectype != NULL) { - if (ptr->has_econet) -- printf(_(" econet addr:%s\n"), ectype->sprint(&ptr->ecaddr, 1)); -+ printf(_(" %s %s\n"), ectype->name, ectype->sprint(&ptr->ecaddr, 1)); - } - #endif - -- printf(" "); -- /* DONT FORGET TO ADD THE FLAGS IN ife_print_short, too */ -- if (ptr->flags == 0) -- printf(_("[NO FLAGS] ")); -- if (ptr->flags & IFF_UP) -- printf(_("UP ")); -- if (ptr->flags & IFF_BROADCAST) -- printf(_("BROADCAST ")); -- if (ptr->flags & IFF_DEBUG) -- printf(_("DEBUG ")); -- if (ptr->flags & IFF_LOOPBACK) -- printf(_("LOOPBACK ")); -- if (ptr->flags & IFF_POINTOPOINT) -- printf(_("POINTOPOINT ")); -- if (ptr->flags & IFF_NOTRAILERS) -- printf(_("NOTRAILERS ")); -- if (ptr->flags & IFF_RUNNING) -- printf(_("RUNNING ")); -- if (ptr->flags & IFF_NOARP) -- printf(_("NOARP ")); -- if (ptr->flags & IFF_PROMISC) -- printf(_("PROMISC ")); -- if (ptr->flags & IFF_ALLMULTI) -- printf(_("ALLMULTI ")); -- if (ptr->flags & IFF_SLAVE) -- printf(_("SLAVE ")); -- if (ptr->flags & IFF_MASTER) -- printf(_("MASTER ")); -- if (ptr->flags & IFF_MULTICAST) -- printf(_("MULTICAST ")); --#ifdef HAVE_DYNAMIC -- if (ptr->flags & IFF_DYNAMIC) -- printf(_("DYNAMIC ")); --#endif -- /* DONT FORGET TO ADD THE FLAGS IN ife_print_short */ -- printf(_(" MTU:%d Metric:%d"), -- ptr->mtu, ptr->metric ? ptr->metric : 1); --#ifdef SIOCSKEEPALIVE -- if (ptr->outfill || ptr->keepalive) -- printf(_(" Outfill:%d Keepalive:%d"), -- ptr->outfill, ptr->keepalive); -+ /* For some hardware types (eg Ash, ATM) we don't print the -+ hardware address if it's null. */ -+ if (hw->print != NULL && (! 
(hw_null_address(hw, ptr->hwaddr) && -+ hw->suppress_null_addr))) -+ printf(_(" %s %s"), hw->name, hw->print(ptr->hwaddr)); -+ else -+ printf(_(" %s"), hw->name); -+ if (ptr->tx_queue_len != -1) -+ printf(_(" txqueuelen %d"), ptr->tx_queue_len); -+ printf(" (%s)\n", hw->title); -+ -+#ifdef IFF_PORTSEL -+ if (ptr->flags & IFF_PORTSEL) { -+ printf(_(" media %s"), if_port_text[ptr->map.port][0]); -+ if (ptr->flags & IFF_AUTOMEDIA) -+ printf(_("autoselect")); -+ printf("\n"); -+ } - #endif -- printf("\n"); -+ - - /* If needed, display the interface statistics. */ - -@@ -845,19 +860,9 @@ - * not for the aliases, although strictly speaking they're shared - * by all addresses. - */ -- printf(" "); -- -- printf(_("RX packets:%llu errors:%lu dropped:%lu overruns:%lu frame:%lu\n"), -- ptr->stats.rx_packets, ptr->stats.rx_errors, -- ptr->stats.rx_dropped, ptr->stats.rx_fifo_errors, -- ptr->stats.rx_frame_errors); -- if (can_compress) -- printf(_(" compressed:%lu\n"), ptr->stats.rx_compressed); - - rx = ptr->stats.rx_bytes; -- tx = ptr->stats.tx_bytes; - short_rx = rx * 10; -- short_tx = tx * 10; - if (rx > 1125899906842624ull) { - short_rx /= 1125899906842624ull; - Rext = "PiB"; -@@ -874,6 +879,8 @@ - short_rx /= 1024; - Rext = "KiB"; - } -+ tx = ptr->stats.tx_bytes; -+ short_tx = tx * 10; - if (tx > 1125899906842624ull) { - short_tx /= 1125899906842624ull; - Text = "PiB"; -@@ -891,37 +898,50 @@ - Text = "KiB"; - } - -- printf(" "); -- printf(_("TX packets:%llu errors:%lu dropped:%lu overruns:%lu carrier:%lu\n"), -- ptr->stats.tx_packets, ptr->stats.tx_errors, -- ptr->stats.tx_dropped, ptr->stats.tx_fifo_errors, -- ptr->stats.tx_carrier_errors); -- printf(_(" collisions:%lu "), ptr->stats.collisions); -- if (can_compress) -- printf(_("compressed:%lu "), ptr->stats.tx_compressed); -- if (ptr->tx_queue_len != -1) -- printf(_("txqueuelen:%d "), ptr->tx_queue_len); -- printf("\n "); -- printf(_("RX bytes:%llu (%lu.%lu %s) TX bytes:%llu (%lu.%lu %s)\n"), -+ printf(" "); -+ printf(_("RX packets %llu bytes %llu (%lu.%lu %s)\n"), -+ ptr->stats.rx_packets, - rx, (unsigned long)(short_rx / 10), -- (unsigned long)(short_rx % 10), Rext, -- tx, (unsigned long)(short_tx / 10), -- (unsigned long)(short_tx % 10), Text); -+ (unsigned long)(short_rx % 10), Rext); -+ if (can_compress) { -+ printf(" "); -+ printf(_("RX compressed:%lu\n"), ptr->stats.rx_compressed); -+ } -+ printf(" "); -+ printf(_("RX errors %lu dropped %lu overruns %lu frame %lu\n"), -+ ptr->stats.rx_errors, ptr->stats.rx_dropped, -+ ptr->stats.rx_fifo_errors, ptr->stats.rx_frame_errors); -+ -+ -+ printf(" "); -+ printf(_("TX packets %llu bytes %llu (%lu.%lu %s)\n"), -+ ptr->stats.tx_packets, -+ tx, (unsigned long)(short_tx / 10), -+ (unsigned long)(short_tx % 10), Text); -+ if (can_compress) { -+ printf(" "); -+ printf(_("TX compressed %lu\n"), ptr->stats.tx_compressed); -+ } -+ printf(" "); -+ printf(_("TX errors %lu dropped %lu overruns %lu carrier %lu collisions %lu\n"), -+ ptr->stats.tx_errors, -+ ptr->stats.tx_dropped, ptr->stats.tx_fifo_errors, -+ ptr->stats.tx_carrier_errors, ptr->stats.collisions); - } - - if ((ptr->map.irq || ptr->map.mem_start || ptr->map.dma || - ptr->map.base_addr >= 0x100)) { -- printf(" "); -+ printf(" device "); - if (ptr->map.irq) -- printf(_("Interrupt:%d "), ptr->map.irq); -+ printf(_("interrupt %d "), ptr->map.irq); - if (ptr->map.base_addr >= 0x100) /* Only print devices using it for - I/O maps */ -- printf(_("Base address:0x%x "), ptr->map.base_addr); -+ printf(_("base 0x%x "), ptr->map.base_addr); - if 
(ptr->map.mem_start) { -- printf(_("Memory:%lx-%lx "), ptr->map.mem_start, ptr->map.mem_end); -+ printf(_("memory 0x%lx-%lx "), ptr->map.mem_start, ptr->map.mem_end); - } - if (ptr->map.dma) -- printf(_("DMA chan:%x "), ptr->map.dma); -+ printf(_(" dma 0x%x"), ptr->map.dma); - printf("\n"); - } - printf("\n"); diff --git a/poky/meta/recipes-extended/net-tools/net-tools/0001-lib-inet6.c-INET6_rresolve-various-fixes.patch b/poky/meta/recipes-extended/net-tools/net-tools/0001-lib-inet6.c-INET6_rresolve-various-fixes.patch deleted file mode 100644 index 8be45ccac..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/0001-lib-inet6.c-INET6_rresolve-various-fixes.patch +++ /dev/null @@ -1,87 +0,0 @@ -From 08abfcd923e9f37d1902db26771b1dc6731eb265 Mon Sep 17 00:00:00 2001 -From: Jiri Popelka -Date: Fri, 27 Sep 2013 18:40:06 +0200 -Subject: [PATCH 1/1] lib/inet6.c:INET6_rresolve() - various fixes - -1) Fall-back to numeric address if getnameinfo fails. - Reverse lookup is not mandatory, therefore its fail - is not an error. Just return numeric address in that case. - This makes netstat/route show IPv6 address instead of - [UNKNOWN] in case of DNS problems. - -2) Pass length of 'name' buffer into function. - 'name' is a pointer and therefore sizeof(name) - returns size of pointer and not size of the buffer. - see http://stackoverflow.com/questions/14298710/c-pointers-and-arrays-sizeof-operator - The sizeof() usage was added with commit 604785adc, - so I checked all the other changes in that commit - and they seem to be OK. - -3) remove unused 's' variable - -Upstream-Status: Pending - -Signed-off-by: Shan Hai -Signed-off-by: Jianchuan Wang ---- - lib/inet6.c | 21 ++++++++++----------- - 1 file changed, 10 insertions(+), 11 deletions(-) - -diff --git a/lib/inet6.c b/lib/inet6.c -index 9a484a0..2a9c459 100644 ---- a/lib/inet6.c -+++ b/lib/inet6.c -@@ -84,10 +84,9 @@ static int INET6_resolve(char *name, struct sockaddr_in6 *sin6) - #endif - - --static int INET6_rresolve(char *name, struct sockaddr_in6 *sin6, int numeric) -+static int INET6_rresolve(char *name, size_t namelen, -+ struct sockaddr_in6 *sin6, int numeric) - { -- int s; -- - /* Grmpf. -FvK */ - if (sin6->sin6_family != AF_INET6) { - #ifdef DEBUG -@@ -98,21 +97,20 @@ static int INET6_rresolve(char *name, struct sockaddr_in6 *sin6, int numeric) - return (-1); - } - if (numeric & 0x7FFF) { -- inet_ntop( AF_INET6, &sin6->sin6_addr, name, 80); -+ inet_ntop( AF_INET6, &sin6->sin6_addr, name, namelen); - return (0); - } - if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) { - if (numeric & 0x8000) -- strcpy(name, "default"); -+ safe_strncpy(name, "default", namelen); - else -- strcpy(name, "[::]"); -+ safe_strncpy(name, "[::]", namelen); - return (0); - } - -- if ((s = getnameinfo((struct sockaddr *) sin6, sizeof(struct sockaddr_in6), -- name, 255 /* !! 
*/ , NULL, 0, 0))) { -- fputs("getnameinfo failed\n", stderr); -- return -1; -+ if (getnameinfo((struct sockaddr *) sin6, sizeof(struct sockaddr_in6), -+ name, namelen , NULL, 0, 0)) { -+ inet_ntop( AF_INET6, &sin6->sin6_addr, name, namelen); - } - return (0); - } -@@ -143,7 +141,8 @@ static char *INET6_sprint(struct sockaddr *sap, int numeric) - - if (sap->sa_family == 0xFFFF || sap->sa_family == 0) - return safe_strncpy(buff, _("[NONE SET]"), sizeof(buff)); -- if (INET6_rresolve(buff, (struct sockaddr_in6 *) sap, numeric) != 0) -+ if (INET6_rresolve(buff, sizeof(buff), -+ (struct sockaddr_in6 *) sap, numeric) != 0) - return safe_strncpy(buff, _("[UNKNOWN]"), sizeof(buff)); - return (fix_v4_address(buff, &((struct sockaddr_in6 *)sap)->sin6_addr)); - } --- -1.8.5.2.233.g932f7e4 - diff --git a/poky/meta/recipes-extended/net-tools/net-tools/Add_missing_headers.patch b/poky/meta/recipes-extended/net-tools/net-tools/Add_missing_headers.patch new file mode 100644 index 000000000..f15d3654c --- /dev/null +++ b/poky/meta/recipes-extended/net-tools/net-tools/Add_missing_headers.patch @@ -0,0 +1,15 @@ +Description: Add missing headers + +Upstream-Status: Pending +Signed-off-by: Martin Jansa + +--- a/netstat.c ++++ b/netstat.c +@@ -88,6 +88,7 @@ + #include + #include + #include ++#include + + #include "net-support.h" + #include "pathnames.h" diff --git a/poky/meta/recipes-extended/net-tools/net-tools/Bug_443075-ifconfig.c-pointtopoint_spelling.patch b/poky/meta/recipes-extended/net-tools/net-tools/Bug_443075-ifconfig.c-pointtopoint_spelling.patch new file mode 100644 index 000000000..74d74668c --- /dev/null +++ b/poky/meta/recipes-extended/net-tools/net-tools/Bug_443075-ifconfig.c-pointtopoint_spelling.patch @@ -0,0 +1,26 @@ +Description: Accept "pointtopoint" as a synonym for "pointopoint" +Bug-Debian: https://bugs.debian.org/443075 +Author: Justin Pryzby + +Upstream-Status: Pending +Signed-off-by: Martin Jansa + +--- a/ifconfig.c ++++ b/ifconfig.c +@@ -644,14 +644,14 @@ + spp++; + continue; + } +- if (!strcmp(*spp, "-pointopoint")) { ++ if (!strcmp(*spp, "-pointopoint") || !strcmp(*spp, "-pointtopoint")) { + goterr |= clr_flag(ifr.ifr_name, IFF_POINTOPOINT); + spp++; + if (test_flag(ifr.ifr_name, IFF_POINTOPOINT) > 0) + fprintf(stderr, _("Warning: Interface %s still in POINTOPOINT mode.\n"), ifr.ifr_name); + continue; + } +- if (!strcmp(*spp, "pointopoint")) { ++ if (!strcmp(*spp, "pointopoint") || !strcmp(*spp, "pointtopoint")) { + if (*(spp + 1) != NULL) { + spp++; + safe_strncpy(host, *spp, (sizeof host)); diff --git a/poky/meta/recipes-extended/net-tools/net-tools/Bug_541172-netstat.c-exit-codes.patch b/poky/meta/recipes-extended/net-tools/net-tools/Bug_541172-netstat.c-exit-codes.patch new file mode 100644 index 000000000..782b94bc6 --- /dev/null +++ b/poky/meta/recipes-extended/net-tools/net-tools/Bug_541172-netstat.c-exit-codes.patch @@ -0,0 +1,22 @@ +Description: Bug#541172: netstat.c exit codes + +Upstream-Status: Pending +Signed-off-by: Martin Jansa + +--- a/netstat.c ++++ b/netstat.c +@@ -2237,12 +2237,14 @@ + parsesnmp(flag_raw, flag_tcp, flag_udp, flag_sctp); + #else + ENOSUPP("netstat", "AF INET"); ++ exit(1); + #endif + } else if(!strcmp(afname, "inet6")) { + #if HAVE_AFINET6 + parsesnmp6(flag_raw, flag_tcp, flag_udp); + #else + ENOSUPP("netstat", "AF INET6"); ++ exit(1); + #endif + } else { + printf(_("netstat: No statistics support for specified address family: %s\n"), afname); diff --git 
a/poky/meta/recipes-extended/net-tools/net-tools/ifconfig-interface-0-del-IP-will-remove-the-aliased-.patch b/poky/meta/recipes-extended/net-tools/net-tools/ifconfig-interface-0-del-IP-will-remove-the-aliased-.patch deleted file mode 100644 index 06f81420e..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/ifconfig-interface-0-del-IP-will-remove-the-aliased-.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 81814dc2b14843009193efd307d814c26baa61f0 Mon Sep 17 00:00:00 2001 -From: Jiri Popelka -Date: Wed, 7 Dec 2011 19:14:09 +0100 -Subject: [PATCH] ifconfig interface:0 del will remove the aliased IP on IA64 - -Upstream-Status: Backport - -commit 81814dc2b14843009193efd307d814c26baa61f0 from -git://git.code.sf.net/p/net-tools/code - ---- - ifconfig.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/ifconfig.c b/ifconfig.c -index bc405c6..dae8922 100644 ---- a/ifconfig.c -+++ b/ifconfig.c -@@ -890,7 +890,9 @@ int main(int argc, char **argv) - continue; - } - -- memcpy(&ip, &sin.sin_addr.s_addr, sizeof(unsigned long)); -+ /* Clear "ip" in case sizeof(unsigned long) > sizeof(sin.sin_addr.s_addr) */ -+ ip = 0; -+ memcpy(&ip, &sin.sin_addr.s_addr, sizeof(sin.sin_addr.s_addr)); - - if (get_nmbc_parent(ifr.ifr_name, &nm, &bc) < 0) { - fprintf(stderr, _("Interface %s not initialized\n"), --- -1.7.9.5 - diff --git a/poky/meta/recipes-extended/net-tools/net-tools/musl-fixes.patch b/poky/meta/recipes-extended/net-tools/net-tools/musl-fixes.patch deleted file mode 100644 index f694d594b..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/musl-fixes.patch +++ /dev/null @@ -1,100 +0,0 @@ -Adjust headers for non-glibc cases -especially exposed by musl - -Signed-off-by: Khem Raj -Upstream-Status: Pending - -Index: net-tools-1.60/lib/inet6_gr.c -=================================================================== ---- net-tools-1.60.orig/lib/inet6_gr.c -+++ net-tools-1.60/lib/inet6_gr.c -@@ -23,7 +23,7 @@ - #include - #include - #include --#ifndef __GLIBC__ -+#ifdef HAVE_IPV6_ROUTE_H - #include /* glibc doesn't have this */ - #endif - #include "version.h" -Index: net-tools-1.60/lib/inet6_sr.c -=================================================================== ---- net-tools-1.60.orig/lib/inet6_sr.c -+++ net-tools-1.60/lib/inet6_sr.c -@@ -23,10 +23,10 @@ - #include - #include - #include --#ifdef __GLIBC__ --#include --#else -+#ifdef HAVE_IPV6_ROUTE_H - #include /* glibc does not have this */ -+#else -+#include - #endif - #include "version.h" - #include "net-support.h" -Index: net-tools-1.60/lib/inet_sr.c -=================================================================== ---- net-tools-1.60.orig/lib/inet_sr.c -+++ net-tools-1.60/lib/inet_sr.c -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - #include "version.h" - #include "net-support.h" - #include "pathnames.h" -Index: net-tools-1.60/lib/util-ank.c -=================================================================== ---- net-tools-1.60.orig/lib/util-ank.c -+++ net-tools-1.60/lib/util-ank.c -@@ -14,6 +14,7 @@ - * Rani Assaf 980929: resolve addresses - */ - -+#include - #include - #include - #include -Index: net-tools-1.60/mii-tool.c -=================================================================== ---- net-tools-1.60.orig/mii-tool.c -+++ net-tools-1.60/mii-tool.c -@@ -47,10 +47,6 @@ static char Version[] = "$Id: mii-tool.c - #include - #include - --#ifndef __GLIBC__ --#include --#include --#endif - #include "mii.h" - #include "version.h" - -Index: net-tools-1.60/netstat.c 
-=================================================================== ---- net-tools-1.60.orig/netstat.c -+++ net-tools-1.60/netstat.c -@@ -87,6 +87,7 @@ - #include - #include - #include -+#include - - #include "net-support.h" - #include "pathnames.h" -Index: net-tools-1.60/slattach.c -=================================================================== ---- net-tools-1.60.orig/slattach.c -+++ net-tools-1.60/slattach.c -@@ -44,6 +44,7 @@ - #include - #include - #include -+#include - #include - - #if defined(__GLIBC__) diff --git a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp1.patch b/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp1.patch deleted file mode 100644 index 78daf6c3e..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp1.patch +++ /dev/null @@ -1,635 +0,0 @@ -From 23276afe270009420cfbc52bffafdd25ac0817fe Mon Sep 17 00:00:00 2001 -From: Li Zhou -Date: Thu, 14 Jan 2016 17:01:29 +0800 -Subject: [PATCH 1/3] net-tools: add SCTP support for netstat - -Upstream-Status: pending - -Signed-off-by: Li Zhou ---- - netstat.c | 411 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++-- - statistics.c | 68 +++++++++- - 2 files changed, 465 insertions(+), 14 deletions(-) - -Index: net-tools-1.60/netstat.c -=================================================================== ---- net-tools-1.60.orig/netstat.c -+++ net-tools-1.60/netstat.c -@@ -58,6 +58,7 @@ - * - *990420 {1.38} Tuan Hoang removed a useless assignment from igmp_do_one() - *20010404 {1.39} Arnaldo Carvalho de Melo - use setlocale -+ *20050516 {1.40} Ivan Skytte Joergensen:Added SCTP support - * - * This program is free software; you can redistribute it - * and/or modify it under the terms of the GNU General -@@ -106,7 +107,7 @@ - #endif - - /* prototypes for statistics.c */ --void parsesnmp(int, int, int); -+void parsesnmp(int, int, int, int); - void inittab(void); - void parsesnmp6(int, int, int); - void inittab6(void); -@@ -119,6 +120,28 @@ typedef enum { - SS_DISCONNECTING /* in process of disconnecting */ - } socket_state; - -+#define SCTP_NSTATES 9 /* The number of states in array*/ -+ -+static const char *sctp_state[] = { -+ N_("EMPTY"), -+ N_("CLOSED"), -+ N_("COOKIE_WAIT"), -+ N_("COOKIE_ECHOED"), -+ N_("ESTABLISHED"), -+ N_("SHUTDOWN_PENDING"), -+ N_("SHUTDOWN_SENT"), -+ N_("SHUTDOWN_RECEIVED"), -+ N_("SHUTDOWN_ACK_SENT") -+}; -+ -+#define SCTP_NTYPES 3 /* The number of types in array */ -+ -+static const char *sctp_type[] = { -+ N_("udp"), -+ N_("udp-high-bw"), -+ N_("tcp") -+}; -+ - #define SO_ACCEPTCON (1<<16) /* performed a listen */ - #define SO_WAITDATA (1<<17) /* wait data to read */ - #define SO_NOSPACE (1<<18) /* no space to write */ -@@ -149,6 +172,7 @@ int flag_opt = 0; - int flag_raw = 0; - int flag_tcp = 0; - int flag_udp = 0; -+int flag_sctp= 0; - int flag_igmp= 0; - int flag_rom = 0; - int flag_exp = 1; -@@ -995,6 +1019,365 @@ static int udp_info(void) - udp_do_one); - } - -+static const char *sctp_socket_type_str(int type) { -+ if(type>=0 && type=0 && state=0 && state<=10) -+ return tcp_state[state]; -+ else { -+ static char state_str_buf[64]; -+ sprintf(state_str_buf,"UNKNOWN(%d)",state); -+ return state_str_buf; -+ } -+} -+ -+static struct aftype *process_sctp_addr_str(const char *addr_str, struct sockaddr *sa) -+{ -+ if (strchr(addr_str,':')) { -+#if HAVE_AFINET6 -+ extern struct aftype inet6_aftype; -+ /* Demangle what the kernel gives us */ -+ struct in6_addr in6; -+ char addr6_str[INET6_ADDRSTRLEN]; -+ unsigned 
u0,u1,u2,u3,u4,u5,u6,u7; -+ sscanf(addr_str, "%04X:%04X:%04X:%04X:%04X:%04X:%04X:%04X", -+ &u0, &u1, &u2, &u3, &u4, &u5, &u6, &u7); -+ in6.s6_addr16[0] = htons(u0); -+ in6.s6_addr16[1] = htons(u1); -+ in6.s6_addr16[2] = htons(u2); -+ in6.s6_addr16[3] = htons(u3); -+ in6.s6_addr16[4] = htons(u4); -+ in6.s6_addr16[5] = htons(u5); -+ in6.s6_addr16[6] = htons(u6); -+ in6.s6_addr16[7] = htons(u7); -+ -+ inet_ntop(AF_INET6, &in6, addr6_str, sizeof(addr6_str)); -+ inet6_aftype.input(1, addr6_str, sa); -+ sa->sa_family = AF_INET6; -+#endif -+ } else { -+ ((struct sockaddr_in*)sa)->sin_addr.s_addr = inet_addr(addr_str); -+ sa->sa_family = AF_INET; -+ } -+ return get_afntype(sa->sa_family); -+} -+ -+static void sctp_eps_do_one(int lnr, char *line) -+{ -+ char buffer[1024]; -+ int type, state, port; -+ int uid; -+ unsigned long inode; -+ -+ struct aftype *ap; -+#if HAVE_AFINET6 -+ struct sockaddr_in6 localaddr; -+#else -+ struct sockaddr_in localaddr; -+#endif -+ const char *sty_str; -+ const char *sst_str; -+ const char *lport_str; -+ const char *uid_str; -+ const char *inode_str; -+ const char *pladdr_str; -+ char *laddrs_str; -+ -+ if(lnr == 0) { -+ /* ENDPT SOCK STY SST HBKT LPORT uid inode pladdr LADDRS*/ -+ return; -+ } -+ -+ strtok(line," \t\n"); /*skip ptr*/ -+ strtok(0," \t\n"); /*skip ptr*/ -+ sty_str = strtok(0," \t\n"); -+ sst_str = strtok(0," \t\n"); -+ strtok(0," \t\n"); /*skip hash bucket*/ -+ lport_str=strtok(0," \t\n"); -+ uid_str = strtok(0," \t\n"); -+ inode_str = strtok(0," \t\n"); -+ pladdr_str = strtok(0," \t\n"); -+ laddrs_str=strtok(0,"\t\n"); -+ -+ type = atoi(sty_str); -+ state = atoi(sst_str); -+ port = atoi(lport_str); -+ uid = atoi(uid_str); -+ inode = strtoul(inode_str,0,0); -+ -+ if(flag_sctp<=1) { -+ /* only print the primary address */ -+ char local_addr[64]; -+ char local_port[16]; -+ -+ ap = process_sctp_addr_str(pladdr_str, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) &localaddr, flag_not), -+ sizeof(local_addr)); -+ else -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(port), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ -+ printf("sctp "); -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ printf("%-47s", buffer); -+ printf(" %-12s", sctp_socket_state_str(state)); -+ } else { -+ /*print all addresses*/ -+ const char *this_local_addr; -+ int first=1; -+ char local_port[16]; -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(port), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ for(this_local_addr=strtok(laddrs_str," \t\n"); -+ this_local_addr; -+ this_local_addr=strtok(0," \t\n")) -+ { -+ char local_addr[64]; -+ ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) &localaddr, flag_not), -+ sizeof(local_addr)); -+ else -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ -+ if(!first) printf("\n"); -+ if(first) -+ printf("sctp "); -+ else -+ printf(" "); -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ printf("%-47s", buffer); -+ printf(" %-12s", first?sctp_socket_state_str(state):""); -+ first = 0; -+ } -+ } -+ -+ finish_this_one(uid,inode,""); -+} -+ -+static void sctp_assoc_do_one(int lnr, char *line) -+{ -+ char buffer[1024]; -+ int type, state, state2, lport,rport; -+ int uid; -+ unsigned rxqueue,txqueue; -+ unsigned 
long inode; -+ -+ struct aftype *ap; -+#if HAVE_AFINET6 -+ struct sockaddr_in6 localaddr,remoteaddr; -+#else -+ struct sockaddr_in localaddr,remoteaddr; -+#endif -+ const char *sty_str; -+ const char *sst_str; -+ const char *st_str; -+ const char *txqueue_str; -+ const char *rxqueue_str; -+ const char *lport_str,*rport_str; -+ const char *uid_str; -+ const char *inode_str; -+ const char *pladdr_str; -+ char *laddrs_str; -+ const char *praddr_str; -+ char *raddrs_str; -+ -+ if(lnr == 0) { -+ /* ASSOC SOCK STY SST ST HBKT tx_queue rx_queue uid inode LPORT RPORT pladdr praddr LADDRS <-> RADDRS*/ -+ return; -+ } -+ -+ strtok(line," \t\n"); /*skip ptr*/ -+ strtok(0," \t\n"); /*skip ptr*/ -+ sty_str = strtok(0," \t\n"); -+ sst_str = strtok(0," \t\n"); -+ st_str = strtok(0," \t\n"); -+ strtok(0," \t\n"); /*skip hash bucket*/ -+ txqueue_str = strtok(0," \t\n"); -+ rxqueue_str = strtok(0," \t\n"); -+ uid_str = strtok(0," \t\n"); -+ inode_str = strtok(0," \t\n"); -+ lport_str=strtok(0," \t\n"); -+ rport_str=strtok(0," \t\n"); -+ pladdr_str = strtok(0," \t\n"); -+ praddr_str = strtok(0," \t\n"); -+ laddrs_str=strtok(0,"<->\t\n"); -+ raddrs_str=strtok(0,"<->\t\n"); -+ -+ type = atoi(sty_str); -+ state = atoi(sst_str); -+ state2 = atoi(st_str); -+ txqueue = atoi(txqueue_str); -+ rxqueue = atoi(rxqueue_str); -+ uid = atoi(uid_str); -+ inode = strtoul(inode_str,0,0); -+ lport = atoi(lport_str); -+ rport = atoi(rport_str); -+ -+ if(flag_sctp<=1) { -+ /* only print the primary addresses */ -+ char local_addr[64]; -+ char local_port[16]; -+ char remote_addr[64]; -+ char remote_port[16]; -+ -+ ap = process_sctp_addr_str(pladdr_str, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) &localaddr, flag_not), -+ sizeof(local_addr)); -+ else -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(lport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ -+ ap = process_sctp_addr_str(praddr_str, (struct sockaddr*)&remoteaddr); -+ if(ap) -+ safe_strncpy(remote_addr, -+ ap->sprint((struct sockaddr *) &remoteaddr, flag_not), -+ sizeof(remote_addr)); -+ else -+ sprintf(remote_addr,_("unsupported address family %d"), ((struct sockaddr*)&remoteaddr)->sa_family); -+ -+ snprintf(remote_port, sizeof(remote_port), "%s", -+ get_sname(htons(rport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ -+ printf("sctp"); -+ printf(" %6u %6u ", rxqueue, txqueue); -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ printf("%-23s", buffer); -+ printf(" "); -+ sprintf(buffer,"%s:%s", remote_addr, remote_port); -+ printf("%-23s", buffer); -+ printf(" %-12s", sctp_socket_state_str(state)); -+ } else { -+ /*print all addresses*/ -+ const char *this_local_addr; -+ const char *this_remote_addr; -+ char *ss1,*ss2; -+ int first=1; -+ char local_port[16]; -+ char remote_port[16]; -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(lport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ snprintf(remote_port, sizeof(remote_port), "%s", -+ get_sname(htons(rport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ -+ this_local_addr=strtok_r(laddrs_str," \t\n",&ss1); -+ this_remote_addr=strtok_r(raddrs_str," \t\n",&ss2); -+ while(this_local_addr || this_remote_addr) { -+ char local_addr[64]; -+ char remote_addr[64]; -+ if(this_local_addr) { -+ ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) 
&localaddr, flag_not), -+ sizeof(local_addr)); -+ else -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ } -+ if(this_remote_addr) { -+ ap = process_sctp_addr_str(this_remote_addr, (struct sockaddr*)&remoteaddr); -+ if(ap) -+ safe_strncpy(remote_addr, -+ ap->sprint((struct sockaddr *) &remoteaddr, flag_not), -+ sizeof(remote_addr)); -+ else -+ sprintf(remote_addr,_("unsupported address family %d"), ((struct sockaddr*)&remoteaddr)->sa_family); -+ } -+ -+ if(!first) printf("\n"); -+ if(first) -+ printf("sctp %6u %6u ", rxqueue, txqueue); -+ else -+ printf(" "); -+ if(this_local_addr) { -+ if(first) -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ else -+ sprintf(buffer,"%s", local_addr); -+ printf("%-23s", buffer); -+ } else -+ printf("%-23s", ""); -+ printf(" "); -+ if(this_remote_addr) { -+ if(first) -+ sprintf(buffer,"%s:%s", remote_addr, remote_port); -+ else -+ sprintf(buffer,"%s", remote_addr); -+ printf("%-23s", buffer); -+ } else -+ printf("%-23s", ""); -+ -+ printf(" %-12s", first?sctp_socket_state_str(state):""); -+ -+ first = 0; -+ this_local_addr=strtok_r(0," \t\n",&ss1); -+ this_remote_addr=strtok_r(0," \t\n",&ss2); -+ } -+ } -+ -+ finish_this_one(uid,inode,""); -+} -+ -+static int sctp_info_eps(void) -+{ -+#if !defined(_PATH_PROCNET_SCTP_EPS) -+#define _PATH_PROCNET_SCTP_EPS "/proc/net/sctp/eps" -+#endif -+ INFO_GUTS(_PATH_PROCNET_SCTP_EPS, "AF INET (sctp)", -+ sctp_eps_do_one); -+} -+ -+static int sctp_info_assocs(void) -+{ -+#if !defined(_PATH_PROCNET_SCTP_ASSOCS) -+#define _PATH_PROCNET_SCTP_ASSOCS "/proc/net/sctp/assocs" -+#endif -+ INFO_GUTS(_PATH_PROCNET_SCTP_ASSOCS, "AF INET (sctp)", -+ sctp_assoc_do_one); -+} -+ -+static int sctp_info(void) -+{ -+ if(flag_all) -+ sctp_info_eps(); -+ return sctp_info_assocs(); -+} -+ - static void raw_do_one(int lnr, const char *line) - { - char buffer[8192], local_addr[64], rem_addr[64]; -@@ -1558,7 +1941,7 @@ static void usage(void) - fprintf(stderr, _(" -F, --fib display Forwarding Information Base (default)\n")); - fprintf(stderr, _(" -C, --cache display routing cache instead of FIB\n\n")); - -- fprintf(stderr, _(" ={-t|--tcp} {-u|--udp} {-w|--raw} {-x|--unix} --ax25 --ipx --netrom\n")); -+ fprintf(stderr, _(" ={-t|--tcp} {-u|--udp} {-S|--sctp} {-w|--raw} {-x|--unix} --ax25 --ipx --netrom\n")); - fprintf(stderr, _(" =Use '-6|-4' or '-A ' or '--'; default: %s\n"), DFLT_AF); - fprintf(stderr, _(" List of possible address families (which support routing):\n")); - print_aflist(1); /* 1 = routeable */ -@@ -1583,6 +1966,7 @@ int main - {"protocol", 1, 0, 'A'}, - {"tcp", 0, 0, 't'}, - {"udp", 0, 0, 'u'}, -+ {"sctp", 0, 0, 'S' }, - {"raw", 0, 0, 'w'}, - {"unix", 0, 0, 'x'}, - {"listening", 0, 0, 'l'}, -@@ -1613,7 +1997,7 @@ int main - getroute_init(); /* Set up AF routing support */ - - afname[0] = '\0'; -- while ((i = getopt_long(argc, argv, "MCFA:acdegphinNorstuWVv?wxl64", longopts, &lop)) != EOF) -+ while ((i = getopt_long(argc, argv, "MCFA:acdegphinNorstuSWVv?wxl64", longopts, &lop)) != EOF) - switch (i) { - case -1: - break; -@@ -1705,10 +2089,12 @@ int main - case 't': - flag_tcp++; - break; -- - case 'u': - flag_udp++; - break; -+ case 'S': -+ flag_sctp++; -+ break; - case 'w': - flag_raw++; - break; -@@ -1726,13 +2112,13 @@ int main - if (flag_int + flag_rou + flag_mas + flag_sta > 1) - usage(); - -- if ((flag_inet || flag_inet6 || flag_sta) && !(flag_tcp || flag_udp || flag_raw)) -- flag_tcp = flag_udp = flag_raw = 1; -+ if ((flag_inet || flag_inet6 || flag_sta) && 
!(flag_tcp || flag_udp || flag_sctp || flag_raw)) -+ flag_tcp = flag_udp = flag_sctp = flag_raw = 1; - -- if ((flag_tcp || flag_udp || flag_raw || flag_igmp) && !(flag_inet || flag_inet6)) -+ if ((flag_tcp || flag_udp || flag_sctp || flag_raw || flag_igmp) && !(flag_inet || flag_inet6)) - flag_inet = flag_inet6 = 1; - -- flag_arg = flag_tcp + flag_udp + flag_raw + flag_unx + flag_ipx -+ flag_arg = flag_tcp + flag_udp + flag_sctp + flag_raw + flag_unx + flag_ipx - + flag_ax25 + flag_netrom + flag_igmp + flag_x25; - - if (flag_mas) { -@@ -1760,7 +2146,7 @@ int main - char buf[256]; - if (!afname[0]) { - inittab(); -- parsesnmp(flag_raw, flag_tcp, flag_udp); -+ parsesnmp(flag_raw, flag_tcp, flag_udp, flag_sctp); - } else { - safe_strncpy(buf, afname, sizeof(buf)); - tmp1 = buf; -@@ -1815,7 +2201,7 @@ int main - return (i); - } - for (;;) { -- if (!flag_arg || flag_tcp || flag_udp || flag_raw) { -+ if (!flag_arg || flag_tcp || flag_udp || flag_sctp || flag_raw) { - #if HAVE_AFINET - prg_cache_load(); - printf(_("Active Internet connections ")); /* xxx */ -@@ -1854,6 +2240,11 @@ int main - if (i) - return (i); - } -+ if (!flag_arg || flag_sctp) { -+ i = sctp_info(); -+ if (i) -+ return (i); -+ } - if (!flag_arg || flag_raw) { - i = raw_info(); - if (i) -Index: net-tools-1.60/statistics.c -=================================================================== ---- net-tools-1.60.orig/statistics.c -+++ net-tools-1.60/statistics.c -@@ -21,7 +21,7 @@ - #define UFWARN(x) - #endif - --int print_static,f_raw,f_tcp,f_udp,f_unknown = 1; -+int print_static,f_raw,f_tcp,f_udp,f_sctp,f_unknown = 1; - - enum State { - number = 0, opt_number, i_forward, i_inp_icmp, i_outp_icmp, i_rto_alg, -@@ -297,6 +297,27 @@ struct entry Tcpexttab[] = - { "TCPRenoRecoveryFail", N_("%llu classic Reno fast retransmits failed"), opt_number }, - }; - -+struct entry Sctptab[] = -+{ -+ {"SctpCurrEstab", N_("%u Current Associations"), number}, -+ {"SctpActiveEstabs", N_("%u Active Associations"), number}, -+ {"SctpPassiveEstabs", N_("%u Passive Associations"), number}, -+ {"SctpAborteds", N_("%u Number of Aborteds "), number}, -+ {"SctpShutdowns", N_("%u Number of Graceful Terminations"), number}, -+ {"SctpOutOfBlues", N_("%u Number of Out of Blue packets"), number}, -+ {"SctpChecksumErrors", N_("%u Number of Packets with invalid Checksum"), number}, -+ {"SctpOutCtrlChunks", N_("%u Number of control chunks sent"), number}, -+ {"SctpOutOrderChunks", N_("%u Number of ordered chunks sent"), number}, -+ {"SctpOutUnorderChunks", N_("%u Number of Unordered chunks sent"), number}, -+ {"SctpInCtrlChunks", N_("%u Number of control chunks received"), number}, -+ {"SctpInOrderChunks", N_("%u Number of ordered chunks received"), number}, -+ {"SctpInUnorderChunks", N_("%u Number of Unordered chunks received"), number}, -+ {"SctpFragUsrMsgs", N_("%u Number of messages fragmented"), number}, -+ {"SctpReasmUsrMsgs", N_("%u Number of messages reassembled "), number}, -+ {"SctpOutSCTPPacks", N_("%u Number of SCTP packets sent"), number}, -+ {"SctpInSCTPPacks", N_("%u Number of SCTP packets received"), number}, -+}; -+ - struct tabtab { - char *title; - struct entry *tab; -@@ -310,6 +331,7 @@ struct tabtab snmptabs[] = - {"Icmp", Icmptab, sizeof(Icmptab), &f_raw}, - {"Tcp", Tcptab, sizeof(Tcptab), &f_tcp}, - {"Udp", Udptab, sizeof(Udptab), &f_udp}, -+ {"Sctp", Sctptab, sizeof(Sctptab), &f_sctp}, - {"TcpExt", Tcpexttab, sizeof(Tcpexttab), &f_tcp}, - {NULL} - }; -@@ -499,12 +521,40 @@ void process6_fd(FILE *f) - - } - --void parsesnmp(int flag_raw, int 
flag_tcp, int flag_udp) -+/* Process a file with name-value lines (like /proc/net/sctp/snmp) */ -+void process_fd2(FILE *f, const char *filename) -+{ -+ char buf1[1024]; -+ char *sp; -+ struct tabtab *tab; -+ -+ tab = newtable(snmptabs, "Sctp"); -+ -+ while (fgets(buf1, sizeof buf1, f)) { -+ sp = buf1 + strcspn(buf1, " \t\n"); -+ if (!sp) -+ goto formaterr; -+ *sp = '\0'; -+ sp++; -+ -+ sp += strspn(sp, " \t\n"); -+ -+ if (*sp != '\0' && *(tab->flag)) -+ printval(tab, buf1, strtoul(sp, 0, 10)); -+ } -+ return; -+ -+formaterr: -+ fprintf(stderr,_("error parsing %s\n"), filename); -+ return; -+} -+ -+void parsesnmp(int flag_raw, int flag_tcp, int flag_udp, int flag_sctp) - { - FILE *f; - -- f_raw = flag_raw; f_tcp = flag_tcp; f_udp = flag_udp; -- -+ f_raw = flag_raw; f_tcp = flag_tcp; f_udp = flag_udp; f_sctp = flag_sctp; -+ - f = proc_fopen("/proc/net/snmp"); - if (!f) { - perror(_("cannot open /proc/net/snmp")); -@@ -530,6 +580,16 @@ void parsesnmp(int flag_raw, int flag_tc - - fclose(f); - } -+ -+ f = fopen("/proc/net/sctp/snmp", "r"); -+ if (f) { -+ process_fd2(f,"/proc/net/sctp/snmp"); -+ if (ferror(f)) -+ perror("/proc/net/sctp/snmp"); -+ -+ fclose(f); -+ } -+ - return; - } - diff --git a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp2-quiet.patch b/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp2-quiet.patch deleted file mode 100644 index d34e65132..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp2-quiet.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 14287b594e1f02b811f889fb515c1a51b72c08d4 Mon Sep 17 00:00:00 2001 -From: Li Zhou -Date: Thu, 14 Jan 2016 17:07:48 +0800 -Subject: [PATCH 2/3] net-tools: add SCTP support for netstat - -Upstream-Status: pending - -Signed-off-by: Li Zhou ---- - netstat.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/netstat.c b/netstat.c -index 5d1a4a1..56a15c2 100644 ---- a/netstat.c -+++ b/netstat.c -@@ -2104,7 +2104,7 @@ int main - usage(); - - if ((flag_inet || flag_inet6 || flag_sta) && !(flag_tcp || flag_udp || flag_sctp || flag_raw)) -- flag_tcp = flag_udp = flag_sctp = flag_raw = 1; -+ flag_tcp = flag_udp = flag_raw = 1; - - if ((flag_tcp || flag_udp || flag_sctp || flag_raw || flag_igmp) && !(flag_inet || flag_inet6)) - flag_inet = flag_inet6 = 1; --- -1.8.5.2.233.g932f7e4 - diff --git a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp3-addrs.patch b/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp3-addrs.patch deleted file mode 100644 index 8b2ecab70..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-1.60-sctp3-addrs.patch +++ /dev/null @@ -1,363 +0,0 @@ -From 1d386279a449a1a6b96b88a71f35bf13b14b2c2c Mon Sep 17 00:00:00 2001 -From: Li Zhou -Date: Thu, 14 Jan 2016 17:11:24 +0800 -Subject: [PATCH 3/3] net-tools: add SCTP support for netstat - -Upstream-Status: pending - -Signed-off-by: Li Zhou ---- - netstat.c | 282 ++++++++++++++++++++++++-------------------------------------- - 1 file changed, 108 insertions(+), 174 deletions(-) - -diff --git a/netstat.c b/netstat.c -index 56a15c2..86adadb 100644 ---- a/netstat.c -+++ b/netstat.c -@@ -1095,23 +1095,21 @@ static void sctp_eps_do_one(int lnr, char *line) - const char *lport_str; - const char *uid_str; - const char *inode_str; -- const char *pladdr_str; - char *laddrs_str; - - if(lnr == 0) { -- /* ENDPT SOCK STY SST HBKT LPORT uid inode pladdr LADDRS*/ -+ /* ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS */ - return; - } - -- strtok(line," \t\n"); 
/*skip ptr*/ -- strtok(0," \t\n"); /*skip ptr*/ -+ strtok(line," \t\n"); /*skip endpt*/ -+ strtok(0," \t\n"); /*skip sock*/ - sty_str = strtok(0," \t\n"); - sst_str = strtok(0," \t\n"); - strtok(0," \t\n"); /*skip hash bucket*/ - lport_str=strtok(0," \t\n"); - uid_str = strtok(0," \t\n"); - inode_str = strtok(0," \t\n"); -- pladdr_str = strtok(0," \t\n"); - laddrs_str=strtok(0,"\t\n"); - - type = atoi(sty_str); -@@ -1119,61 +1117,35 @@ static void sctp_eps_do_one(int lnr, char *line) - port = atoi(lport_str); - uid = atoi(uid_str); - inode = strtoul(inode_str,0,0); -- -- if(flag_sctp<=1) { -- /* only print the primary address */ -- char local_addr[64]; -- char local_port[16]; -- -- ap = process_sctp_addr_str(pladdr_str, (struct sockaddr*)&localaddr); -- if(ap) -- safe_strncpy(local_addr, -- ap->sprint((struct sockaddr *) &localaddr, flag_not), -- sizeof(local_addr)); -- else -- sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -- -- snprintf(local_port, sizeof(local_port), "%s", -- get_sname(htons(port), "sctp", -- flag_not & FLAG_NUM_PORT)); -- -- printf("sctp "); -- sprintf(buffer,"%s:%s", local_addr, local_port); -- printf("%-47s", buffer); -- printf(" %-12s", sctp_socket_state_str(state)); -- } else { -- /*print all addresses*/ -- const char *this_local_addr; -- int first=1; -- char local_port[16]; -- snprintf(local_port, sizeof(local_port), "%s", -- get_sname(htons(port), "sctp", -- flag_not & FLAG_NUM_PORT)); -- for(this_local_addr=strtok(laddrs_str," \t\n"); -- this_local_addr; -- this_local_addr=strtok(0," \t\n")) -- { -- char local_addr[64]; -- ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -- if(ap) -- safe_strncpy(local_addr, -- ap->sprint((struct sockaddr *) &localaddr, flag_not), -- sizeof(local_addr)); -- else -- sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); - -- if(!first) printf("\n"); -- if(first) -- printf("sctp "); -- else -- printf(" "); -- sprintf(buffer,"%s:%s", local_addr, local_port); -- printf("%-47s", buffer); -- printf(" %-12s", first?sctp_socket_state_str(state):""); -- first = 0; -- } -+ const char *this_local_addr; -+ int first=1; -+ char local_port[16]; -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(port), "sctp", flag_not & FLAG_NUM_PORT)); -+ for(this_local_addr=strtok(laddrs_str," \t\n"); -+ this_local_addr; -+ this_local_addr=strtok(0," \t\n")) -+ { -+ char local_addr[64]; -+ ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) &localaddr, flag_not), -+ sizeof(local_addr)); -+ else -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ -+ if(!first) printf("\n"); -+ if(first) -+ printf("sctp "); -+ else -+ printf(" "); -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ printf("%-55s", buffer); -+ printf(" %-12s", first?sctp_socket_state_str(state):""); -+ first = 0; - } -- - finish_this_one(uid,inode,""); - } - -@@ -1199,32 +1171,29 @@ static void sctp_assoc_do_one(int lnr, char *line) - const char *lport_str,*rport_str; - const char *uid_str; - const char *inode_str; -- const char *pladdr_str; - char *laddrs_str; -- const char *praddr_str; - char *raddrs_str; -- -+ - if(lnr == 0) { -- /* ASSOC SOCK STY SST ST HBKT tx_queue rx_queue uid inode LPORT RPORT pladdr praddr LADDRS <-> RADDRS*/ -+ /* ASSOC SOCK STY SST ST HBKT ASSOC-ID TX_QUEUE RX_QUEUE UID INODE 
LPORT RPORT LADDRS <-> RADDRS */ - return; - } -- -- strtok(line," \t\n"); /*skip ptr*/ -- strtok(0," \t\n"); /*skip ptr*/ -+ -+ strtok(line," \t\n"); /*skip assoc*/ -+ strtok(0," \t\n"); /*skip sock*/ - sty_str = strtok(0," \t\n"); - sst_str = strtok(0," \t\n"); - st_str = strtok(0," \t\n"); - strtok(0," \t\n"); /*skip hash bucket*/ -+ strtok(0," \t\n"); /*skip hash assoc-id*/ - txqueue_str = strtok(0," \t\n"); - rxqueue_str = strtok(0," \t\n"); - uid_str = strtok(0," \t\n"); - inode_str = strtok(0," \t\n"); - lport_str=strtok(0," \t\n"); - rport_str=strtok(0," \t\n"); -- pladdr_str = strtok(0," \t\n"); -- praddr_str = strtok(0," \t\n"); -- laddrs_str=strtok(0,"<->\t\n"); -- raddrs_str=strtok(0,"<->\t\n"); -+ laddrs_str = strtok(0,"<->\t\n"); -+ raddrs_str = strtok(0,"<->\t\n"); - - type = atoi(sty_str); - state = atoi(sst_str); -@@ -1235,116 +1204,81 @@ static void sctp_assoc_do_one(int lnr, char *line) - inode = strtoul(inode_str,0,0); - lport = atoi(lport_str); - rport = atoi(rport_str); -- -- if(flag_sctp<=1) { -- /* only print the primary addresses */ -- char local_addr[64]; -- char local_port[16]; -- char remote_addr[64]; -- char remote_port[16]; -- -- ap = process_sctp_addr_str(pladdr_str, (struct sockaddr*)&localaddr); -- if(ap) -- safe_strncpy(local_addr, -- ap->sprint((struct sockaddr *) &localaddr, flag_not), -- sizeof(local_addr)); -- else -- sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -- -- snprintf(local_port, sizeof(local_port), "%s", -- get_sname(htons(lport), "sctp", -- flag_not & FLAG_NUM_PORT)); -- -- ap = process_sctp_addr_str(praddr_str, (struct sockaddr*)&remoteaddr); -- if(ap) -- safe_strncpy(remote_addr, -- ap->sprint((struct sockaddr *) &remoteaddr, flag_not), -- sizeof(remote_addr)); -- else -- sprintf(remote_addr,_("unsupported address family %d"), ((struct sockaddr*)&remoteaddr)->sa_family); -- -- snprintf(remote_port, sizeof(remote_port), "%s", -- get_sname(htons(rport), "sctp", -- flag_not & FLAG_NUM_PORT)); -- -- printf("sctp"); -- printf(" %6u %6u ", rxqueue, txqueue); -- sprintf(buffer,"%s:%s", local_addr, local_port); -- printf("%-23s", buffer); -- printf(" "); -- sprintf(buffer,"%s:%s", remote_addr, remote_port); -- printf("%-23s", buffer); -- printf(" %-12s", sctp_socket_state_str(state)); -- } else { -- /*print all addresses*/ -- const char *this_local_addr; -- const char *this_remote_addr; -- char *ss1,*ss2; -- int first=1; -- char local_port[16]; -- char remote_port[16]; -- snprintf(local_port, sizeof(local_port), "%s", -- get_sname(htons(lport), "sctp", -- flag_not & FLAG_NUM_PORT)); -- snprintf(remote_port, sizeof(remote_port), "%s", -- get_sname(htons(rport), "sctp", -- flag_not & FLAG_NUM_PORT)); -- -- this_local_addr=strtok_r(laddrs_str," \t\n",&ss1); -- this_remote_addr=strtok_r(raddrs_str," \t\n",&ss2); -- while(this_local_addr || this_remote_addr) { -- char local_addr[64]; -- char remote_addr[64]; -- if(this_local_addr) { -- ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -- if(ap) -- safe_strncpy(local_addr, -- ap->sprint((struct sockaddr *) &localaddr, flag_not), -- sizeof(local_addr)); -- else -- sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -- } -- if(this_remote_addr) { -- ap = process_sctp_addr_str(this_remote_addr, (struct sockaddr*)&remoteaddr); -- if(ap) -- safe_strncpy(remote_addr, -- ap->sprint((struct sockaddr *) &remoteaddr, flag_not), -- sizeof(remote_addr)); -- else -- 
sprintf(remote_addr,_("unsupported address family %d"), ((struct sockaddr*)&remoteaddr)->sa_family); -- } - -- if(!first) printf("\n"); -- if(first) -- printf("sctp %6u %6u ", rxqueue, txqueue); -- else -- printf(" "); -- if(this_local_addr) { -- if(first) -- sprintf(buffer,"%s:%s", local_addr, local_port); -+ /*print all addresses*/ -+ const char *this_local_addr; -+ const char *this_remote_addr; -+ char *ss1,*ss2; -+ int first=1; -+ char local_port[16]; -+ char remote_port[16]; -+ snprintf(local_port, sizeof(local_port), "%s", -+ get_sname(htons(lport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ snprintf(remote_port, sizeof(remote_port), "%s", -+ get_sname(htons(rport), "sctp", -+ flag_not & FLAG_NUM_PORT)); -+ -+ this_local_addr=strtok_r(laddrs_str," \t\n",&ss1); -+ this_remote_addr=strtok_r(raddrs_str," \t\n",&ss2); -+ while(this_local_addr || this_remote_addr) { -+ char local_addr[64]; -+ char remote_addr[64]; -+ -+ if(this_local_addr) { -+ if (this_local_addr[0] == '*') { -+ /* skip * */ -+ this_local_addr++; -+ } -+ ap = process_sctp_addr_str(this_local_addr, (struct sockaddr*)&localaddr); -+ if(ap) -+ safe_strncpy(local_addr, -+ ap->sprint((struct sockaddr *) &localaddr, flag_not), sizeof(local_addr)); - else -- sprintf(buffer,"%s", local_addr); -- printf("%-23s", buffer); -- } else -- printf("%-23s", ""); -- printf(" "); -- if(this_remote_addr) { -- if(first) -- sprintf(buffer,"%s:%s", remote_addr, remote_port); -+ sprintf(local_addr,_("unsupported address family %d"), ((struct sockaddr*)&localaddr)->sa_family); -+ } -+ if(this_remote_addr) { -+ if (this_remote_addr[0] == '*') { -+ /* skip * */ -+ this_remote_addr++; -+ } -+ ap = process_sctp_addr_str(this_remote_addr, (struct sockaddr*)&remoteaddr); -+ if(ap) -+ safe_strncpy(remote_addr, -+ ap->sprint((struct sockaddr *) &remoteaddr, flag_not), sizeof(remote_addr)); - else -- sprintf(buffer,"%s", remote_addr); -- printf("%-23s", buffer); -- } else -- printf("%-23s", ""); -- -- printf(" %-12s", first?sctp_socket_state_str(state):""); -+ sprintf(remote_addr,_("unsupported address family %d"), ((struct sockaddr*)&remoteaddr)->sa_family); -+ } - -- first = 0; -- this_local_addr=strtok_r(0," \t\n",&ss1); -- this_remote_addr=strtok_r(0," \t\n",&ss2); -- } -+ if(!first) printf("\n"); -+ if(first) -+ printf("sctp %6u %6u ", rxqueue, txqueue); -+ else -+ printf(" "); -+ if(this_local_addr) { -+ if(first) -+ sprintf(buffer,"%s:%s", local_addr, local_port); -+ else -+ sprintf(buffer,"%s", local_addr); -+ printf("%-27s", buffer); -+ } else -+ printf("%-27s", ""); -+ printf(" "); -+ if(this_remote_addr) { -+ if(first) -+ sprintf(buffer,"%s:%s", remote_addr, remote_port); -+ else -+ sprintf(buffer,"%s", remote_addr); -+ printf("%-27s", buffer); -+ } else -+ printf("%-27s", ""); -+ -+ printf(" %-12s", first?sctp_socket_state_str(state):""); -+ -+ first = 0; -+ this_local_addr=strtok_r(0," \t\n",&ss1); -+ this_remote_addr=strtok_r(0," \t\n",&ss2); - } -- - finish_this_one(uid,inode,""); - } - --- -1.8.5.2.233.g932f7e4 - diff --git a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-fix-building-with-linux-4.8.patch b/poky/meta/recipes-extended/net-tools/net-tools/net-tools-fix-building-with-linux-4.8.patch deleted file mode 100644 index 505eeb048..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools/net-tools-fix-building-with-linux-4.8.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 4d56645ea144a34f7cdd3e3ede6452d81fbae251 Mon Sep 17 00:00:00 2001 -From: Randy MacLeod -Date: Sat, 8 Oct 2016 14:42:54 +0800 -Subject: [PATCH] 
iptunnel.c: include linux/ip.h to fix building with linux-4.8 - -Fix a build error when using the linux-4.8 headers that results in: - -In file included from -.../sysroots/qemuarm64/usr/include/linux/if_tunnel.h:6:0, - from iptunnel.c:39: -.../qemuarm64/usr/include/linux/ip.h:85:8: error: redefinition of -'struct iphdr' - struct iphdr { - ^~~~~ -In file included from iptunnel.c:29:0: -.../qemuarm64/usr/include/netinet/ip.h:44:8: note: originally defined here - struct iphdr - ^~~~~ - -Upstream-Status: Submitted [1] - -[1] https://sourceforge.net/p/net-tools/mailman/message/35413022/ - -Signed-off-by: Randy MacLeod -Signed-off-by: Jackie Huang ---- - iptunnel.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/iptunnel.c b/iptunnel.c -index 4943d83..acfcbc7 100644 ---- a/iptunnel.c -+++ b/iptunnel.c -@@ -26,7 +26,6 @@ - #include - #include - #include --#include - #include - #if defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 1)) - #include -@@ -36,6 +35,7 @@ - #include - #endif - #include -+#include - #include - - #include "config.h" --- -2.8.3 - diff --git a/poky/meta/recipes-extended/net-tools/net-tools_1.60-20181103.bb b/poky/meta/recipes-extended/net-tools/net-tools_1.60-20181103.bb new file mode 100644 index 000000000..e97731073 --- /dev/null +++ b/poky/meta/recipes-extended/net-tools/net-tools_1.60-20181103.bb @@ -0,0 +1,110 @@ +SUMMARY = "Basic networking tools" +DESCRIPTION = "A collection of programs that form the base set of the NET-3 networking distribution for the Linux operating system" +HOMEPAGE = "http://net-tools.berlios.de/" +BUGTRACKER = "http://bugs.debian.org/net-tools" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://ifconfig.c;beginline=11;endline=15;md5=d1ca372080ad5401e23ca0afc35cf9ba" + +PV = "1.60-20181103+git${SRCPV}" +SRCREV = "0eebece8c964e3cfa8a018f42b2e7e751a7009a0" +SRC_URI = "git://git.code.sf.net/p/net-tools/code;protocol=https \ + file://net-tools-config.h \ + file://net-tools-config.make \ + file://Add_missing_headers.patch \ + file://Bug_443075-ifconfig.c-pointtopoint_spelling.patch \ + file://Bug_541172-netstat.c-exit-codes.patch \ +" + +UPSTREAM_VERSION_UNKNOWN = "1" + +S = "${WORKDIR}/git" + +inherit gettext + +# The Makefile is lame, no parallel build +PARALLEL_MAKE = "" + +PACKAGECONFIG ??= "hostname arp serial plip" +PACKAGECONFIG[hostname] = "" +PACKAGECONFIG[arp] = "" +PACKAGECONFIG[serial] = "" +PACKAGECONFIG[plip] = "" + +do_configure() { + # net-tools has its own config mechanism requiring "make config" + # we pre-generate desired options and copy to source directory instead + cp ${WORKDIR}/net-tools-config.h ${S}/config.h + cp ${WORKDIR}/net-tools-config.make ${S}/config.make + + if [ "${USE_NLS}" = "no" ]; then + sed -i -e 's/^I18N=1/# I18N=1/' ${S}/config.make + fi + + if ${@bb.utils.contains('PACKAGECONFIG', 'hostname', 'true', 'false', d)} ; then + echo "#define HAVE_HOSTNAME_TOOLS 1" >> ${S}/config.h + echo "#define HAVE_HOSTNAME_SYMLINKS 1" >> ${S}/config.h + echo "HAVE_HOSTNAME_TOOLS=1" >> ${S}/config.make + echo "HAVE_HOSTNAME_SYMLINKS=1" >> ${S}/config.make + fi + if ${@bb.utils.contains('PACKAGECONFIG', 'arp', 'true', 'false', d)} ; then + echo "#define HAVE_ARP_TOOLS 1" >> ${S}/config.h + echo "HAVE_ARP_TOOLS=1" >> ${S}/config.make + fi + if ${@bb.utils.contains('PACKAGECONFIG', 'serial', 'true', 'false', d)} ; then + echo "#define HAVE_SERIAL_TOOLS 1" >> ${S}/config.h + echo "HAVE_SERIAL_TOOLS=1" >> ${S}/config.make + fi + if 
${@bb.utils.contains('PACKAGECONFIG', 'plip', 'true', 'false', d)} ; then + echo "#define HAVE_PLIP_TOOLS 1" >> ${S}/config.h + echo "HAVE_PLIP_TOOLS=1" >> ${S}/config.make + fi +} + +do_compile() { + # net-tools use COPTS/LOPTS to allow adding custom options + oe_runmake COPTS="$CFLAGS" LOPTS="$LDFLAGS" +} + +do_install() { + # We don't need COPTS or LOPTS, but let's be consistent. + oe_runmake COPTS="$CFLAGS" LOPTS="$LDFLAGS" BASEDIR=${D} INSTALLNLSDIR=${D}${datadir}/locale mandir=${mandir} install + + if [ "${base_bindir}" != "/bin" ]; then + mkdir -p ${D}/${base_bindir} + mv ${D}/bin/* ${D}/${base_bindir}/ + rmdir ${D}/bin + fi + if [ "${base_sbindir}" != "/sbin" ]; then + mkdir ${D}/${base_sbindir} + mv ${D}/sbin/* ${D}/${base_sbindir}/ + rmdir ${D}/sbin + fi +} + +inherit update-alternatives + +base_sbindir_progs = "ipmaddr iptunnel mii-tool nameif \ + ${@bb.utils.contains('PACKAGECONFIG', 'arp', 'arp rarp', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'plip', 'plipconfig', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'serial', 'slattach', '', d)} \ +" +base_bindir_progs = "ifconfig netstat route \ + ${@bb.utils.contains('PACKAGECONFIG', 'hostname', 'dnsdomainname domainname hostname nisdomainname ypdomainname', '', d)} \ +" + +ALTERNATIVE_${PN} = "${base_sbindir_progs} ${base_bindir_progs}" +ALTERNATIVE_${PN}-doc += "${@bb.utils.contains('PACKAGECONFIG', 'hostname', 'hostname.1 dnsdomainname.1', '', d)}" +ALTERNATIVE_LINK_NAME[hostname.1] = "${mandir}/man1/hostname.1" +ALTERNATIVE_LINK_NAME[dnsdomainname.1] = "${mandir}/man1/dnsdomainname.1" +ALTERNATIVE_PRIORITY[hostname.1] = "10" + +python __anonymous() { + for prog in d.getVar('base_sbindir_progs').split(): + d.setVarFlag('ALTERNATIVE_LINK_NAME', prog, '%s/%s' % (d.getVar('base_sbindir'), prog)) + for prog in d.getVar('base_bindir_progs').split(): + d.setVarFlag('ALTERNATIVE_LINK_NAME', prog, '%s/%s' % (d.getVar('base_bindir'), prog)) +} +ALTERNATIVE_PRIORITY = "100" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/net-tools/net-tools_1.60-26.bb b/poky/meta/recipes-extended/net-tools/net-tools_1.60-26.bb deleted file mode 100644 index 8bd30fc6d..000000000 --- a/poky/meta/recipes-extended/net-tools/net-tools_1.60-26.bb +++ /dev/null @@ -1,132 +0,0 @@ -SUMMARY = "Basic networking tools" -DESCRIPTION = "A collection of programs that form the base set of the NET-3 networking distribution for the Linux operating system" -HOMEPAGE = "http://net-tools.berlios.de/" -BUGTRACKER = "http://bugs.debian.org/net-tools" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=8ca43cbc842c2336e835926c2166c28b \ - file://ifconfig.c;beginline=11;endline=15;md5=d1ca372080ad5401e23ca0afc35cf9ba" - -SRC_URI = "http://snapshot.debian.org/archive/debian/20050312T000000Z/pool/main/n/${BPN}/${BPN}_1.60.orig.tar.gz;name=tarball \ - http://snapshot.debian.org/archive/debian//20150831T093342Z/pool/main/n/${BPN}/${BPN}_${PV}.diff.gz;apply=no;name=patch \ - file://net-tools-config.h \ - file://net-tools-config.make \ - file://ifconfig-interface-0-del-IP-will-remove-the-aliased-.patch \ - file://musl-fixes.patch \ - file://net-tools-1.60-sctp1.patch \ - file://net-tools-1.60-sctp2-quiet.patch \ - file://net-tools-1.60-sctp3-addrs.patch \ - file://0001-lib-inet6.c-INET6_rresolve-various-fixes.patch \ - file://net-tools-fix-building-with-linux-4.8.patch \ - file://0001-added-ull-prefix-to-unsigned-long-long-constants-to-.patch \ - " - -# for this package we're mostly interested in tracking debian patches, -# and not in 
the upstream version where all development has effectively stopped -UPSTREAM_CHECK_REGEX = "(?P((\d+\.*)+)-((\d+\.*)+))\.(diff|debian\.tar)\.(gz|xz)" - -S = "${WORKDIR}/net-tools-1.60" - -SRC_URI[tarball.md5sum] = "ecaf37acb5b5daff4bdda77785fd916d" -SRC_URI[tarball.sha256sum] = "ec67967cf7b1a3a3828a84762fbc013ac50ee5dc9aa3095d5c591f302c2de0f5" - -SRC_URI[patch.md5sum] = "ea3592f49ac8380962bc4d9b66c7e7e9" -SRC_URI[patch.sha256sum] = "aeeeafaff68866a446f01bb639d4e0146a60af34dcd20e31a3e46585022fc76c" - -# the package is taken from snapshots.debian.org; that source is static and goes stale -# so we check the latest upstream from a directory that does get updated -UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/n/net-tools/" - -inherit gettext - -do_patch[depends] += "quilt-native:do_populate_sysroot" - -# The Makefile is lame, no parallel build -PARALLEL_MAKE = "" - -# Unlike other Debian packages, net-tools *.diff.gz contains another series of -# patches maintained by quilt. So manually apply them before applying other local -# patches. Also remove all temp files before leaving, because do_patch() will pop -# up all previously applied patches in the start -nettools_do_patch() { - cd ${S} - # it's important that we only pop the existing patches when they've - # been applied, otherwise quilt will climb the directory tree - # and reverse out some completely different set of patches - if [ -d ${S}/patches ]; then - # whilst this is the default directory, doing it like this - # defeats the directory climbing that quilt will otherwise - # do; note the directory must exist to defeat this, hence - # the test inside which we operate - QUILT_PATCHES=${S}/patches quilt pop -a - fi - if [ -d ${S}/.pc-nettools ]; then - rm -rf ${S}/.pc - mv ${S}/.pc-nettools ${S}/.pc - QUILT_PATCHES=${S}/debian/patches quilt pop -a - rm -rf ${S}/.pc ${S}/debian - fi - patch -p1 < ${WORKDIR}/${BPN}_${PV}.diff - QUILT_PATCHES=${S}/debian/patches quilt push -a - mv ${S}/.pc ${S}/.pc-nettools -} - -do_unpack[cleandirs] += "${S}" - -# We invoke base do_patch at end, to incorporate any local patch -python do_patch() { - bb.build.exec_func('nettools_do_patch', d) - bb.build.exec_func('patch_do_patch', d) -} - -do_configure() { - # net-tools has its own config mechanism requiring "make config" - # we pre-generate desired options and copy to source directory instead - cp ${WORKDIR}/net-tools-config.h ${S}/config.h - cp ${WORKDIR}/net-tools-config.make ${S}/config.make - - if [ "${USE_NLS}" = "no" ]; then - sed -i -e 's/^I18N=1/# I18N=1/' ${S}/config.make - fi -} - -do_compile() { - # net-tools use COPTS/LOPTS to allow adding custom options - oe_runmake COPTS="$CFLAGS" LOPTS="$LDFLAGS" -} - -do_install() { - # We don't need COPTS or LOPTS, but let's be consistent. 
- oe_runmake COPTS="$CFLAGS" LOPTS="$LDFLAGS" BASEDIR=${D} INSTALLNLSDIR=${D}${datadir}/locale mandir=${mandir} install - - if [ "${base_bindir}" != "/bin" ]; then - mkdir -p ${D}/${base_bindir} - mv ${D}/bin/* ${D}/${base_bindir}/ - rmdir ${D}/bin - fi - if [ "${base_sbindir}" != "/sbin" ]; then - mkdir ${D}/${base_sbindir} - mv ${D}/sbin/* ${D}/${base_sbindir}/ - rmdir ${D}/sbin - fi -} - -inherit update-alternatives - -base_sbindir_progs = "arp ifconfig ipmaddr iptunnel mii-tool nameif plipconfig rarp route slattach" -base_bindir_progs = "dnsdomainname domainname hostname netstat nisdomainname ypdomainname" - -ALTERNATIVE_${PN} = "${base_sbindir_progs} ${base_bindir_progs}" -ALTERNATIVE_${PN}-doc += "hostname.1 dnsdomainname.1" -ALTERNATIVE_LINK_NAME[hostname.1] = "${mandir}/man1/hostname.1" -ALTERNATIVE_LINK_NAME[dnsdomainname.1] = "${mandir}/man1/dnsdomainname.1" -ALTERNATIVE_PRIORITY[hostname.1] = "10" - -python __anonymous() { - for prog in d.getVar('base_sbindir_progs').split(): - d.setVarFlag('ALTERNATIVE_LINK_NAME', prog, '%s/%s' % (d.getVar('base_sbindir'), prog)) - for prog in d.getVar('base_bindir_progs').split(): - d.setVarFlag('ALTERNATIVE_LINK_NAME', prog, '%s/%s' % (d.getVar('base_bindir'), prog)) -} -ALTERNATIVE_PRIORITY = "100" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb b/poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb index e321cd2b2..d24035b67 100644 --- a/poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb +++ b/poky/meta/recipes-extended/pbzip2/pbzip2_1.1.13.bb @@ -28,4 +28,4 @@ do_install() { install -m 0755 pbzip2 ${D}${bindir}/ } -BBCLASSEXTEND = "native" +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.15.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.15.bb deleted file mode 100644 index 8ea0476db..000000000 --- a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.15.bb +++ /dev/null @@ -1,26 +0,0 @@ -SUMMARY = "System load testing utility" -DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ -imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." -HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" - -SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ - file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ - file://no_daddr_t.patch \ - " -SRC_URI[sha256sum] = "6a333650fb5c85c5221f10d2cf890e9fc56530696e118d975fbbe96126c06963" - -DEPENDS = "coreutils-native" - -PROVIDES = "stress" -RPROVIDES_${PN} = "stress" -RREPLACES_${PN} = "stress" -RCONFLICTS_${PN} = "stress" - -inherit bash-completion - -do_install() { - oe_runmake DESTDIR=${D} install -} - diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb new file mode 100644 index 000000000..c668d8cbd --- /dev/null +++ b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb @@ -0,0 +1,27 @@ +SUMMARY = "System load testing utility" +DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ +imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." 
+HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" + +SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ + file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ + file://no_daddr_t.patch \ + " +SRC_URI[sha256sum] = "07c82a5c89538b5b696a79192faa70d0232352004c9e532946f7f3613d0adf23" + +DEPENDS = "coreutils-native" + +PROVIDES = "stress" +RPROVIDES_${PN} = "stress" +RREPLACES_${PN} = "stress" +RCONFLICTS_${PN} = "stress" + +inherit bash-completion + +do_install() { + oe_runmake DESTDIR=${D} install + ln -s stress-ng ${D}${bindir}/stress +} + diff --git a/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb b/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb deleted file mode 100644 index d6bc1a9c3..000000000 --- a/poky/meta/recipes-extended/sudo/sudo_1.9.1.bb +++ /dev/null @@ -1,47 +0,0 @@ -require sudo.inc - -SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \ - ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \ - " - -PAM_SRC_URI = "file://sudo.pam" - -SRC_URI[sha256sum] = "294116cefe10a02773917fc7440d8384b925955bc96a6e0eaa1977c83b34adff" - -DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}" -RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}" - -CACHED_CONFIGUREVARS = " \ - ac_cv_type_rsize_t=no \ - ac_cv_path_MVPROG=${base_bindir}/mv \ - ac_cv_path_BSHELLPROG=${base_bindir}/sh \ - ac_cv_path_SENDMAILPROG=${sbindir}/sendmail \ - ac_cv_path_VIPROG=${base_bindir}/vi \ - " - -EXTRA_OECONF += " \ - ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '--with-pam', '--without-pam', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '--enable-tmpfiles.d=${nonarch_libdir}/tmpfiles.d', '--disable-tmpfiles.d', d)} \ - --with-rundir=/run/sudo \ - --with-vardir=/var/lib/sudo \ - " - -do_install_append () { - if [ "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" ]; then - install -D -m 644 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo - if ${@bb.utils.contains('PACKAGECONFIG', 'pam-wheel', 'true', 'false', d)} ; then - echo 'auth required pam_wheel.so use_uid' >>${D}${sysconfdir}/pam.d/sudo - sed -i 's/# \(%wheel ALL=(ALL) ALL\)/\1/' ${D}${sysconfdir}/sudoers - fi - fi - - chmod 4111 ${D}${bindir}/sudo - chmod 0440 ${D}${sysconfdir}/sudoers - - # Explicitly remove the /sudo directory to avoid QA error - rmdir -p --ignore-fail-on-non-empty ${D}/run/sudo -} - -FILES_${PN} += "${nonarch_libdir}/tmpfiles.d" -FILES_${PN}-dev += "${libexecdir}/${BPN}/lib*${SOLIBSDEV} ${libexecdir}/${BPN}/*.la \ - ${libexecdir}/lib*${SOLIBSDEV} ${libexecdir}/*.la" diff --git a/poky/meta/recipes-extended/sudo/sudo_1.9.2.bb b/poky/meta/recipes-extended/sudo/sudo_1.9.2.bb new file mode 100644 index 000000000..5756b2e14 --- /dev/null +++ b/poky/meta/recipes-extended/sudo/sudo_1.9.2.bb @@ -0,0 +1,47 @@ +require sudo.inc + +SRC_URI = "https://www.sudo.ws/dist/sudo-${PV}.tar.gz \ + ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \ + " + +PAM_SRC_URI = "file://sudo.pam" + +SRC_URI[sha256sum] = "7c98d201f181c47152711b9f391e0f6b5545f3ef8926298a3e8bc6288e118314" + +DEPENDS += " virtual/crypt ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}" +RDEPENDS_${PN} += " ${@bb.utils.contains('DISTRO_FEATURES', 'pam', 'pam-plugin-limits pam-plugin-keyinit', '', d)}" + +CACHED_CONFIGUREVARS = " \ + ac_cv_type_rsize_t=no \ + 
ac_cv_path_MVPROG=${base_bindir}/mv \ + ac_cv_path_BSHELLPROG=${base_bindir}/sh \ + ac_cv_path_SENDMAILPROG=${sbindir}/sendmail \ + ac_cv_path_VIPROG=${base_bindir}/vi \ + " + +EXTRA_OECONF += " \ + ${@bb.utils.contains('DISTRO_FEATURES', 'pam', '--with-pam', '--without-pam', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', '--enable-tmpfiles.d=${nonarch_libdir}/tmpfiles.d', '--disable-tmpfiles.d', d)} \ + --with-rundir=/run/sudo \ + --with-vardir=/var/lib/sudo \ + " + +do_install_append () { + if [ "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" ]; then + install -D -m 644 ${WORKDIR}/sudo.pam ${D}/${sysconfdir}/pam.d/sudo + if ${@bb.utils.contains('PACKAGECONFIG', 'pam-wheel', 'true', 'false', d)} ; then + echo 'auth required pam_wheel.so use_uid' >>${D}${sysconfdir}/pam.d/sudo + sed -i 's/# \(%wheel ALL=(ALL) ALL\)/\1/' ${D}${sysconfdir}/sudoers + fi + fi + + chmod 4111 ${D}${bindir}/sudo + chmod 0440 ${D}${sysconfdir}/sudoers + + # Explicitly remove the /sudo directory to avoid QA error + rmdir -p --ignore-fail-on-non-empty ${D}/run/sudo +} + +FILES_${PN} += "${nonarch_libdir}/tmpfiles.d" +FILES_${PN}-dev += "${libexecdir}/${BPN}/lib*${SOLIBSDEV} ${libexecdir}/${BPN}/*.la \ + ${libexecdir}/lib*${SOLIBSDEV} ${libexecdir}/*.la" diff --git a/poky/meta/recipes-gnome/gcr/gcr/0001-meson-Make-sure-gcr-oids.h-is-built.patch b/poky/meta/recipes-gnome/gcr/gcr/0001-meson-Make-sure-gcr-oids.h-is-built.patch new file mode 100644 index 000000000..4bf5bfba9 --- /dev/null +++ b/poky/meta/recipes-gnome/gcr/gcr/0001-meson-Make-sure-gcr-oids.h-is-built.patch @@ -0,0 +1,36 @@ +From 9fca6ae0aa7355c27d0922c561b9fbe18dde5b3d Mon Sep 17 00:00:00 2001 +From: Niels De Graef +Date: Fri, 19 Jun 2020 22:37:31 +0200 +Subject: [PATCH 1/1] meson: Make sure gcr-oids.h is built + +Fixes https://gitlab.gnome.org/GNOME/gcr/-/issues/48 +--- + gcr/meson.build | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- end of original header --- + +Upstream-Status: Backport [https://github.com/GNOME/gcr.git] + +Signed-off-by: Joe Slater + +--- +diff --git a/gcr/meson.build b/gcr/meson.build +index 199452f..06c3a63 100644 +--- a/gcr/meson.build ++++ b/gcr/meson.build +@@ -178,7 +178,10 @@ endif + gcr_base_dep = declare_dependency( + link_with: gcr_base_lib, + include_directories: include_directories('..'), +- sources: gcr_enums_gen[1], # Make sure gcr-enum-types-base.h can be included ++ sources: [ ++ gcr_enums_gen[1], ++ gcr_oids[1], ++ ], + ) + + if get_option('introspection') +-- +2.7.4 + diff --git a/poky/meta/recipes-gnome/gcr/gcr_3.36.0.bb b/poky/meta/recipes-gnome/gcr/gcr_3.36.0.bb index d5a88dfb3..ff455a68e 100644 --- a/poky/meta/recipes-gnome/gcr/gcr_3.36.0.bb +++ b/poky/meta/recipes-gnome/gcr/gcr_3.36.0.bb @@ -18,6 +18,8 @@ inherit gnomebase gtk-icon-cache gtk-doc features_check upstream-version-is-even REQUIRED_DISTRO_FEATURES = "x11" SRC_URI += " file://0001-meson.build-correctly-set-internal-vapi-dependencies.patch" +SRC_URI += " file://0001-meson-Make-sure-gcr-oids.h-is-built.patch" + SRC_URI[archive.md5sum] = "adc65563b6b458507b9a578a8b68fb61" SRC_URI[archive.sha256sum] = "aaf9bed017a2263c6145c89a1a84178f9f40f238426463e4ae486694ef5f6601" diff --git a/poky/meta/recipes-gnome/gnome/gconf/python3.patch b/poky/meta/recipes-gnome/gnome/gconf/python3.patch new file mode 100644 index 000000000..7c022a2e1 --- /dev/null +++ b/poky/meta/recipes-gnome/gnome/gconf/python3.patch @@ -0,0 +1,60 @@ +gconf: use python3 + +Convert gsettings-schema-convert to use python3. 
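# Illustrative sketch (not part of the patch): the conversion in the hunks
# below is almost entirely the Python 2 -> Python 3 exception-syntax change,
# plus switching the shebang to /usr/bin/env python3. With hypothetical,
# simplified names:
#
#   try:
#       parse(line)                                   # hypothetical helper
#   except GSettingsSchemaConvertException, e:        # Python 2 only syntax
#       print('failed: %s' % e)
#
# becomes
#
#   try:
#       parse(line)
#   except GSettingsSchemaConvertException as e:      # Python 2.6+ and 3
#       print('failed: %s' % e)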
+ +Upstream-Status: Inappropriate [gconf is deprecated] + +Signed-off-by: Joe Slater + + +--- a/gsettings/gsettings-schema-convert ++++ b/gsettings/gsettings-schema-convert +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/env python3 + # vim: set ts=4 sw=4 et: coding=UTF-8 + # + # Copyright (c) 2010, Novell, Inc. +@@ -603,7 +603,7 @@ class SimpleSchemaParser: + for line in lines: + current_line_nb += 1 + self.parse_line(line) +- except GSettingsSchemaConvertException, e: ++ except GSettingsSchemaConvertException as e: + raise GSettingsSchemaConvertException('%s:%s: %s' % (os.path.basename(self.file), current_line_nb, e)) + + return self.root +@@ -1095,7 +1095,7 @@ def main(args): + try: + parser = GConfSchemaParser(argfile, options.gettext_domain, options.schema_id, options.keep_underscores) + schema_root = parser.parse() +- except SyntaxError, e: ++ except SyntaxError as e: + raise GSettingsSchemaConvertException('\'%s\' does not look like a valid gconf schema file: %s' % (argfile, e)) + else: + # autodetect if file is XML or not +@@ -1104,7 +1104,7 @@ def main(args): + schema_root = parser.parse() + if not options.simple and not options.xml: + options.simple = True +- except SyntaxError, e: ++ except SyntaxError as e: + parser = SimpleSchemaParser(argfile) + schema_root = parser.parse() + if not options.simple and not options.xml: +@@ -1127,13 +1127,13 @@ def main(args): + fout = open(options.output, 'w') + fout.write(output) + fout.close() +- except GSettingsSchemaConvertException, e: ++ except GSettingsSchemaConvertException as e: + fout.close() + if os.path.exists(options.output): + os.unlink(options.output) + raise e + +- except GSettingsSchemaConvertException, e: ++ except GSettingsSchemaConvertException as e: + print >> sys.stderr, '%s' % e + return 1 + diff --git a/poky/meta/recipes-gnome/gnome/gconf_3.2.6.bb b/poky/meta/recipes-gnome/gnome/gconf_3.2.6.bb index b8466d483..ff365551d 100644 --- a/poky/meta/recipes-gnome/gnome/gconf_3.2.6.bb +++ b/poky/meta/recipes-gnome/gnome/gconf_3.2.6.bb @@ -15,6 +15,7 @@ SRC_URI = "${GNOME_MIRROR}/GConf/${@gnome_verdir("${PV}")}/GConf-${PV}.tar.xz;na file://remove_plus_from_invalid_characters_list.patch \ file://unable-connect-dbus.patch \ file://create_config_directory.patch \ + file://python3.patch \ " SRC_URI[archive.md5sum] = "2b16996d0e4b112856ee5c59130e822c" @@ -52,6 +53,8 @@ FILES_${PN} += "${libdir}/GConf/* \ ${datadir}/dbus-1/services/*.service \ ${datadir}/dbus-1/system-services/*.service \ " +RDEPENDS_${PN} = "python3-xml" + FILES_${PN}-dev += "${datadir}/sgml/gconf/gconf-1.0.dtd" BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.64.1.bb b/poky/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.64.1.bb index 7eefdd3e2..4d80f00e1 100644 --- a/poky/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.64.1.bb +++ b/poky/meta/recipes-gnome/gobject-introspection/gobject-introspection_1.64.1.bb @@ -131,6 +131,11 @@ do_compile_prepend() { export GIR_EXTRA_LIBS_PATH=$B/.libs } +do_install_prepend() { + # This prevents g-ir-scanner from writing cache data to $HOME + export GI_SCANNER_DISABLE_CACHE=1 +} + # Our wrappers need to be available system-wide, because they will be used # to build introspection files for all other gobject-based packages do_install_append_class-target() { diff --git a/poky/meta/recipes-gnome/libhandy/libhandy_git.bb b/poky/meta/recipes-gnome/libhandy/libhandy_git.bb index 64258941e..9f7401464 100644 --- 
a/poky/meta/recipes-gnome/libhandy/libhandy_git.bb +++ b/poky/meta/recipes-gnome/libhandy/libhandy_git.bb @@ -2,7 +2,7 @@ SUMMARY = "A library full of GTK+ widgets for mobile phones" LICENSE = "LGPLv2.1" LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" -SRC_URI = "git://source.puri.sm/Librem5/${BPN}.git;protocol=https" +SRC_URI = "git://gitlab.gnome.org/GNOME/${BPN}.git;protocol=https" SRCREV = "7a193d7692c9c76a1a94f17c4d30b585f77d177c" S = "${WORKDIR}/git" PV = "0.0.13" diff --git a/poky/meta/recipes-gnome/librsvg/librsvg_2.40.20.bb b/poky/meta/recipes-gnome/librsvg/librsvg_2.40.20.bb deleted file mode 100644 index 337299ff6..000000000 --- a/poky/meta/recipes-gnome/librsvg/librsvg_2.40.20.bb +++ /dev/null @@ -1,50 +0,0 @@ -SUMMARY = "Library for rendering SVG files" -DESCRIPTION = "A small library to render Scalable Vector Graphics (SVG), \ -associated with the GNOME Project. It renders SVG files to Cairo surfaces. \ -Cairo is the 2D, antialiased drawing library that GNOME uses to draw things to \ -the screen or to generate output for printing." -HOMEPAGE = "https://gitlab.gnome.org/GNOME/librsvg" -BUGTRACKER = "https://gitlab.gnome.org/GNOME/librsvg/issues" - -RECIPE_NO_UPDATE_REASON = "Versions from 2.41.0 requires Rust compiler to build it" - -LICENSE = "LGPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ - file://rsvg.h;beginline=3;endline=24;md5=20b4113c4909bbf0d67e006778302bc6" - -SECTION = "x11/utils" -DEPENDS = "cairo gdk-pixbuf glib-2.0 libcroco libxml2 pango" -BBCLASSEXTEND = "native nativesdk" - -inherit gnomebase gtk-doc pixbufcache upstream-version-is-even gobject-introspection - -SRC_URI += "file://gtk-option.patch \ - file://0001-Auto-detect-Bsymbolic-fixes-configure-on-macOS.patch \ - file://0001-Remove-non-reproducible-SRCDIR.patch \ -" - -SRC_URI[archive.md5sum] = "4949d313b0c5d9161a5c259104af5568" -SRC_URI[archive.sha256sum] = "cff4dd3c3b78bfe99d8fcfad3b8ba1eee3289a0823c0e118d78106be6b84c92b" - -CACHED_CONFIGUREVARS = "ac_cv_path_GDK_PIXBUF_QUERYLOADERS=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders" - -PACKAGECONFIG ??= "gdkpixbuf" -# The gdk-pixbuf loader -PACKAGECONFIG[gdkpixbuf] = "--enable-pixbuf-loader,--disable-pixbuf-loader,gdk-pixbuf-native" -# GTK+ test application (rsvg-view) -PACKAGECONFIG[gtk] = "--with-gtk3,--without-gtk3,gtk+3" - -do_install_append() { - # Loadable modules don't need .a or .la on Linux - rm -f ${D}${libdir}/gdk-pixbuf-2.0/*/loaders/*.a ${D}${libdir}/gdk-pixbuf-2.0/*/loaders/*.la -} - -PACKAGES =+ "librsvg-gtk rsvg" -FILES_rsvg = "${bindir}/rsvg* \ - ${datadir}/pixmaps/svg-viewer.svg \ - ${datadir}/themes" -FILES_librsvg-gtk = "${libdir}/gdk-pixbuf-2.0/*/*/*.so \ - ${datadir}/thumbnailers/librsvg.thumbnailer" -RRECOMMENDS_librsvg-gtk = "gdk-pixbuf-bin" - -PIXBUF_PACKAGES = "librsvg-gtk" diff --git a/poky/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb b/poky/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb new file mode 100644 index 000000000..acdbc1f1b --- /dev/null +++ b/poky/meta/recipes-gnome/librsvg/librsvg_2.40.21.bb @@ -0,0 +1,49 @@ +SUMMARY = "Library for rendering SVG files" +DESCRIPTION = "A small library to render Scalable Vector Graphics (SVG), \ +associated with the GNOME Project. It renders SVG files to Cairo surfaces. \ +Cairo is the 2D, antialiased drawing library that GNOME uses to draw things to \ +the screen or to generate output for printing." 
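# Illustrative aside (not part of the patch): the libhandy change above only
# moves the fetch location to gitlab.gnome.org; the shape of a git-based
# recipe is unchanged. A minimal sketch with hypothetical values:
#
#   SRC_URI = "git://gitlab.gnome.org/GNOME/example.git;protocol=https"
#   SRCREV  = "0123456789abcdef0123456789abcdef01234567"
#   S       = "${WORKDIR}/git"
#
# Pinning SRCREV to a specific commit keeps the build reproducible even when
# the branch moves; at this release the git fetcher falls back to the master
# branch when no branch= parameter is given.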
+HOMEPAGE = "https://gitlab.gnome.org/GNOME/librsvg" +BUGTRACKER = "https://gitlab.gnome.org/GNOME/librsvg/issues" + +RECIPE_NO_UPDATE_REASON = "Versions from 2.41.0 requires Rust compiler to build it" + +LICENSE = "LGPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ + file://rsvg.h;beginline=3;endline=24;md5=20b4113c4909bbf0d67e006778302bc6" + +SECTION = "x11/utils" +DEPENDS = "cairo gdk-pixbuf glib-2.0 libcroco libxml2 pango" +BBCLASSEXTEND = "native nativesdk" + +inherit gnomebase gtk-doc pixbufcache upstream-version-is-even gobject-introspection + +SRC_URI += "file://gtk-option.patch \ + file://0001-Auto-detect-Bsymbolic-fixes-configure-on-macOS.patch \ + file://0001-Remove-non-reproducible-SRCDIR.patch \ +" + +SRC_URI[archive.sha256sum] = "f7628905f1cada84e87e2b14883ed57d8094dca3281d5bcb24ece4279e9a92ba" + +CACHED_CONFIGUREVARS = "ac_cv_path_GDK_PIXBUF_QUERYLOADERS=${STAGING_LIBDIR_NATIVE}/gdk-pixbuf-2.0/gdk-pixbuf-query-loaders" + +PACKAGECONFIG ??= "gdkpixbuf" +# The gdk-pixbuf loader +PACKAGECONFIG[gdkpixbuf] = "--enable-pixbuf-loader,--disable-pixbuf-loader,gdk-pixbuf-native" +# GTK+ test application (rsvg-view) +PACKAGECONFIG[gtk] = "--with-gtk3,--without-gtk3,gtk+3" + +do_install_append() { + # Loadable modules don't need .a or .la on Linux + rm -f ${D}${libdir}/gdk-pixbuf-2.0/*/loaders/*.a ${D}${libdir}/gdk-pixbuf-2.0/*/loaders/*.la +} + +PACKAGES =+ "librsvg-gtk rsvg" +FILES_rsvg = "${bindir}/rsvg* \ + ${datadir}/pixmaps/svg-viewer.svg \ + ${datadir}/themes" +FILES_librsvg-gtk = "${libdir}/gdk-pixbuf-2.0/*/*/*.so \ + ${datadir}/thumbnailers/librsvg.thumbnailer" +RRECOMMENDS_librsvg-gtk = "gdk-pixbuf-bin" + +PIXBUF_PACKAGES = "librsvg-gtk" diff --git a/poky/meta/recipes-graphics/cogl/cogl-1.0.inc b/poky/meta/recipes-graphics/cogl/cogl-1.0.inc index 1c5d0b79c..d581ad1c0 100644 --- a/poky/meta/recipes-graphics/cogl/cogl-1.0.inc +++ b/poky/meta/recipes-graphics/cogl/cogl-1.0.inc @@ -21,7 +21,7 @@ EDEPENDS_GL = "virtual/libgl libdrm" EDEPENDS_GLES2 = "virtual/libgles2" EDEPENDS_KMS = "libdrm virtual/egl" EDEPENDS_EGL = "virtual/egl" -EDEPENDS_X11 = "virtual/libx11 libxcomposite libxfixes libxi libxrandr" +EDEPENDS_X11 = "virtual/libx11 libxcomposite libxdamage libxfixes libxrandr" EDEPENDS_WAYLAND = "virtual/egl virtual/libgles2 wayland" # Extra RDEPENDS for PACKAGECONFIG diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.8.bb b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.8.bb deleted file mode 100644 index 3c4a5b5a2..000000000 --- a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.6.8.bb +++ /dev/null @@ -1,44 +0,0 @@ -SUMMARY = "Text shaping library" -DESCRIPTION = "HarfBuzz is an OpenType text shaping engine." 
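# Illustrative aside (not part of the patch): the PACKAGECONFIG lines in the
# librsvg recipe above follow the usual field layout; the first four fields
# are "extra configure args if enabled, args if disabled, build-time
# dependencies, runtime dependencies". For example,
#
#   PACKAGECONFIG[gtk] = "--with-gtk3,--without-gtk3,gtk+3"
#
# passes --with-gtk3 and adds gtk+3 to DEPENDS only when "gtk" is present in
# PACKAGECONFIG; otherwise --without-gtk3 is passed. Enabling it from
# local.conf would look like:
#
#   PACKAGECONFIG_append_pn-librsvg = " gtk"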
-HOMEPAGE = "http://www.freedesktop.org/wiki/Software/HarfBuzz" -BUGTRACKER = "https://bugs.freedesktop.org/enter_bug.cgi?product=HarfBuzz" -SECTION = "libs" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=8f787620b7d3866d9552fd1924c07572 \ - file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc" - -UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" -UPSTREAM_CHECK_REGEX = "harfbuzz-(?P\d+(\.\d+)+).tar" - -SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz" -SRC_URI[md5sum] = "c8d4f2aeed6e576bd42f9dc6def1b1ae" -SRC_URI[sha256sum] = "6648a571a27f186e47094121f0095e1b809e918b3037c630c7f38ffad86e3035" - -inherit autotools pkgconfig lib_package gtk-doc - -PACKAGECONFIG ??= "cairo fontconfig freetype glib icu" -PACKAGECONFIG[cairo] = "--with-cairo,--without-cairo,cairo" -PACKAGECONFIG[fontconfig] = "--with-fontconfig,--without-fontconfig,fontconfig" -PACKAGECONFIG[freetype] = "--with-freetype,--without-freetype,freetype" -PACKAGECONFIG[glib] = "--with-glib,--without-glib,glib-2.0" -PACKAGECONFIG[graphite] = "--with-graphite2,--without-graphite2,graphite2" -PACKAGECONFIG[icu] = "--with-icu,--without-icu,icu" - -PACKAGES =+ "${PN}-icu ${PN}-icu-dev ${PN}-subset" - -LEAD_SONAME = "libharfbuzz.so" - -do_install_append() { - # If no tools are installed due to PACKAGECONFIG then this directory is - #still installed, so remove it to stop packaging wanings. - rmdir --ignore-fail-on-non-empty ${D}${bindir} -} - -FILES_${PN}-icu = "${libdir}/libharfbuzz-icu.so.*" -FILES_${PN}-icu-dev = "${libdir}/libharfbuzz-icu.la \ - ${libdir}/libharfbuzz-icu.so \ - ${libdir}/pkgconfig/harfbuzz-icu.pc \ -" -FILES_${PN}-subset = "${libdir}/libharfbuzz-subset.so.*" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb new file mode 100644 index 000000000..08c8f8323 --- /dev/null +++ b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb @@ -0,0 +1,43 @@ +SUMMARY = "Text shaping library" +DESCRIPTION = "HarfBuzz is an OpenType text shaping engine." +HOMEPAGE = "http://www.freedesktop.org/wiki/Software/HarfBuzz" +BUGTRACKER = "https://bugs.freedesktop.org/enter_bug.cgi?product=HarfBuzz" +SECTION = "libs" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=8f787620b7d3866d9552fd1924c07572 \ + file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc" + +UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" +UPSTREAM_CHECK_REGEX = "harfbuzz-(?P\d+(\.\d+)+).tar" + +SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "e95ee43b6bd0d3d1307e2aacf0f9c0050e5baceb21988b367b833028114aa569" + +inherit autotools pkgconfig lib_package gtk-doc + +PACKAGECONFIG ??= "cairo fontconfig freetype glib icu" +PACKAGECONFIG[cairo] = "--with-cairo,--without-cairo,cairo" +PACKAGECONFIG[fontconfig] = "--with-fontconfig,--without-fontconfig,fontconfig" +PACKAGECONFIG[freetype] = "--with-freetype,--without-freetype,freetype" +PACKAGECONFIG[glib] = "--with-glib,--without-glib,glib-2.0" +PACKAGECONFIG[graphite] = "--with-graphite2,--without-graphite2,graphite2" +PACKAGECONFIG[icu] = "--with-icu,--without-icu,icu" + +PACKAGES =+ "${PN}-icu ${PN}-icu-dev ${PN}-subset" + +LEAD_SONAME = "libharfbuzz.so" + +do_install_append() { + # If no tools are installed due to PACKAGECONFIG then this directory is + #still installed, so remove it to stop packaging wanings. 
+ rmdir --ignore-fail-on-non-empty ${D}${bindir} +} + +FILES_${PN}-icu = "${libdir}/libharfbuzz-icu.so.*" +FILES_${PN}-icu-dev = "${libdir}/libharfbuzz-icu.la \ + ${libdir}/libharfbuzz-icu.so \ + ${libdir}/pkgconfig/harfbuzz-icu.pc \ +" +FILES_${PN}-subset = "${libdir}/libharfbuzz-subset.so.*" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch b/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch index 91e59d14e..ee171ad1c 100644 --- a/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch +++ b/poky/meta/recipes-graphics/mesa/files/0002-meson.build-make-TLS-ELF-optional.patch @@ -1,4 +1,4 @@ -From dd1d15c75f6ff8ee96cf1e7b74e582bff3183ef6 Mon Sep 17 00:00:00 2001 +From 65857eaee12a21a631750ffcd9e64e0afbbc3af0 Mon Sep 17 00:00:00 2001 From: Alistair Francis Date: Thu, 14 Nov 2019 13:08:31 -0800 Subject: [PATCH] meson.build: make TLS ELF optional @@ -15,15 +15,15 @@ Signed-off-by: Alistair Francis 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/meson.build b/meson.build -index 261b588..311436e 100644 +index c51dde9..c16f78f 100644 --- a/meson.build +++ b/meson.build @@ -392,7 +392,7 @@ if with_egl and not (with_platform_drm or with_platform_surfaceless or with_plat endif # Android uses emutls for versions <= P/28. For USE_ELF_TLS we need ELF TLS. --if host_machine.system() != 'windows' and (not with_platform_android or get_option('platform-sdk-version') >= 29) -+if (not with_platform_android or get_option('platform-sdk-version') >= 29) and get_option('elf-tls') +-if not ['windows', 'freebsd'].contains(host_machine.system()) and (not with_platform_android or get_option('platform-sdk-version') >= 29) ++if not ['windows', 'freebsd'].contains(host_machine.system()) and (not with_platform_android or get_option('platform-sdk-version') >= 29) and get_option('elf-tls') pre_args += '-DUSE_ELF_TLS' endif diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb deleted file mode 100644 index e50782be1..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.2.bb +++ /dev/null @@ -1,15 +0,0 @@ -require mesa.inc - -SUMMARY += " (OpenGL only, no EGL/GLES)" - -PROVIDES = "virtual/libgl virtual/mesa" - -S = "${WORKDIR}/mesa-${PV}" - -# At least one DRI rendering engine is required to build mesa. -# When no X11 is available, use osmesa for the rendering engine. -PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" -PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" - -# When NOT using X11, we need to make sure we have swrast available. -DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb new file mode 100644 index 000000000..e50782be1 --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb @@ -0,0 +1,15 @@ +require mesa.inc + +SUMMARY += " (OpenGL only, no EGL/GLES)" + +PROVIDES = "virtual/libgl virtual/mesa" + +S = "${WORKDIR}/mesa-${PV}" + +# At least one DRI rendering engine is required to build mesa. +# When no X11 is available, use osmesa for the rendering engine. 
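# Illustrative aside (not part of the patch): the assignment just below, and
# many other lines in this update, rely on bb.utils.contains(). Roughly, as a
# simplified Python sketch of the real helper in bitbake/lib/bb/utils.py:
#
#   def contains(variable, checkvalues, truevalue, falsevalue, d):
#       val = (d.getVar(variable) or "").split()
#       # truevalue only if every space-separated token is present
#       if set(checkvalues.split()).issubset(val):
#           return truevalue
#       return falsevalue
#
# so ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}
# expands to "x11" on X11 distros and to "osmesa" otherwise, which is exactly
# the fallback described in the comment above.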
+PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" +PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" + +# When NOT using X11, we need to make sure we have swrast available. +DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa.inc b/poky/meta/recipes-graphics/mesa/mesa.inc index 4fe5f025e..8d8e5288a 100644 --- a/poky/meta/recipes-graphics/mesa/mesa.inc +++ b/poky/meta/recipes-graphics/mesa/mesa.inc @@ -23,7 +23,7 @@ SRC_URI = "https://mesa.freedesktop.org/archive/mesa-${PV}.tar.xz \ file://0001-meson-misdetects-64bit-atomics-on-mips-clang.patch \ " -SRC_URI[sha256sum] = "283dff72814c8a80ce1ff8271e3f055895d26f4da3f4362acc49193e635780cb" +SRC_URI[sha256sum] = "6800271c2be2a0447510eb4e9b67edd9521859a4d565310617c4b359eb6799fe" UPSTREAM_CHECK_GITTAGREGEX = "mesa-(?P\d+(\.\d+)+)" @@ -121,6 +121,7 @@ PACKAGECONFIG[dri3] = "-Ddri3=true, -Ddri3=false, xorgproto libxshmfence" VULKAN_DRIVERS = "" VULKAN_DRIVERS_append_x86_class-target = ",intel" VULKAN_DRIVERS_append_x86-64_class-target = ",intel" +VULKAN_DRIVERS_append ="${@bb.utils.contains('PACKAGECONFIG', 'freedreno', ',freedreno', '', d)}" PACKAGECONFIG[vulkan] = "-Dvulkan-drivers=${VULKAN_DRIVERS}, -Dvulkan-drivers=''," PACKAGECONFIG[opengl] = "-Dopengl=true, -Dopengl=false" diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb b/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb deleted file mode 100644 index 96e8aa38d..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa_20.1.2.bb +++ /dev/null @@ -1,2 +0,0 @@ -require ${BPN}.inc - diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb b/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb new file mode 100644 index 000000000..96e8aa38d --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb @@ -0,0 +1,2 @@ +require ${BPN}.inc + diff --git a/poky/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb b/poky/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb index 70b32cf8f..3e1ba196b 100644 --- a/poky/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb +++ b/poky/meta/recipes-graphics/ttf-fonts/ttf-bitstream-vera_1.10.bb @@ -9,7 +9,7 @@ LICENSE = "BitstreamVera" LIC_FILES_CHKSUM = "file://COPYRIGHT.TXT;md5=27d7484b1e18d0ee4ce538644a3f04be" PR = "r7" -inherit fontcache +inherit allarch fontcache FONT_PACKAGES = "${PN}" diff --git a/poky/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb b/poky/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb index 29b12628d..52821195d 100644 --- a/poky/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb +++ b/poky/meta/recipes-graphics/virglrenderer/virglrenderer_0.8.2.bb @@ -8,7 +8,7 @@ HOMEPAGE = "https://virgil3d.github.io/" LICENSE = "MIT" LIC_FILES_CHKSUM = "file://COPYING;md5=c81c08eeefd9418fca8f88309a76db10" -DEPENDS = "libdrm mesa libepoxy" +DEPENDS = "libdrm virtual/libgl libepoxy" SRCREV = "7d204f3927be65fb3365dce01dbcd04d447a4985" SRC_URI = "git://anongit.freedesktop.org/virglrenderer \ file://0001-gallium-Expand-libc-check-to-be-platform-OS-check.patch \ diff --git a/poky/meta/recipes-graphics/wayland/libinput_1.15.6.bb b/poky/meta/recipes-graphics/wayland/libinput_1.15.6.bb deleted file mode 100644 index f81cf7f03..000000000 --- a/poky/meta/recipes-graphics/wayland/libinput_1.15.6.bb +++ /dev/null @@ -1,50 +0,0 @@ -SUMMARY = "Library to handle input devices in Wayland compositors" -DESCRIPTION = 
"libinput is a library to handle input devices in Wayland \ -compositors and to provide a generic X.Org input driver. It provides \ -device detection, device handling, input device event processing and \ -abstraction so minimize the amount of custom input code compositors need to \ -provide the common set of functionality that users expect." -HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libinput/" -SECTION = "libs" - -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=1f2ea9ebff3a2c6d458faf58492efb63" - -DEPENDS = "libevdev udev mtdev libcheck" - -SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \ - file://run-ptest \ - file://determinism.patch \ - " -SRC_URI[md5sum] = "b2388a1d6f0dcc944b49bc7239a53be8" -SRC_URI[sha256sum] = "aeedea216a6317ddc6e27c3d54f26b987078780db6a8320cc09e19c25b307f1c" - -UPSTREAM_CHECK_REGEX = "libinput-(?P\d+\.\d+\.(?!9\d+)\d+)" - -inherit meson pkgconfig lib_package ptest - -# Patch out build directory, otherwise it leaks into ptest binary -do_configure_append() { - sed -i -e "s,${WORKDIR},,g" config.h - if [ -e "litest-config.h" ]; then - sed -i -e "s,${WORKDIR},,g" litest-config.h - fi -} - -PACKAGECONFIG ??= "" -PACKAGECONFIG[libwacom] = "-Dlibwacom=true,-Dlibwacom=false,libwacom" -PACKAGECONFIG[gui] = "-Ddebug-gui=true,-Ddebug-gui=false,cairo gtk+3" - -UDEVDIR = "`pkg-config --variable=udevdir udev`" - -EXTRA_OEMESON += "-Dudev-dir=${UDEVDIR} \ - -Ddocumentation=false \ - ${@bb.utils.contains('PTEST_ENABLED', '1', '-Dtests=true -Dinstall-tests=true', '-Dtests=false -Dinstall-tests=false', d)} \ - -Dzshcompletiondir=no" - -# package name changed in 1.8.1 upgrade: make sure package upgrades work -RPROVIDES_${PN} = "libinput" -RREPLACES_${PN} = "libinput" -RCONFLICTS_${PN} = "libinput" - -FILES_${PN}-ptest += "${libexecdir}/libinput/libinput-test-suite" diff --git a/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb b/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb new file mode 100644 index 000000000..baf5c1d16 --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb @@ -0,0 +1,50 @@ +SUMMARY = "Library to handle input devices in Wayland compositors" +DESCRIPTION = "libinput is a library to handle input devices in Wayland \ +compositors and to provide a generic X.Org input driver. It provides \ +device detection, device handling, input device event processing and \ +abstraction so minimize the amount of custom input code compositors need to \ +provide the common set of functionality that users expect." 
+HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libinput/" +SECTION = "libs" + +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=1f2ea9ebff3a2c6d458faf58492efb63" + +DEPENDS = "libevdev udev mtdev libcheck" + +SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \ + file://run-ptest \ + file://determinism.patch \ + " +SRC_URI[md5sum] = "b518dae7f603040872739216971ee97b" +SRC_URI[sha256sum] = "83f6d0c94e5e0dd87094ce73f0edb631919617d24a60ee0ab9bd9197411d76e8" + +UPSTREAM_CHECK_REGEX = "libinput-(?P\d+\.\d+\.(?!9\d+)\d+)" + +inherit meson pkgconfig lib_package ptest + +# Patch out build directory, otherwise it leaks into ptest binary +do_configure_append() { + sed -i -e "s,${WORKDIR},,g" config.h + if [ -e "litest-config.h" ]; then + sed -i -e "s,${WORKDIR},,g" litest-config.h + fi +} + +PACKAGECONFIG ??= "" +PACKAGECONFIG[libwacom] = "-Dlibwacom=true,-Dlibwacom=false,libwacom" +PACKAGECONFIG[gui] = "-Ddebug-gui=true,-Ddebug-gui=false,cairo gtk+3" + +UDEVDIR = "`pkg-config --variable=udevdir udev`" + +EXTRA_OEMESON += "-Dudev-dir=${UDEVDIR} \ + -Ddocumentation=false \ + ${@bb.utils.contains('PTEST_ENABLED', '1', '-Dtests=true -Dinstall-tests=true', '-Dtests=false -Dinstall-tests=false', d)} \ + -Dzshcompletiondir=no" + +# package name changed in 1.8.1 upgrade: make sure package upgrades work +RPROVIDES_${PN} = "libinput" +RREPLACES_${PN} = "libinput" +RCONFLICTS_${PN} = "libinput" + +FILES_${PN}-ptest += "${libexecdir}/libinput/libinput-test-suite" diff --git a/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb b/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb index f8e9e15f8..8fef86482 100644 --- a/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb +++ b/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb @@ -20,7 +20,7 @@ inherit meson pkgconfig useradd features_check # depends on virtual/egl REQUIRED_DISTRO_FEATURES = "opengl" -DEPENDS = "libxkbcommon gdk-pixbuf pixman cairo glib-2.0 jpeg" +DEPENDS = "libxkbcommon gdk-pixbuf pixman cairo glib-2.0" DEPENDS += "wayland wayland-protocols libinput virtual/egl pango wayland-native" WESTON_MAJOR_VERSION = "${@'.'.join(d.getVar('PV').split('.')[0:1])}" @@ -31,7 +31,13 @@ PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'kms fbdev ${@bb.utils.contains('DISTRO_FEATURES', 'x11 wayland', 'xwayland', '', d)} \ ${@bb.utils.filter('DISTRO_FEATURES', 'pam systemd x11', d)} \ ${@bb.utils.contains_any('DISTRO_FEATURES', 'wayland x11', '', 'headless', d)} \ - launch" + launch \ + image-jpeg \ + screenshare \ + shell-desktop \ + shell-fullscreen \ + shell-ivi" + # # Compositor choices # @@ -67,6 +73,16 @@ PACKAGECONFIG[clients] = "-Dsimple-clients=all -Ddemo-clients=true,-Dsimple-clie PACKAGECONFIG[remoting] = "-Dremoting=true,-Dremoting=false,gstreamer-1.0" # Weston with PAM support PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam" +# Weston with screen-share support +PACKAGECONFIG[screenshare] = "-Dscreenshare=true,-Dscreenshare=false" +# Traditional desktop shell +PACKAGECONFIG[shell-desktop] = "-Dshell-desktop=true,-Dshell-desktop=false" +# Fullscreen shell +PACKAGECONFIG[shell-fullscreen] = "-Dshell-fullscreen=true,-Dshell-fullscreen=false" +# In-Vehicle Infotainment (IVI) shell +PACKAGECONFIG[shell-ivi] = "-Dshell-ivi=true,-Dshell-ivi=false" +# JPEG image loading support +PACKAGECONFIG[image-jpeg] = "-Dimage-jpeg=true,-Dimage-jpeg=false, jpeg" do_install_append() { # Weston doesn't need the .la files to load modules, so wipe them diff --git 
a/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel/0001-i810-Avoid-duplicate-definition-of-I810PatternROP.patch b/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel/0001-i810-Avoid-duplicate-definition-of-I810PatternROP.patch new file mode 100644 index 000000000..765d9ec09 --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel/0001-i810-Avoid-duplicate-definition-of-I810PatternROP.patch @@ -0,0 +1,27 @@ +From c2d730cf79eb3e4bea41f5ed8a8a21092ced8b03 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Hanno=20B=C3=B6ck?= +Date: Sun, 2 Feb 2020 14:18:39 +0100 +Subject: [PATCH] i810: Avoid duplicate definition of I810PatternROP + +Upstream-Status: Backport [https://gitlab.freedesktop.org/xorg/driver/xf86-video-intel/-/commit/652d93cbbdc159c0883f1b626ea48e28bac63ae3] +Signed-off-by: Chris Wilson +Signed-off-by: Khem Raj +--- + src/legacy/i810/i810.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/legacy/i810/i810.h b/src/legacy/i810/i810.h +index 347188c9..19be049c 100644 +--- a/src/legacy/i810/i810.h ++++ b/src/legacy/i810/i810.h +@@ -322,6 +322,6 @@ extern void I810InitMC(ScreenPtr pScreen); + extern const OptionInfoRec *I810AvailableOptions(int chipid, int busid); + + extern const int I810CopyROP[16]; +-const int I810PatternROP[16]; ++extern const int I810PatternROP[16]; + + #endif /* _I810_H_ */ +-- +2.28.0 + diff --git a/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb b/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb index 7ab223623..161371b11 100644 --- a/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb +++ b/poky/meta/recipes-graphics/xorg-driver/xf86-video-intel_git.bb @@ -15,6 +15,7 @@ S = "${WORKDIR}/git" SRC_URI = "git://anongit.freedesktop.org/xorg/driver/xf86-video-intel \ file://0001-Sync-i915_pciids-upto-8717c6b7414f.patch \ + file://0001-i810-Avoid-duplicate-definition-of-I810PatternROP.patch \ " UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+)" diff --git a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Avoid-duplicate-definitions-of-IOPortBase.patch b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Avoid-duplicate-definitions-of-IOPortBase.patch new file mode 100644 index 000000000..473704067 --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Avoid-duplicate-definitions-of-IOPortBase.patch @@ -0,0 +1,45 @@ +From fc04acfd948ac99d04a5dc08c78f3b13bc0c5c41 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Mon, 17 Aug 2020 10:50:51 -0700 +Subject: [PATCH] Avoid duplicate definitions of IOPortBase + +This fixed build with gcc10/-fno-common + +Fixes +compiler.h:528: multiple definition of `IOPortBase'; + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + hw/xfree86/common/compiler.h | 2 +- + hw/xfree86/os-support/linux/lnx_video.c | 1 + + 2 files changed, 2 insertions(+), 1 deletion(-) + +diff --git a/hw/xfree86/common/compiler.h b/hw/xfree86/common/compiler.h +index 2b2008b..c7d617e 100644 +--- a/hw/xfree86/common/compiler.h ++++ b/hw/xfree86/common/compiler.h +@@ -525,7 +525,7 @@ xf86WriteMmio32Le(__volatile__ void *base, const unsigned long offset, + #define PORT_SIZE short + #endif + +-_X_EXPORT unsigned int IOPortBase; /* Memory mapped I/O port area */ ++extern _X_EXPORT unsigned int IOPortBase; /* Memory mapped I/O port area */ + + static __inline__ void + outb(unsigned PORT_SIZE port, unsigned char val) +diff --git a/hw/xfree86/os-support/linux/lnx_video.c b/hw/xfree86/os-support/linux/lnx_video.c +index 04e4509..9dc7316 100644 +--- 
a/hw/xfree86/os-support/linux/lnx_video.c ++++ b/hw/xfree86/os-support/linux/lnx_video.c +@@ -78,6 +78,7 @@ xf86OSInitVidMem(VidMemInfoPtr pVidMem) + /***************************************************************************/ + /* I/O Permissions section */ + /***************************************************************************/ ++_X_EXPORT unsigned int IOPortBase; /* Memory mapped I/O port area */ + + #if defined(__powerpc__) + volatile unsigned char *ioBase = NULL; +-- +2.28.0 + diff --git a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb index 26815feb3..8c19692de 100644 --- a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb +++ b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb @@ -5,6 +5,7 @@ SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.pat file://0001-test-xtest-Initialize-array-with-braces.patch \ file://sdksyms-no-build-path.patch \ file://0001-drmmode_display.c-add-missing-mi.h-include.patch \ + file://0001-Avoid-duplicate-definitions-of-IOPortBase.patch \ " SRC_URI[md5sum] = "a770aec600116444a953ff632f51f839" SRC_URI[sha256sum] = "d17b646bee4ba0fb7850c1cc55b18e3e8513ed5c02bdf38da7e107f84e2d0146" diff --git a/poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb b/poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb index 4f1af731d..ea33732a3 100644 --- a/poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb +++ b/poky/meta/recipes-kernel/kern-tools/kern-tools-native_git.bb @@ -4,7 +4,7 @@ LIC_FILES_CHKSUM = "file://tools/kgit;beginline=5;endline=9;md5=9c30e971d435e249 DEPENDS = "git-native" -SRCREV = "c66833e1caac25279a5052fceb13213f5e4f79f9" +SRCREV = "df4390b18a500a1a7d4695e1856971f8e36ce517" PR = "r12" PV = "0.2+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch b/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch new file mode 100644 index 000000000..a3ba0912d --- /dev/null +++ b/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch @@ -0,0 +1,68 @@ +From c5fec6d6368b4103557deb710150119dca438544 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Wed, 5 Aug 2020 10:46:39 -0700 +Subject: [PATCH] kexec: Fix build with -fno-common + +Ensure that my_debug is not doubly defined + +Upstream-Status: Pending +Signed-off-by: Khem Raj +--- + kexec/arch/ppc64/kexec-elf-ppc64.c | 2 -- + kexec/fs2dt.h | 2 +- + 2 files changed, 1 insertion(+), 3 deletions(-) + +--- a/kexec/arch/ppc64/kexec-elf-ppc64.c ++++ b/kexec/arch/ppc64/kexec-elf-ppc64.c +@@ -44,8 +44,6 @@ + uint64_t initrd_base, initrd_size; + unsigned char reuse_initrd = 0; + const char *ramdisk; +-/* Used for enabling printing message from purgatory code */ +-int my_debug = 0; + + int elf_ppc64_probe(const char *buf, off_t len) + { +--- a/kexec/fs2dt.h ++++ b/kexec/fs2dt.h +@@ -30,7 +30,7 @@ extern struct bootblock bb[1]; + + /* Used for enabling printing message from purgatory code + * Only has implemented for PPC64 */ +-int my_debug; ++extern int my_debug; + extern int dt_no_old_root; + + void reserve(unsigned long long where, unsigned long long length); +--- a/kexec/arch/arm64/kexec-arm64.h ++++ b/kexec/arch/arm64/kexec-arm64.h +@@ -50,8 +50,8 @@ int zImage_arm64_load(int argc, char **a + void zImage_arm64_usage(void); + + +-off_t initrd_base; +-off_t initrd_size; ++extern off_t initrd_base; ++extern off_t initrd_size; + + /** + * struct 
arm64_mem - Memory layout info. +@@ -65,7 +65,7 @@ struct arm64_mem { + }; + + #define arm64_mem_ngv UINT64_MAX +-struct arm64_mem arm64_mem; ++extern struct arm64_mem arm64_mem; + + uint64_t get_phys_offset(void); + uint64_t get_vp_offset(void); +--- a/kexec/arch/x86_64/kexec-bzImage64.c ++++ b/kexec/arch/x86_64/kexec-bzImage64.c +@@ -42,7 +42,6 @@ + #include + + static const int probe_debug = 0; +-int bzImage_support_efi_boot; + + int bzImage64_probe(const char *buf, off_t len) + { diff --git a/poky/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb b/poky/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb index 871b36440..1e81ecc2c 100644 --- a/poky/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb +++ b/poky/meta/recipes-kernel/kexec/kexec-tools_2.0.20.bb @@ -20,6 +20,7 @@ SRC_URI = "${KERNELORG_MIRROR}/linux/utils/kernel/kexec/kexec-tools-${PV}.tar.gz file://0005-Disable-PIE-during-link.patch \ file://0006-kexec-arm-undefine-__NR_kexec_file_load-for-arm.patch \ file://0007-kexec-un-break-the-build-on-32-bit-x86.patch \ + file://0001-kexec-Fix-build-with-fno-common.patch \ " SRC_URI[md5sum] = "46724b67f32501c5d3e778161347cad9" diff --git a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc index 20139a849..b1cb553c7 100644 --- a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc +++ b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc @@ -30,7 +30,7 @@ LICENSE = "GPLv2" # # -- RP -LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" +LIC_FILES_CHKSUM ?= "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" RECIPE_NO_UPDATE_REASON = "Recipe is updated through a separate process" @@ -46,6 +46,9 @@ python __anonymous () { d.setVar("HEADER_FETCH_VER", "2.6") } +MAJ_VER = "${@oe.utils.trim_version("${PV}", 2).split('.')[0]}" +MIN_VER = "${@oe.utils.trim_version("${PV}", 2).split('.')[1]}" + inherit kernel-arch pkgconfig multilib_header KORG_ARCHIVE_COMPRESSION ?= "xz" @@ -83,7 +86,16 @@ do_install_append_armeb () { } do_install_armmultilib () { - oe_multilib_header asm/auxvec.h asm/bitsperlong.h asm/byteorder.h asm/fcntl.h asm/hwcap.h asm/ioctls.h asm/kvm.h asm/kvm_para.h asm/mman.h asm/param.h asm/perf_regs.h asm/bpf_perf_event.h + if [ ${MAJ_VER} -gt 5 ]; then + ARM_KVM_HEADER="" + else + if [ ${MAJ_VER} -eq 5 ] && [ ${MIN_VER} -ge 8 ]; then + ARM_KVM_HEADER="" + else + ARM_KVM_HEADER="asm/kvm.h" + fi + fi + oe_multilib_header asm/auxvec.h asm/bitsperlong.h asm/byteorder.h asm/fcntl.h asm/hwcap.h asm/ioctls.h $ARM_KVM_HEADER asm/kvm_para.h asm/mman.h asm/param.h asm/perf_regs.h asm/bpf_perf_event.h oe_multilib_header asm/posix_types.h asm/ptrace.h asm/setup.h asm/sigcontext.h asm/siginfo.h asm/signal.h asm/stat.h asm/statfs.h asm/swab.h asm/types.h asm/unistd.h } diff --git a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers/0001-include-linux-stddef.h-in-swab.h-uapi-header.patch b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers/0001-include-linux-stddef.h-in-swab.h-uapi-header.patch index 9d17daa70..5b7c1b6e2 100644 --- a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers/0001-include-linux-stddef.h-in-swab.h-uapi-header.patch +++ b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers/0001-include-linux-stddef.h-in-swab.h-uapi-header.patch @@ -1,4 +1,4 @@ -From 9708dc74d9f49488d669e070982f6224a888d61a Mon Sep 17 00:00:00 2001 +From dc221138c809125dc1bbff8506c70cb7bd846368 Mon Sep 17 00:00:00 2001 From: Khem Raj 
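/*
 * Illustrative aside (not part of any patch above): the I810PatternROP,
 * IOPortBase and kexec my_debug/initrd fixes are all the same -fno-common
 * pattern. GCC 10 defaults to -fno-common, so a variable written without
 * "extern" in a header that several .c files include now produces
 * "multiple definition" link errors. The cure, sketched with hypothetical
 * names:
 *
 *     // shared.h  -- declaration only, no storage allocated
 *     extern int shared_counter;
 *
 *     // shared.c  -- exactly one translation unit owns the definition
 *     int shared_counter = 0;
 */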
Date: Wed, 12 Sep 2018 17:08:58 -0700 Subject: [PATCH] include linux/stddef.h in swab.h uapi header @@ -23,12 +23,13 @@ Cc: Philippe Ombredanne Cc: Kate Stewart Cc: Greg Kroah-Hartman Cc: Thomas Gleixner + --- include/uapi/linux/swab.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h -index 23cd84868cc3..acddbe50a20d 100644 +index 7272f85d6..2912fe463 100644 --- a/include/uapi/linux/swab.h +++ b/include/uapi/linux/swab.h @@ -3,6 +3,7 @@ @@ -37,8 +38,5 @@ index 23cd84868cc3..acddbe50a20d 100644 #include +#include #include + #include #include - --- -2.19.0 - diff --git a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.4.bb b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.4.bb deleted file mode 100644 index 8a12103ee..000000000 --- a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.4.bb +++ /dev/null @@ -1,16 +0,0 @@ -require linux-libc-headers.inc - -SRC_URI_append_libc-musl = "\ - file://0001-libc-compat.h-fix-some-issues-arising-from-in6.h.patch \ - file://0003-remove-inclusion-of-sysinfo.h-in-kernel.h.patch \ - file://0001-libc-compat.h-musl-_does_-define-IFF_LOWER_UP-DORMAN.patch \ - file://0001-include-linux-stddef.h-in-swab.h-uapi-header.patch \ - " - -SRC_URI_append = "\ - file://0001-scripts-Use-fixed-input-and-output-files-instead-of-.patch \ - file://0001-kbuild-install_headers.sh-Strip-_UAPI-from-if-define.patch \ -" - -SRC_URI[md5sum] = "ce9b2d974d27408a61c53a30d3f98fb9" -SRC_URI[sha256sum] = "bf338980b1670bca287f9994b7441c2361907635879169c64ae78364efc5f491" diff --git a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.8.bb b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.8.bb new file mode 100644 index 000000000..d76a8a36f --- /dev/null +++ b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers_5.8.bb @@ -0,0 +1,18 @@ +require linux-libc-headers.inc + +SRC_URI_append_libc-musl = "\ + file://0001-libc-compat.h-fix-some-issues-arising-from-in6.h.patch \ + file://0003-remove-inclusion-of-sysinfo.h-in-kernel.h.patch \ + file://0001-libc-compat.h-musl-_does_-define-IFF_LOWER_UP-DORMAN.patch \ + file://0001-include-linux-stddef.h-in-swab.h-uapi-header.patch \ + " + +SRC_URI_append = "\ + file://0001-scripts-Use-fixed-input-and-output-files-instead-of-.patch \ + file://0001-kbuild-install_headers.sh-Strip-_UAPI-from-if-define.patch \ +" + +LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" + +SRC_URI[md5sum] = "0e5c4c15266218ef26c50fac0016095b" +SRC_URI[sha256sum] = "e7f75186aa0642114af8f19d99559937300ca27acaf7451b36d4f9b0f85cf1f5" diff --git a/poky/meta/recipes-kernel/linux/kernel-devsrc.bb b/poky/meta/recipes-kernel/linux/kernel-devsrc.bb index 0c98535f7..a9c7be0f8 100644 --- a/poky/meta/recipes-kernel/linux/kernel-devsrc.bb +++ b/poky/meta/recipes-kernel/linux/kernel-devsrc.bb @@ -259,10 +259,12 @@ do_install() { touch -r $kerneldir/build/.config $kerneldir/build/include/config/auto.conf* 2>/dev/null || : if [ -e "$kerneldir/build/include/config/auto.conf.cmd" ]; then - sed -i 's/ifneq "$(CC)" ".*-linux-gcc.*$/ifneq "$(CC)" "gcc"/' "$kerneldir/build/include/config/auto.conf.cmd" - sed -i 's/ifneq "$(LD)" ".*-linux-ld.bfd.*$/ifneq "$(LD)" "ld"/' "$kerneldir/build/include/config/auto.conf.cmd" + sed -i 's/ifneq "$(CC)" ".*-linux-.*gcc.*$/ifneq "$(CC)" "gcc"/' "$kerneldir/build/include/config/auto.conf.cmd" + sed -i 's/ifneq "$(LD)" ".*-linux-.*ld.bfd.*$/ifneq "$(LD)" "ld"/' 
"$kerneldir/build/include/config/auto.conf.cmd" sed -i 's/ifneq "$(CC_VERSION_TEXT)".*\(gcc.*\)"/ifneq "$(CC_VERSION_TEXT)" "\1"/' "$kerneldir/build/include/config/auto.conf.cmd" sed -i 's/ifneq "$(srctree)" ".*"/ifneq "$(srctree)" "."/' "$kerneldir/build/include/config/auto.conf.cmd" + # we don't build against the defconfig, so make sure it isn't the trigger for syncconfig + sed -i 's/ifneq "$(KBUILD_DEFCONFIG)".*"\(.*\)"/ifneq "\1" "\1"/' "$kerneldir/build/include/config/auto.conf.cmd" fi # make the scripts python3 safe. We won't be running these, and if they are @@ -288,3 +290,5 @@ RDEPENDS_${PN} = "bc python3 flex bison ${TCLIBC}-utils" RDEPENDS_${PN} += "openssl-dev util-linux" # and x86 needs a bit more for 4.15+ RDEPENDS_${PN} += "${@bb.utils.contains('ARCH', 'x86', 'elfutils', '', d)}" +# 5.8+ needs gcc-plugins libmpc-dev +RDEPENDS_${PN} += "gcc-plugins libmpc-dev" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb index caa5b4ef5..cfe3277e8 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "508b4e6ada7f78b3ef5a9dbdd182d13dffe00123" -SRCREV_meta ?= "caafbdfe382bf22a4786d871af097acd49d0867a" +SRCREV_machine ?= "22664d170488313b5c2713b6d9c8df6563387728" +SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" -LINUX_VERSION ?= "5.4.51" +LINUX_VERSION ?= "5.4.58" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb new file mode 100644 index 000000000..e23e7dcfb --- /dev/null +++ b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb @@ -0,0 +1,44 @@ +KBRANCH ?= "v5.8/standard/preempt-rt/base" + +require recipes-kernel/linux/linux-yocto.inc + +# Skip processing of this recipe if it is not explicitly specified as the +# PREFERRED_PROVIDER for virtual/kernel. This avoids errors when trying +# to build multiple virtual/kernel providers, e.g. as dependency of +# core-image-rt-sdk, core-image-rt. 
+python () { + if d.getVar("KERNEL_PACKAGE_NAME") == "kernel" and d.getVar("PREFERRED_PROVIDER_virtual/kernel") != "linux-yocto-rt": + raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") +} + +SRCREV_machine ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" + +SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ + git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.8;destsuffix=${KMETA}" + +LINUX_VERSION ?= "5.8.1" + +LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" + +DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" +DEPENDS += "openssl-native util-linux-native" + +PV = "${LINUX_VERSION}+git${SRCPV}" + +KMETA = "kernel-meta" +KCONF_BSP_AUDIT_LEVEL = "2" + +LINUX_KERNEL_TYPE = "preempt-rt" + +COMPATIBLE_MACHINE = "(qemux86|qemux86-64|qemuarm|qemuarmv5|qemuarm64|qemuppc|qemumips)" + +KERNEL_DEVICETREE_qemuarmv5 = "versatile-pb.dtb" + +# Functionality flags +KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc features/taskstats/taskstats.scc" +KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}" +KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc features/drm-bochs/drm-bochs.scc" +KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc" +KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc" +KERNEL_FEATURES_append = "${@bb.utils.contains("DISTRO_FEATURES", "ptest", " features/scsi/scsi-debug.scc", "" ,d)}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb index 86e133f9f..b90b1259a 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb @@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "5.4.51" +LINUX_VERSION ?= "5.4.58" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" @@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine_qemuarm ?= "d4c9ad88abadd22f7b2785e8a101523fe9a74dc0" -SRCREV_machine ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_meta ?= "caafbdfe382bf22a4786d871af097acd49d0867a" +SRCREV_machine_qemuarm ?= "d192ae0b9995a7be2a33b12005a95348ec6aae94" +SRCREV_machine ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb new file mode 100644 index 000000000..36a8ae457 --- /dev/null +++ b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb @@ -0,0 +1,32 @@ +KBRANCH ?= "v5.8/standard/tiny/base" +KBRANCH_qemuarm ?= "v5.8/standard/tiny/arm-versatile-926ejs" + +LINUX_KERNEL_TYPE = "tiny" +KCONFIG_MODE = "--allnoconfig" + +require recipes-kernel/linux/linux-yocto.inc + +LINUX_VERSION ?= "5.8.1" +LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" + +DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" +DEPENDS += "openssl-native util-linux-native" + +KMETA = "kernel-meta" +KCONF_BSP_AUDIT_LEVEL = "2" + +SRCREV_machine_qemuarm ?= "566e869df9400258b6f162bf34933f5b6dcd0115" +SRCREV_machine ?= 
"d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" + +PV = "${LINUX_VERSION}+git${SRCPV}" + +SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ + git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.8;destsuffix=${KMETA}" + +COMPATIBLE_MACHINE = "qemux86|qemux86-64|qemuarm|qemuarmv5" + +# Functionality flags +KERNEL_FEATURES = "" + +KERNEL_DEVICETREE_qemuarmv5 = "versatile-pb.dtb" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb index e79793b5f..f85e37d2c 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb @@ -12,16 +12,16 @@ KBRANCH_qemux86 ?= "v5.4/standard/base" KBRANCH_qemux86-64 ?= "v5.4/standard/base" KBRANCH_qemumips64 ?= "v5.4/standard/mti-malta64" -SRCREV_machine_qemuarm ?= "601e67d37274e4a0890bcdbe6660c2dbd08d3b97" -SRCREV_machine_qemuarm64 ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_machine_qemumips ?= "c8543a84037b88da45d0d825216187b42d0c509a" -SRCREV_machine_qemuppc ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_machine_qemuriscv64 ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_machine_qemux86 ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_machine_qemux86-64 ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_machine_qemumips64 ?= "c741fec6daabb449d08c9f96052be1477fe3c968" -SRCREV_machine ?= "fed60f1c8e56095647fa8497270ecacea4c45dbc" -SRCREV_meta ?= "caafbdfe382bf22a4786d871af097acd49d0867a" +SRCREV_machine_qemuarm ?= "7bbd138602fda3d69d74674460e73bffdec73cd2" +SRCREV_machine_qemuarm64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_qemumips ?= "e43ed1586cd85a007b0fae3c63d6980d4f5cb336" +SRCREV_machine_qemuppc ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_qemuriscv64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_qemux86 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_qemux86-64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_qemumips64 ?= "d1ff96887c64f70de00add62eb91d4c36f1b181a" +SRCREV_machine ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" # remap qemuarm to qemuarma15 for the 5.4 kernel # KMACHINE_qemuarm ?= "qemuarma15" @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" -LINUX_VERSION ?= "5.4.51" +LINUX_VERSION ?= "5.4.58" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" DEPENDS += "openssl-native util-linux-native" @@ -38,7 +38,7 @@ DEPENDS += "openssl-native util-linux-native" PV = "${LINUX_VERSION}+git${SRCPV}" KMETA = "kernel-meta" -KCONF_BSP_AUDIT_LEVEL = "2" +KCONF_BSP_AUDIT_LEVEL = "1" KERNEL_DEVICETREE_qemuarmv5 = "versatile-pb.dtb" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb new file mode 100644 index 000000000..aad689590 --- /dev/null +++ b/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb @@ -0,0 +1,55 @@ +KBRANCH ?= "v5.8/standard/base" + +require recipes-kernel/linux/linux-yocto.inc + +# board specific branches +KBRANCH_qemuarm ?= "v5.8/standard/arm-versatile-926ejs" +KBRANCH_qemuarm64 ?= "v5.8/standard/qemuarm64" +KBRANCH_qemumips ?= 
"v5.8/standard/mti-malta32" +KBRANCH_qemuppc ?= "v5.8/standard/qemuppc" +KBRANCH_qemuriscv64 ?= "v5.8/standard/base" +KBRANCH_qemux86 ?= "v5.8/standard/base" +KBRANCH_qemux86-64 ?= "v5.8/standard/base" +KBRANCH_qemumips64 ?= "v5.8/standard/mti-malta64" + +SRCREV_machine_qemuarm ?= "097417e785af04be0cbe757bc6e24456a3f701fd" +SRCREV_machine_qemuarm64 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_machine_qemumips ?= "1fc5490bef8322680d73f6ab2c7b666eccc3bce1" +SRCREV_machine_qemuppc ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_machine_qemuriscv64 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_machine_qemux86 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_machine_qemux86-64 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_machine_qemumips64 ?= "e61fc06792254eed92c6908a9b35790ed54b0ace" +SRCREV_machine ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" +SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" + +# remap qemuarm to qemuarma15 for the 5.8 kernel +# KMACHINE_qemuarm ?= "qemuarma15" + +SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRANCH}; \ + git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.8;destsuffix=${KMETA}" + +LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" +LINUX_VERSION ?= "5.8.1" + +DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" +DEPENDS += "openssl-native util-linux-native" +DEPENDS += "gmp-native" + +PV = "${LINUX_VERSION}+git${SRCPV}" + +KMETA = "kernel-meta" +KCONF_BSP_AUDIT_LEVEL = "1" + +KERNEL_DEVICETREE_qemuarmv5 = "versatile-pb.dtb" + +COMPATIBLE_MACHINE = "qemuarm|qemuarmv5|qemuarm64|qemux86|qemuppc|qemumips|qemumips64|qemux86-64|qemuriscv64" + +# Functionality flags +KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc" +KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}" +KERNEL_FEATURES_append_qemuall=" cfg/virtio.scc features/drm-bochs/drm-bochs.scc" +KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc" +KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc" +KERNEL_FEATURES_append = " ${@bb.utils.contains("TUNE_FEATURES", "mx32", " cfg/x32.scc", "" ,d)}" +KERNEL_FEATURES_append = " ${@bb.utils.contains("DISTRO_FEATURES", "ptest", " features/scsi/scsi-debug.scc", "" ,d)}" diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb deleted file mode 100644 index c0df0cab3..000000000 --- a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.1.bb +++ /dev/null @@ -1,42 +0,0 @@ -SECTION = "devel" -SUMMARY = "Linux Trace Toolkit KERNEL MODULE" -DESCRIPTION = "The lttng-modules 2.0 package contains the kernel tracer modules" -LICENSE = "LGPLv2.1 & GPLv2 & MIT" -LIC_FILES_CHKSUM = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" - -inherit module - -COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux' - -SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \ - file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ - file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ - " - -SRC_URI[sha256sum] = "639c03bf8f0e920567f45c96cc7ef749a8fc29ff7fb3fb97263d67f66fb71aef" - -export INSTALL_MOD_DIR="kernel/lttng-modules" - -EXTRA_OEMAKE += "KERNELDIR='${STAGING_KERNEL_DIR}'" - -do_install_append() { - # Delete empty directories to avoid QA failures if no modules were built - find ${D}/${nonarch_base_libdir} -depth -type d -empty -exec rmdir {} \; -} - -python do_package_prepend() { - 
if not os.path.exists(os.path.join(d.getVar('D'), d.getVar('nonarch_base_libdir')[1:], 'modules')): - bb.warn("%s: no modules were created; this may be due to CONFIG_TRACEPOINTS not being enabled in your kernel." % d.getVar('PN')) -} - -BBCLASSEXTEND = "devupstream:target" -LIC_FILES_CHKSUM_class-devupstream = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" -DEFAULT_PREFERENCE_class-devupstream = "-1" -SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.12 \ - file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ - file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ - " -SRCREV_class-devupstream = "11441f8f17f7825f529e2f6c54d3605771709260" -PV_class-devupstream = "2.12.1+git${SRCPV}" -S_class-devupstream = "${WORKDIR}/git" -SRCREV_FORMAT ?= "lttng_git" diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb new file mode 100644 index 000000000..49b7a116b --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb @@ -0,0 +1,42 @@ +SECTION = "devel" +SUMMARY = "Linux Trace Toolkit KERNEL MODULE" +DESCRIPTION = "The lttng-modules 2.0 package contains the kernel tracer modules" +LICENSE = "LGPLv2.1 & GPLv2 & MIT" +LIC_FILES_CHKSUM = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" + +inherit module + +COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux' + +SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \ + file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ + file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ + " + +SRC_URI[sha256sum] = "df50bc3bd58679705714f17721acf619a8b0cedc694f8a97052aa5099626feca" + +export INSTALL_MOD_DIR="kernel/lttng-modules" + +EXTRA_OEMAKE += "KERNELDIR='${STAGING_KERNEL_DIR}'" + +do_install_append() { + # Delete empty directories to avoid QA failures if no modules were built + find ${D}/${nonarch_base_libdir} -depth -type d -empty -exec rmdir {} \; +} + +python do_package_prepend() { + if not os.path.exists(os.path.join(d.getVar('D'), d.getVar('nonarch_base_libdir')[1:], 'modules')): + bb.warn("%s: no modules were created; this may be due to CONFIG_TRACEPOINTS not being enabled in your kernel." 
% d.getVar('PN')) +} + +BBCLASSEXTEND = "devupstream:target" +LIC_FILES_CHKSUM_class-devupstream = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" +DEFAULT_PREFERENCE_class-devupstream = "-1" +SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.12 \ + file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ + file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ + " +SRCREV_class-devupstream = "11441f8f17f7825f529e2f6c54d3605771709260" +PV_class-devupstream = "2.12.1+git${SRCPV}" +S_class-devupstream = "${WORKDIR}/git" +SRCREV_FORMAT ?= "lttng_git" diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools/0001-tests-gen-ust-events-ns-tp.h-Fix-build-with-musl-lib.patch b/poky/meta/recipes-kernel/lttng/lttng-tools/0001-tests-gen-ust-events-ns-tp.h-Fix-build-with-musl-lib.patch deleted file mode 100644 index a150d648a..000000000 --- a/poky/meta/recipes-kernel/lttng/lttng-tools/0001-tests-gen-ust-events-ns-tp.h-Fix-build-with-musl-lib.patch +++ /dev/null @@ -1,43 +0,0 @@ -From e5d94cf4882cc6516af52b794c6acb8e4d6469a3 Mon Sep 17 00:00:00 2001 -From: Ovidiu Panait -Date: Mon, 18 May 2020 16:39:26 +0300 -Subject: [PATCH] tests: gen-ust-events-ns/tp.h: Fix build with musl libc - -Fix the following build error with musl libc: -In file included from ../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/tp.h:14, - from ../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/tp.c:10: -../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/tp.h:17:10: error: unknown type name 'ino_t'; did you mean 'int8_t'? - 17 | TP_ARGS(ino_t, ns_ino), - | ^~~~~ -../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/tp.h:17:10: error: unknown type name 'ino_t'; did you mean 'int8_t'? - 17 | TP_ARGS(ino_t, ns_ino), - | ^~~~~ -../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/./tp.h:17:2: error: unknown type name 'ino_t'; did you mean 'int8_t'? - 17 | TP_ARGS(ino_t, ns_ino), - | ^~~~~~~ -../../../../../lttng-tools-2.12.0/tests/utils/testapp/gen-ust-events-ns/./tp.h:17:2: error: unknown type name 'ino_t'; did you mean 'int8_t'? - 17 | TP_ARGS(ino_t, ns_ino), - | ^~~~~~~ - -Upstream-Status: Submitted [https://github.com/lttng/lttng-tools/pull/161] - -Signed-off-by: Ovidiu Panait ---- - tests/utils/testapp/gen-ust-events-ns/tp.h | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/tests/utils/testapp/gen-ust-events-ns/tp.h b/tests/utils/testapp/gen-ust-events-ns/tp.h -index 4dbfed5..e0ddb29 100644 ---- a/tests/utils/testapp/gen-ust-events-ns/tp.h -+++ b/tests/utils/testapp/gen-ust-events-ns/tp.h -@@ -11,6 +11,7 @@ - #if !defined(_TRACEPOINT_TP_H) || defined(TRACEPOINT_HEADER_MULTI_READ) - #define _TRACEPOINT_TP_H - -+#include - #include - - TRACEPOINT_EVENT(tp, tptest, --- -2.17.1 - diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.1.bb b/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.1.bb deleted file mode 100644 index 094e33db7..000000000 --- a/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.1.bb +++ /dev/null @@ -1,165 +0,0 @@ -SECTION = "devel" -SUMMARY = "Linux Trace Toolkit Control" -DESCRIPTION = "The Linux trace toolkit is a suite of tools designed \ -to extract program execution details from the Linux operating system \ -and interpret them." 
- -LICENSE = "GPLv2 & LGPLv2.1" -LIC_FILES_CHKSUM = "file://LICENSE;md5=40ef17463fbd6f377db3c47b1cbaded8 \ - file://LICENSES/GPL-2.0;md5=e68f69a54b44ba526ad7cb963e18fbce \ - file://LICENSES/LGPL-2.1;md5=9920968d0f2ff585ce61fae30344dd95" - -DEPENDS = "liburcu popt libxml2 util-linux" -RDEPENDS_${PN} = "libgcc" -RDEPENDS_${PN}-ptest += "make perl bash gawk babeltrace procps perl-module-overloading coreutils util-linux kmod lttng-modules sed python3-core" -RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" -RDEPENDS_${PN}-ptest_append_libc-musl = " musl-utils" -# babelstats.pl wants getopt-long -RDEPENDS_${PN}-ptest += "perl-module-getopt-long" - -PYTHON_OPTION = "am_cv_python_pyexecdir='${PYTHON_SITEPACKAGES_DIR}' \ - am_cv_python_pythondir='${PYTHON_SITEPACKAGES_DIR}' \ - PYTHON_INCLUDE='-I${STAGING_INCDIR}/python${PYTHON_BASEVERSION}${PYTHON_ABI}' \ -" -PACKAGECONFIG ??= "lttng-ust" -PACKAGECONFIG[python] = "--enable-python-bindings ${PYTHON_OPTION},,python3 swig-native" -PACKAGECONFIG[lttng-ust] = "--with-lttng-ust, --without-lttng-ust, lttng-ust" -PACKAGECONFIG[kmod] = "--with-kmod, --without-kmod, kmod" -PACKAGECONFIG[manpages] = "--enable-man-pages, --disable-man-pages, asciidoc-native xmlto-native libxslt-native" -PACKAGECONFIG_remove_arc = "lttng-ust" - -SRC_URI = "https://lttng.org/files/lttng-tools/lttng-tools-${PV}.tar.bz2 \ - file://0001-tests-do-not-strip-a-helper-library.patch \ - file://run-ptest \ - file://lttng-sessiond.service \ - file://0001-tests-regression-disable-the-tools-live-tests.patch \ - file://0001-tests-gen-ust-events-ns-tp.h-Fix-build-with-musl-lib.patch \ - " - -SRC_URI[sha256sum] = "0de7afc1f40a5acbede933cdfd6cf47b32ff84d02e170a1321f7fc86141585b8" - -inherit autotools ptest pkgconfig useradd python3-dir manpages systemd - -SYSTEMD_SERVICE_${PN} = "lttng-sessiond.service" -SYSTEMD_AUTO_ENABLE = "disable" - -USERADD_PACKAGES = "${PN}" -GROUPADD_PARAM_${PN} = "tracing" - -FILES_${PN} += "${libdir}/lttng/libexec/* ${datadir}/xml/lttng \ - ${PYTHON_SITEPACKAGES_DIR}/*" -FILES_${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a" -FILES_${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la" - -# Since files are installed into ${libdir}/lttng/libexec we match -# the libexec insane test so skip it. -# Python module needs to keep _lttng.so -INSANE_SKIP_${PN} = "libexec dev-so" -INSANE_SKIP_${PN}-dbg = "libexec" - -PRIVATE_LIBS_${PN}-ptest = "libfoo.so" - -do_install_append () { - # install systemd unit file - install -d ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/lttng-sessiond.service ${D}${systemd_unitdir}/system -} - -do_install_ptest () { - for f in Makefile tests/Makefile tests/utils/utils.sh tests/regression/tools/save-load/load-42*.lttng tests/regression/tools/save-load/configuration/load-42*.lttng tests/regression/tools/health/test_health.sh tests/regression/tools/metadata/utils.sh tests/regression/tools/rotation/rotate_utils.sh; do - install -D "${B}/$f" "${D}${PTEST_PATH}/$f" - done - - for f in config/tap-driver.sh config/test-driver src/common/config/session.xsd src/common/mi-lttng-4.0.xsd; do - install -D "${S}/$f" "${D}${PTEST_PATH}/$f" - done - - # Prevent 'make check' from recursing into non-test subdirectories. - sed -i -e 's!^SUBDIRS = .*!SUBDIRS = tests!' "${D}${PTEST_PATH}/Makefile" - - # We don't need these - sed -i -e '/dist_noinst_SCRIPTS = /,/^$/d' "${D}${PTEST_PATH}/tests/Makefile" - - # We shouldn't need to build anything in tests/utils - sed -i -e 's!am__append_1 = . utils!am__append_1 = . !' 
\ - "${D}${PTEST_PATH}/tests/Makefile" - - # Copy the tests directory tree and the executables and - # Makefiles found within. - for d in $(find "${B}/tests" -type d -not -name .libs -printf '%P ') ; do - install -d "${D}${PTEST_PATH}/tests/$d" - find "${B}/tests/$d" -maxdepth 1 -executable -type f \ - -exec install -t "${D}${PTEST_PATH}/tests/$d" {} + - # Take all .py scripts for tests using the python bindings. - find "${B}/tests/$d" -maxdepth 1 -type f -name "*.py" \ - -exec install -t "${D}${PTEST_PATH}/tests/$d" {} + - test -r "${B}/tests/$d/Makefile" && \ - install -t "${D}${PTEST_PATH}/tests/$d" "${B}/tests/$d/Makefile" - done - - for d in $(find "${B}/tests" -type d -name .libs -printf '%P ') ; do - for f in $(find "${B}/tests/$d" -maxdepth 1 -executable -type f -printf '%P ') ; do - cp ${B}/tests/$d/$f ${D}${PTEST_PATH}/tests/`dirname $d`/$f - case $f in - *.so) - install -d ${D}${PTEST_PATH}/tests/$d/ - ln -s ../$f ${D}${PTEST_PATH}/tests/$d/$f - # Remove any rpath/runpath to pass QA check. - chrpath --delete ${D}${PTEST_PATH}/tests/$d/$f - ;; - esac - done - done - - chrpath --delete ${D}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-binary/userspace-probe-elf-binary - - # - # Use the versioned libs of liblttng-ust-dl. - # - ustdl="${D}${PTEST_PATH}/tests/regression/ust/ust-dl/test_ust-dl.py" - if [ -e $ustdl ]; then - sed -i -e 's!:liblttng-ust-dl.so!:liblttng-ust-dl.so.0!' $ustdl - fi - - install ${B}/tests/unit/ini_config/sample.ini ${D}${PTEST_PATH}/tests/unit/ini_config/ - - # We shouldn't need to build anything in tests/regression/tools - sed -i -e 's!^SUBDIRS = tools !SUBDIRS = !' \ - "${D}${PTEST_PATH}/tests/regression/Makefile" - - # Prevent attempts to update Makefiles during test runs, and - # silence "Making check in $SUBDIR" messages. - find "${D}${PTEST_PATH}" -name Makefile -type f -exec \ - sed -i -e '/Makefile:/,/^$/d' -e '/%: %.in/,/^$/d' \ - -e '/echo "Making $$target in $$subdir"; \\/d' \ - -e 's/^srcdir = \(.*\)/srcdir = ./' \ - -e 's/^builddir = \(.*\)/builddir = ./' \ - -e 's/^all-am:.*/all-am:/' \ - {} + - - find "${D}${PTEST_PATH}" -name Makefile -type f -exec \ - touch -r "${B}/Makefile" {} + - - # - # Need to stop generated binaries from rebuilding by removing their source dependencies - # - sed -e 's#\(^test.*OBJECTS.=\)#disable\1#g' \ - -e 's#\(^test.*DEPENDENCIES.=\)#disable\1#g' \ - -e 's#\(^test.*SOURCES.=\)#disable\1#g' \ - -e 's#\(^test.*LDADD.=\)#disable\1#g' \ - -i ${D}${PTEST_PATH}/tests/unit/Makefile - - # Substitute links to installed binaries. - for prog in lttng lttng-relayd lttng-sessiond lttng-consumerd lttng-crash; do - exedir="${D}${PTEST_PATH}/src/bin/${prog}" - install -d "$exedir" - case "$prog" in - lttng-consumerd) - ln -s "${libdir}/lttng/libexec/$prog" "$exedir" - ;; - *) - ln -s "${bindir}/$prog" "$exedir" - ;; - esac - done -} diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb b/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb new file mode 100644 index 000000000..e9c8e18e2 --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb @@ -0,0 +1,164 @@ +SECTION = "devel" +SUMMARY = "Linux Trace Toolkit Control" +DESCRIPTION = "The Linux trace toolkit is a suite of tools designed \ +to extract program execution details from the Linux operating system \ +and interpret them." 
+ +LICENSE = "GPLv2 & LGPLv2.1" +LIC_FILES_CHKSUM = "file://LICENSE;md5=40ef17463fbd6f377db3c47b1cbaded8 \ + file://LICENSES/GPL-2.0;md5=e68f69a54b44ba526ad7cb963e18fbce \ + file://LICENSES/LGPL-2.1;md5=9920968d0f2ff585ce61fae30344dd95" + +DEPENDS = "liburcu popt libxml2 util-linux" +RDEPENDS_${PN} = "libgcc" +RDEPENDS_${PN}-ptest += "make perl bash gawk babeltrace procps perl-module-overloading coreutils util-linux kmod lttng-modules sed python3-core" +RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" +RDEPENDS_${PN}-ptest_append_libc-musl = " musl-utils" +# babelstats.pl wants getopt-long +RDEPENDS_${PN}-ptest += "perl-module-getopt-long" + +PYTHON_OPTION = "am_cv_python_pyexecdir='${PYTHON_SITEPACKAGES_DIR}' \ + am_cv_python_pythondir='${PYTHON_SITEPACKAGES_DIR}' \ + PYTHON_INCLUDE='-I${STAGING_INCDIR}/python${PYTHON_BASEVERSION}${PYTHON_ABI}' \ +" +PACKAGECONFIG ??= "lttng-ust" +PACKAGECONFIG[python] = "--enable-python-bindings ${PYTHON_OPTION},,python3 swig-native" +PACKAGECONFIG[lttng-ust] = "--with-lttng-ust, --without-lttng-ust, lttng-ust" +PACKAGECONFIG[kmod] = "--with-kmod, --without-kmod, kmod" +PACKAGECONFIG[manpages] = "--enable-man-pages, --disable-man-pages, asciidoc-native xmlto-native libxslt-native" +PACKAGECONFIG_remove_arc = "lttng-ust" + +SRC_URI = "https://lttng.org/files/lttng-tools/lttng-tools-${PV}.tar.bz2 \ + file://0001-tests-do-not-strip-a-helper-library.patch \ + file://run-ptest \ + file://lttng-sessiond.service \ + file://0001-tests-regression-disable-the-tools-live-tests.patch \ + " + +SRC_URI[sha256sum] = "9ed9161795ff023b076f9f95afaa4f1f822ec42495c0fa04c586ab8fa74e84f1" + +inherit autotools ptest pkgconfig useradd python3-dir manpages systemd + +SYSTEMD_SERVICE_${PN} = "lttng-sessiond.service" +SYSTEMD_AUTO_ENABLE = "disable" + +USERADD_PACKAGES = "${PN}" +GROUPADD_PARAM_${PN} = "tracing" + +FILES_${PN} += "${libdir}/lttng/libexec/* ${datadir}/xml/lttng \ + ${PYTHON_SITEPACKAGES_DIR}/*" +FILES_${PN}-staticdev += "${PYTHON_SITEPACKAGES_DIR}/*.a" +FILES_${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/*.la" + +# Since files are installed into ${libdir}/lttng/libexec we match +# the libexec insane test so skip it. +# Python module needs to keep _lttng.so +INSANE_SKIP_${PN} = "libexec dev-so" +INSANE_SKIP_${PN}-dbg = "libexec" + +PRIVATE_LIBS_${PN}-ptest = "libfoo.so" + +do_install_append () { + # install systemd unit file + install -d ${D}${systemd_unitdir}/system + install -m 0644 ${WORKDIR}/lttng-sessiond.service ${D}${systemd_unitdir}/system +} + +do_install_ptest () { + for f in Makefile tests/Makefile tests/utils/utils.sh tests/regression/tools/save-load/load-42*.lttng tests/regression/tools/save-load/configuration/load-42*.lttng tests/regression/tools/health/test_health.sh tests/regression/tools/metadata/utils.sh tests/regression/tools/rotation/rotate_utils.sh; do + install -D "${B}/$f" "${D}${PTEST_PATH}/$f" + done + + for f in config/tap-driver.sh config/test-driver src/common/config/session.xsd src/common/mi-lttng-4.0.xsd; do + install -D "${S}/$f" "${D}${PTEST_PATH}/$f" + done + + # Prevent 'make check' from recursing into non-test subdirectories. + sed -i -e 's!^SUBDIRS = .*!SUBDIRS = tests!' "${D}${PTEST_PATH}/Makefile" + + # We don't need these + sed -i -e '/dist_noinst_SCRIPTS = /,/^$/d' "${D}${PTEST_PATH}/tests/Makefile" + + # We shouldn't need to build anything in tests/utils + sed -i -e 's!am__append_1 = . utils!am__append_1 = . !' 
\ + "${D}${PTEST_PATH}/tests/Makefile" + + # Copy the tests directory tree and the executables and + # Makefiles found within. + for d in $(find "${B}/tests" -type d -not -name .libs -printf '%P ') ; do + install -d "${D}${PTEST_PATH}/tests/$d" + find "${B}/tests/$d" -maxdepth 1 -executable -type f \ + -exec install -t "${D}${PTEST_PATH}/tests/$d" {} + + # Take all .py scripts for tests using the python bindings. + find "${B}/tests/$d" -maxdepth 1 -type f -name "*.py" \ + -exec install -t "${D}${PTEST_PATH}/tests/$d" {} + + test -r "${B}/tests/$d/Makefile" && \ + install -t "${D}${PTEST_PATH}/tests/$d" "${B}/tests/$d/Makefile" + done + + for d in $(find "${B}/tests" -type d -name .libs -printf '%P ') ; do + for f in $(find "${B}/tests/$d" -maxdepth 1 -executable -type f -printf '%P ') ; do + cp ${B}/tests/$d/$f ${D}${PTEST_PATH}/tests/`dirname $d`/$f + case $f in + *.so) + install -d ${D}${PTEST_PATH}/tests/$d/ + ln -s ../$f ${D}${PTEST_PATH}/tests/$d/$f + # Remove any rpath/runpath to pass QA check. + chrpath --delete ${D}${PTEST_PATH}/tests/$d/$f + ;; + esac + done + done + + chrpath --delete ${D}${PTEST_PATH}/tests/utils/testapp/userspace-probe-elf-binary/userspace-probe-elf-binary + + # + # Use the versioned libs of liblttng-ust-dl. + # + ustdl="${D}${PTEST_PATH}/tests/regression/ust/ust-dl/test_ust-dl.py" + if [ -e $ustdl ]; then + sed -i -e 's!:liblttng-ust-dl.so!:liblttng-ust-dl.so.0!' $ustdl + fi + + install ${B}/tests/unit/ini_config/sample.ini ${D}${PTEST_PATH}/tests/unit/ini_config/ + + # We shouldn't need to build anything in tests/regression/tools + sed -i -e 's!^SUBDIRS = tools !SUBDIRS = !' \ + "${D}${PTEST_PATH}/tests/regression/Makefile" + + # Prevent attempts to update Makefiles during test runs, and + # silence "Making check in $SUBDIR" messages. + find "${D}${PTEST_PATH}" -name Makefile -type f -exec \ + sed -i -e '/Makefile:/,/^$/d' -e '/%: %.in/,/^$/d' \ + -e '/echo "Making $$target in $$subdir"; \\/d' \ + -e 's/^srcdir = \(.*\)/srcdir = ./' \ + -e 's/^builddir = \(.*\)/builddir = ./' \ + -e 's/^all-am:.*/all-am:/' \ + {} + + + find "${D}${PTEST_PATH}" -name Makefile -type f -exec \ + touch -r "${B}/Makefile" {} + + + # + # Need to stop generated binaries from rebuilding by removing their source dependencies + # + sed -e 's#\(^test.*OBJECTS.=\)#disable\1#g' \ + -e 's#\(^test.*DEPENDENCIES.=\)#disable\1#g' \ + -e 's#\(^test.*SOURCES.=\)#disable\1#g' \ + -e 's#\(^test.*LDADD.=\)#disable\1#g' \ + -i ${D}${PTEST_PATH}/tests/unit/Makefile + + # Substitute links to installed binaries. 
+ for prog in lttng lttng-relayd lttng-sessiond lttng-consumerd lttng-crash; do + exedir="${D}${PTEST_PATH}/src/bin/${prog}" + install -d "$exedir" + case "$prog" in + lttng-consumerd) + ln -s "${libdir}/lttng/libexec/$prog" "$exedir" + ;; + *) + ln -s "${bindir}/$prog" "$exedir" + ;; + esac + done +} diff --git a/poky/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb b/poky/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb index c7edb20ee..1ab248fed 100644 --- a/poky/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb +++ b/poky/meta/recipes-kernel/make-mod-scripts/make-mod-scripts_1.0.bb @@ -15,8 +15,10 @@ do_compile[depends] += "virtual/kernel:do_compile_kernelmodules" RDEPENDS_${PN}-dev = "" DEPENDS += "bc-native bison-native" +DEPENDS += "gmp-native" EXTRA_OEMAKE = " HOSTCC="${BUILD_CC} ${BUILD_CFLAGS} ${BUILD_LDFLAGS}" HOSTCPP="${BUILD_CPP}"" +EXTRA_OEMAKE += " HOSTCXX="${BUILD_CXX} ${BUILD_CXXFLAGS} ${BUILD_LDFLAGS}"" # Build some host tools under work-shared. CC, LD, and AR are probably # not used, but this is the historical way of invoking "make scripts". diff --git a/poky/meta/recipes-kernel/perf/perf.bb b/poky/meta/recipes-kernel/perf/perf.bb index e5bc7dc1b..5faf22a26 100644 --- a/poky/meta/recipes-kernel/perf/perf.bb +++ b/poky/meta/recipes-kernel/perf/perf.bb @@ -70,6 +70,7 @@ SPDX_S = "${S}/tools/perf" LDFLAGS="-ldl -lutil" EXTRA_OEMAKE = '\ + V=1 \ -C ${S}/tools/perf \ O=${B} \ CROSS_COMPILE=${TARGET_PREFIX} \ @@ -201,6 +202,9 @@ do_configure_prepend () { ${S}/tools/perf/Makefile.perf sed -i -e "s,prefix='\$(DESTDIR_SQ)/usr'$,prefix='\$(DESTDIR_SQ)/usr' --install-lib='\$(DESTDIR)\$(PYTHON_SITEPACKAGES_DIR)',g" \ ${S}/tools/perf/Makefile.perf + # backport https://github.com/torvalds/linux/commit/e4ffd066ff440a57097e9140fa9e16ceef905de8 + sed -i -e 's,\($(Q)$(SHELL) .$(arch_errno_tbl).\) $(CC) $(arch_errno_hdr_dir),\1 $(firstword $(CC)) $(arch_errno_hdr_dir),g' \ + ${S}/tools/perf/Makefile.perf fi sed -i -e "s,--root='/\$(DESTDIR_SQ)',--prefix='\$(DESTDIR_SQ)/usr' --install-lib='\$(DESTDIR)\$(PYTHON_SITEPACKAGES_DIR)',g" \ ${S}/tools/perf/Makefile* diff --git a/poky/meta/recipes-kernel/systemtap/systemtap_git.inc b/poky/meta/recipes-kernel/systemtap/systemtap_git.inc index 4ec0703f2..56fa51d61 100644 --- a/poky/meta/recipes-kernel/systemtap/systemtap_git.inc +++ b/poky/meta/recipes-kernel/systemtap/systemtap_git.inc @@ -1,6 +1,6 @@ LICENSE = "GPLv2" LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" -SRCREV = "c9c23c987d819d07c6b96b54f8e03188fecd9e46" +SRCREV = "82b8e1a07a31bf37ed05d6ebc5162b054c0be9fd" PV = "4.3" SRC_URI = "git://sourceware.org/git/systemtap.git \ diff --git a/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb index fd949be8a..3575c460e 100644 --- a/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb +++ b/poky/meta/recipes-multimedia/alsa/alsa-topology-conf_1.2.3.bb @@ -10,8 +10,8 @@ SRC_URI[sha256sum] = "833f99b2cbda34e0cfef867ef1d2e6a74fe276bb7fc525a573be32077f inherit allarch do_install() { - install -d ${D}/usr/share/alsa - cp -r ${S}/topology ${D}/usr/share/alsa + install -d "${D}${datadir}/alsa" + cp -r "${S}/topology" "${D}${datadir}/alsa" } PACKAGES = "${PN}" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb b/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb index 19eeabff7..7be0df885 100644 --- a/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb +++ 
b/poky/meta/recipes-multimedia/alsa/alsa-ucm-conf_1.2.3.bb @@ -10,9 +10,9 @@ SRC_URI[sha256sum] = "1bc24da04bb27a75e323c9f0fb03e44705b6bb8a8baf255b94b41d457d inherit allarch do_install() { - install -d ${D}/usr/share/alsa - cp -r ${S}/ucm ${D}/usr/share/alsa - cp -r ${S}/ucm2 ${D}/usr/share/alsa + install -d "${D}${datadir}/alsa" + cp -r "${S}/ucm" "${D}${datadir}/alsa" + cp -r "${S}/ucm2" "${D}${datadir}/alsa" } PACKAGES = "${PN}" diff --git a/poky/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb b/poky/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb index 271c2a30a..2061c280e 100644 --- a/poky/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb +++ b/poky/meta/recipes-multimedia/libomxil/libomxil_0.9.3.bb @@ -26,6 +26,8 @@ EXTRA_OECONF += "--disable-doc --disable-Werror" PROVIDES += "virtual/libomxil" +CFLAGS += "-fcommon" + # # The .so files under ${libdir}/bellagio are not intended to be versioned and symlinked. # Make sure they get packaged in the main package. diff --git a/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.2.bb b/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.2.bb deleted file mode 100644 index aaa66e17a..000000000 --- a/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.2.bb +++ /dev/null @@ -1,51 +0,0 @@ -SUMMARY = "Audio decoder for MPEG-1 Layer 1/2/3" -DESCRIPTION = "The core of mpg123 is an MPEG-1 Layer 1/2/3 decoding library, which can be used by other programs. \ -mpg123 also comes with a command-line tool which can playback using ALSA, PulseAudio, OSS, and several other APIs, \ -and also can write the decoded audio to WAV." -HOMEPAGE = "http://mpg123.de/" -BUGTRACKER = "http://sourceforge.net/p/mpg123/bugs/" -SECTION = "multimedia" - -LICENSE = "LGPLv2.1" -LIC_FILES_CHKSUM = "file://COPYING;md5=1e86753638d3cf2512528b99079bc4f3" - -SRC_URI = "https://www.mpg123.de/download/${BP}.tar.bz2" -SRC_URI[sha256sum] = "00f7bf7ea64fcec2c9d07751d6ad8849343ee09c282ea3b0d5dd486e886e2ff3" - -UPSTREAM_CHECK_REGEX = "mpg123-(?P\d+(\.\d+)+)\.tar" - -inherit autotools pkgconfig - -# The options should be mutually exclusive for configuration script. -# If both alsa and pulseaudio are specified (as in the default distro features) -# pulseaudio takes precedence. 
-PACKAGECONFIG_ALSA = "${@bb.utils.filter('DISTRO_FEATURES', 'alsa', d)}" -PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'pulseaudio', 'pulseaudio', '${PACKAGECONFIG_ALSA}', d)}" - -PACKAGECONFIG[alsa] = "--with-default-audio=alsa,,alsa-lib" -PACKAGECONFIG[esd] = ",,esound" -PACKAGECONFIG[jack] = ",,jack" -PACKAGECONFIG[openal] = ",,openal-soft" -PACKAGECONFIG[portaudio] = ",,portaudio-v19" -PACKAGECONFIG[pulseaudio] = "--with-default-audio=pulse,,pulseaudio" -PACKAGECONFIG[sdl] = ",,libsdl2" - -# Following are possible sound output modules: -# alsa arts coreaudio dummy esd jack nas openal os2 oss portaudio pulse sdl sndio sun tinyalsa win32 win32_wasapi -AUDIOMODS += "${@bb.utils.filter('PACKAGECONFIG', 'alsa esd jack openal portaudio sdl', d)}" -AUDIOMODS += "${@bb.utils.contains('PACKAGECONFIG', 'pulseaudio', 'pulse', '', d)}" - -EXTRA_OECONF = " \ - --enable-shared \ - --with-audio='${AUDIOMODS}' \ - ${@bb.utils.contains('TUNE_FEATURES', 'neon', '--with-cpu=neon', '', d)} \ - ${@bb.utils.contains('TUNE_FEATURES', 'altivec', '--with-cpu=altivec', '', d)} \ -" -# Fails to build with thumb-1 (qemuarm) -#| {standard input}: Assembler messages: -#| {standard input}:47: Error: selected processor does not support Thumb mode `smull r5,r6,r7,r4' -#| {standard input}:48: Error: shifts in CMP/MOV instructions are only supported in unified syntax -- `mov r5,r5,lsr#24' -#... -#| make[3]: *** [equalizer.lo] Error 1 -ARM_INSTRUCTION_SET_armv4 = "arm" -ARM_INSTRUCTION_SET_armv5 = "arm" diff --git a/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.3.bb b/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.3.bb new file mode 100644 index 000000000..0746d8d47 --- /dev/null +++ b/poky/meta/recipes-multimedia/mpg123/mpg123_1.26.3.bb @@ -0,0 +1,51 @@ +SUMMARY = "Audio decoder for MPEG-1 Layer 1/2/3" +DESCRIPTION = "The core of mpg123 is an MPEG-1 Layer 1/2/3 decoding library, which can be used by other programs. \ +mpg123 also comes with a command-line tool which can playback using ALSA, PulseAudio, OSS, and several other APIs, \ +and also can write the decoded audio to WAV." +HOMEPAGE = "http://mpg123.de/" +BUGTRACKER = "http://sourceforge.net/p/mpg123/bugs/" +SECTION = "multimedia" + +LICENSE = "LGPLv2.1" +LIC_FILES_CHKSUM = "file://COPYING;md5=1e86753638d3cf2512528b99079bc4f3" + +SRC_URI = "https://www.mpg123.de/download/${BP}.tar.bz2" +SRC_URI[sha256sum] = "30c998785a898f2846deefc4d17d6e4683a5a550b7eacf6ea506e30a7a736c6e" + +UPSTREAM_CHECK_REGEX = "mpg123-(?P\d+(\.\d+)+)\.tar" + +inherit autotools pkgconfig + +# The options should be mutually exclusive for configuration script. +# If both alsa and pulseaudio are specified (as in the default distro features) +# pulseaudio takes precedence. 
+PACKAGECONFIG_ALSA = "${@bb.utils.filter('DISTRO_FEATURES', 'alsa', d)}" +PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'pulseaudio', 'pulseaudio', '${PACKAGECONFIG_ALSA}', d)}" + +PACKAGECONFIG[alsa] = "--with-default-audio=alsa,,alsa-lib" +PACKAGECONFIG[esd] = ",,esound" +PACKAGECONFIG[jack] = ",,jack" +PACKAGECONFIG[openal] = ",,openal-soft" +PACKAGECONFIG[portaudio] = ",,portaudio-v19" +PACKAGECONFIG[pulseaudio] = "--with-default-audio=pulse,,pulseaudio" +PACKAGECONFIG[sdl] = ",,libsdl2" + +# Following are possible sound output modules: +# alsa arts coreaudio dummy esd jack nas openal os2 oss portaudio pulse sdl sndio sun tinyalsa win32 win32_wasapi +AUDIOMODS += "${@bb.utils.filter('PACKAGECONFIG', 'alsa esd jack openal portaudio sdl', d)}" +AUDIOMODS += "${@bb.utils.contains('PACKAGECONFIG', 'pulseaudio', 'pulse', '', d)}" + +EXTRA_OECONF = " \ + --enable-shared \ + --with-audio='${AUDIOMODS}' \ + ${@bb.utils.contains('TUNE_FEATURES', 'neon', '--with-cpu=neon', '', d)} \ + ${@bb.utils.contains('TUNE_FEATURES', 'altivec', '--with-cpu=altivec', '', d)} \ +" +# Fails to build with thumb-1 (qemuarm) +#| {standard input}: Assembler messages: +#| {standard input}:47: Error: selected processor does not support Thumb mode `smull r5,r6,r7,r4' +#| {standard input}:48: Error: shifts in CMP/MOV instructions are only supported in unified syntax -- `mov r5,r5,lsr#24' +#... +#| make[3]: *** [equalizer.lo] Error 1 +ARM_INSTRUCTION_SET_armv4 = "arm" +ARM_INSTRUCTION_SET_armv5 = "arm" diff --git a/poky/meta/recipes-multimedia/x264/x264_git.bb b/poky/meta/recipes-multimedia/x264/x264_git.bb index 1ff5348b9..f226fec3c 100644 --- a/poky/meta/recipes-multimedia/x264/x264_git.bb +++ b/poky/meta/recipes-multimedia/x264/x264_git.bb @@ -14,9 +14,9 @@ SRC_URI = "git://github.com/mirror/x264;branch=stable \ " UPSTREAM_CHECK_COMMITS = "1" -SRCREV = "296494a4011f58f32adc54304a2654627558c59a" +SRCREV = "cde9a93319bea766a92e306d69059c76de970190" -PV = "r2991+git${SRCPV}" +PV = "r3011+git${SRCPV}" S = "${WORKDIR}/git" diff --git a/poky/meta/recipes-sato/webkit/libwpe_1.6.0.bb b/poky/meta/recipes-sato/webkit/libwpe_1.6.0.bb deleted file mode 100644 index 09c74089c..000000000 --- a/poky/meta/recipes-sato/webkit/libwpe_1.6.0.bb +++ /dev/null @@ -1,18 +0,0 @@ -SUMMARY = "General-purpose library specifically developed for the WPE-flavored port of WebKit." -HOMEPAGE = "https://github.com/WebPlatformForEmbedded/libwpe" -BUGTRACKER = "https://github.com/WebPlatformForEmbedded/libwpe/issues" - -LICENSE = "BSD" -LIC_FILES_CHKSUM = "file://COPYING;md5=371a616eb4903c6cb79e9893a5f615cc" -DEPENDS = "virtual/egl libxkbcommon" - -# Workaround build issue with RPi userland EGL libraries. -CFLAGS_append_rpi = " ${@bb.utils.contains('MACHINE_FEATURES', 'vc4graphics', '', '-D_GNU_SOURCE', d)}" - -inherit cmake features_check - -REQUIRED_DISTRO_FEATURES = "opengl" - -SRC_URI[md5sum] = "6e8a2c279dcc3617db5ec7ac4c03d628" -SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "3587c6b8a807f4bb76b268ba74ca82c6b395b90235db41ad8252224456193c90" diff --git a/poky/meta/recipes-sato/webkit/libwpe_1.7.1.bb b/poky/meta/recipes-sato/webkit/libwpe_1.7.1.bb new file mode 100644 index 000000000..e25d9404a --- /dev/null +++ b/poky/meta/recipes-sato/webkit/libwpe_1.7.1.bb @@ -0,0 +1,17 @@ +SUMMARY = "General-purpose library specifically developed for the WPE-flavored port of WebKit." 
+HOMEPAGE = "https://github.com/WebPlatformForEmbedded/libwpe" +BUGTRACKER = "https://github.com/WebPlatformForEmbedded/libwpe/issues" + +LICENSE = "BSD" +LIC_FILES_CHKSUM = "file://COPYING;md5=371a616eb4903c6cb79e9893a5f615cc" +DEPENDS = "virtual/egl libxkbcommon" + +# Workaround build issue with RPi userland EGL libraries. +CFLAGS_append_rpi = " ${@bb.utils.contains('MACHINE_FEATURES', 'vc4graphics', '', '-D_GNU_SOURCE', d)}" + +inherit cmake features_check + +REQUIRED_DISTRO_FEATURES = "opengl" + +SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "a784b7fa0c658b28071100f6f6749b0d85bbcddd82de028e07672ce13982d340" diff --git a/poky/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch b/poky/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch new file mode 100644 index 000000000..d8bb8efb8 --- /dev/null +++ b/poky/meta/recipes-sato/webkit/webkitgtk/0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch @@ -0,0 +1,66 @@ +From cb929f59b527fe890376e47613dfe1434a320bc0 Mon Sep 17 00:00:00 2001 +From: Khem Raj +Date: Tue, 11 Aug 2020 15:44:48 -0700 +Subject: [PATCH] [clang 11] fix build errors due to -WWc++11-narrowing + +https://bugs.webkit.org/show_bug.cgi?id=211193 + +Reviewed by Adrian Perez de Castro. + +Fixes the following errors, + +Source/WebCore/html/MediaElementSession.cpp:1059:9: error: type 'WebCore::RenderMedia *' cannot be narrowed to 'bool' in initializer list [-Wc++11-narrowing] +m_element.renderer(), +^~~~~~~~~~~~~~~~~~~~ + +Source/WebCore/style/StyleResolver.cpp:106:55: error: type 'const char [4]' cannot be narrowed to 'bool' in initializer list [-Wc++11-narrowing] +m_mediaQueryEvaluator = MediaQueryEvaluator { "all" }; + ^~~~~ +Source/WebCore/style/StyleResolver.cpp:106:55: note: insert an explicit cast to silence this issue +m_mediaQueryEvaluator = MediaQueryEvaluator { "all" }; + ^~~~~ + static_cast( ) + +* html/HTMLMediaElement.h: +(WebCore::HTMLMediaElement::hasRenderer const): +MediaElementSession was implicitly casting a pointer to a bool, +which is not allowed with modern Clang checks. Add a helper method +to encapsulate the now required static_cast. +* html/MediaElementSession.cpp: Use the new helper method to see +if the HTMLMediaElement has an associated renderer. +(WebCore::MediaElementSession::updateMediaUsageIfChanged): +* style/StyleResolver.cpp: This was calling MediaQueryEvaluator { +"all" }; and seemingly expecting to cast a const char[] to a bool, +or maybe String? It's confusing because of the MediaQueryEvaluator +API. If it was implicitly converting to bool then that could be +unintentional. Such casts are not allowed either now. The +MediaQueryEvaluator's default constructor says it returns true for +"all", which appears to be the original intent of this call, so I +replaced it with that. 
+(WebCore::Style::Resolver::Resolver): + +git-svn-id: http://svn.webkit.org/repository/webkit/trunk@260951 268f45cc-cd09-0410-ab3c-d52691b4dbfc + +Upstream-Status: Backport [https://github.com/WebKit/webkit/commit/c3cf651016e4cdcb4350598d4a586821071f91bf.patch] + +Signed-off-by: Khem Raj +--- + Source/WebCore/style/StyleResolver.cpp | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Source/WebCore/style/StyleResolver.cpp b/Source/WebCore/style/StyleResolver.cpp +index 8bf371a0..34580ddb 100644 +--- a/Source/WebCore/style/StyleResolver.cpp ++++ b/Source/WebCore/style/StyleResolver.cpp +@@ -107,7 +107,7 @@ Resolver::Resolver(Document& document) + if (view) + m_mediaQueryEvaluator = MediaQueryEvaluator { view->mediaType() }; + else +- m_mediaQueryEvaluator = MediaQueryEvaluator { "all" }; ++ m_mediaQueryEvaluator = MediaQueryEvaluator { }; + + if (root) { + m_rootDefaultStyle = styleForElement(*root, m_document.renderStyle(), nullptr, RuleMatchingBehavior::MatchOnlyUserAgentRules).renderStyle; +-- +2.28.0 + diff --git a/poky/meta/recipes-sato/webkit/webkitgtk_2.28.3.bb b/poky/meta/recipes-sato/webkit/webkitgtk_2.28.3.bb deleted file mode 100644 index 015809226..000000000 --- a/poky/meta/recipes-sato/webkit/webkitgtk_2.28.3.bb +++ /dev/null @@ -1,131 +0,0 @@ -SUMMARY = "WebKit web rendering engine for the GTK+ platform" -HOMEPAGE = "https://www.webkitgtk.org/" -BUGTRACKER = "https://bugs.webkit.org/" - -LICENSE = "BSD & LGPLv2+" -LIC_FILES_CHKSUM = "file://Source/JavaScriptCore/COPYING.LIB;md5=d0c6d6397a5d84286dda758da57bd691 \ - file://Source/WebCore/LICENSE-APPLE;md5=4646f90082c40bcf298c285f8bab0b12 \ - file://Source/WebCore/LICENSE-LGPL-2;md5=36357ffde2b64ae177b2494445b79d21 \ - file://Source/WebCore/LICENSE-LGPL-2.1;md5=a778a33ef338abbaf8b8a7c36b6eec80 \ - " - -SRC_URI = "https://www.webkitgtk.org/releases/${BPN}-${PV}.tar.xz \ - file://0001-FindGObjectIntrospection.cmake-prefix-variables-obta.patch \ - file://0001-When-building-introspection-files-add-CMAKE_C_FLAGS-.patch \ - file://0001-OptionsGTK.cmake-drop-the-hardcoded-introspection-gt.patch \ - file://0001-Fix-racy-parallel-build-of-WebKit2-4.0.gir.patch \ - file://0001-Tweak-gtkdoc-settings-so-that-gtkdoc-generation-work.patch \ - file://0001-Enable-THREADS_PREFER_PTHREAD_FLAG.patch \ - file://cross-compile.patch \ - file://0001-Fix-build-with-musl.patch \ - file://include_array.patch \ - " -SRC_URI[sha256sum] = "f0898ac072c220e13a4aee819408421a6cb56a6eb89170ceafe52468b0903522" - -inherit cmake pkgconfig gobject-introspection perlnative features_check upstream-version-is-even gtk-doc - -ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" -REQUIRED_DISTRO_FEATURES = "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'opengl', '', d)}" - -CVE_PRODUCT = "webkitgtk webkitgtk\+" - -DEPENDS = "zlib libsoup-2.4 curl libxml2 cairo libxslt libgcrypt \ - gtk+3 gstreamer1.0 gstreamer1.0-plugins-base flex-native gperf-native sqlite3 \ - pango icu bison-native gawk intltool-native libwebp \ - atk udev harfbuzz jpeg libpng librsvg libtheora libvorbis \ - ruby-native libnotify gstreamer1.0-plugins-bad \ - gettext-native glib-2.0 glib-2.0-native libtasn1 \ - " - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'wayland x11', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'x11 opengl', 'webgl opengl', '', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', 'webgl gles2', d)} \ - enchant \ - libsecret \ - " - -PACKAGECONFIG[wayland] = "-DENABLE_WAYLAND_TARGET=ON,-DENABLE_WAYLAND_TARGET=OFF,wayland libwpe 
wpebackend-fdo wayland-native" -PACKAGECONFIG[x11] = "-DENABLE_X11_TARGET=ON,-DENABLE_X11_TARGET=OFF,virtual/libx11 libxcomposite libxdamage libxrender libxt" -PACKAGECONFIG[geoclue] = "-DENABLE_GEOLOCATION=ON,-DENABLE_GEOLOCATION=OFF,geoclue" -PACKAGECONFIG[enchant] = "-DENABLE_SPELLCHECK=ON,-DENABLE_SPELLCHECK=OFF,enchant2" -PACKAGECONFIG[gles2] = "-DENABLE_GLES2=ON,-DENABLE_GLES2=OFF,virtual/libgles2" -PACKAGECONFIG[webgl] = "-DENABLE_WEBGL=ON,-DENABLE_WEBGL=OFF,virtual/libgl" -PACKAGECONFIG[opengl] = "-DENABLE_OPENGL=ON,-DENABLE_OPENGL=OFF,virtual/libgl" -PACKAGECONFIG[libsecret] = "-DUSE_LIBSECRET=ON,-DUSE_LIBSECRET=OFF,libsecret" -PACKAGECONFIG[libhyphen] = "-DUSE_LIBHYPHEN=ON,-DUSE_LIBHYPHEN=OFF,libhyphen" -PACKAGECONFIG[woff2] = "-DUSE_WOFF2=ON,-DUSE_WOFF2=OFF,woff2" -PACKAGECONFIG[openjpeg] = "-DUSE_OPENJPEG=ON,-DUSE_OPENJPEG=OFF,openjpeg" - -# webkitgtk is full of /usr/bin/env python, particular for generating docs -do_configure[postfuncs] += "setup_python_link" -setup_python_link() { - if [ ! -e ${STAGING_BINDIR_NATIVE}/python ]; then - ln -s `which python3` ${STAGING_BINDIR_NATIVE}/python - fi -} - -EXTRA_OECMAKE = " \ - -DPORT=GTK \ - -DCMAKE_BUILD_TYPE=Release \ - ${@bb.utils.contains('GI_DATA_ENABLED', 'True', '-DENABLE_INTROSPECTION=ON', '-DENABLE_INTROSPECTION=OFF', d)} \ - ${@bb.utils.contains('GTKDOC_ENABLED', 'True', '-DENABLE_GTKDOC=ON', '-DENABLE_GTKDOC=OFF', d)} \ - -DENABLE_MINIBROWSER=ON \ - -DPYTHON_EXECUTABLE=`which python3` \ - -DENABLE_BUBBLEWRAP_SANDBOX=OFF \ - " - -# Javascript JIT is not supported on ARC -EXTRA_OECMAKE_append_arc = " -DENABLE_JIT=OFF " -# By default 25-bit "medium" calls are used on ARC -# which is not enough for binaries larger than 32 MiB -CFLAGS_append_arc = " -mlong-calls" -CXXFLAGS_append_arc = " -mlong-calls" - -# Javascript JIT is not supported on powerpc -EXTRA_OECMAKE_append_powerpc = " -DENABLE_JIT=OFF " -EXTRA_OECMAKE_append_powerpc64 = " -DENABLE_JIT=OFF " - -# ARM JIT code does not build on ARMv4/5/6 anymore -EXTRA_OECMAKE_append_armv5 = " -DENABLE_JIT=OFF " -EXTRA_OECMAKE_append_armv6 = " -DENABLE_JIT=OFF " -EXTRA_OECMAKE_append_armv4 = " -DENABLE_JIT=OFF " - -EXTRA_OECMAKE_append_mipsarch = " -DUSE_LD_GOLD=OFF " -EXTRA_OECMAKE_append_powerpc = " -DUSE_LD_GOLD=OFF " - -# JIT not supported on MIPS either -EXTRA_OECMAKE_append_mipsarch = " -DENABLE_JIT=OFF -DENABLE_C_LOOP=ON " - -# JIT not supported on X32 -# An attempt was made to upstream JIT support for x32 in -# https://bugs.webkit.org/show_bug.cgi?id=100450, but this was closed as -# unresolved due to limited X32 adoption. -EXTRA_OECMAKE_append_x86-x32 = " -DENABLE_JIT=OFF " - -SECURITY_CFLAGS_remove_aarch64 = "-fpie" -SECURITY_CFLAGS_append_aarch64 = " -fPIE" - -FILES_${PN} += "${libdir}/webkit2gtk-4.0/injected-bundle/libwebkit2gtkinjectedbundle.so" - -RRECOMMENDS_${PN} += "ca-certificates shared-mime-info" - -# http://errors.yoctoproject.org/Errors/Details/20370/ -ARM_INSTRUCTION_SET_armv4 = "arm" -ARM_INSTRUCTION_SET_armv5 = "arm" -ARM_INSTRUCTION_SET_armv6 = "arm" - -# https://bugzilla.yoctoproject.org/show_bug.cgi?id=9474 -# https://bugs.webkit.org/show_bug.cgi?id=159880 -# JSC JIT can build on ARMv7 with -marm, but doesn't work on runtime. -# Upstream only tests regularly the JSC JIT on ARMv7 with Thumb2 (-mthumb). 
-ARM_INSTRUCTION_SET_armv7a = "thumb" -ARM_INSTRUCTION_SET_armv7r = "thumb" -ARM_INSTRUCTION_SET_armv7ve = "thumb" - -# introspection inside qemu-arm hangs forever on musl/arm builds -# therefore disable GI_DATA -GI_DATA_ENABLED_libc-musl_armv7a = "False" -GI_DATA_ENABLED_libc-musl_armv7ve = "False" - -# Can't be built with ccache -CCACHE_DISABLE = "1" diff --git a/poky/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb b/poky/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb new file mode 100644 index 000000000..8ebb3709a --- /dev/null +++ b/poky/meta/recipes-sato/webkit/webkitgtk_2.28.4.bb @@ -0,0 +1,132 @@ +SUMMARY = "WebKit web rendering engine for the GTK+ platform" +HOMEPAGE = "https://www.webkitgtk.org/" +BUGTRACKER = "https://bugs.webkit.org/" + +LICENSE = "BSD & LGPLv2+" +LIC_FILES_CHKSUM = "file://Source/JavaScriptCore/COPYING.LIB;md5=d0c6d6397a5d84286dda758da57bd691 \ + file://Source/WebCore/LICENSE-APPLE;md5=4646f90082c40bcf298c285f8bab0b12 \ + file://Source/WebCore/LICENSE-LGPL-2;md5=36357ffde2b64ae177b2494445b79d21 \ + file://Source/WebCore/LICENSE-LGPL-2.1;md5=a778a33ef338abbaf8b8a7c36b6eec80 \ + " + +SRC_URI = "https://www.webkitgtk.org/releases/${BPN}-${PV}.tar.xz \ + file://0001-FindGObjectIntrospection.cmake-prefix-variables-obta.patch \ + file://0001-When-building-introspection-files-add-CMAKE_C_FLAGS-.patch \ + file://0001-OptionsGTK.cmake-drop-the-hardcoded-introspection-gt.patch \ + file://0001-Fix-racy-parallel-build-of-WebKit2-4.0.gir.patch \ + file://0001-Tweak-gtkdoc-settings-so-that-gtkdoc-generation-work.patch \ + file://0001-Enable-THREADS_PREFER_PTHREAD_FLAG.patch \ + file://cross-compile.patch \ + file://0001-Fix-build-with-musl.patch \ + file://include_array.patch \ + file://0001-clang-11-fix-build-errors-due-to-WWc-11-narrowing.patch \ + " +SRC_URI[sha256sum] = "821952e8c9303ed752f1fb1d4283f612c25249d00d705d2b79c2db1bc49c9464" + +inherit cmake pkgconfig gobject-introspection perlnative features_check upstream-version-is-even gtk-doc + +ANY_OF_DISTRO_FEATURES = "${GTK3DISTROFEATURES}" +REQUIRED_DISTRO_FEATURES = "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'opengl', '', d)}" + +CVE_PRODUCT = "webkitgtk webkitgtk\+" + +DEPENDS = "zlib libsoup-2.4 curl libxml2 cairo libxslt libgcrypt \ + gtk+3 gstreamer1.0 gstreamer1.0-plugins-base flex-native gperf-native sqlite3 \ + pango icu bison-native gawk intltool-native libwebp \ + atk udev harfbuzz jpeg libpng librsvg libtheora libvorbis \ + ruby-native libnotify gstreamer1.0-plugins-bad \ + gettext-native glib-2.0 glib-2.0-native libtasn1 \ + " + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'wayland x11', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'x11 opengl', 'webgl opengl', '', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', 'webgl gles2', d)} \ + enchant \ + libsecret \ + " + +PACKAGECONFIG[wayland] = "-DENABLE_WAYLAND_TARGET=ON,-DENABLE_WAYLAND_TARGET=OFF,wayland libwpe wpebackend-fdo wayland-native" +PACKAGECONFIG[x11] = "-DENABLE_X11_TARGET=ON,-DENABLE_X11_TARGET=OFF,virtual/libx11 libxcomposite libxdamage libxrender libxt" +PACKAGECONFIG[geoclue] = "-DENABLE_GEOLOCATION=ON,-DENABLE_GEOLOCATION=OFF,geoclue" +PACKAGECONFIG[enchant] = "-DENABLE_SPELLCHECK=ON,-DENABLE_SPELLCHECK=OFF,enchant2" +PACKAGECONFIG[gles2] = "-DENABLE_GLES2=ON,-DENABLE_GLES2=OFF,virtual/libgles2" +PACKAGECONFIG[webgl] = "-DENABLE_WEBGL=ON,-DENABLE_WEBGL=OFF,virtual/libgl" +PACKAGECONFIG[opengl] = "-DENABLE_OPENGL=ON,-DENABLE_OPENGL=OFF,virtual/libgl" +PACKAGECONFIG[libsecret] = 
"-DUSE_LIBSECRET=ON,-DUSE_LIBSECRET=OFF,libsecret" +PACKAGECONFIG[libhyphen] = "-DUSE_LIBHYPHEN=ON,-DUSE_LIBHYPHEN=OFF,libhyphen" +PACKAGECONFIG[woff2] = "-DUSE_WOFF2=ON,-DUSE_WOFF2=OFF,woff2" +PACKAGECONFIG[openjpeg] = "-DUSE_OPENJPEG=ON,-DUSE_OPENJPEG=OFF,openjpeg" + +# webkitgtk is full of /usr/bin/env python, particular for generating docs +do_configure[postfuncs] += "setup_python_link" +setup_python_link() { + if [ ! -e ${STAGING_BINDIR_NATIVE}/python ]; then + ln -s `which python3` ${STAGING_BINDIR_NATIVE}/python + fi +} + +EXTRA_OECMAKE = " \ + -DPORT=GTK \ + -DCMAKE_BUILD_TYPE=Release \ + ${@bb.utils.contains('GI_DATA_ENABLED', 'True', '-DENABLE_INTROSPECTION=ON', '-DENABLE_INTROSPECTION=OFF', d)} \ + ${@bb.utils.contains('GTKDOC_ENABLED', 'True', '-DENABLE_GTKDOC=ON', '-DENABLE_GTKDOC=OFF', d)} \ + -DENABLE_MINIBROWSER=ON \ + -DPYTHON_EXECUTABLE=`which python3` \ + -DENABLE_BUBBLEWRAP_SANDBOX=OFF \ + " + +# Javascript JIT is not supported on ARC +EXTRA_OECMAKE_append_arc = " -DENABLE_JIT=OFF " +# By default 25-bit "medium" calls are used on ARC +# which is not enough for binaries larger than 32 MiB +CFLAGS_append_arc = " -mlong-calls" +CXXFLAGS_append_arc = " -mlong-calls" + +# Javascript JIT is not supported on powerpc +EXTRA_OECMAKE_append_powerpc = " -DENABLE_JIT=OFF " +EXTRA_OECMAKE_append_powerpc64 = " -DENABLE_JIT=OFF " + +# ARM JIT code does not build on ARMv4/5/6 anymore +EXTRA_OECMAKE_append_armv5 = " -DENABLE_JIT=OFF " +EXTRA_OECMAKE_append_armv6 = " -DENABLE_JIT=OFF " +EXTRA_OECMAKE_append_armv4 = " -DENABLE_JIT=OFF " + +EXTRA_OECMAKE_append_mipsarch = " -DUSE_LD_GOLD=OFF " +EXTRA_OECMAKE_append_powerpc = " -DUSE_LD_GOLD=OFF " + +# JIT not supported on MIPS either +EXTRA_OECMAKE_append_mipsarch = " -DENABLE_JIT=OFF -DENABLE_C_LOOP=ON " + +# JIT not supported on X32 +# An attempt was made to upstream JIT support for x32 in +# https://bugs.webkit.org/show_bug.cgi?id=100450, but this was closed as +# unresolved due to limited X32 adoption. +EXTRA_OECMAKE_append_x86-x32 = " -DENABLE_JIT=OFF " + +SECURITY_CFLAGS_remove_aarch64 = "-fpie" +SECURITY_CFLAGS_append_aarch64 = " -fPIE" + +FILES_${PN} += "${libdir}/webkit2gtk-4.0/injected-bundle/libwebkit2gtkinjectedbundle.so" + +RRECOMMENDS_${PN} += "ca-certificates shared-mime-info" + +# http://errors.yoctoproject.org/Errors/Details/20370/ +ARM_INSTRUCTION_SET_armv4 = "arm" +ARM_INSTRUCTION_SET_armv5 = "arm" +ARM_INSTRUCTION_SET_armv6 = "arm" + +# https://bugzilla.yoctoproject.org/show_bug.cgi?id=9474 +# https://bugs.webkit.org/show_bug.cgi?id=159880 +# JSC JIT can build on ARMv7 with -marm, but doesn't work on runtime. +# Upstream only tests regularly the JSC JIT on ARMv7 with Thumb2 (-mthumb). 
+ARM_INSTRUCTION_SET_armv7a = "thumb" +ARM_INSTRUCTION_SET_armv7r = "thumb" +ARM_INSTRUCTION_SET_armv7ve = "thumb" + +# introspection inside qemu-arm hangs forever on musl/arm builds +# therefore disable GI_DATA +GI_DATA_ENABLED_libc-musl_armv7a = "False" +GI_DATA_ENABLED_libc-musl_armv7ve = "False" + +# Can't be built with ccache +CCACHE_DISABLE = "1" diff --git a/poky/meta/recipes-support/bash-completion/bash-completion_2.10.bb b/poky/meta/recipes-support/bash-completion/bash-completion_2.10.bb deleted file mode 100644 index 041af7490..000000000 --- a/poky/meta/recipes-support/bash-completion/bash-completion_2.10.bb +++ /dev/null @@ -1,37 +0,0 @@ -SUMMARY = "Programmable Completion for Bash 4" -HOMEPAGE = "http://bash-completion.alioth.debian.org/" -BUGTRACKER = "https://alioth.debian.org/projects/bash-completion/" - -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe" - -SECTION = "console/utils" - -SRC_URI = "https://github.com/scop/bash-completion/releases/download/${PV}/${BPN}-${PV}.tar.xz" - -SRC_URI[md5sum] = "f376ae3266cc70017aa833c39b76f984" -SRC_URI[sha256sum] = "123c17998e34b937ce57bb1b111cd817bc369309e9a8047c0bcf06ead4a3ec92" -UPSTREAM_CHECK_REGEX = "bash-completion-(?P(?!2008).+)\.tar" -UPSTREAM_CHECK_URI = "https://github.com/scop/bash-completion/releases" - -PARALLEL_MAKE = "" - -inherit autotools - -do_install_append() { - # compatdir - install -d ${D}${sysconfdir}/bash_completion.d/ - echo '. ${datadir}/${BPN}/bash_completion' >${D}${sysconfdir}/bash_completion - -} - -RDEPENDS_${PN} = "bash" - -# Some recipes are providing ${PN}-bash-completion packages -PACKAGES =+ "${PN}-extra" -FILES_${PN}-extra = "${datadir}/${BPN}/completions/ \ - ${datadir}/${BPN}/helpers/" - -FILES_${PN}-dev += "${datadir}/cmake" - -BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-support/bash-completion/bash-completion_2.11.bb b/poky/meta/recipes-support/bash-completion/bash-completion_2.11.bb new file mode 100644 index 000000000..da0baa7af --- /dev/null +++ b/poky/meta/recipes-support/bash-completion/bash-completion_2.11.bb @@ -0,0 +1,37 @@ +SUMMARY = "Programmable Completion for Bash 4" +HOMEPAGE = "http://bash-completion.alioth.debian.org/" +BUGTRACKER = "https://alioth.debian.org/projects/bash-completion/" + +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe" + +SECTION = "console/utils" + +SRC_URI = "https://github.com/scop/bash-completion/releases/download/${PV}/${BPN}-${PV}.tar.xz" + +SRC_URI[md5sum] = "2514c6772d0de6254758b98c53f91861" +SRC_URI[sha256sum] = "73a8894bad94dee83ab468fa09f628daffd567e8bef1a24277f1e9a0daf911ac" +UPSTREAM_CHECK_REGEX = "bash-completion-(?P(?!2008).+)\.tar" +UPSTREAM_CHECK_URI = "https://github.com/scop/bash-completion/releases" + +PARALLEL_MAKE = "" + +inherit autotools + +do_install_append() { + # compatdir + install -d ${D}${sysconfdir}/bash_completion.d/ + echo '. 
${datadir}/${BPN}/bash_completion' >${D}${sysconfdir}/bash_completion + +} + +RDEPENDS_${PN} = "bash" + +# Some recipes are providing ${PN}-bash-completion packages +PACKAGES =+ "${PN}-extra" +FILES_${PN}-extra = "${datadir}/${BPN}/completions/ \ + ${datadir}/${BPN}/helpers/" + +FILES_${PN}-dev += "${datadir}/cmake" + +BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-support/curl/curl_7.71.1.bb b/poky/meta/recipes-support/curl/curl_7.71.1.bb deleted file mode 100644 index f028f1fdd..000000000 --- a/poky/meta/recipes-support/curl/curl_7.71.1.bb +++ /dev/null @@ -1,83 +0,0 @@ -SUMMARY = "Command line tool and library for client-side URL transfers" -HOMEPAGE = "http://curl.haxx.se/" -BUGTRACKER = "http://curl.haxx.se/mail/list.cgi?list=curl-tracker" -SECTION = "console/network" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=2e9fb35867314fe31c6a4977ef7dd531" - -SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \ - file://0001-replace-krb5-config-with-pkg-config.patch \ -" - -SRC_URI[sha256sum] = "9d52a4d80554f9b0d460ea2be5d7be99897a1a9f681ffafe739169afd6b4f224" - -CVE_PRODUCT = "curl libcurl" -inherit autotools pkgconfig binconfig multilib_header - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib" -PACKAGECONFIG_class-native = "ipv6 proxy ssl threaded-resolver verbose zlib" -PACKAGECONFIG_class-nativesdk = "ipv6 proxy ssl threaded-resolver verbose zlib" - -# 'ares' and 'threaded-resolver' are mutually exclusive -PACKAGECONFIG[ares] = "--enable-ares,--disable-ares,c-ares,,,threaded-resolver" -PACKAGECONFIG[brotli] = "--with-brotli,--without-brotli,brotli" -PACKAGECONFIG[builtinmanual] = "--enable-manual,--disable-manual" -PACKAGECONFIG[dict] = "--enable-dict,--disable-dict," -PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls" -PACKAGECONFIG[gopher] = "--enable-gopher,--disable-gopher," -PACKAGECONFIG[imap] = "--enable-imap,--disable-imap," -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," -PACKAGECONFIG[krb5] = "--with-gssapi,--without-gssapi,krb5" -PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap," -PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps," -PACKAGECONFIG[libidn] = "--with-libidn2,--without-libidn2,libidn2" -PACKAGECONFIG[libssh2] = "--with-libssh2,--without-libssh2,libssh2" -PACKAGECONFIG[mbedtls] = "--with-mbedtls=${STAGING_DIR_TARGET},--without-mbedtls,mbedtls" -PACKAGECONFIG[mqtt] = "--enable-mqtt,--disable-mqtt," -PACKAGECONFIG[nghttp2] = "--with-nghttp2,--without-nghttp2,nghttp2" -PACKAGECONFIG[pop3] = "--enable-pop3,--disable-pop3," -PACKAGECONFIG[proxy] = "--enable-proxy,--disable-proxy," -PACKAGECONFIG[rtmpdump] = "--with-librtmp,--without-librtmp,rtmpdump" -PACKAGECONFIG[rtsp] = "--enable-rtsp,--disable-rtsp," -PACKAGECONFIG[smb] = "--enable-smb,--disable-smb," -PACKAGECONFIG[smtp] = "--enable-smtp,--disable-smtp," -PACKAGECONFIG[ssl] = "--with-ssl --with-random=/dev/urandom,--without-ssl,openssl" -PACKAGECONFIG[nss] = "--with-nss,--without-nss,nss" -PACKAGECONFIG[telnet] = "--enable-telnet,--disable-telnet," -PACKAGECONFIG[tftp] = "--enable-tftp,--disable-tftp," -PACKAGECONFIG[threaded-resolver] = "--enable-threaded-resolver,--disable-threaded-resolver,,,,ares" -PACKAGECONFIG[verbose] = "--enable-verbose,--disable-verbose" -PACKAGECONFIG[zlib] = "--with-zlib=${STAGING_LIBDIR}/../,--without-zlib,zlib" - -EXTRA_OECONF = " \ - --disable-libcurl-option \ - --disable-ntlm-wb \ - --enable-crypto-auth \ - 
--with-ca-bundle=${sysconfdir}/ssl/certs/ca-certificates.crt \ - --without-libmetalink \ - --without-libpsl \ - --enable-debug \ - --enable-optimize \ - --disable-curldebug \ -" - -do_install_append_class-target() { - # cleanup buildpaths from curl-config - sed -i \ - -e 's,--sysroot=${STAGING_DIR_TARGET},,g' \ - -e 's,--with-libtool-sysroot=${STAGING_DIR_TARGET},,g' \ - -e 's|${DEBUG_PREFIX_MAP}||g' \ - ${D}${bindir}/curl-config -} - -PACKAGES =+ "lib${BPN}" - -FILES_lib${BPN} = "${libdir}/lib*.so.*" -RRECOMMENDS_lib${BPN} += "ca-certificates" - -FILES_${PN} += "${datadir}/zsh" - -inherit multilib_script -MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/curl-config" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/curl/curl_7.72.0.bb b/poky/meta/recipes-support/curl/curl_7.72.0.bb new file mode 100644 index 000000000..e7f549269 --- /dev/null +++ b/poky/meta/recipes-support/curl/curl_7.72.0.bb @@ -0,0 +1,83 @@ +SUMMARY = "Command line tool and library for client-side URL transfers" +HOMEPAGE = "http://curl.haxx.se/" +BUGTRACKER = "http://curl.haxx.se/mail/list.cgi?list=curl-tracker" +SECTION = "console/network" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=2e9fb35867314fe31c6a4977ef7dd531" + +SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \ + file://0001-replace-krb5-config-with-pkg-config.patch \ +" + +SRC_URI[sha256sum] = "ad91970864102a59765e20ce16216efc9d6ad381471f7accceceab7d905703ef" + +CVE_PRODUCT = "curl libcurl" +inherit autotools pkgconfig binconfig multilib_header + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib" +PACKAGECONFIG_class-native = "ipv6 proxy ssl threaded-resolver verbose zlib" +PACKAGECONFIG_class-nativesdk = "ipv6 proxy ssl threaded-resolver verbose zlib" + +# 'ares' and 'threaded-resolver' are mutually exclusive +PACKAGECONFIG[ares] = "--enable-ares,--disable-ares,c-ares,,,threaded-resolver" +PACKAGECONFIG[brotli] = "--with-brotli,--without-brotli,brotli" +PACKAGECONFIG[builtinmanual] = "--enable-manual,--disable-manual" +PACKAGECONFIG[dict] = "--enable-dict,--disable-dict," +PACKAGECONFIG[gnutls] = "--with-gnutls,--without-gnutls,gnutls" +PACKAGECONFIG[gopher] = "--enable-gopher,--disable-gopher," +PACKAGECONFIG[imap] = "--enable-imap,--disable-imap," +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +PACKAGECONFIG[krb5] = "--with-gssapi,--without-gssapi,krb5" +PACKAGECONFIG[ldap] = "--enable-ldap,--disable-ldap," +PACKAGECONFIG[ldaps] = "--enable-ldaps,--disable-ldaps," +PACKAGECONFIG[libidn] = "--with-libidn2,--without-libidn2,libidn2" +PACKAGECONFIG[libssh2] = "--with-libssh2,--without-libssh2,libssh2" +PACKAGECONFIG[mbedtls] = "--with-mbedtls=${STAGING_DIR_TARGET},--without-mbedtls,mbedtls" +PACKAGECONFIG[mqtt] = "--enable-mqtt,--disable-mqtt," +PACKAGECONFIG[nghttp2] = "--with-nghttp2,--without-nghttp2,nghttp2" +PACKAGECONFIG[pop3] = "--enable-pop3,--disable-pop3," +PACKAGECONFIG[proxy] = "--enable-proxy,--disable-proxy," +PACKAGECONFIG[rtmpdump] = "--with-librtmp,--without-librtmp,rtmpdump" +PACKAGECONFIG[rtsp] = "--enable-rtsp,--disable-rtsp," +PACKAGECONFIG[smb] = "--enable-smb,--disable-smb," +PACKAGECONFIG[smtp] = "--enable-smtp,--disable-smtp," +PACKAGECONFIG[ssl] = "--with-ssl --with-random=/dev/urandom,--without-ssl,openssl" +PACKAGECONFIG[nss] = "--with-nss,--without-nss,nss" +PACKAGECONFIG[telnet] = "--enable-telnet,--disable-telnet," +PACKAGECONFIG[tftp] = "--enable-tftp,--disable-tftp," +PACKAGECONFIG[threaded-resolver] = 
"--enable-threaded-resolver,--disable-threaded-resolver,,,,ares" +PACKAGECONFIG[verbose] = "--enable-verbose,--disable-verbose" +PACKAGECONFIG[zlib] = "--with-zlib=${STAGING_LIBDIR}/../,--without-zlib,zlib" + +EXTRA_OECONF = " \ + --disable-libcurl-option \ + --disable-ntlm-wb \ + --enable-crypto-auth \ + --with-ca-bundle=${sysconfdir}/ssl/certs/ca-certificates.crt \ + --without-libmetalink \ + --without-libpsl \ + --enable-debug \ + --enable-optimize \ + --disable-curldebug \ +" + +do_install_append_class-target() { + # cleanup buildpaths from curl-config + sed -i \ + -e 's,--sysroot=${STAGING_DIR_TARGET},,g' \ + -e 's,--with-libtool-sysroot=${STAGING_DIR_TARGET},,g' \ + -e 's|${DEBUG_PREFIX_MAP}||g' \ + ${D}${bindir}/curl-config +} + +PACKAGES =+ "lib${BPN}" + +FILES_lib${BPN} = "${libdir}/lib*.so.*" +RRECOMMENDS_lib${BPN} += "ca-certificates" + +FILES_${PN} += "${datadir}/zsh" + +inherit multilib_script +MULTILIB_SCRIPTS = "${PN}-dev:${bindir}/curl-config" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_151.bb b/poky/meta/recipes-support/diffoscope/diffoscope_151.bb deleted file mode 100644 index 9b39b4435..000000000 --- a/poky/meta/recipes-support/diffoscope/diffoscope_151.bb +++ /dev/null @@ -1,17 +0,0 @@ -SUMMARY = "in-depth comparison of files, archives, and directories" -HOMEPAGE = "https://diffoscope.org/" -LICENSE = "GPL-3.0+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -PYPI_PACKAGE = "diffoscope" - -inherit pypi setuptools3 - -SRC_URI[sha256sum] = "f15e04aa537f0a3d91c99bf631c604668f4eea8f6552c5f93ea9be2bf014df84" - -RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" - -# Dependencies don't build for musl -COMPATIBLE_HOST_libc-musl = 'null' - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_153.bb b/poky/meta/recipes-support/diffoscope/diffoscope_153.bb new file mode 100644 index 000000000..77f5254ae --- /dev/null +++ b/poky/meta/recipes-support/diffoscope/diffoscope_153.bb @@ -0,0 +1,17 @@ +SUMMARY = "in-depth comparison of files, archives, and directories" +HOMEPAGE = "https://diffoscope.org/" +LICENSE = "GPL-3.0+" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +PYPI_PACKAGE = "diffoscope" + +inherit pypi setuptools3 + +SRC_URI[sha256sum] = "b5104b5e72252df45ba6b7cbb0169e2e3407715b6b063fa5b38a2649b0d719a2" + +RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" + +# Dependencies don't build for musl +COMPATIBLE_HOST_libc-musl = 'null' + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/fribidi/fribidi_1.0.10.bb b/poky/meta/recipes-support/fribidi/fribidi_1.0.10.bb index ba9e6f06d..53d78b427 100644 --- a/poky/meta/recipes-support/fribidi/fribidi_1.0.10.bb +++ b/poky/meta/recipes-support/fribidi/fribidi_1.0.10.bb @@ -12,6 +12,6 @@ UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" inherit meson lib_package pkgconfig -CVE_PRODUCT = "gnu_fribidi" +CVE_PRODUCT = "gnu_fribidi fribidi" BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb b/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb index 9fc1ae24a..b51534351 100644 --- a/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb +++ b/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb @@ -59,7 +59,7 @@ EXTRA_OECONF += '--enable-languages="${LANGUAGES}" \ --disable-lang-python-test \ ' -inherit autotools texinfo binconfig-disabled pkgconfig distutils-common-base 
${PYTHON_INHERIT} +inherit autotools texinfo binconfig-disabled pkgconfig distutils-common-base ${PYTHON_INHERIT} multilib_header export PKG_CONFIG='pkg-config' @@ -83,3 +83,7 @@ do_configure_prepend () { rm -f ${S}/m4/libassuan.m4 rm -f ${S}/m4/python.m4 } + +do_install_append() { + oe_multilib_header gpgme.h +} diff --git a/poky/meta/recipes-support/libcap/files/0001-tests-do-not-statically-link-a-test.patch b/poky/meta/recipes-support/libcap/files/0001-tests-do-not-statically-link-a-test.patch index 912f33f16..81fdd2a15 100644 --- a/poky/meta/recipes-support/libcap/files/0001-tests-do-not-statically-link-a-test.patch +++ b/poky/meta/recipes-support/libcap/files/0001-tests-do-not-statically-link-a-test.patch @@ -1,4 +1,4 @@ -From 24d59c99bcba065f1f40f49f870a5f6483b4b078 Mon Sep 17 00:00:00 2001 +From e4fe6890e07821d60f01f000a95f7944f3d68139 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Wed, 15 Jan 2020 17:16:28 +0100 Subject: [PATCH] tests: do not statically link a test @@ -13,10 +13,10 @@ Signed-off-by: Alexander Kanavin 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Makefile b/tests/Makefile -index f9cec56..aa0d09b 100644 +index 876a8b9..93a0e3a 100644 --- a/tests/Makefile +++ b/tests/Makefile -@@ -29,23 +29,23 @@ psx_test_wrap: psx_test.c $(DEPS) +@@ -26,23 +26,23 @@ psx_test: psx_test.c $(DEPS) run_libcap_psx_test: libcap_psx_test libcap_psx_test: libcap_psx_test.c $(DEPS) @@ -43,4 +43,4 @@ index f9cec56..aa0d09b 100644 + $(CC) $(CFLAGS) $< -o $@ clean: - rm -f psx_test psx_test_wrap libcap_psx_test libcap_launch_test *~ + rm -f psx_test libcap_psx_test libcap_launch_test *~ diff --git a/poky/meta/recipes-support/libcap/files/0002-tests-do-not-run-target-executables.patch b/poky/meta/recipes-support/libcap/files/0002-tests-do-not-run-target-executables.patch index 26d108c20..e8f1df98c 100644 --- a/poky/meta/recipes-support/libcap/files/0002-tests-do-not-run-target-executables.patch +++ b/poky/meta/recipes-support/libcap/files/0002-tests-do-not-run-target-executables.patch @@ -1,4 +1,4 @@ -From 6309554225e05e76167eda4e0df383fb3d1a62c3 Mon Sep 17 00:00:00 2001 +From 5583b48c04d5bf50b56473d88d990c3f0bc45c14 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Fri, 20 Dec 2019 16:54:05 +0100 Subject: [PATCH] tests: do not run target executables @@ -7,23 +7,20 @@ Upstream-Status: Inappropriate [oe-core specific] Signed-off-by: Alexander Kanavin --- - tests/Makefile | 3 --- - 1 file changed, 3 deletions(-) + tests/Makefile | 2 -- + 1 file changed, 2 deletions(-) diff --git a/tests/Makefile b/tests/Makefile -index 95e4ca6..7162cf0 100644 +index bfedbc2..876a8b9 100644 --- a/tests/Makefile +++ b/tests/Makefile -@@ -19,8 +19,6 @@ sudotest: test +@@ -19,13 +19,11 @@ sudotest: test run_libcap_launch_test run_libcap_launch_test install: all - run_psx_test: psx_test psx_test_wrap + run_psx_test: psx_test - ./psx_test -- ./psx_test_wrap psx_test: psx_test.c $(DEPS) - $(CC) $(CFLAGS) $(IPATH) -DNOWRAP $< -o $@ $(LIBPSXLIB) -@@ -29,7 +27,6 @@ psx_test_wrap: psx_test.c $(DEPS) $(CC) $(CFLAGS) $(IPATH) $< -o $@ $(LIBPSXLIB) -Wl,-wrap,pthread_create run_libcap_psx_test: libcap_psx_test diff --git a/poky/meta/recipes-support/libcap/libcap_2.36.bb b/poky/meta/recipes-support/libcap/libcap_2.36.bb deleted file mode 100644 index 2c98db6ee..000000000 --- a/poky/meta/recipes-support/libcap/libcap_2.36.bb +++ /dev/null @@ -1,76 +0,0 @@ -SUMMARY = "Library for getting/setting POSIX.1e capabilities" -HOMEPAGE = "http://sites.google.com/site/fullycapable/" - -# no specific GPL 
version required -LICENSE = "BSD | GPLv2" -LIC_FILES_CHKSUM = "file://License;md5=3f84fd6f29d453a56514cb7e4ead25f1" - -DEPENDS = "hostperl-runtime-native gperf-native" - -SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${PV}.tar.xz \ - file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \ - file://0002-tests-do-not-run-target-executables.patch \ - file://0001-tests-do-not-statically-link-a-test.patch \ - " -SRC_URI[sha256sum] = "5048c849bdbbe24d2ca59463142cb279abec5edf3ab6731ab35a596bcf538a49" - -UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/linux/libs/security/linux-privs/${BPN}2/" - -inherit lib_package - -# do NOT pass target cflags to host compilations -# -do_configure() { - # libcap uses := for compilers, fortunately, it gives us a hint - # on what should be replaced with ?= - sed -e 's,:=,?=,g' -i Make.Rules - sed -e 's,^BUILD_CFLAGS ?= ,BUILD_CFLAGS := $(BUILD_CFLAGS) ,' -i Make.Rules -} - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" -PACKAGECONFIG_class-native ??= "" - -PACKAGECONFIG[pam] = "PAM_CAP=yes,PAM_CAP=no,libpam" - -EXTRA_OEMAKE = " \ - INDENT= \ - lib='${baselib}' \ - RAISE_SETFCAP=no \ - DYNAMIC=yes \ - BUILD_GPERF=yes \ -" - -EXTRA_OEMAKE_append_class-target = " SYSTEM_HEADERS=${STAGING_INCDIR}" - -# these are present in the libcap defaults, so include in our CFLAGS too -CFLAGS += "-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" - -do_compile() { - oe_runmake ${PACKAGECONFIG_CONFARGS} -} - -do_install() { - oe_runmake install \ - ${PACKAGECONFIG_CONFARGS} \ - DESTDIR="${D}" \ - prefix="${prefix}" \ - SBINDIR="${sbindir}" -} - -do_install_append() { - # Move the library to base_libdir - install -d ${D}${base_libdir} - if [ ! ${D}${libdir} -ef ${D}${base_libdir} ]; then - mv ${D}${libdir}/libcap* ${D}${base_libdir} - if [ -d ${D}${libdir}/security ]; then - mv ${D}${libdir}/security ${D}${base_libdir} - fi - fi -} - -FILES_${PN}-dev += "${base_libdir}/*.so" - -# pam files -FILES_${PN} += "${base_libdir}/security/*.so" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libcap/libcap_2.42.bb b/poky/meta/recipes-support/libcap/libcap_2.42.bb new file mode 100644 index 000000000..48f8f9262 --- /dev/null +++ b/poky/meta/recipes-support/libcap/libcap_2.42.bb @@ -0,0 +1,76 @@ +SUMMARY = "Library for getting/setting POSIX.1e capabilities" +HOMEPAGE = "http://sites.google.com/site/fullycapable/" + +# no specific GPL version required +LICENSE = "BSD | GPLv2" +LIC_FILES_CHKSUM = "file://License;md5=3f84fd6f29d453a56514cb7e4ead25f1" + +DEPENDS = "hostperl-runtime-native gperf-native" + +SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${PV}.tar.xz \ + file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \ + file://0002-tests-do-not-run-target-executables.patch \ + file://0001-tests-do-not-statically-link-a-test.patch \ + " +SRC_URI[sha256sum] = "3605a9cb60076547ea9f64989e0ba576da9508e4653e8dc40ae54c0d6f443dfd" + +UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/linux/libs/security/linux-privs/${BPN}2/" + +inherit lib_package + +# do NOT pass target cflags to host compilations +# +do_configure() { + # libcap uses := for compilers, fortunately, it gives us a hint + # on what should be replaced with ?= + sed -e 's,:=,?=,g' -i Make.Rules + sed -e 's,^BUILD_CFLAGS ?= ,BUILD_CFLAGS := $(BUILD_CFLAGS) ,' -i Make.Rules +} + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" +PACKAGECONFIG_class-native ??= "" + 
+PACKAGECONFIG[pam] = "PAM_CAP=yes,PAM_CAP=no,libpam" + +EXTRA_OEMAKE = " \ + INDENT= \ + lib='${baselib}' \ + RAISE_SETFCAP=no \ + DYNAMIC=yes \ + BUILD_GPERF=yes \ +" + +EXTRA_OEMAKE_append_class-target = " SYSTEM_HEADERS=${STAGING_INCDIR}" + +# these are present in the libcap defaults, so include in our CFLAGS too +CFLAGS += "-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" + +do_compile() { + oe_runmake ${PACKAGECONFIG_CONFARGS} +} + +do_install() { + oe_runmake install \ + ${PACKAGECONFIG_CONFARGS} \ + DESTDIR="${D}" \ + prefix="${prefix}" \ + SBINDIR="${sbindir}" +} + +do_install_append() { + # Move the library to base_libdir + install -d ${D}${base_libdir} + if [ ! ${D}${libdir} -ef ${D}${base_libdir} ]; then + mv ${D}${libdir}/libcap* ${D}${base_libdir} + if [ -d ${D}${libdir}/security ]; then + mv ${D}${libdir}/security ${D}${base_libdir} + fi + fi +} + +FILES_${PN}-dev += "${base_libdir}/*.so" + +# pam files +FILES_${PN} += "${base_libdir}/security/*.so" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libevdev/libevdev_1.9.0.bb b/poky/meta/recipes-support/libevdev/libevdev_1.9.0.bb deleted file mode 100644 index a2116a492..000000000 --- a/poky/meta/recipes-support/libevdev/libevdev_1.9.0.bb +++ /dev/null @@ -1,16 +0,0 @@ -SUMMARY = "Wrapper library for evdev devices" -HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libevdev/" -SECTION = "libs" - -LICENSE = "MIT-X" -LIC_FILES_CHKSUM = "file://COPYING;md5=75aae0d38feea6fda97ca381cb9132eb \ - file://libevdev/libevdev.h;endline=21;md5=7ff4f0b5113252c2f1a828e0bbad98d1" - -SRC_URI = "http://www.freedesktop.org/software/libevdev/${BP}.tar.xz \ - file://determinism.patch" -SRC_URI[md5sum] = "13c3f0911f9326d4b9fa103365f84421" -SRC_URI[sha256sum] = "e7e18a64264f2dea19b6c50a481f8c062529d42919ccda0bc861495bce28eb9e" - -inherit autotools pkgconfig - -UPSTREAM_CHECK_REGEX = "libevdev-(?P(\d+\.)+(?!90\d+)\d+)" diff --git a/poky/meta/recipes-support/libevdev/libevdev_1.9.1.bb b/poky/meta/recipes-support/libevdev/libevdev_1.9.1.bb new file mode 100644 index 000000000..633e0af99 --- /dev/null +++ b/poky/meta/recipes-support/libevdev/libevdev_1.9.1.bb @@ -0,0 +1,16 @@ +SUMMARY = "Wrapper library for evdev devices" +HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libevdev/" +SECTION = "libs" + +LICENSE = "MIT-X" +LIC_FILES_CHKSUM = "file://COPYING;md5=75aae0d38feea6fda97ca381cb9132eb \ + file://libevdev/libevdev.h;endline=21;md5=7ff4f0b5113252c2f1a828e0bbad98d1" + +SRC_URI = "http://www.freedesktop.org/software/libevdev/${BP}.tar.xz \ + file://determinism.patch" +SRC_URI[md5sum] = "58286a834ae14536caf9cab8633419cf" +SRC_URI[sha256sum] = "f5603c48c5afd76b14df7a5124e0a94a102f8da0d45826192325069d1bbc7acb" + +inherit autotools pkgconfig + +UPSTREAM_CHECK_REGEX = "libevdev-(?P(\d+\.)+(?!90\d+)\d+)" diff --git a/poky/meta/recipes-support/libexif/libexif_0.6.22.bb b/poky/meta/recipes-support/libexif/libexif_0.6.22.bb index a520d5c9f..2478ba07d 100644 --- a/poky/meta/recipes-support/libexif/libexif_0.6.22.bb +++ b/poky/meta/recipes-support/libexif/libexif_0.6.22.bb @@ -17,3 +17,5 @@ UPSTREAM_CHECK_URI = "https://github.com/libexif/libexif/releases/" inherit autotools gettext EXTRA_OECONF += "--disable-docs" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libffi/libffi_3.3.bb b/poky/meta/recipes-support/libffi/libffi_3.3.bb index e5beb985c..9dfdb9e39 100644 --- a/poky/meta/recipes-support/libffi/libffi_3.3.bb +++ b/poky/meta/recipes-support/libffi/libffi_3.3.bb @@ -28,7 +28,7 @@ 
EXTRA_OEMAKE_class-target = "LIBTOOLFLAGS='--tag=CC'" inherit autotools texinfo multilib_header do_install_append() { - oe_multilib_header ffi.h + oe_multilib_header ffi.h ffitarget.h } FILES_${PN}-dev += "${libdir}/libffi-${PV}" diff --git a/poky/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch b/poky/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch new file mode 100644 index 000000000..e11487fe9 --- /dev/null +++ b/poky/meta/recipes-support/libunwind/libunwind/0001-Fix-compilation-with-fno-common.patch @@ -0,0 +1,448 @@ +From 00d18b21dcb9723c4f13889a39a760a654782370 Mon Sep 17 00:00:00 2001 +From: Yichao Yu +Date: Tue, 31 Mar 2020 00:43:32 -0400 +Subject: [PATCH] Fix compilation with -fno-common. + +Making all other archs consistent with IA64 which should not have this problem. +Also move the FIXME to the correct place. + +Also add some minimum comments about this... + +Upstream-Status: Backport [https://github.com/libunwind/libunwind/pull/166] +Signed-off-by: Khem Raj +--- + src/aarch64/Ginit.c | 15 +++++++-------- + src/arm/Ginit.c | 15 +++++++-------- + src/coredump/_UPT_get_dyn_info_list_addr.c | 5 +++++ + src/hppa/Ginit.c | 15 +++++++-------- + src/ia64/Ginit.c | 1 + + src/mi/Gfind_dynamic_proc_info.c | 1 + + src/mips/Ginit.c | 15 +++++++-------- + src/ppc32/Ginit.c | 11 +++++++---- + src/ppc64/Ginit.c | 11 +++++++---- + src/ptrace/_UPT_get_dyn_info_list_addr.c | 5 +++++ + src/s390x/Ginit.c | 15 +++++++-------- + src/sh/Ginit.c | 15 +++++++-------- + src/tilegx/Ginit.c | 15 +++++++-------- + src/x86/Ginit.c | 15 +++++++-------- + src/x86_64/Ginit.c | 15 +++++++-------- + 15 files changed, 89 insertions(+), 80 deletions(-) + +diff --git a/src/aarch64/Ginit.c b/src/aarch64/Ginit.c +index dec235c..3538976 100644 +--- a/src/aarch64/Ginit.c ++++ b/src/aarch64/Ginit.c +@@ -61,13 +61,6 @@ tdep_uc_addr (unw_tdep_context_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -78,7 +71,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/arm/Ginit.c b/src/arm/Ginit.c +index 2720d06..0bac0d7 100644 +--- a/src/arm/Ginit.c ++++ b/src/arm/Ginit.c +@@ -57,18 +57,17 @@ tdep_uc_addr (unw_tdep_context_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. 
*/ +- + static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/coredump/_UPT_get_dyn_info_list_addr.c b/src/coredump/_UPT_get_dyn_info_list_addr.c +index 0d11905..739ed05 100644 +--- a/src/coredump/_UPT_get_dyn_info_list_addr.c ++++ b/src/coredump/_UPT_get_dyn_info_list_addr.c +@@ -74,6 +74,11 @@ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg, + + #else + ++/* XXX fix me: there is currently no way to locate the dyn-info list ++ by a remote unwinder. On ia64, this is done via a special ++ unwind-table entry. Perhaps something similar can be done with ++ DWARF2 unwind info. */ ++ + static inline int + get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg, + int *countp) +diff --git a/src/hppa/Ginit.c b/src/hppa/Ginit.c +index 461e4b9..265455a 100644 +--- a/src/hppa/Ginit.c ++++ b/src/hppa/Ginit.c +@@ -64,13 +64,6 @@ _Uhppa_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -81,7 +74,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/ia64/Ginit.c b/src/ia64/Ginit.c +index b09a2ad..8601bb3 100644 +--- a/src/ia64/Ginit.c ++++ b/src/ia64/Ginit.c +@@ -68,6 +68,7 @@ get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + if (!_U_dyn_info_list_addr) + return -UNW_ENOINFO; + #endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. + *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } +diff --git a/src/mi/Gfind_dynamic_proc_info.c b/src/mi/Gfind_dynamic_proc_info.c +index 98d3501..2e7c62e 100644 +--- a/src/mi/Gfind_dynamic_proc_info.c ++++ b/src/mi/Gfind_dynamic_proc_info.c +@@ -49,6 +49,7 @@ local_find_proc_info (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, + return -UNW_ENOINFO; + #endif + ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. + list = (unw_dyn_info_list_t *) (uintptr_t) _U_dyn_info_list_addr (); + for (di = list->first; di; di = di->next) + if (ip >= di->start_ip && ip < di->end_ip) +diff --git a/src/mips/Ginit.c b/src/mips/Ginit.c +index 3df170c..bf7a8f5 100644 +--- a/src/mips/Ginit.c ++++ b/src/mips/Ginit.c +@@ -69,13 +69,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. 
On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -86,7 +79,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) (intptr_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/ppc32/Ginit.c b/src/ppc32/Ginit.c +index c5312d9..f8d6886 100644 +--- a/src/ppc32/Ginit.c ++++ b/src/ppc32/Ginit.c +@@ -91,9 +91,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -104,7 +101,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/ppc64/Ginit.c b/src/ppc64/Ginit.c +index 4c88cd6..7bfb395 100644 +--- a/src/ppc64/Ginit.c ++++ b/src/ppc64/Ginit.c +@@ -95,9 +95,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -108,7 +105,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/ptrace/_UPT_get_dyn_info_list_addr.c b/src/ptrace/_UPT_get_dyn_info_list_addr.c +index cc5ed04..16671d4 100644 +--- a/src/ptrace/_UPT_get_dyn_info_list_addr.c ++++ b/src/ptrace/_UPT_get_dyn_info_list_addr.c +@@ -71,6 +71,11 @@ get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg, + + #else + ++/* XXX fix me: there is currently no way to locate the dyn-info list ++ by a remote unwinder. On ia64, this is done via a special ++ unwind-table entry. Perhaps something similar can be done with ++ DWARF2 unwind info. 
*/ ++ + static inline int + get_list_addr (unw_addr_space_t as, unw_word_t *dil_addr, void *arg, + int *countp) +diff --git a/src/s390x/Ginit.c b/src/s390x/Ginit.c +index f0886ac..db01743 100644 +--- a/src/s390x/Ginit.c ++++ b/src/s390x/Ginit.c +@@ -50,8 +50,6 @@ static struct unw_addr_space local_addr_space; + + unw_addr_space_t unw_local_addr_space = &local_addr_space; + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- + static inline void * + uc_addr (ucontext_t *uc, int reg) + { +@@ -75,11 +73,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -90,7 +83,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/sh/Ginit.c b/src/sh/Ginit.c +index 52988a7..9fe96d2 100644 +--- a/src/sh/Ginit.c ++++ b/src/sh/Ginit.c +@@ -58,13 +58,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -75,7 +68,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/tilegx/Ginit.c b/src/tilegx/Ginit.c +index 7564a55..925e641 100644 +--- a/src/tilegx/Ginit.c ++++ b/src/tilegx/Ginit.c +@@ -64,13 +64,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -81,7 +74,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) (intptr_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. 
++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/x86/Ginit.c b/src/x86/Ginit.c +index f6b8dc2..3cec74a 100644 +--- a/src/x86/Ginit.c ++++ b/src/x86/Ginit.c +@@ -54,13 +54,6 @@ tdep_uc_addr (ucontext_t *uc, int reg) + + # endif /* UNW_LOCAL_ONLY */ + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -71,7 +64,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +diff --git a/src/x86_64/Ginit.c b/src/x86_64/Ginit.c +index 6161da6..5c4e426 100644 +--- a/src/x86_64/Ginit.c ++++ b/src/x86_64/Ginit.c +@@ -49,13 +49,6 @@ static struct unw_addr_space local_addr_space; + + unw_addr_space_t unw_local_addr_space = &local_addr_space; + +-HIDDEN unw_dyn_info_list_t _U_dyn_info_list; +- +-/* XXX fix me: there is currently no way to locate the dyn-info list +- by a remote unwinder. On ia64, this is done via a special +- unwind-table entry. Perhaps something similar can be done with +- DWARF2 unwind info. */ +- + static void + put_unwind_info (unw_addr_space_t as, unw_proc_info_t *proc_info, void *arg) + { +@@ -66,7 +59,13 @@ static int + get_dyn_info_list_addr (unw_addr_space_t as, unw_word_t *dyn_info_list_addr, + void *arg) + { +- *dyn_info_list_addr = (unw_word_t) &_U_dyn_info_list; ++#ifndef UNW_LOCAL_ONLY ++# pragma weak _U_dyn_info_list_addr ++ if (!_U_dyn_info_list_addr) ++ return -UNW_ENOINFO; ++#endif ++ // Access the `_U_dyn_info_list` from `LOCAL_ONLY` library, i.e. libunwind.so. ++ *dyn_info_list_addr = _U_dyn_info_list_addr (); + return 0; + } + +-- +2.28.0 + diff --git a/poky/meta/recipes-support/libunwind/libunwind_1.4.0.bb b/poky/meta/recipes-support/libunwind/libunwind_1.4.0.bb index 34c2249e9..2193bd831 100644 --- a/poky/meta/recipes-support/libunwind/libunwind_1.4.0.bb +++ b/poky/meta/recipes-support/libunwind/libunwind_1.4.0.bb @@ -8,6 +8,7 @@ SRC_URI = "http://download.savannah.nongnu.org/releases/libunwind/libunwind-${PV file://0005-ppc32-Consider-ucontext-mismatches-between-glibc-and.patch \ file://0006-Fix-for-X32.patch \ file://sigset_t.patch \ + file://0001-Fix-compilation-with-fno-common.patch \ " SRC_URI_append_libc-musl = " file://musl-header-conflict.patch" diff --git a/poky/meta/recipes-support/popt/popt/disable_tests.patch b/poky/meta/recipes-support/popt/popt/disable_tests.patch deleted file mode 100644 index 016cf66a2..000000000 --- a/poky/meta/recipes-support/popt/popt/disable_tests.patch +++ /dev/null @@ -1,21 +0,0 @@ -Use of $(top_srcdir) in TESTS is an error which causes -automake-1.13 to abort. Just remove tests. 
- -Upstream-Status: Inappropriate [disable feature] - -Signed-off-by: Marko Lindqvist -diff -Nurd popt-1.16/Makefile.am popt-1.16/Makefile.am ---- popt-1.16/Makefile.am 2010-05-04 23:55:54.000000000 +0300 -+++ popt-1.16/Makefile.am 2013-01-02 13:34:29.540361391 +0200 -@@ -34,11 +34,6 @@ - - noinst_SCRIPTS = testit.sh - --TESTS_ENVIRONMENT = \ --test1="$(top_builddir)/test1" -- --TESTS = $(top_srcdir)/testit.sh -- - include_HEADERS = popt.h - - usrlibdir = $(libdir) diff --git a/poky/meta/recipes-support/popt/popt/pkgconfig_fix.patch b/poky/meta/recipes-support/popt/popt/pkgconfig_fix.patch deleted file mode 100644 index 0bddbf8c9..000000000 --- a/poky/meta/recipes-support/popt/popt/pkgconfig_fix.patch +++ /dev/null @@ -1,15 +0,0 @@ -Upstream-Status: Pending - -Install the pkgconfig file into libdir. - ---- popt-1.16.orig/Makefile.am 2012-04-26 13:42:54.021139813 +0800 -+++ popt-1.16/Makefile.am 2012-04-26 13:36:03.552096912 +0800 -@@ -47,7 +47,7 @@ - libpopt_la_SOURCES = popt.c poptparse.c poptconfig.c popthelp.c poptint.c - libpopt_la_LDFLAGS = -no-undefined @LTLIBINTL@ @LTLIBICONV@ - --pkgconfigdir = $(prefix)/lib/pkgconfig -+pkgconfigdir = $(libdir)/pkgconfig - pkgconfig_DATA = popt.pc - - if HAVE_LD_VERSION_SCRIPT diff --git a/poky/meta/recipes-support/popt/popt/popt_fix_for_automake-1.12.patch b/poky/meta/recipes-support/popt/popt/popt_fix_for_automake-1.12.patch deleted file mode 100644 index 7d74aadff..000000000 --- a/poky/meta/recipes-support/popt/popt/popt_fix_for_automake-1.12.patch +++ /dev/null @@ -1,21 +0,0 @@ -Upstream-Status: pending - -This patch avoids this error with automake 1.12: - -| configure.ac:49: error: automatic de-ANSI-fication support has been removed - -Signed-off-by: Nitin A Kamble -2012/05/02 - -Index: popt-1.16/configure.ac -=================================================================== ---- popt-1.16.orig/configure.ac -+++ popt-1.16/configure.ac -@@ -46,7 +46,6 @@ AC_GCC_TRADITIONAL - AC_SYS_LARGEFILE - - AC_ISC_POSIX --AM_C_PROTOTYPES - - AC_CHECK_HEADERS(float.h fnmatch.h glob.h langinfo.h libintl.h mcheck.h unistd.h) - diff --git a/poky/meta/recipes-support/popt/popt_1.16.bb b/poky/meta/recipes-support/popt/popt_1.16.bb deleted file mode 100644 index 27e49c2ca..000000000 --- a/poky/meta/recipes-support/popt/popt_1.16.bb +++ /dev/null @@ -1,22 +0,0 @@ -SUMMARY = "Library for parsing command line options" -HOMEPAGE = "http://rpm5.org/" -SECTION = "libs" - -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=cb0613c30af2a8249b8dcc67d3edb06d" -PR = "r3" - -DEPENDS = "virtual/libiconv" - -SRC_URI = "http://anduin.linuxfromscratch.org/BLFS/popt/popt-${PV}.tar.gz \ - file://pkgconfig_fix.patch \ - file://popt_fix_for_automake-1.12.patch \ - file://disable_tests.patch \ - " - -SRC_URI[md5sum] = "3743beefa3dd6247a73f8f7a32c14c33" -SRC_URI[sha256sum] = "e728ed296fe9f069a0e005003c3d6b2dde3d9cad453422a10d6558616d304cc8" - -inherit autotools gettext - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/popt/popt_1.18.bb b/poky/meta/recipes-support/popt/popt_1.18.bb new file mode 100644 index 000000000..022ece5f4 --- /dev/null +++ b/poky/meta/recipes-support/popt/popt_1.18.bb @@ -0,0 +1,15 @@ +SUMMARY = "Library for parsing command line options" +HOMEPAGE = "https://www.rpm.org/" +SECTION = "libs" + +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=cb0613c30af2a8249b8dcc67d3edb06d" + +DEPENDS = "virtual/libiconv" + +SRC_URI = "http://ftp.rpm.org/popt/releases/popt-1.x/${BP}.tar.gz" +SRC_URI[sha256sum] = 
"5159bc03a20b28ce363aa96765f37df99ea4d8850b1ece17d1e6ad5c24fdc5d1" + +inherit autotools gettext + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/re2c/re2c/CVE-2020-11958.patch b/poky/meta/recipes-support/re2c/re2c/CVE-2020-11958.patch deleted file mode 100644 index 43462e642..000000000 --- a/poky/meta/recipes-support/re2c/re2c/CVE-2020-11958.patch +++ /dev/null @@ -1,41 +0,0 @@ -From c4603ba5ce229db83a2a4fb93e6d4b4e3ec3776a Mon Sep 17 00:00:00 2001 -From: Ulya Trofimovich -Date: Fri, 17 Apr 2020 22:47:14 +0100 -Subject: [PATCH] Fix crash in lexer refill (reported by Agostino Sarubbo). - -The crash happened in a rare case of a very long lexeme that doen't fit -into the buffer, forcing buffer reallocation. - -The crash was caused by an incorrect calculation of the shift offset -(it was smaller than necessary). As a consequence, the data from buffer -start and up to the beginning of the current lexeme was not discarded -(as it should have been), resulting in less free space for new data than -expected. - -Upstream-Status: Backport [https://github.com/skvadrik/re2c/commit/c4603ba5ce229db83a2a4fb93e6d4b4e3ec3776a] -CVE: CVE-2020-11958 -Signed-off-by: Lee Chee Yang ---- - src/parse/scanner.cc | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/src/parse/scanner.cc b/src/parse/scanner.cc -index 1d6e9efa..bd651314 100644 ---- a/src/parse/scanner.cc -+++ b/src/parse/scanner.cc -@@ -155,13 +155,14 @@ bool Scanner::fill(size_t need) - if (!buf) fatal("out of memory"); - - memmove(buf, tok, copy); -- shift_ptrs_and_fpos(buf - bot); -+ shift_ptrs_and_fpos(buf - tok); - delete [] bot; - bot = buf; - - free = BSIZE - copy; - } - -+ DASSERT(lim + free <= bot + BSIZE); - if (!read(free)) { - eof = lim; - memset(lim, 0, YYMAXFILL); diff --git a/poky/meta/recipes-support/re2c/re2c_1.3.bb b/poky/meta/recipes-support/re2c/re2c_1.3.bb deleted file mode 100644 index e9053acdf..000000000 --- a/poky/meta/recipes-support/re2c/re2c_1.3.bb +++ /dev/null @@ -1,16 +0,0 @@ -SUMMARY = "Tool for writing very fast and very flexible scanners" -HOMEPAGE = "http://re2c.sourceforge.net/" -AUTHOR = "Marcus Börger " -SECTION = "devel" -LICENSE = "PD" -LIC_FILES_CHKSUM = "file://LICENSE;md5=64eca4d8a3b67f9dc7656094731a2c8d" - -SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.xz \ - file://CVE-2020-11958.patch \ -" -SRC_URI[sha256sum] = "f37f25ff760e90088e7d03d1232002c2c2672646d5844fdf8e0d51a5cd75a503" -UPSTREAM_CHECK_URI = "https://github.com/skvadrik/re2c/releases" - -BBCLASSEXTEND = "native nativesdk" - -inherit autotools diff --git a/poky/meta/recipes-support/re2c/re2c_2.0.bb b/poky/meta/recipes-support/re2c/re2c_2.0.bb new file mode 100644 index 000000000..b73b02407 --- /dev/null +++ b/poky/meta/recipes-support/re2c/re2c_2.0.bb @@ -0,0 +1,14 @@ +SUMMARY = "Tool for writing very fast and very flexible scanners" +HOMEPAGE = "http://re2c.sourceforge.net/" +AUTHOR = "Marcus Börger " +SECTION = "devel" +LICENSE = "PD" +LIC_FILES_CHKSUM = "file://LICENSE;md5=64eca4d8a3b67f9dc7656094731a2c8d" + +SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "89a9d7ee14be10e3779ea7b2c8ea4a964afce6e76b8dbcd5479940681db46d20" +UPSTREAM_CHECK_URI = "https://github.com/skvadrik/re2c/releases" + +BBCLASSEXTEND = "native nativesdk" + +inherit autotools diff --git a/poky/meta/recipes-support/sqlite/sqlite3_3.32.3.bb b/poky/meta/recipes-support/sqlite/sqlite3_3.32.3.bb deleted file mode 100644 index 
1d4e8d477..000000000 --- a/poky/meta/recipes-support/sqlite/sqlite3_3.32.3.bb +++ /dev/null @@ -1,11 +0,0 @@ -require sqlite3.inc - -LICENSE = "PD" -LIC_FILES_CHKSUM = "file://sqlite3.h;endline=11;md5=786d3dc581eff03f4fd9e4a77ed00c66" - -SRC_URI = "http://www.sqlite.org/2020/sqlite-autoconf-${SQLITE_PV}.tar.gz" -SRC_URI[md5sum] = "2e3911a3c15e85c2f2d040154bbe5ce3" -SRC_URI[sha256sum] = "a31507123c1c2e3a210afec19525fd7b5bb1e19a6a34ae5b998fbd7302568b66" - -# -19242 is only an issue in specific development branch commits -CVE_CHECK_WHITELIST += "CVE-2019-19242" diff --git a/poky/meta/recipes-support/sqlite/sqlite3_3.33.0.bb b/poky/meta/recipes-support/sqlite/sqlite3_3.33.0.bb new file mode 100644 index 000000000..611a1bd92 --- /dev/null +++ b/poky/meta/recipes-support/sqlite/sqlite3_3.33.0.bb @@ -0,0 +1,10 @@ +require sqlite3.inc + +LICENSE = "PD" +LIC_FILES_CHKSUM = "file://sqlite3.h;endline=11;md5=786d3dc581eff03f4fd9e4a77ed00c66" + +SRC_URI = "http://www.sqlite.org/2020/sqlite-autoconf-${SQLITE_PV}.tar.gz" +SRC_URI[sha256sum] = "106a2c48c7f75a298a7557bcc0d5f4f454e5b43811cc738b7ca294d6956bbb15" + +# -19242 is only an issue in specific development branch commits +CVE_CHECK_WHITELIST += "CVE-2019-19242" diff --git a/poky/scripts/lib/checklayer/__init__.py b/poky/scripts/lib/checklayer/__init__.py index f625d5989..fe545607b 100644 --- a/poky/scripts/lib/checklayer/__init__.py +++ b/poky/scripts/lib/checklayer/__init__.py @@ -229,6 +229,20 @@ def add_layers(bblayersconf, layers, logger): f.write("\nBBLAYERS += \"%s\"\n" % path) return True +def check_bblayers(bblayersconf, layer_path, logger): + ''' + If layer_path found in BBLAYERS return True + ''' + import bb.parse + import bb.data + + ldata = bb.parse.handle(bblayersconf, bb.data.init(), include=True) + for bblayer in (ldata.getVar('BBLAYERS') or '').split(): + if os.path.normpath(bblayer) == os.path.normpath(layer_path): + return True + + return False + def check_command(error_msg, cmd, cwd=None): ''' Run a command under a shell, capture stdout and stderr in a single stream, diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py index 2cfdc10ec..14c172357 100644 --- a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py +++ b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py @@ -13,6 +13,9 @@ import logging import os import shutil +import re + +from glob import glob from wic import WicError from wic.engine import get_custom_config @@ -209,6 +212,57 @@ class BootimgEFIPlugin(SourcePlugin): except KeyError: raise WicError("bootimg-efi requires a loader, none specified") + if get_bitbake_var("IMAGE_BOOT_FILES") is None: + logger.debug('No boot files defined in IMAGE_BOOT_FILES') + else: + boot_files = None + for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)): + if fmt: + var = fmt % id + else: + var = "" + + boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var) + if boot_files: + break + + logger.debug('Boot files: %s', boot_files) + + # list of tuples (src_name, dst_name) + deploy_files = [] + for src_entry in re.findall(r'[\w;\-\./\*]+', boot_files): + if ';' in src_entry: + dst_entry = tuple(src_entry.split(';')) + if not dst_entry[0] or not dst_entry[1]: + raise WicError('Malformed boot file entry: %s' % src_entry) + else: + dst_entry = (src_entry, src_entry) + + logger.debug('Destination entry: %r', dst_entry) + deploy_files.append(dst_entry) + + cls.install_task = []; + for deploy_entry in deploy_files: + src, dst = deploy_entry + if '*' in 
src: + # by default install files under their basename + entry_name_fn = os.path.basename + if dst != src: + # unless a target name was given, then treat name + # as a directory and append a basename + entry_name_fn = lambda name: \ + os.path.join(dst, + os.path.basename(name)) + + srcs = glob(os.path.join(kernel_dir, src)) + + logger.debug('Globbed sources: %s', ', '.join(srcs)) + for entry in srcs: + src = os.path.relpath(entry, kernel_dir) + entry_dst_name = entry_name_fn(entry) + cls.install_task.append((src, entry_dst_name)) + else: + cls.install_task.append((src, dst)) @classmethod def do_prepare_partition(cls, part, source_params, creator, cr_workdir, @@ -238,6 +292,12 @@ class BootimgEFIPlugin(SourcePlugin): (staging_kernel_dir, kernel, hdddir, kernel) exec_cmd(install_cmd) + if get_bitbake_var("IMAGE_BOOT_FILES"): + for src_path, dst_path in cls.install_task: + install_cmd = "install -m 0644 -D %s %s" \ + % (os.path.join(kernel_dir, src_path), + os.path.join(hdddir, dst_path)) + exec_cmd(install_cmd) try: if source_params['loader'] == 'grub-efi': diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-partition.py b/poky/scripts/lib/wic/plugins/source/bootimg-partition.py index 138986a71..5dbe2558d 100644 --- a/poky/scripts/lib/wic/plugins/source/bootimg-partition.py +++ b/poky/scripts/lib/wic/plugins/source/bootimg-partition.py @@ -141,7 +141,7 @@ class BootimgPartitionPlugin(SourcePlugin): break if not kernel_name: - raise WicError('No kernel file founded') + raise WicError('No kernel file found') # Compose the extlinux.conf extlinux_conf = "default Yocto\n" diff --git a/poky/scripts/runqemu b/poky/scripts/runqemu index f2168c18a..7fb5f7db5 100755 --- a/poky/scripts/runqemu +++ b/poky/scripts/runqemu @@ -478,7 +478,8 @@ class BaseConfig(object): self.qemu_opt_script += ' -display gtk,show-cursor=on' elif arg == 'gl' or arg == 'gl-es': # These args are handled inside sdl or gtk blocks above - pass + if ('gtk' not in sys.argv) and ('sdl' not in sys.argv): + raise RunQemuError('Option %s also needs gtk or sdl option.' % (arg)) elif arg == 'egl-headless': self.set_dri_path() self.qemu_opt_script += ' -vga virtio -display egl-headless,show-cursor=on' @@ -585,10 +586,10 @@ class BaseConfig(object): logger.error("For further help see:") raise RunQemuError(yocto_paravirt_kvm_wiki) - if not os.access(dev_kvm, os.W_OK|os.R_OK): + if not os.access(dev_vhost, os.W_OK|os.R_OK): logger.error("You have no read or write permission on /dev/vhost-net.") logger.error("Please change the ownership of this file as described at:") - raise RunQemuError(yocto_kvm_wiki) + raise RunQemuError(yocto_paravirt_kvm_wiki) def check_fstype(self): """Check and setup FSTYPE""" diff --git a/poky/scripts/yocto-check-layer b/poky/scripts/yocto-check-layer index ca6c79bc8..b7c83c8b5 100755 --- a/poky/scripts/yocto-check-layer +++ b/poky/scripts/yocto-check-layer @@ -24,7 +24,7 @@ import scriptpath scriptpath.add_oe_lib_path() scriptpath.add_bitbake_lib_path() -from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures +from checklayer import LayerType, detect_layers, add_layers, add_layer_dependencies, get_signatures, check_bblayers from oeqa.utils.commands import get_bb_vars PROGNAME = 'yocto-check-layer' @@ -138,6 +138,13 @@ def main(): layer['type'] == LayerType.ERROR_BSP_DISTRO: continue + if check_bblayers(bblayersconf, layer['path'], logger): + logger.info("%s already in %s. To capture initial signatures, layer under test should not present " + "in BBLAYERS. 
Please remove %s from BBLAYERS." % (layer['name'], bblayersconf, layer['name'])) + results[layer['name']] = None + results_status[layer['name']] = 'SKIPPED (Layer under test should not present in BBLAYERS)' + continue + logger.info('') logger.info("Setting up for %s(%s), %s" % (layer['name'], layer['type'], layer['path'])) -- cgit v1.2.3 From c9f7865a347606a64696048817b0f09d9c3fcd31 Mon Sep 17 00:00:00 2001 From: Andrew Geissler Date: Fri, 18 Sep 2020 14:11:35 -0500 Subject: poky: subtree update:c67f57c09e..c6bc20857c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adrian Freihofer (2): oe-publish-sdk: fix layers init via ssh oe-publish-sdk: add --keep-orig option Alexander Kanavin (68): meta-selftest: correct the virgl test for 5.8 kernels bison: upgrade 3.6.4 -> 3.7.1 util-linux: upgrade 2.35.2 -> 2.36 python3-numpy: upgrade 1.19.0 -> 1.19.1 python3-setuptools: upgrade 49.3.1 -> 49.6.0 rsync: upgrade 3.2.2 -> 3.2.3 util-linux: merge .inc into .bb acpica: upgrade 20200528 -> 20200717 asciidoc: upgrade 9.0.1 -> 9.0.2 cryptodev: upgrade 1.10 -> 1.11 diffoscope: upgrade 153 -> 156 epiphany: upgrade 3.36.3 -> 3.36.4 font-alias: upgrade 1.0.3 -> 1.0.4 gtk+3: upgrade 3.24.21 -> 3.24.22 libcheck: upgrade 0.15.0 -> 0.15.2 libinput: upgrade 1.16.0 -> 1.16.1 libpipeline: upgrade 1.5.2 -> 1.5.3 libx11: upgrade 1.6.9 -> 1.6.11 linux-firmware: upgrade 20200619 -> 20200721 man-pages: upgrade 5.07 -> 5.08 mc: upgrade 4.8.24 -> 4.8.25 mesa: upgrade 20.1.4 -> 20.1.5 piglit: upgrade to latest revision re2c: upgrade 2.0 -> 2.0.2 sysstat: upgrade 12.2.2 -> 12.4.0 vala: upgrade 0.48.7 -> 0.48.9 bootchart2: update 0.14.8 -> 0.14.9 harfbuzz: convert to meson, enable gobject introspection pango: update 1.44.7 -> 1.46.0 boost: update 1.73.0 -> 1.74.0 xev: update 1.2.3 -> 1.2.4 wpebackend-fdo: update 1.6.1 -> 1.7.1 gpgme: update 1.13.1 -> 1.14.0 libpsl: update 0.21.0 -> 0.21.1. 
gettext: update 0.20.2 -> 0.21 cmake: update 3.17.3 -> 3.18.1 linux-firmware: update 20200721 -> 20200817 meson: update 0.55.0 -> 0.55.1 systemd-boot: bump version to 246.2 json-glib: inherit upstream-version-is-even packagegroup-core-device-devel: remove oeqa/x32lib: rework to use readelf from the host oeqa/multilib: rework to use readelf from the host oeqa/multilib: un-skip the connman test poky.conf: do not install packagegroup-core-device-devel into qemu images glib-2.0: update 2.64.4 -> 2.64.5 cmake: upgrade 3.18.1 -> 3.18.2 libxcrypt: upgrade 4.4.16 -> 4.4.17 debianutils: upgrade 4.11 -> 4.11.1 enchant2: upgrade 2.2.8 -> 2.2.9 harfbuzz: upgrade 2.7.1 -> 2.7.2 libmpc: upgrade 1.1.0 -> 1.2.0 librepo: upgrade 1.12.0 -> 1.12.1 libuv: upgrade 1.38.1 -> 1.39.0 msmtp: upgrade 1.8.11 -> 1.8.12 ninja: upgrade 1.10.0 -> 1.10.1 p11-kit: upgrade 0.23.20 -> 0.23.21 pango: upgrade 1.46.0 -> 1.46.1 re2c: upgrade 2.0.2 -> 2.0.3 resolvconf: upgrade 1.82 -> 1.83 stress-ng: upgrade 0.11.18 -> 0.11.19 gnu-config: update to latest revision nasm: update 2.15.03 -> 2.15.05 libva-utils: fix upstream version check gnupg: update 2.2.21 -> 2.2.22 libx11: update 1.6.11 -> 1.6.12 mesa: update 20.1.5 -> 20.1.6 xserver-xorg: update 1.20.8 -> 1.20.9 Andrey Zhizhikin (1): insane: check for missing update-alternatives inherit Anibal Limon (1): recipes-kernel: linux-firmware add qcom-venus-{5.2,5.4} packages Aníbal Limón (1): recipes-graphics/xorg-xserver: Add patch to fix segfault when probe Armin Kuster (2): bind: update to 9.11.22 ESV core-image-sato: qemumips use 512 mem Bruce Ashfield (30): linux-yocto/5.4: update to v5.4.59 linux-yocto/5.8: update to v5.8.2 yocto-bsp: update to v5.4.56 yocto-bsp: update to v5.4.58 qemu: bump default reference kernel to v5.8 linux-yocto/5.8: fix perf and virtio_scsi warnings linux-yocto-rt/5.8: fix lttng-modules build linux-yocto/5.8: selftests/bpf: Prevent runqslower from racing on building bpftool linux-yocto/5.8: disable CONFIG_NFS_DISABLE_UDP_SUPPORT poky: set preferred version for linux-yocto to be v5.8 poky-tiny: set preferred version to 5.8 poky: add preferred version for linux-yocto-rt linux-yocto/5.8: update to v5.8.3 linux-yocto/5.4: update to v5.4.60 kernel: config cleanups for 5.8+ linux-yocto/5.4: update to v5.4.61 linux-yocto/5.8: update to v5.8.4 linux-yocto/5.8: disable IKHEADERS in default builds kernel-yocto: allow promotion of configuration warnings to errors kernel-yocto: checksum all modifications to available kernel fragments directories lttng-modules/devupstream: bump to latest 2.12 commits linux-yocto-dev: bump to v5.9+ linux-yocto/5.8: update to v5.8.5 kernel-devsrc: account for HOSTCC and HOSTCXX linux-yocto/config: netfilter: Enable nat for ipv4 and ipv6 linux-yocto/5.8: update to v5.8.8 linux-yocto/5.4: update to v5.4.64 linux-yocto/config: configuration warning cleanup linux-yocto/5.8: update to v5.8.9 linux-yocto/5.4: update to v5.4.65 Changhyeok Bae (2): iw: upgrade 5.4 -> 5.8 iputils: upgrade s20190709 -> s20200821 Chris Laplante (12): bitbake: compat.py: remove file since it no longer actually implements anything bitbake: COW: formatting bitbake: COW: migrate test suite into tests/cow cve-update-db-native: add progress handler cve-check/cve-update-db-native: use lockfile to fix usage under multiconfig cve-update-db-native: use context manager for cve_f cve-check: avoid FileNotFoundError if no do_cve_check task has run bitbake: utils: process_profilelog: use context manager bitbake: utils: fix UnboundLocalError when _print_exception raises 
cve-update-db-native: be less magical about checking whether the cve-check class is enabled cve-update-db-native: move -journal checking into do_fetch cve-update-db-native: remove unused variable Christophe GUIBOUT (1): initramfs-framework: support kernel cmdline with double quotes Denys Dmytriyenko (2): weston: upgrade 8.0.0 -> 9.0.0 cryptodev: bump 1 commit past 1.11 to fix 5.9-rc1+ Diego Sueiro (2): license_image.bbclass: Create symlink to the image license manifest dir license_image.bbclass: Fix symlink to the image license manifest dir creation Douglas Royds (1): tcmode-default: Drop gcc-cross-initial, gcc-crosssdk-initial references Frazer Clews (1): bitbake: lib: fix most undefined code picked up by pylint Geoff Parker (1): systemd-serialgetty: Replace sed quoting using ' with " to allow var expansion Jacob Kroon (1): gcc10: Don't default back to -fcommon Jean-Francois Dagenais (1): bitbake: siggen: clean_basepath: remove recipe full path when virtual:xyz present Jens Rehsack (1): lttng-modules: backport patches from 2.12.x to fix 5.4.64+ and 5.8.9+ builds Joe Slater (1): pseudo: fix renaming to self Jon Mason (4): cortex-m0plus.inc: change file permissions tune-cortexa55.inc: clean-up ARMv8.2a uses tune-cortexa57-cortexa53.inc: add CRC and set march tune-cortexa*: Cleanups Joshua Watt (8): wic: Add 512 Byte alignment to --offset oeqa: runtime_tests: Extra GPG debugging oeqa: sdk: Capture stderr output oeqa: reproducible: Fix test not producing diffs diffoscope: upgrade 156 -> 158 bitbake: bitbake: Add parsing torture test bitbake: cooker: Block SIGINT in worker processes sphinx: dev-manual: Clarify that virtual providers do not apply to runtime dependencies Kai Kang (1): dhcpcd: 9.1.4 -> 9.2.0 Kevin Hao (1): meta-yocto-bsp: Bump to the v5.8 kernel Khairul Rohaizzat Jamaluddin (1): wic/bootimg-efi: IMAGE_EFI_BOOT_FILES variable added to separate bootimg-efi and bootimg-partition Khem Raj (24): gcc-cross-canadian: Install gcc/g++ wrappers for musl uninative: Upgrade to 2.9 packagegroup-core-tools-profile: Disable lttng-modules for riscv64 lttng-modules: Disable on riscv64 kexec-tools: Fix build with -fno-common on ppc lttng-tools: Do not build for riscv64 util-linux: Allow update alternatives for additional apps lttng-tools: lttng-ust works on riscv64 json-glib: Backport a build fix with clang rpcbind: Use update-alternatives for rpcinfo go: Upgrade to 1.15 major release weston-init: Redefine weston service and add socket activation option musl: Upgrade to latest master libucontext: Recognise riscv32 architecture linuxloader.bbclass: Define riscv32 ldso for musl populate_sdk_ext: Do not assume local.conf will always exist weston: plane_add_prop() calls break musl atomic modesetting weston-init: Enable RDP screen share weston-init: Do not use fbdev backend weston-init: Select drm/fbdev backends for qemu machines oeqa/weston: Fix tests to run with systemd core-image-weston: Bump qemu memory to 512M go: Update to 1.15.2 minor release bind: Inherit update-alternatives Mark Hatle (6): package_tar.bbclass: Sync to the other package_* classes kernel.bbclass: Remove do_install[prefunc] no longer needed buildhistory.bbclass: Rework to use read_subpackage_metadata kernel.bbclass: Move away from calling package_get_auto_pr package.bbclass: hash equivalency and pr service bitbake: process.py: Handle SystemExit exception to eliminate backtrace Mark Morton (1): sphinx: test-manual code block, link, and format update Martin Jansa (7): devtool: expand SRC_URI when guessing recipe update mode 
  image-artifact-names: introduce new bbclass and move some variables into it
  kernel.bbclass: use bash variables like imageType, base_name without {}
  kernel.bbclass: eliminate (initramfs_)symlink_name variables
  kernel.bbclass: use camelCase notation for bash variables in do_deploy
  *-initramfs: don't use .rootfs IMAGE_NAME_SUFFIX
  bitbake.conf: use ${TCMODE}-${TCLIBC} directory for CACHE

Matt Madison (1):
  image.bbclass: fix REPRODUCIBLE_TIMESTAMP_ROOTFS reference

Michael Gloff (2):
  sysvinit rc: Use PSPLASH_FIFO_DIR for progress fifo
  sysvinit: Remove ${B} assignment

Michael Tretter (1):
  devtool: deploy-target: Fix size calculation for hard links

Ming Liu (2):
  systemd: split systemd specific udev rules into its own package
  libubootenv: inherit uboot-config

Mingli Yu (3):
  qemu: always define unknown_lock_type
  qemu: override DEBUG_BUILD
  bison: remove the parallel build patch

Naveen Saini (1):
  lib/oe/recipeutils.py: add support for BBFILES_DYNAMIC

Nicolas Dechesne (73):
  linux-libc-headers: kernel headers are installed in STAGING_KERNEL_BUILDDIR
  bitbake: sphinx: add initial build infrastructure
  bitbake: sphinx: initial sphinx support
  bitbake: sphinx: bitbake-user-manual: use builtin sphinx glossary
  bitbake: sphinx: switch to readthedocs theme
  bitbake: sphinx: override theme CSS
  bitbake: sphinx: fixup for links
  bitbake: sphinx: fix links inside notes
  bitbake: sphinx: fixes all remaining warnings
  bitbake: sphinx: Makefile.sphinx: add clean and publish targets
  bitbake: sphinx: tweak html output a bit
  bitbake: sphinx: add SPDX headers
  bitbake: sphinx: index: move the boilerplate at the end of the page
  bitbake: sphinx: conf: enable extlinks extension
  bitbake: sphinx: add releases page
  bitbake: sphinx: bitbake-user-manual: insert additional blank line after title
  bitbake: sphinx: last manual round of fixes/improvements
  bitbake: sphinx: update style for important, caution and warnings
  bitbake: sphinx: remove leading '/'
  bitbake: sphinx: theme_override: properly set font for verbatim text
  bitbake: bitbake-user-manual: fix bad links
  sphinx: add initial build infrastructure
  sphinx: initial sphinx support
  sphinx: ref-variables: use builtin sphinx glossary
  sphinx: overview-manual: add figures
  sphinx: switch to readthedocs theme
  sphinx: Add SPDX license headers
  sphinx: add CSS theme override
  sphinx: bsp-guide: add figures
  sphinx: add Yocto project logo
  sphinx: conf: update copyright
  sphinx: conf: add substitutions/global variables
  sphinx: add boilerplate file
  sphinx: add boilerplate to manuals
  sphinx: ref-manual: add revision history table
  sphinx: add a general index
  sphinx: conf.py: enable sphinx.ext.autosectionlabel
  sphinx: ref-manual: use builtin glossary for the Terms section
  sphinx: fix internal links
  sphinx: ref-manual: fix typo
  sphinx: fix custom term links
  sphinx: manual updates for some links
  sphinx: dev-manual add figures
  sphinx: kernel-dev: add figures
  sphinx: profile-manual: add figures
  sphinx: fix up bold text for informalexample container
  sphinx: ref-manual: add figures
  sphinx: sdk-manual: add figures
  sphinx: test-manual: add figures
  sphinx: toaster-manual: add figures
  sphinx: add links for Yocto project website
  sphinx: fix links when the link text should be displayed
  sphinx: add links to terms in the BitBake glossary
  sphinx: add links to section in the Bitbake manual
  sphinx: setup extlink for docs.yoctoproject.org
  sphinx: enable intersphinx extension
  sphinx: insert blank below between title and toc
  sphinx: fix up terms related to kernel-fitimage
  sphinx: conf: a few rendering tweaks
  sphinx: makefile: add publish target
  sphinx: conf: include CSS/JS files, the proper way
  sphinx: convert 'what I wish I'd known'
  sphinx: convert 'transitioning to a custom environment'
  sphinx: ref-manual: fix heading for oe-init-build-env
  sphinx: brief-yoctoprojectqs: fix up all remaining rendering issues
  sphinx: Makefile.sphinx improvements
  sphinx: convert bsp-guide
  sphinx: remove leading '/'
  sphinx: update style for important, caution and warnings
  sphinx: profile-manual: convert profile-manual
  sphinx: theme_override: properly set font for verbatim text
  sphinx: theme_override: add tying-it-together admonition
  sphinx: conf: exclude adt-manual/*.rst

Oleksandr Kravchuk (1):
  ell: update to 0.33

Ovidiu Panait (1):
  libxml2: Fix CVE-2020-24977

Peter A. Bigot (2):
  bluez5: fix builds that require ell support
  timezone: include leap second data in tzdata-core

Peter Bergin (1):
  systemd: avoid failing if no udev rules provided

Pierre-Jean Texier (2):
  libubootenv: upgrade 0.3 -> 0.3.1
  diffoscope: upgrade 158 -> 160

Quentin Schulz (16):
  sphinx: brief-yoctoprojectqs: remove redundant welcome
  sphinx: brief-yoctoprojectqs: fix ambiguous note for cyclone5 example
  sphinx: brief-yoctoprojectqs: add missing boilerplate
  sphinx: overview-manual: add link to AUH how-to section
  sphinx: overview-manual: fix bitbake basic explanation
  sphinx: brief-yoctoprojectqs: add note on branch consistency between layers
  sphinx: what-i-wish-id-known: update "don't be fooled by doc search results"
  sphinx: overview-manual: remove highlight in bold section
  sphinx: replace special quotes with single and double quotes
  sphinx: fix incorrect indentations
  sphinx: brief-yoctoprojectqs: put other distros note after Ubuntu-specific packages
  sphinx: fix a few typos or missing/too many words
  sphinx: "highlight" some variables, tasks or files
  sphinx: fix or add missing links and remove mention of Eclipse workflow
  ref-manual: examples: hello-autotools: upgrade to 2.10
  ref-manual: examples: libxpm: add relative path to .inc

Rahul Kumar (1):
  systemd-serialgetty: Fix sed expression quoting

Rasmus Villemoes (1):
  kernel.bbclass: run do_symlink_kernsrc before do_patch

Richard Purdie (74):
  nativesdk-sdk-provides-dummy: Add /bin/sh
  bitbake: fetch2/wget: Remove buffering parameter
  bitbake: cooker: Ensure parse_quit thread is closed down
  bitbake: cooker: Explictly shut down the sync thread
  bitbake: fetch2: Drop cups.org from wget status checks
  bitbake: build/msg: Cleanup verbose option handling
  bitbake: cooker/cookerdata/main: Improve loglevel handling
  bitbake: cookerdata: Ensure UI options are updated to the server
  bitbake: cooker/cookerdata: Ensure UI event log is updated from commandline
  bitbake: cooker: Defer configuration init to after UI connection
  bitbake: server/process: Move the socket code to server process only
  bitbake: main/server/process: Drop configuration object passing
  bitbake: cooker: Ensure BB_ORIGENV is updated by changes to configuration.env
  bitbake: server/process: Log extra threads at exit
  bitbake: server/process: Add bitbake-server and exec() a new server process
  bitbake: runqueue: Don't use sys.argv
  bitbake: cooker: Ensure cooker's enviroment is updated on updateConfig
  connman-gnome/matchbox-desktop: Remove file:// globbing
  selftest/recipetool: Drop globbing SRC_URI test, no longer supported
  local.conf.sample: Document memory resident bitbake
  bitbake: fetch2: Drop globbing supprt in file:// SRC_URIs
  bitbake: server/process: Use sys.executable for bitbake-server
  bitbake: process: Avoid bb.utils.timeout
  bitbake: utils: Drop broken timeout function
  bitbake: server/process: Fix typo in code causing tracebacks
  oeqa/selftest: Apply patch to fix cpio build with -fno-common
  runqemu: Show an error for conflicting graphics options
  lttng: Move platform logic to dedicated inc file
  patchelf: upgrade 0.11 -> 0.12
  build-appliance/packagegroup-core-base-utils: Replace dhcp-client/dhcp-server with dhcpcd/kea
  selftest/prservice: Improve test failure message
  iputils: Adapt ${PN}-tftpd package dependency to PACKAGECONFIG
  bitbake: process/knotty: Improve early exception handling
  bitbake: cooker/cookerdata: Use BBHandledException, not sys.exit()
  bitbake: cookerdata: Fix exception raise statements
  bitbake: process: Avoid printing binary strings for leftover processes
  bitbake: server/process: Ensure logging is flushed
  bitbake: server/process: Don't show tracebacks if the lockfile is removed
  bitbake: cooker: Ensure parser replacement calls parser final_cleanup
  bitbake: cooker: Assign a name to the sync thread to aid debugging
  bitbake: server/process: Ensure we don't keep looping if some other server is started
  bitbake: server/process: Prefix the log data with pid/time information
  bitbake: server/process: Note when commands complete in logs
  bitbake: cooker: Ensure parser is cleaned up
  runqemu: Add a hook to allow it to renice
  bitbake: cooker: Avoid parser deadlocks
  bitbake: cooker: Ensure parser worker signal handlers are default
  selftest/signing: Ensure build path relocation is safe
  oeqa/concurrencytest: Improve builddir path manipulations
  bitbake: cooker/command: Fix disconnection handling
  bitbake: tinfoil: Ensure sockets don't leak even when exceptions occur
  bitbake: tests/fetch: Move away from problematic freedesktop.org urls
  bitbake: sphinx: Enhance the sphinx experience/nagivation with:
  bitbake: sphinx: theme_override: Use bold for emphasis text
  Revert "qemu: always define unknown_lock_type"
  Revert "core-image-sato: qemumips use 512 mem"
  sphinx: Organize top level docs
  sphinx: releases.rst: Add index/links to docs for previous releases
  sphinx: boilerplate.rst: Drop versions notes as we have better navigation now
  sphinx: boilerplate.rst: Sphinx puts the copyright elsewhere
  sphinx: history: Move revision history to its own section
  sphinx: manuals: Move boilerplate after toctree
  sphinx: Add support for multiple docs version
  sphinx: index.rst: Fix links
  sphinx: ref-system-requirements: Improve formatting of the notes sections, merging them
  sphinx: ref-manual links fixes and many other cleanups to import
  sphinx: dev-manual: Various URL, code block and other fixes to imported data
  sphinx: sdk-manual: Various URL, code block and other fixes to imported data
  sphinx: kernel-dev: Various URL, code block and other fixes to imported data
  sphinx: theme_override: Use bold for emphasis text
  sphinx: ref-tasks: Add populate_sdk_ext task definition
  sphinx: ref-manual/migration: Split each release into its own file
  sphinx: overview-manual: Various URL, code block and other fixes to imported data
  build-appliance-image: Update to master head revision

Robert Yang (3):
  bitbake: cooker.py: Save prioritized BBFILES to BBFILES_PRIORITIZED
  bitbake: utils.py: get_file_layer(): Exit the loop when file is matched
  bitbake: utils.py: get_file_layer(): Improve performance

Ross Burton (25):
  package.bbclass: explode the RPROVIDES so we don't think the versions are provides
  elfutils: silence a new QA warning
  insane: improve gnu-hash-style warning
  gdk-pixbuf: add tests PACKAGECONFIG
  debianutils: change SRC_URI to use snapshot.debian.org
  insane: only load real files as ELF
  autoconf: consolidate SRC_URI
  autoconf: consolidate DEPENDS
  kea: no need to depend on kea-native
  kea: don't use PACKAGECONFIG inappropriately
  kea: bump to 1.7.10
  help2man: rewrite recipe
  local.conf.sample.extended: remove help2man reference
  curl: add vendors to CVE_PRODUCT to exclude false positives
  harfbuzz: update patch status
  harfbuzz: fix a build race around hb-version.h
  cmake: whitelist CVE-2016-10642
  ncurses: remove config.cache
  qemu: fix CVE-2020-14364
  cve-update-db-native: remove unused import
  cve-update-db-native: add more logging when fetching
  cve-update-db-native: use fetch task
  alsa-plugins: improve .la removal
  sato-screenshot: improve .la removal
  buildhistory-diff: use BUILDDIR to know where buildhistory is

Saul Wold (1):
  gnupg: uprev 2.2.22 -> 2.2.23

Stacy Gaikovaia (2):
  bison: uprev from 3.7.1 to 3.7.2
  valgrind: fix memcheck vgtests remove fullpath-after flags

Steve Sakoman (1):
  xinput-calibrator: change SRC_URI to branch with libinput support

Sumit Garg (1):
  insane: fix gnu-hash-style check

TeohJayShen (1):
  oeqa/runtime: add test for matchbox-terminal

Tim Orling (1):
  sphinx: toaster-manual: fix vars, links, code blocks

Vijai Kumar K (2):
  image_types_wic: Add ASSUME_PROVIDED to WICVARS
  wic: misc: Add /bin to the list of searchpaths

Yanfei Xu (1):
  kernel-yocto: only replace leading -I in include paths

Yi Zhao (1):
  glib-networking: add ptest

Zhixiong Chi (1):
  gnutls: CVE-2020-24659

akuster (8):
  log4cplus: move meta-oe pkg to core
  kea: Move from meta-networking
  maintainers.inc: Add me as kea & log4plus maintainer.
  dhcpcd: Move from meta-network as OE-Core needs a client
  maintainers.inc: Add me as dhcpcd maintainer
  dhcp: remove from core
  bind: Add 9.16.x
  bind: 9.11 remove

hongxu (1):
  sysstat: fix installed-vs-shipped QA Issue in systemd

zangrc (4):
  libcap:upgrade 2.42 -> 2.43
  libcap-ng:upgrade 0.7.10 -> 0.7.11
  libgpg-error:upgrade 1.38 -> 1.39
  at-spi2-core:upgrade 2.36.0 -> 2.36.1

Signed-off-by: Andrew Geissler
Change-Id: I5542f5eea751a2641342e945725fd687cd74bebe
---
 poky/bitbake/bin/bitbake-server | 54 + poky/bitbake/bin/bitbake-worker | 4 +- poky/bitbake/contrib/bbparse-torture.py | 89 + poky/bitbake/doc/.gitignore | 1 + poky/bitbake/doc/Makefile.sphinx | 31 + poky/bitbake/doc/_templates/breadcrumbs.html | 14 + poky/bitbake/doc/_templates/layout.html | 7 + .../bitbake-user-manual-execution.rst | 733 ++ .../bitbake-user-manual-fetching.rst | 652 + .../bitbake-user-manual-hello.rst | 415 + .../bitbake-user-manual-intro.rst | 651 + .../bitbake-user-manual-metadata.rst | 1969 ++++ .../bitbake-user-manual-ref-variables.rst | 1372 +++ poky/bitbake/doc/conf.py | 94 + poky/bitbake/doc/genindex.rst | 3 + poky/bitbake/doc/index.rst | 38 + poky/bitbake/doc/releases.rst | 130 + poky/bitbake/doc/sphinx-static/switchers.js | 233 + poky/bitbake/doc/sphinx-static/theme_overrides.css | 164 + poky/bitbake/lib/bb/COW.py | 150 +- poky/bitbake/lib/bb/__init__.py | 2 +- poky/bitbake/lib/bb/build.py | 7 +- poky/bitbake/lib/bb/cache.py | 2 +- poky/bitbake/lib/bb/command.py | 12 +- poky/bitbake/lib/bb/compat.py | 10 - poky/bitbake/lib/bb/cooker.py | 105 +- poky/bitbake/lib/bb/cookerdata.py | 44 +- poky/bitbake/lib/bb/daemonize.py | 2 + poky/bitbake/lib/bb/data_smart.py | 2 +- poky/bitbake/lib/bb/event.py | 16 +- poky/bitbake/lib/bb/fetch2/__init__.py | 12 - poky/bitbake/lib/bb/fetch2/local.py | 15 +- poky/bitbake/lib/bb/fetch2/osc.py | 3 + poky/bitbake/lib/bb/fetch2/ssh.py | 7 +- poky/bitbake/lib/bb/fetch2/wget.py | 5 +- poky/bitbake/lib/bb/main.py | 18 +- poky/bitbake/lib/bb/msg.py | 7 +-
poky/bitbake/lib/bb/namedtuple_with_abc.py | 14 +- poky/bitbake/lib/bb/persist_data.py | 8 +- poky/bitbake/lib/bb/process.py | 1 + poky/bitbake/lib/bb/runqueue.py | 13 +- poky/bitbake/lib/bb/server/process.py | 259 +- poky/bitbake/lib/bb/siggen.py | 2 +- poky/bitbake/lib/bb/tests/cow.py | 218 +- poky/bitbake/lib/bb/tests/data.py | 1 + poky/bitbake/lib/bb/tests/event.py | 17 +- poky/bitbake/lib/bb/tests/fetch.py | 14 +- poky/bitbake/lib/bb/tinfoil.py | 38 +- poky/bitbake/lib/bb/ui/knotty.py | 23 +- poky/bitbake/lib/bb/ui/ncurses.py | 2 + poky/bitbake/lib/bb/ui/uievent.py | 6 +- poky/bitbake/lib/bb/utils.py | 62 +- poky/bitbake/lib/bblayers/query.py | 12 +- poky/bitbake/lib/hashserv/tests.py | 1 + poky/bitbake/lib/layerindexlib/__init__.py | 15 +- poky/bitbake/lib/layerindexlib/cooker.py | 7 +- poky/bitbake/lib/layerindexlib/restapi.py | 6 +- poky/bitbake/lib/layerindexlib/tests/restapi.py | 2 +- poky/bitbake/lib/ply/lex.py | 6 +- poky/bitbake/lib/ply/yacc.py | 2 +- .../toaster/tests/functional/functional_helpers.py | 8 +- poky/documentation/.gitignore | 1 + poky/documentation/Makefile.sphinx | 31 + poky/documentation/_templates/breadcrumbs.html | 14 + poky/documentation/_templates/layout.html | 7 + poky/documentation/adt-manual/adt-command.rst | 180 + poky/documentation/adt-manual/adt-intro.rst | 138 + poky/documentation/adt-manual/adt-manual-intro.rst | 24 + poky/documentation/adt-manual/adt-manual.rst | 17 + poky/documentation/adt-manual/adt-package.rst | 70 + poky/documentation/adt-manual/adt-prepare.rst | 752 ++ poky/documentation/adt-manual/adt-prepare.xml | 4 +- poky/documentation/boilerplate.rst | 18 + .../brief-yoctoprojectqs/brief-yoctoprojectqs.rst | 430 + poky/documentation/bsp-guide/bsp-guide.rst | 16 + poky/documentation/bsp-guide/bsp.rst | 1527 +++ poky/documentation/bsp-guide/history.rst | 73 + poky/documentation/conf.py | 121 + .../dev-manual/dev-manual-common-tasks.rst | 11802 +++++++++++++++++++ .../dev-manual/dev-manual-common-tasks.xml | 6 +- poky/documentation/dev-manual/dev-manual-intro.rst | 61 + poky/documentation/dev-manual/dev-manual-qemu.rst | 470 + poky/documentation/dev-manual/dev-manual-qemu.xml | 8 +- poky/documentation/dev-manual/dev-manual-start.rst | 940 ++ poky/documentation/dev-manual/dev-manual.rst | 19 + poky/documentation/dev-manual/history.rst | 67 + .../figures/yp-how-it-works-new-diagram.png | Bin 0 -> 249657 bytes poky/documentation/genindex.rst | 3 + poky/documentation/index.rst | 53 + poky/documentation/kernel-dev/history.rst | 58 + .../kernel-dev/kernel-dev-advanced.rst | 983 ++ .../documentation/kernel-dev/kernel-dev-common.rst | 2078 ++++ .../kernel-dev/kernel-dev-concepts-appx.rst | 426 + .../kernel-dev/kernel-dev-concepts-appx.xml | 2 +- poky/documentation/kernel-dev/kernel-dev-faq.rst | 81 + poky/documentation/kernel-dev/kernel-dev-intro.rst | 183 + .../kernel-dev/kernel-dev-maint-appx.rst | 239 + poky/documentation/kernel-dev/kernel-dev.rst | 21 + poky/documentation/overview-manual/history.rst | 28 + .../overview-manual/overview-manual-concepts.rst | 2185 ++++ .../overview-manual-development-environment.rst | 672 ++ .../overview-manual-development-environment.xml | 12 +- .../overview-manual/overview-manual-intro.rst | 74 + .../overview-manual/overview-manual-yp-intro.rst | 941 ++ .../overview-manual/overview-manual-yp-intro.xml | 12 +- .../overview-manual/overview-manual.rst | 19 + poky/documentation/poky.yaml | 89 + poky/documentation/profile-manual/history.rst | 58 + .../profile-manual/profile-manual-arch.rst | 29 + 
.../profile-manual/profile-manual-examples.rst | 24 + .../profile-manual/profile-manual-intro.rst | 79 + .../profile-manual/profile-manual-usage.rst | 2624 +++++ .../profile-manual/profile-manual.rst | 19 + .../examples/hello-autotools/hello_2.10.bb | 9 + .../examples/hello-autotools/hello_2.3.bb | 8 - .../ref-manual/examples/libxpm/libxpm_3.5.6.bb | 2 +- poky/documentation/ref-manual/faq.rst | 451 + poky/documentation/ref-manual/faq.xml | 2 +- poky/documentation/ref-manual/history.rst | 74 + poky/documentation/ref-manual/migration-1.3.rst | 195 + poky/documentation/ref-manual/migration-1.4.rst | 237 + poky/documentation/ref-manual/migration-1.5.rst | 355 + poky/documentation/ref-manual/migration-1.6.rst | 417 + poky/documentation/ref-manual/migration-1.7.rst | 225 + poky/documentation/ref-manual/migration-1.8.rst | 183 + poky/documentation/ref-manual/migration-2.0.rst | 281 + poky/documentation/ref-manual/migration-2.1.rst | 434 + poky/documentation/ref-manual/migration-2.2.rst | 451 + poky/documentation/ref-manual/migration-2.3.rst | 530 + poky/documentation/ref-manual/migration-2.4.rst | 327 + poky/documentation/ref-manual/migration-2.5.rst | 310 + poky/documentation/ref-manual/migration-2.6.rst | 476 + poky/documentation/ref-manual/migration-2.7.rst | 180 + poky/documentation/ref-manual/migration-3.0.rst | 321 + poky/documentation/ref-manual/migration-3.1.rst | 276 + .../documentation/ref-manual/migration-general.rst | 54 + poky/documentation/ref-manual/migration.rst | 30 + poky/documentation/ref-manual/ref-classes.rst | 2963 +++++ .../ref-manual/ref-devtool-reference.rst | 625 + poky/documentation/ref-manual/ref-features.rst | 353 + poky/documentation/ref-manual/ref-images.rst | 139 + poky/documentation/ref-manual/ref-images.xml | 4 +- poky/documentation/ref-manual/ref-kickstart.rst | 212 + poky/documentation/ref-manual/ref-manual.rst | 31 + poky/documentation/ref-manual/ref-qa-checks.rst | 533 + .../ref-manual/ref-release-process.rst | 193 + poky/documentation/ref-manual/ref-structure.rst | 890 ++ .../ref-manual/ref-system-requirements.rst | 437 + poky/documentation/ref-manual/ref-tasks.rst | 875 ++ poky/documentation/ref-manual/ref-terms.rst | 397 + poky/documentation/ref-manual/ref-terms.xml | 2 +- poky/documentation/ref-manual/ref-variables.rst | 8899 ++++++++++++++ poky/documentation/ref-manual/ref-varlocality.rst | 166 + poky/documentation/ref-manual/resources.rst | 197 + poky/documentation/releases.rst | 188 + poky/documentation/sdk-manual/history.rst | 40 + .../sdk-appendix-customizing-standard.rst | 34 + .../sdk-manual/sdk-appendix-customizing.rst | 377 + .../sdk-manual/sdk-appendix-obtain.rst | 321 + poky/documentation/sdk-manual/sdk-extensible.rst | 1356 +++ poky/documentation/sdk-manual/sdk-intro.rst | 231 + poky/documentation/sdk-manual/sdk-manual.rst | 22 + poky/documentation/sdk-manual/sdk-using.rst | 159 + .../sdk-manual/sdk-working-projects.rst | 423 + .../sphinx-static/YoctoProject_Logo_RGB.jpg | Bin 0 -> 49299 bytes poky/documentation/sphinx-static/switchers.js | 233 + .../sphinx-static/theme_overrides.css | 166 + poky/documentation/sphinx/yocto-vars.py | 38 + poky/documentation/test-manual/history.rst | 16 + .../test-manual/test-manual-intro.rst | 550 + .../test-manual/test-manual-intro.xml | 6 +- .../test-manual/test-manual-test-process.rst | 103 + .../test-manual-understand-autobuilder.rst | 305 + .../test-manual-understand-autobuilder.xml | 16 +- poky/documentation/test-manual/test-manual.rst | 18 + poky/documentation/toaster-manual/history.rst | 46 + 
.../toaster-manual/toaster-manual-intro.rst | 105 + .../toaster-manual/toaster-manual-reference.rst | 662 ++ .../toaster-manual-setup-and-use.rst | 651 + .../toaster-manual-setup-and-use.xml | 12 +- .../toaster-manual/toaster-manual-start.rst | 57 + .../toaster-manual/toaster-manual.rst | 19 + .../transitioning-to-a-custom-environment.rst | 116 + poky/documentation/what-i-wish-id-known.rst | 226 + poky/meta-poky/conf/distro/poky-tiny.conf | 2 +- poky/meta-poky/conf/distro/poky.conf | 11 +- poky/meta-poky/conf/local.conf.sample | 10 + poky/meta-poky/conf/local.conf.sample.extended | 3 - poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py | 2 +- .../files/selftest-replaceme-src-globfile | 1 - .../recipetool/selftest-recipetool-appendfile.bb | 2 - .../conf/machine/beaglebone-yocto.conf | 2 +- poky/meta-yocto-bsp/conf/machine/edgerouter.conf | 2 +- .../recipes-kernel/linux/linux-yocto_5.4.bbappend | 16 +- .../recipes-kernel/linux/linux-yocto_5.8.bbappend | 23 + poky/meta/classes/buildhistory.bbclass | 51 +- poky/meta/classes/cve-check.bbclass | 16 +- poky/meta/classes/image-artifact-names.bbclass | 15 + poky/meta/classes/image-live.bbclass | 2 +- poky/meta/classes/image.bbclass | 2 +- poky/meta/classes/image_types.bbclass | 9 +- poky/meta/classes/image_types_wic.bbclass | 5 +- poky/meta/classes/insane.bbclass | 27 +- poky/meta/classes/kernel-artifact-names.bbclass | 8 + poky/meta/classes/kernel-yocto.bbclass | 31 +- poky/meta/classes/kernel.bbclass | 26 +- poky/meta/classes/license_image.bbclass | 11 + poky/meta/classes/linuxloader.bbclass | 2 + poky/meta/classes/nopackages.bbclass | 1 + poky/meta/classes/package.bbclass | 60 +- poky/meta/classes/package_rpm.bbclass | 2 +- poky/meta/classes/package_tar.bbclass | 6 +- poky/meta/classes/populate_sdk_ext.bbclass | 5 +- poky/meta/classes/qemuboot.bbclass | 2 + poky/meta/classes/rootfs-postcommands.bbclass | 2 + poky/meta/classes/testimage.bbclass | 2 + poky/meta/conf/bitbake.conf | 11 +- poky/meta/conf/distro/defaultsetup.conf | 5 - poky/meta/conf/distro/include/distro_alias.inc | 4 - poky/meta/conf/distro/include/maintainers.inc | 6 +- poky/meta/conf/distro/include/tcmode-default.inc | 4 +- poky/meta/conf/distro/include/yocto-uninative.inc | 10 +- poky/meta/conf/layer.conf | 2 +- .../conf/machine/include/tune-cortex-m0plus.inc | 0 poky/meta/conf/machine/include/tune-cortexa32.inc | 2 +- poky/meta/conf/machine/include/tune-cortexa35.inc | 2 +- poky/meta/conf/machine/include/tune-cortexa53.inc | 2 +- poky/meta/conf/machine/include/tune-cortexa55.inc | 4 +- .../machine/include/tune-cortexa57-cortexa53.inc | 4 +- poky/meta/conf/machine/include/tune-cortexa57.inc | 2 +- .../machine/include/tune-cortexa72-cortexa53.inc | 2 +- .../machine/include/tune-cortexa73-cortexa53.inc | 2 +- poky/meta/conf/machine/include/x86-base.inc | 2 +- poky/meta/conf/machine/qemuarmv5.conf | 2 +- poky/meta/lib/oe/recipeutils.py | 17 + poky/meta/lib/oeqa/runtime/cases/multilib.py | 11 +- poky/meta/lib/oeqa/runtime/cases/terminal.py | 18 + poky/meta/lib/oeqa/runtime/cases/weston.py | 12 +- poky/meta/lib/oeqa/runtime/cases/x32lib.py | 17 +- poky/meta/lib/oeqa/sdk/case.py | 2 +- poky/meta/lib/oeqa/sdk/cases/assimp.py | 2 +- poky/meta/lib/oeqa/sdk/cases/buildcpio.py | 2 +- poky/meta/lib/oeqa/sdk/cases/buildepoxy.py | 2 +- poky/meta/lib/oeqa/sdk/cases/buildgalculator.py | 2 +- poky/meta/lib/oeqa/sdk/cases/buildlzip.py | 2 +- .../lib/oeqa/selftest/cases/diffoscope/A/file.txt | 1 + .../lib/oeqa/selftest/cases/diffoscope/B/file.txt | 1 + poky/meta/lib/oeqa/selftest/cases/meta_ide.py 
| 2 +- poky/meta/lib/oeqa/selftest/cases/prservice.py | 8 +- poky/meta/lib/oeqa/selftest/cases/recipetool.py | 13 - poky/meta/lib/oeqa/selftest/cases/reproducible.py | 28 +- poky/meta/lib/oeqa/selftest/cases/runtime_test.py | 2 +- poky/meta/lib/oeqa/selftest/cases/signing.py | 4 +- poky/meta/lib/oeqa/selftest/cases/wic.py | 37 +- poky/meta/lib/oeqa/selftest/context.py | 4 +- poky/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb | 30 + poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb | 30 - .../0001-avoid-start-failure-with-bind-user.patch | 27 + ...lwresd-V-and-start-log-hide-build-options.patch | 35 + ...-searching-for-json-headers-searches-sysr.patch | 47 + .../recipes-connectivity/bind/bind-9.16.5/bind9 | 2 + .../bind/bind-9.16.5/conf.patch | 330 + .../bind/bind-9.16.5/generate-rndc-key.sh | 8 + .../init.d-add-support-for-read-only-rootfs.patch | 65 + .../make-etc-initd-bind-stop-work.patch | 42 + .../bind/bind-9.16.5/named.service | 22 + .../0001-avoid-start-failure-with-bind-user.patch | 27 - ...igure.in-remove-useless-L-use_openssl-lib.patch | 30 - ...lwresd-V-and-start-log-hide-build-options.patch | 34 - ...-searching-for-json-headers-searches-sysr.patch | 47 - poky/meta/recipes-connectivity/bind/bind/bind9 | 2 - .../meta/recipes-connectivity/bind/bind/conf.patch | 330 - .../bind/bind/generate-rndc-key.sh | 8 - .../init.d-add-support-for-read-only-rootfs.patch | 65 - .../bind/bind/make-etc-initd-bind-stop-work.patch | 42 - .../recipes-connectivity/bind/bind/named.service | 22 - .../meta/recipes-connectivity/bind/bind_9.11.21.bb | 140 - poky/meta/recipes-connectivity/bind/bind_9.16.5.bb | 123 + poky/meta/recipes-connectivity/bluez5/bluez5.inc | 4 +- .../connman/connman-gnome_0.7.bb | 2 +- poky/meta/recipes-connectivity/dhcp/dhcp.inc | 149 - ...o-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch | 27 - ...sybox-limitation-in-linux-dhclient-script.patch | 65 - .../dhcp/dhcp/0002-dhclient-dbus.patch | 117 - .../dhcp/dhcp/0003-link-with-lcrypto.patch | 35 - .../dhcp/dhcp/0004-Fix-out-of-tree-builds.patch | 95 - ...-fix-invoke-dhclient-script-failed-on-Rea.patch | 36 - ...re-argument-to-make-the-libxml2-dependenc.patch | 62 - ...09-remove-dhclient-script-bash-dependency.patch | 28 - ...correct-the-intention-for-xml2-lib-search.patch | 34 - .../dhcp/dhcp/0013-fixup_use_libbind.patch | 64 - poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb | 23 - .../recipes-connectivity/dhcp/files/default-relay | 12 - .../recipes-connectivity/dhcp/files/default-server | 7 - .../dhcp/files/dhclient-systemd-wrapper | 39 - .../recipes-connectivity/dhcp/files/dhclient.conf | 50 - .../dhcp/files/dhclient.service | 13 - .../recipes-connectivity/dhcp/files/dhcpd.conf | 108 - .../recipes-connectivity/dhcp/files/dhcpd.service | 15 - .../recipes-connectivity/dhcp/files/dhcpd6.service | 15 - .../dhcp/files/dhcrelay.service | 10 - .../recipes-connectivity/dhcp/files/init-relay | 44 - .../recipes-connectivity/dhcp/files/init-server | 44 - .../recipes-connectivity/dhcpcd/dhcpcd_9.2.0.bb | 39 + ...remove-INCLUDEDIR-to-prevent-build-issues.patch | 45 + .../dhcpcd/files/dhcpcd.service | 10 + .../dhcpcd/files/dhcpcd@.service | 15 + poky/meta/recipes-connectivity/iw/iw_5.4.bb | 32 - poky/meta/recipes-connectivity/iw/iw_5.8.bb | 32 + .../kea/files/kea-dhcp-ddns.service | 13 + .../kea/files/kea-dhcp4.service | 13 + .../kea/files/kea-dhcp6.service | 13 + poky/meta/recipes-connectivity/kea/kea_1.7.10.bb | 59 + .../recipes-connectivity/libuv/libuv_1.38.1.bb | 19 - .../recipes-connectivity/libuv/libuv_1.39.0.bb | 19 + 
.../resolvconf/resolvconf_1.82.bb | 68 - .../resolvconf/resolvconf_1.83.bb | 68 + poky/meta/recipes-core/ell/ell_0.32.bb | 21 - poky/meta/recipes-core/ell/ell_0.33.bb | 21 + ...1-init-env.in-do-not-add-C-CXX-parameters.patch | 29 - .../0001-tests-autopoint-3-unset-MAKEFLAGS.patch | 26 - .../gettext-0.20.2/add-with-bisonlocaledir.patch | 58 - .../gettext/gettext-0.20.2/parallel.patch | 32 - .../recipes-core/gettext/gettext-0.20.2/run-ptest | 6 - .../gettext-0.20.2/serial-tests-config.patch | 56 - .../gettext/gettext-0.20.2/use-pkgconfig.patch | 699 -- ...1-init-env.in-do-not-add-C-CXX-parameters.patch | 29 + .../0001-tests-autopoint-3-unset-MAKEFLAGS.patch | 26 + .../recipes-core/gettext/gettext-0.21/mingw.patch | 28 + .../gettext/gettext-0.21/parallel.patch | 32 + .../recipes-core/gettext/gettext-0.21/run-ptest | 6 + .../gettext/gettext-0.21/serial-tests-config.patch | 56 + .../gettext/gettext-0.21/use-pkgconfig.patch | 699 ++ .../gettext/gettext-minimal-0.20.2/COPYING | 4 - .../gettext/gettext-minimal-0.20.2/Makefile.in.in | 505 - .../gettext-minimal-0.20.2/aclocal/gettext.m4 | 386 - .../aclocal/host-cpu-c-abi.m4 | 675 -- .../gettext-minimal-0.20.2/aclocal/iconv.m4 | 288 - .../gettext-minimal-0.20.2/aclocal/intlmacosx.m4 | 65 - .../gettext-minimal-0.20.2/aclocal/lib-ld.m4 | 168 - .../gettext-minimal-0.20.2/aclocal/lib-link.m4 | 800 -- .../gettext-minimal-0.20.2/aclocal/lib-prefix.m4 | 320 - .../gettext/gettext-minimal-0.20.2/aclocal/nls.m4 | 32 - .../gettext/gettext-minimal-0.20.2/aclocal/po.m4 | 450 - .../gettext-minimal-0.20.2/aclocal/progtest.m4 | 91 - .../gettext/gettext-minimal-0.20.2/config.rpath | 684 -- .../gettext-minimal-0.20.2/remove-potcdate.sin | 25 - .../gettext/gettext-minimal-0.21/COPYING | 4 + .../gettext/gettext-minimal-0.21/Makefile.in.in | 510 + .../gettext-minimal-0.21/aclocal/gettext.m4 | 386 + .../gettext-minimal-0.21/aclocal/host-cpu-c-abi.m4 | 675 ++ .../gettext/gettext-minimal-0.21/aclocal/iconv.m4 | 288 + .../gettext-minimal-0.21/aclocal/intlmacosx.m4 | 65 + .../gettext/gettext-minimal-0.21/aclocal/lib-ld.m4 | 168 + .../gettext-minimal-0.21/aclocal/lib-link.m4 | 800 ++ .../gettext-minimal-0.21/aclocal/lib-prefix.m4 | 320 + .../gettext/gettext-minimal-0.21/aclocal/nls.m4 | 32 + .../gettext/gettext-minimal-0.21/aclocal/po.m4 | 450 + .../gettext-minimal-0.21/aclocal/progtest.m4 | 91 + .../gettext/gettext-minimal-0.21/config.rpath | 684 ++ .../gettext-minimal-0.21/remove-potcdate.sin | 25 + .../gettext/gettext-minimal-native_0.20.2.bb | 30 - .../gettext/gettext-minimal-native_0.21.bb | 30 + poky/meta/recipes-core/gettext/gettext_0.20.2.bb | 210 - poky/meta/recipes-core/gettext/gettext_0.21.bb | 209 + poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.4.bb | 50 - poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.5.bb | 49 + .../glib-networking/glib-networking/run-ptest | 3 + .../glib-networking/glib-networking_2.64.3.bb | 7 +- .../images/build-appliance-image_15.0.0.bb | 4 +- .../images/core-image-minimal-initramfs.bb | 1 + .../images/core-image-tiny-initramfs.bb | 1 + .../initrdscripts/initramfs-framework/init | 13 + .../libxcrypt/libxcrypt-compat_4.4.16.bb | 18 - .../libxcrypt/libxcrypt-compat_4.4.17.bb | 18 + poky/meta/recipes-core/libxcrypt/libxcrypt.inc | 8 +- .../recipes-core/libxcrypt/libxcrypt_4.4.16.bb | 2 - .../recipes-core/libxcrypt/libxcrypt_4.4.17.bb | 2 + .../libxml/libxml2/CVE-2020-24977.patch | 41 + poky/meta/recipes-core/libxml/libxml2_2.9.10.bb | 1 + poky/meta/recipes-core/meta/buildtools-tarball.bb | 1 + .../meta/recipes-core/meta/cve-update-db-native.bb | 
125 +- .../meta/nativesdk-sdk-provides-dummy.bb | 2 +- poky/meta/recipes-core/meta/testexport-tarball.bb | 2 +- poky/meta/recipes-core/meta/uninative-tarball.bb | 1 + poky/meta/recipes-core/musl/libucontext_git.bb | 1 + poky/meta/recipes-core/musl/musl_git.bb | 2 +- poky/meta/recipes-core/ncurses/files/config.cache | 4 - poky/meta/recipes-core/ncurses/ncurses.inc | 3 +- poky/meta/recipes-core/ncurses/ncurses_6.2.bb | 3 +- .../packagegroup-core-tools-profile.bb | 9 - .../recipes-core/systemd/systemd-boot_246.1.bb | 70 - .../recipes-core/systemd/systemd-boot_246.2.bb | 70 + .../recipes-core/systemd/systemd-serialgetty.bb | 4 +- poky/meta/recipes-core/systemd/systemd_246.2.bb | 18 +- poky/meta/recipes-core/sysvinit/sysvinit/rc | 9 +- poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb | 1 - poky/meta/recipes-core/util-linux/util-linux.inc | 348 - .../0001-include-cleanup-pidfd-inckudes.patch | 42 - .../recipes-core/util-linux/util-linux_2.35.2.bb | 13 - .../recipes-core/util-linux/util-linux_2.36.bb | 359 + poky/meta/recipes-devtools/autoconf/autoconf.inc | 10 +- .../recipes-devtools/autoconf/autoconf_2.69.bb | 25 +- .../bison/0001-bison-fix-the-parallel-build.patch | 63 - poky/meta/recipes-devtools/bison/bison_3.6.4.bb | 44 - poky/meta/recipes-devtools/bison/bison_3.7.2.bb | 37 + .../bootchart2/bootchart2_0.14.8.bb | 161 - .../bootchart2/bootchart2_0.14.9.bb | 160 + .../recipes-devtools/cmake/cmake-native_3.17.3.bb | 53 - .../recipes-devtools/cmake/cmake-native_3.18.2.bb | 53 + poky/meta/recipes-devtools/cmake/cmake.inc | 6 +- ...ineSystem-use-oe-environment-vars-to-load.patch | 4 +- .../0002-cmake-Prevent-the-detection-of-Qt5.patch | 10 +- ...upport-OpenEmbedded-Qt4-tool-binary-names.patch | 2 +- ...ently-if-system-Qt-installation-is-broken.patch | 2 +- ...-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch | 15 +- poky/meta/recipes-devtools/cmake/cmake_3.17.3.bb | 53 - poky/meta/recipes-devtools/cmake/cmake_3.18.2.bb | 53 + .../recipes-devtools/elfutils/elfutils_0.180.bb | 2 + .../recipes-devtools/gcc/gcc-configure-common.inc | 1 - .../recipes-devtools/gcc/gcc-cross-canadian.inc | 23 + .../recipes-devtools/gnu-config/gnu-config_git.bb | 4 +- poky/meta/recipes-devtools/go/go-1.14.inc | 21 - ...1-allow-CC-and-CXX-to-have-multiple-words.patch | 33 - ...-content-based-hash-generation-less-pedan.patch | 219 - ...OLDIR-to-be-overridden-in-the-environment.patch | 47 - .../0004-ld-add-soname-to-shareable-objects.patch | 45 - ...verride-CC-when-building-dist-and-go_boot.patch | 39 - ...-cmd-dist-separate-host-and-target-builds.patch | 274 - ...07-cmd-go-make-GOROOT-precious-by-default.patch | 104 - .../0008-use-GOBUILDMODE-to-set-buildmode.patch | 42 - ...ld-replace-glibc-dynamic-linker-with-musl.patch | 115 - poky/meta/recipes-devtools/go/go-1.15.inc | 20 + ...1-allow-CC-and-CXX-to-have-multiple-words.patch | 33 + ...-content-based-hash-generation-less-pedan.patch | 219 + ...OLDIR-to-be-overridden-in-the-environment.patch | 47 + .../0004-ld-add-soname-to-shareable-objects.patch | 45 + ...verride-CC-when-building-dist-and-go_boot.patch | 39 + ...-cmd-dist-separate-host-and-target-builds.patch | 274 + ...07-cmd-go-make-GOROOT-precious-by-default.patch | 104 + .../0008-use-GOBUILDMODE-to-set-buildmode.patch | 42 + .../recipes-devtools/go/go-binary-native_1.14.7.bb | 46 - .../recipes-devtools/go/go-binary-native_1.15.2.bb | 46 + .../recipes-devtools/go/go-cross-canadian_1.14.bb | 2 - .../recipes-devtools/go/go-cross-canadian_1.15.bb | 2 + poky/meta/recipes-devtools/go/go-cross_1.14.bb | 2 - 
poky/meta/recipes-devtools/go/go-cross_1.15.bb | 2 + poky/meta/recipes-devtools/go/go-crosssdk_1.14.bb | 2 - poky/meta/recipes-devtools/go/go-crosssdk_1.15.bb | 2 + poky/meta/recipes-devtools/go/go-native_1.14.bb | 60 - poky/meta/recipes-devtools/go/go-native_1.15.bb | 59 + poky/meta/recipes-devtools/go/go-runtime_1.14.bb | 3 - poky/meta/recipes-devtools/go/go-runtime_1.15.bb | 3 + poky/meta/recipes-devtools/go/go_1.14.bb | 14 - poky/meta/recipes-devtools/go/go_1.15.bb | 15 + .../help2man/help2man-native_1.47.15.bb | 23 - .../recipes-devtools/help2man/help2man_1.47.15.bb | 22 + .../recipes-devtools/librepo/librepo_1.12.0.bb | 27 - .../recipes-devtools/librepo/librepo_1.12.1.bb | 27 + .../recipes-devtools/log4cplus/log4cplus_2.0.5.bb | 19 + poky/meta/recipes-devtools/meson/meson.inc | 2 +- .../0001-Make-CPU-family-warnings-fatal.patch | 7 +- ...02-Support-building-allarch-recipes-again.patch | 4 +- .../meson/meson/0003-native_bindir.patch | 28 +- poky/meta/recipes-devtools/meson/meson_0.55.0.bb | 4 - poky/meta/recipes-devtools/meson/meson_0.55.1.bb | 4 + .../meson/nativesdk-meson_0.55.0.bb | 65 - .../meson/nativesdk-meson_0.55.1.bb | 65 + .../nasm/0002-Add-debug-prefix-map-option.patch | 42 +- poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb | 21 - poky/meta/recipes-devtools/nasm/nasm_2.15.05.bb | 21 + poky/meta/recipes-devtools/ninja/ninja_1.10.0.bb | 30 - poky/meta/recipes-devtools/ninja/ninja_1.10.1.bb | 30 + .../packagegroup-core-device-devel.bb | 16 - .../recipes-devtools/patchelf/patchelf_0.11.bb | 16 - .../recipes-devtools/patchelf/patchelf_0.12.bb | 16 + poky/meta/recipes-devtools/pseudo/pseudo_git.bb | 2 +- .../recipes-devtools/python-numpy/python-numpy.inc | 2 +- .../python-numpy/python3-numpy_1.19.0.bb | 3 - .../python-numpy/python3-numpy_1.19.1.bb | 3 + ...ionally-do-not-fetch-code-by-easy_install.patch | 8 +- .../python/python3-setuptools_49.3.1.bb | 65 - .../python/python3-setuptools_49.6.0.bb | 65 + poky/meta/recipes-devtools/qemu/qemu.inc | 11 +- .../qemu/qemu/usb-fix-setup_len-init.patch | 89 + poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb | 5 - poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb | 58 - poky/meta/recipes-devtools/rsync/rsync_3.2.3.bb | 58 + poky/meta/recipes-devtools/vala/vala_0.48.7.bb | 5 - poky/meta/recipes-devtools/vala/vala_0.48.9.bb | 5 + ...1-adjust-path-filter-for-2-memcheck-tests.patch | 40 - ...check-vgtests-remove-fullpath-after-flags.patch | 42 + .../recipes-devtools/valgrind/valgrind_3.16.1.bb | 16 +- .../recipes-extended/acpica/acpica_20200528.bb | 49 - .../recipes-extended/acpica/acpica_20200717.bb | 49 + .../recipes-extended/asciidoc/asciidoc_9.0.1.bb | 31 - .../recipes-extended/asciidoc/asciidoc_9.0.2.bb | 31 + .../images/core-image-testmaster-initramfs.bb | 1 + .../0001-iputils-Initialize-libgcrypt.patch | 55 - ...ge-variable-name-to-avoid-colliding-with-.patch | 51 - ...infod-fix-systemd-Documentation-url-error.patch | 28 - .../recipes-extended/iputils/iputils_s20190709.bb | 74 - .../recipes-extended/iputils/iputils_s20200821.bb | 72 + .../libpipeline/libpipeline_1.5.2.bb | 15 - .../libpipeline/libpipeline_1.5.3.bb | 14 + .../recipes-extended/man-pages/man-pages_5.07.bb | 37 - .../recipes-extended/man-pages/man-pages_5.08.bb | 36 + ...-misc-Makefile.am-install-mc.lib-only-onc.patch | 59 - poky/meta/recipes-extended/mc/mc_4.8.24.bb | 55 - poky/meta/recipes-extended/mc/mc_4.8.25.bb | 53 + poky/meta/recipes-extended/msmtp/msmtp_1.8.11.bb | 27 - poky/meta/recipes-extended/msmtp/msmtp_1.8.12.bb | 27 + 
.../packagegroups/packagegroup-core-base-utils.bb | 4 +- .../meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb | 5 +- .../stress-ng/stress-ng_0.11.18.bb | 27 - .../stress-ng/stress-ng_0.11.19.bb | 27 + poky/meta/recipes-extended/sysstat/sysstat.inc | 2 +- .../recipes-extended/sysstat/sysstat_12.2.2.bb | 8 - .../recipes-extended/sysstat/sysstat_12.4.0.bb | 7 + poky/meta/recipes-extended/timezone/tzdata.bb | 4 + .../meta/recipes-gnome/epiphany/epiphany_3.36.3.bb | 20 - .../meta/recipes-gnome/epiphany/epiphany_3.36.4.bb | 20 + .../recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb | 8 +- poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb | 19 - poky/meta/recipes-gnome/gtk+/gtk+3_3.24.22.bb | 18 + ...-macro-instead-of-cast-to-convert-pointer.patch | 33 + .../recipes-gnome/json-glib/json-glib_1.4.4.bb | 6 +- ...not-disable-introspection-in-cross-builds.patch | 27 + ...ect-enums.cc.tmpl-write-out-only-the-file.patch | 28 + .../harfbuzz/harfbuzz/version-race.patch | 121 + .../recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb | 43 - .../recipes-graphics/harfbuzz/harfbuzz_2.7.2.bb | 52 + .../recipes-graphics/images/core-image-weston.bb | 2 + .../recipes-graphics/libva/libva-utils_2.8.0.bb | 2 +- poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb | 15 - poky/meta/recipes-graphics/mesa/mesa-gl_20.1.6.bb | 15 + poky/meta/recipes-graphics/mesa/mesa.inc | 2 +- poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb | 2 - poky/meta/recipes-graphics/mesa/mesa_20.1.6.bb | 2 + .../pango/0001-Fix-build-reproducibility.patch | 31 - poky/meta/recipes-graphics/pango/pango_1.44.7.bb | 49 - poky/meta/recipes-graphics/pango/pango_1.46.1.bb | 47 + poky/meta/recipes-graphics/piglit/piglit_git.bb | 2 +- .../recipes-graphics/wayland/libinput_1.16.0.bb | 50 - .../recipes-graphics/wayland/libinput_1.16.1.bb | 49 + poky/meta/recipes-graphics/wayland/weston-init.bb | 25 +- .../wayland/weston-init/qemuall/weston.ini | 2 - .../wayland/weston-init/qemux86-64/weston.ini | 0 .../wayland/weston-init/qemux86/weston.ini | 0 .../wayland/weston-init/weston-autologin | 11 + .../wayland/weston-init/weston.ini | 4 +- .../wayland/weston-init/weston@.service | 69 +- .../wayland/weston-init/weston@.socket | 10 + ...de-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch | 47 + ...ch-Provide-a-default-version-that-doesn-t.patch | 27 +- .../wayland/weston/dont-use-plane-add-prop.patch | 23 + poky/meta/recipes-graphics/wayland/weston_8.0.0.bb | 128 - poky/meta/recipes-graphics/wayland/weston_9.0.0.bb | 131 + .../xinput-calibrator/xinput-calibrator_git.bb | 4 +- .../recipes-graphics/xorg-app/xev/diet-x11.patch | 35 +- poky/meta/recipes-graphics/xorg-app/xev_1.2.3.bb | 18 - poky/meta/recipes-graphics/xorg-app/xev_1.2.4.bb | 17 + .../xorg-font/font-alias-1.0.3/nocompiler.patch | 32 - .../xorg-font/font-alias-1.0.4/nocompiler.patch | 42 + .../recipes-graphics/xorg-font/font-alias_1.0.3.bb | 24 - .../recipes-graphics/xorg-font/font-alias_1.0.4.bb | 23 + .../recipes-graphics/xorg-lib/libx11_1.6.12.bb | 45 + .../meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb | 46 - ...t-on-probing-a-non-PCI-platform-device-on.patch | 34 + .../xorg-xserver/xserver-xorg_1.20.8.bb | 33 - .../xorg-xserver/xserver-xorg_1.20.9.bb | 33 + .../cryptodev/cryptodev-linux_1.10.bb | 13 - .../cryptodev/cryptodev-linux_1.11.bb | 13 + .../cryptodev/cryptodev-module_1.10.bb | 18 - .../cryptodev/cryptodev-module_1.11.bb | 16 + .../cryptodev/cryptodev-tests_1.10.bb | 21 - .../cryptodev/cryptodev-tests_1.11.bb | 21 + poky/meta/recipes-kernel/cryptodev/cryptodev.inc | 2 +- .../files/0001-Fix-build-for-Linux-5.8-rc1.patch 
| 49 - .../0001-kexec-Fix-build-with-fno-common.patch | 10 + .../linux-firmware/linux-firmware_20200619.bb | 951 -- .../linux-firmware/linux-firmware_20200817.bb | 955 ++ .../linux-libc-headers/linux-libc-headers.inc | 2 +- poky/meta/recipes-kernel/linux/kernel-devsrc.bb | 2 + poky/meta/recipes-kernel/linux/linux-yocto-dev.bb | 2 +- .../recipes-kernel/linux/linux-yocto-rt_5.4.bb | 6 +- .../recipes-kernel/linux/linux-yocto-rt_5.8.bb | 6 +- .../recipes-kernel/linux/linux-yocto-tiny_5.4.bb | 8 +- .../recipes-kernel/linux/linux-yocto-tiny_5.8.bb | 8 +- poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb | 22 +- poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb | 22 +- ...-dependency-issue-when-building-in-tree-w.patch | 54 + ...mutrace.h-into-the-mmu-sub-directory-v5.9.patch | 41 + ...-mmu-Make-kvm_mmu_page-definition-and-acc.patch | 39 + ...mit-the-length-of-per-inode-prealloc-list.patch | 84 + ...dicate-via-a-block-bitmap-read-is-prefetc.patch | 63 + ...removal-of-smp_-read_barrier_depends-v5.9.patch | 391 + ...x-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch | 59 + ...ck-Fix-sync-livelock-due-to-b_dirty_time-.patch | 117 + ...-ranges-for-ext4_discard_preallocations-a.patch | 52 + .../0010-Fix-system-call-filter-table.patch | 918 ++ .../recipes-kernel/lttng/lttng-modules_2.12.2.bb | 16 +- poky/meta/recipes-kernel/lttng/lttng-platforms.inc | 17 + .../recipes-kernel/lttng/lttng-tools_2.12.2.bb | 8 +- poky/meta/recipes-kernel/lttng/lttng-ust_2.12.0.bb | 2 + .../recipes-multimedia/alsa/alsa-plugins_1.2.2.bb | 2 +- .../matchbox-desktop/matchbox-desktop_2.2.bb | 2 +- .../sato-screenshot/sato-screenshot_0.3.bb | 2 +- .../recipes-sato/webkit/wpebackend-fdo_1.6.1.bb | 17 - .../recipes-sato/webkit/wpebackend-fdo_1.7.1.bb | 17 + .../recipes-support/atk/at-spi2-core_2.36.0.bb | 36 - .../recipes-support/atk/at-spi2-core_2.36.1.bb | 36 + .../recipes-support/boost/bjam-native_1.73.0.bb | 20 - .../recipes-support/boost/bjam-native_1.74.0.bb | 20 + poky/meta/recipes-support/boost/boost-1.73.0.inc | 21 - poky/meta/recipes-support/boost/boost-1.74.0.inc | 20 + ...p-arch-instruction-set-flags-we-do-that-o.patch | 22 +- ...detail-rtree-visitors-insert-base-class-p.patch | 30 - poky/meta/recipes-support/boost/boost_1.73.0.bb | 11 - poky/meta/recipes-support/boost/boost_1.74.0.bb | 10 + poky/meta/recipes-support/curl/curl_7.72.0.bb | 4 +- .../debianutils/debianutils_4.11.1.bb | 54 + .../debianutils/debianutils_4.11.bb | 54 - .../recipes-support/diffoscope/diffoscope_153.bb | 17 - .../recipes-support/diffoscope/diffoscope_160.bb | 17 + .../meta/recipes-support/enchant/enchant2_2.2.8.bb | 29 - .../meta/recipes-support/enchant/enchant2_2.2.9.bb | 28 + ...c-use-a-custom-value-for-the-location-of-.patch | 6 +- .../gnupg/0003-dirmngr-uses-libgpg-error.patch | 16 +- .../recipes-support/gnupg/gnupg/relocate.patch | 2 +- poky/meta/recipes-support/gnupg/gnupg_2.2.21.bb | 80 - poky/meta/recipes-support/gnupg/gnupg_2.2.23.bb | 80 + .../gnutls/gnutls/CVE-2020-24659.patch | 117 + poky/meta/recipes-support/gnutls/gnutls_3.6.14.bb | 1 + .../gpgme/0008-do-not-auto-check-var-PYTHON.patch | 12 +- poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb | 89 - poky/meta/recipes-support/gpgme/gpgme_1.14.0.bb | 88 + .../libcap-ng/libcap-ng-python_0.7.10.bb | 29 - .../libcap-ng/libcap-ng-python_0.7.11.bb | 29 + poky/meta/recipes-support/libcap-ng/libcap-ng.inc | 6 +- ...-configure.ac-add-library-if-header-found.patch | 31 - ...ap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch | 25 - .../libcap-ng/libcap-ng/python.patch | 18 +- 
.../recipes-support/libcap-ng/libcap-ng_0.7.10.bb | 17 - .../recipes-support/libcap-ng/libcap-ng_0.7.11.bb | 17 + poky/meta/recipes-support/libcap/libcap_2.42.bb | 76 - poky/meta/recipes-support/libcap/libcap_2.43.bb | 76 + .../recipes-support/libcheck/libcheck_0.15.0.bb | 28 - .../recipes-support/libcheck/libcheck_0.15.2.bb | 28 + ...cross-compiling-into-a-separate-build-dir.patch | 43 - .../0005-src-gen-lock-obj.sh-add-a-file.patch | 134 - .../libgpg-error/libgpg-error_1.38.bb | 42 - .../libgpg-error/libgpg-error_1.39.bb | 40 + poky/meta/recipes-support/libmpc/libmpc_1.1.0.bb | 13 - poky/meta/recipes-support/libmpc/libmpc_1.2.0.bb | 12 + ...m-use-PYTHON-when-invoking-psl-make-dafsa.patch | 50 - ...01-gtk-doc-do-not-include-tree_index.sgml.patch | 28 - poky/meta/recipes-support/libpsl/libpsl_0.21.0.bb | 23 - poky/meta/recipes-support/libpsl/libpsl_0.21.1.bb | 20 + .../recipes-support/p11-kit/p11-kit_0.23.20.bb | 29 - .../recipes-support/p11-kit/p11-kit_0.23.21.bb | 29 + poky/meta/recipes-support/re2c/re2c_2.0.3.bb | 14 + poky/meta/recipes-support/re2c/re2c_2.0.bb | 14 - poky/scripts/buildhistory-diff | 9 +- poky/scripts/lib/devtool/deploy.py | 8 +- poky/scripts/lib/devtool/standard.py | 2 +- poky/scripts/lib/wic/ksparser.py | 20 +- poky/scripts/lib/wic/misc.py | 5 +- poky/scripts/lib/wic/plugins/imager/direct.py | 8 +- poky/scripts/lib/wic/plugins/source/bootimg-efi.py | 8 +- poky/scripts/oe-publish-sdk | 8 +- poky/scripts/runqemu | 9 + 672 files changed, 78955 insertions(+), 14202 deletions(-) create mode 100755 poky/bitbake/bin/bitbake-server create mode 100755 poky/bitbake/contrib/bbparse-torture.py create mode 100644 poky/bitbake/doc/.gitignore create mode 100644 poky/bitbake/doc/Makefile.sphinx create mode 100644 poky/bitbake/doc/_templates/breadcrumbs.html create mode 100644 poky/bitbake/doc/_templates/layout.html create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst create mode 100644 poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst create mode 100644 poky/bitbake/doc/conf.py create mode 100644 poky/bitbake/doc/genindex.rst create mode 100644 poky/bitbake/doc/index.rst create mode 100644 poky/bitbake/doc/releases.rst create mode 100644 poky/bitbake/doc/sphinx-static/switchers.js create mode 100644 poky/bitbake/doc/sphinx-static/theme_overrides.css delete mode 100644 poky/bitbake/lib/bb/compat.py create mode 100644 poky/documentation/.gitignore create mode 100644 poky/documentation/Makefile.sphinx create mode 100644 poky/documentation/_templates/breadcrumbs.html create mode 100644 poky/documentation/_templates/layout.html create mode 100644 poky/documentation/adt-manual/adt-command.rst create mode 100644 poky/documentation/adt-manual/adt-intro.rst create mode 100644 poky/documentation/adt-manual/adt-manual-intro.rst create mode 100644 poky/documentation/adt-manual/adt-manual.rst create mode 100644 poky/documentation/adt-manual/adt-package.rst create mode 100644 poky/documentation/adt-manual/adt-prepare.rst create mode 100644 poky/documentation/boilerplate.rst create mode 100644 poky/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst create mode 100644 
poky/documentation/bsp-guide/bsp-guide.rst create mode 100644 poky/documentation/bsp-guide/bsp.rst create mode 100644 poky/documentation/bsp-guide/history.rst create mode 100644 poky/documentation/conf.py create mode 100644 poky/documentation/dev-manual/dev-manual-common-tasks.rst create mode 100644 poky/documentation/dev-manual/dev-manual-intro.rst create mode 100644 poky/documentation/dev-manual/dev-manual-qemu.rst create mode 100644 poky/documentation/dev-manual/dev-manual-start.rst create mode 100644 poky/documentation/dev-manual/dev-manual.rst create mode 100644 poky/documentation/dev-manual/history.rst create mode 100644 poky/documentation/figures/yp-how-it-works-new-diagram.png create mode 100644 poky/documentation/genindex.rst create mode 100644 poky/documentation/index.rst create mode 100644 poky/documentation/kernel-dev/history.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-advanced.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-common.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-concepts-appx.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-faq.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-intro.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev-maint-appx.rst create mode 100644 poky/documentation/kernel-dev/kernel-dev.rst create mode 100644 poky/documentation/overview-manual/history.rst create mode 100644 poky/documentation/overview-manual/overview-manual-concepts.rst create mode 100644 poky/documentation/overview-manual/overview-manual-development-environment.rst create mode 100644 poky/documentation/overview-manual/overview-manual-intro.rst create mode 100644 poky/documentation/overview-manual/overview-manual-yp-intro.rst create mode 100644 poky/documentation/overview-manual/overview-manual.rst create mode 100644 poky/documentation/poky.yaml create mode 100644 poky/documentation/profile-manual/history.rst create mode 100644 poky/documentation/profile-manual/profile-manual-arch.rst create mode 100644 poky/documentation/profile-manual/profile-manual-examples.rst create mode 100644 poky/documentation/profile-manual/profile-manual-intro.rst create mode 100644 poky/documentation/profile-manual/profile-manual-usage.rst create mode 100644 poky/documentation/profile-manual/profile-manual.rst create mode 100644 poky/documentation/ref-manual/examples/hello-autotools/hello_2.10.bb delete mode 100644 poky/documentation/ref-manual/examples/hello-autotools/hello_2.3.bb create mode 100644 poky/documentation/ref-manual/faq.rst create mode 100644 poky/documentation/ref-manual/history.rst create mode 100644 poky/documentation/ref-manual/migration-1.3.rst create mode 100644 poky/documentation/ref-manual/migration-1.4.rst create mode 100644 poky/documentation/ref-manual/migration-1.5.rst create mode 100644 poky/documentation/ref-manual/migration-1.6.rst create mode 100644 poky/documentation/ref-manual/migration-1.7.rst create mode 100644 poky/documentation/ref-manual/migration-1.8.rst create mode 100644 poky/documentation/ref-manual/migration-2.0.rst create mode 100644 poky/documentation/ref-manual/migration-2.1.rst create mode 100644 poky/documentation/ref-manual/migration-2.2.rst create mode 100644 poky/documentation/ref-manual/migration-2.3.rst create mode 100644 poky/documentation/ref-manual/migration-2.4.rst create mode 100644 poky/documentation/ref-manual/migration-2.5.rst create mode 100644 poky/documentation/ref-manual/migration-2.6.rst create mode 100644 
poky/documentation/ref-manual/migration-2.7.rst create mode 100644 poky/documentation/ref-manual/migration-3.0.rst create mode 100644 poky/documentation/ref-manual/migration-3.1.rst create mode 100644 poky/documentation/ref-manual/migration-general.rst create mode 100644 poky/documentation/ref-manual/migration.rst create mode 100644 poky/documentation/ref-manual/ref-classes.rst create mode 100644 poky/documentation/ref-manual/ref-devtool-reference.rst create mode 100644 poky/documentation/ref-manual/ref-features.rst create mode 100644 poky/documentation/ref-manual/ref-images.rst create mode 100644 poky/documentation/ref-manual/ref-kickstart.rst create mode 100644 poky/documentation/ref-manual/ref-manual.rst create mode 100644 poky/documentation/ref-manual/ref-qa-checks.rst create mode 100644 poky/documentation/ref-manual/ref-release-process.rst create mode 100644 poky/documentation/ref-manual/ref-structure.rst create mode 100644 poky/documentation/ref-manual/ref-system-requirements.rst create mode 100644 poky/documentation/ref-manual/ref-tasks.rst create mode 100644 poky/documentation/ref-manual/ref-terms.rst create mode 100644 poky/documentation/ref-manual/ref-variables.rst create mode 100644 poky/documentation/ref-manual/ref-varlocality.rst create mode 100644 poky/documentation/ref-manual/resources.rst create mode 100644 poky/documentation/releases.rst create mode 100644 poky/documentation/sdk-manual/history.rst create mode 100644 poky/documentation/sdk-manual/sdk-appendix-customizing-standard.rst create mode 100644 poky/documentation/sdk-manual/sdk-appendix-customizing.rst create mode 100644 poky/documentation/sdk-manual/sdk-appendix-obtain.rst create mode 100644 poky/documentation/sdk-manual/sdk-extensible.rst create mode 100644 poky/documentation/sdk-manual/sdk-intro.rst create mode 100644 poky/documentation/sdk-manual/sdk-manual.rst create mode 100644 poky/documentation/sdk-manual/sdk-using.rst create mode 100644 poky/documentation/sdk-manual/sdk-working-projects.rst create mode 100644 poky/documentation/sphinx-static/YoctoProject_Logo_RGB.jpg create mode 100644 poky/documentation/sphinx-static/switchers.js create mode 100644 poky/documentation/sphinx-static/theme_overrides.css create mode 100644 poky/documentation/sphinx/yocto-vars.py create mode 100644 poky/documentation/test-manual/history.rst create mode 100644 poky/documentation/test-manual/test-manual-intro.rst create mode 100644 poky/documentation/test-manual/test-manual-test-process.rst create mode 100644 poky/documentation/test-manual/test-manual-understand-autobuilder.rst create mode 100644 poky/documentation/test-manual/test-manual.rst create mode 100644 poky/documentation/toaster-manual/history.rst create mode 100644 poky/documentation/toaster-manual/toaster-manual-intro.rst create mode 100644 poky/documentation/toaster-manual/toaster-manual-reference.rst create mode 100644 poky/documentation/toaster-manual/toaster-manual-setup-and-use.rst create mode 100644 poky/documentation/toaster-manual/toaster-manual-start.rst create mode 100644 poky/documentation/toaster-manual/toaster-manual.rst create mode 100644 poky/documentation/transitioning-to-a-custom-environment.rst create mode 100644 poky/documentation/what-i-wish-id-known.rst delete mode 100644 poky/meta-selftest/recipes-test/recipetool/files/selftest-replaceme-src-globfile create mode 100644 poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.8.bbappend create mode 100644 poky/meta/classes/image-artifact-names.bbclass mode change 100755 => 100644 
poky/meta/conf/machine/include/tune-cortex-m0plus.inc create mode 100644 poky/meta/lib/oeqa/runtime/cases/terminal.py create mode 100644 poky/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt create mode 100644 poky/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt create mode 100644 poky/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb delete mode 100644 poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-avoid-start-failure-with-bind-user.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-named-lwresd-V-and-start-log-hide-build-options.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/bind-ensure-searching-for-json-headers-searches-sysr.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/bind9 create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/conf.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/generate-rndc-key.sh create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/init.d-add-support-for-read-only-rootfs.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/make-etc-initd-bind-stop-work.patch create mode 100644 poky/meta/recipes-connectivity/bind/bind-9.16.5/named.service delete mode 100644 poky/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/bind9 delete mode 100644 poky/meta/recipes-connectivity/bind/bind/conf.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh delete mode 100644 poky/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch delete mode 100644 poky/meta/recipes-connectivity/bind/bind/named.service delete mode 100644 poky/meta/recipes-connectivity/bind/bind_9.11.21.bb create mode 100644 poky/meta/recipes-connectivity/bind/bind_9.16.5.bb delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp.inc delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0001-workaround-busybox-limitation-in-linux-dhclient-script.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0002-dhclient-dbus.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0003-link-with-lcrypto.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0004-Fix-out-of-tree-builds.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0005-dhcp-client-fix-invoke-dhclient-script-failed-on-Rea.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0007-Add-configure-argument-to-make-the-libxml2-dependenc.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0009-remove-dhclient-script-bash-dependency.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0012-dhcp-correct-the-intention-for-xml2-lib-search.patch delete mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp/0013-fixup_use_libbind.patch delete 
mode 100644 poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/default-relay delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/default-server delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhclient-systemd-wrapper delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhclient.conf delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhclient.service delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhcpd.conf delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhcpd.service delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhcpd6.service delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/dhcrelay.service delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/init-relay delete mode 100644 poky/meta/recipes-connectivity/dhcp/files/init-server create mode 100644 poky/meta/recipes-connectivity/dhcpcd/dhcpcd_9.2.0.bb create mode 100644 poky/meta/recipes-connectivity/dhcpcd/files/0001-remove-INCLUDEDIR-to-prevent-build-issues.patch create mode 100644 poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd.service create mode 100644 poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd@.service delete mode 100644 poky/meta/recipes-connectivity/iw/iw_5.4.bb create mode 100644 poky/meta/recipes-connectivity/iw/iw_5.8.bb create mode 100644 poky/meta/recipes-connectivity/kea/files/kea-dhcp-ddns.service create mode 100644 poky/meta/recipes-connectivity/kea/files/kea-dhcp4.service create mode 100644 poky/meta/recipes-connectivity/kea/files/kea-dhcp6.service create mode 100644 poky/meta/recipes-connectivity/kea/kea_1.7.10.bb delete mode 100644 poky/meta/recipes-connectivity/libuv/libuv_1.38.1.bb create mode 100644 poky/meta/recipes-connectivity/libuv/libuv_1.39.0.bb delete mode 100644 poky/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb create mode 100644 poky/meta/recipes-connectivity/resolvconf/resolvconf_1.83.bb delete mode 100644 poky/meta/recipes-core/ell/ell_0.32.bb create mode 100644 poky/meta/recipes-core/ell/ell_0.33.bb delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/0001-init-env.in-do-not-add-C-CXX-parameters.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/0001-tests-autopoint-3-unset-MAKEFLAGS.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/add-with-bisonlocaledir.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/parallel.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/run-ptest delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/serial-tests-config.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-0.20.2/use-pkgconfig.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/0001-init-env.in-do-not-add-C-CXX-parameters.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/0001-tests-autopoint-3-unset-MAKEFLAGS.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/mingw.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/parallel.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/run-ptest create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/serial-tests-config.patch create mode 100644 poky/meta/recipes-core/gettext/gettext-0.21/use-pkgconfig.patch delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/COPYING delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/Makefile.in.in delete mode 
100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/gettext.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/host-cpu-c-abi.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/iconv.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/intlmacosx.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-ld.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-link.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-prefix.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/nls.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/po.m4 delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/progtest.m4 delete mode 100755 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/config.rpath delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/remove-potcdate.sin create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/COPYING create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/Makefile.in.in create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/gettext.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/host-cpu-c-abi.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/iconv.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/intlmacosx.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-ld.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-link.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-prefix.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/nls.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/po.m4 create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/progtest.m4 create mode 100755 poky/meta/recipes-core/gettext/gettext-minimal-0.21/config.rpath create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-0.21/remove-potcdate.sin delete mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-native_0.20.2.bb create mode 100644 poky/meta/recipes-core/gettext/gettext-minimal-native_0.21.bb delete mode 100644 poky/meta/recipes-core/gettext/gettext_0.20.2.bb create mode 100644 poky/meta/recipes-core/gettext/gettext_0.21.bb delete mode 100644 poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.4.bb create mode 100644 poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.5.bb create mode 100644 poky/meta/recipes-core/glib-networking/glib-networking/run-ptest delete mode 100644 poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.16.bb create mode 100644 poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.17.bb delete mode 100644 poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.16.bb create mode 100644 poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.17.bb create mode 100644 poky/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch delete mode 100644 poky/meta/recipes-core/ncurses/files/config.cache delete mode 100644 poky/meta/recipes-core/systemd/systemd-boot_246.1.bb create mode 100644 poky/meta/recipes-core/systemd/systemd-boot_246.2.bb delete mode 100644 poky/meta/recipes-core/util-linux/util-linux.inc delete mode 100644 
poky/meta/recipes-core/util-linux/util-linux/0001-include-cleanup-pidfd-inckudes.patch delete mode 100644 poky/meta/recipes-core/util-linux/util-linux_2.35.2.bb create mode 100644 poky/meta/recipes-core/util-linux/util-linux_2.36.bb delete mode 100644 poky/meta/recipes-devtools/bison/bison/0001-bison-fix-the-parallel-build.patch delete mode 100644 poky/meta/recipes-devtools/bison/bison_3.6.4.bb create mode 100644 poky/meta/recipes-devtools/bison/bison_3.7.2.bb delete mode 100644 poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb create mode 100644 poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb delete mode 100644 poky/meta/recipes-devtools/cmake/cmake-native_3.17.3.bb create mode 100644 poky/meta/recipes-devtools/cmake/cmake-native_3.18.2.bb delete mode 100644 poky/meta/recipes-devtools/cmake/cmake_3.17.3.bb create mode 100644 poky/meta/recipes-devtools/cmake/cmake_3.18.2.bb delete mode 100644 poky/meta/recipes-devtools/go/go-1.14.inc delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0001-allow-CC-and-CXX-to-have-multiple-words.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0004-ld-add-soname-to-shareable-objects.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0006-cmd-dist-separate-host-and-target-builds.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0007-cmd-go-make-GOROOT-precious-by-default.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0008-use-GOBUILDMODE-to-set-buildmode.patch delete mode 100644 poky/meta/recipes-devtools/go/go-1.14/0009-ld-replace-glibc-dynamic-linker-with-musl.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15.inc create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0001-allow-CC-and-CXX-to-have-multiple-words.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0004-ld-add-soname-to-shareable-objects.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0006-cmd-dist-separate-host-and-target-builds.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0007-cmd-go-make-GOROOT-precious-by-default.patch create mode 100644 poky/meta/recipes-devtools/go/go-1.15/0008-use-GOBUILDMODE-to-set-buildmode.patch delete mode 100644 poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb create mode 100644 poky/meta/recipes-devtools/go/go-binary-native_1.15.2.bb delete mode 100644 poky/meta/recipes-devtools/go/go-cross-canadian_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go-cross-canadian_1.15.bb delete mode 100644 poky/meta/recipes-devtools/go/go-cross_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go-cross_1.15.bb delete mode 100644 poky/meta/recipes-devtools/go/go-crosssdk_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go-crosssdk_1.15.bb delete mode 100644 
poky/meta/recipes-devtools/go/go-native_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go-native_1.15.bb delete mode 100644 poky/meta/recipes-devtools/go/go-runtime_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go-runtime_1.15.bb delete mode 100644 poky/meta/recipes-devtools/go/go_1.14.bb create mode 100644 poky/meta/recipes-devtools/go/go_1.15.bb delete mode 100644 poky/meta/recipes-devtools/help2man/help2man-native_1.47.15.bb create mode 100644 poky/meta/recipes-devtools/help2man/help2man_1.47.15.bb delete mode 100644 poky/meta/recipes-devtools/librepo/librepo_1.12.0.bb create mode 100644 poky/meta/recipes-devtools/librepo/librepo_1.12.1.bb create mode 100644 poky/meta/recipes-devtools/log4cplus/log4cplus_2.0.5.bb delete mode 100644 poky/meta/recipes-devtools/meson/meson_0.55.0.bb create mode 100644 poky/meta/recipes-devtools/meson/meson_0.55.1.bb delete mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.0.bb create mode 100644 poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.1.bb delete mode 100644 poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb create mode 100644 poky/meta/recipes-devtools/nasm/nasm_2.15.05.bb delete mode 100644 poky/meta/recipes-devtools/ninja/ninja_1.10.0.bb create mode 100644 poky/meta/recipes-devtools/ninja/ninja_1.10.1.bb delete mode 100644 poky/meta/recipes-devtools/packagegroups/packagegroup-core-device-devel.bb delete mode 100644 poky/meta/recipes-devtools/patchelf/patchelf_0.11.bb create mode 100644 poky/meta/recipes-devtools/patchelf/patchelf_0.12.bb delete mode 100644 poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb create mode 100644 poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.1.bb delete mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb create mode 100644 poky/meta/recipes-devtools/python/python3-setuptools_49.6.0.bb create mode 100644 poky/meta/recipes-devtools/qemu/qemu/usb-fix-setup_len-init.patch delete mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb create mode 100644 poky/meta/recipes-devtools/rsync/rsync_3.2.3.bb delete mode 100644 poky/meta/recipes-devtools/vala/vala_0.48.7.bb create mode 100644 poky/meta/recipes-devtools/vala/vala_0.48.9.bb delete mode 100644 poky/meta/recipes-devtools/valgrind/valgrind/0001-adjust-path-filter-for-2-memcheck-tests.patch create mode 100644 poky/meta/recipes-devtools/valgrind/valgrind/0001-memcheck-vgtests-remove-fullpath-after-flags.patch delete mode 100644 poky/meta/recipes-extended/acpica/acpica_20200528.bb create mode 100644 poky/meta/recipes-extended/acpica/acpica_20200717.bb delete mode 100644 poky/meta/recipes-extended/asciidoc/asciidoc_9.0.1.bb create mode 100644 poky/meta/recipes-extended/asciidoc/asciidoc_9.0.2.bb delete mode 100644 poky/meta/recipes-extended/iputils/iputils/0001-iputils-Initialize-libgcrypt.patch delete mode 100644 poky/meta/recipes-extended/iputils/iputils/0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch delete mode 100644 poky/meta/recipes-extended/iputils/iputils/0001-ninfod-fix-systemd-Documentation-url-error.patch delete mode 100644 poky/meta/recipes-extended/iputils/iputils_s20190709.bb create mode 100644 poky/meta/recipes-extended/iputils/iputils_s20200821.bb delete mode 100644 poky/meta/recipes-extended/libpipeline/libpipeline_1.5.2.bb create mode 100644 poky/meta/recipes-extended/libpipeline/libpipeline_1.5.3.bb delete mode 100644 poky/meta/recipes-extended/man-pages/man-pages_5.07.bb create mode 100644 poky/meta/recipes-extended/man-pages/man-pages_5.08.bb 
delete mode 100644 poky/meta/recipes-extended/mc/files/0001-Ticket-4070-misc-Makefile.am-install-mc.lib-only-onc.patch delete mode 100644 poky/meta/recipes-extended/mc/mc_4.8.24.bb create mode 100644 poky/meta/recipes-extended/mc/mc_4.8.25.bb delete mode 100644 poky/meta/recipes-extended/msmtp/msmtp_1.8.11.bb create mode 100644 poky/meta/recipes-extended/msmtp/msmtp_1.8.12.bb delete mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb create mode 100644 poky/meta/recipes-extended/stress-ng/stress-ng_0.11.19.bb delete mode 100644 poky/meta/recipes-extended/sysstat/sysstat_12.2.2.bb create mode 100644 poky/meta/recipes-extended/sysstat/sysstat_12.4.0.bb delete mode 100644 poky/meta/recipes-gnome/epiphany/epiphany_3.36.3.bb create mode 100644 poky/meta/recipes-gnome/epiphany/epiphany_3.36.4.bb delete mode 100644 poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb create mode 100644 poky/meta/recipes-gnome/gtk+/gtk+3_3.24.22.bb create mode 100644 poky/meta/recipes-gnome/json-glib/json-glib/0001-scanner-use-macro-instead-of-cast-to-convert-pointer.patch create mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Do-not-disable-introspection-in-cross-builds.patch create mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-src-hb-gobject-enums.cc.tmpl-write-out-only-the-file.patch create mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz/version-race.patch delete mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb create mode 100644 poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.2.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa-gl_20.1.6.bb delete mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb create mode 100644 poky/meta/recipes-graphics/mesa/mesa_20.1.6.bb delete mode 100644 poky/meta/recipes-graphics/pango/pango/0001-Fix-build-reproducibility.patch delete mode 100644 poky/meta/recipes-graphics/pango/pango_1.44.7.bb create mode 100644 poky/meta/recipes-graphics/pango/pango_1.46.1.bb delete mode 100644 poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb create mode 100644 poky/meta/recipes-graphics/wayland/libinput_1.16.1.bb delete mode 100644 poky/meta/recipes-graphics/wayland/weston-init/qemuall/weston.ini delete mode 100644 poky/meta/recipes-graphics/wayland/weston-init/qemux86-64/weston.ini delete mode 100644 poky/meta/recipes-graphics/wayland/weston-init/qemux86/weston.ini create mode 100644 poky/meta/recipes-graphics/wayland/weston-init/weston-autologin create mode 100644 poky/meta/recipes-graphics/wayland/weston-init/weston@.socket create mode 100644 poky/meta/recipes-graphics/wayland/weston/0001-tests-include-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch create mode 100644 poky/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch delete mode 100644 poky/meta/recipes-graphics/wayland/weston_8.0.0.bb create mode 100644 poky/meta/recipes-graphics/wayland/weston_9.0.0.bb delete mode 100644 poky/meta/recipes-graphics/xorg-app/xev_1.2.3.bb create mode 100644 poky/meta/recipes-graphics/xorg-app/xev_1.2.4.bb delete mode 100644 poky/meta/recipes-graphics/xorg-font/font-alias-1.0.3/nocompiler.patch create mode 100644 poky/meta/recipes-graphics/xorg-font/font-alias-1.0.4/nocompiler.patch delete mode 100644 poky/meta/recipes-graphics/xorg-font/font-alias_1.0.3.bb create mode 100644 poky/meta/recipes-graphics/xorg-font/font-alias_1.0.4.bb create mode 100644 poky/meta/recipes-graphics/xorg-lib/libx11_1.6.12.bb delete mode 100644 
poky/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb create mode 100644 poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Fix-segfault-on-probing-a-non-PCI-platform-device-on.patch delete mode 100644 poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb create mode 100644 poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.9.bb delete mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.10.bb create mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.11.bb delete mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb create mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.11.bb delete mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.10.bb create mode 100644 poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.11.bb delete mode 100644 poky/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch delete mode 100644 poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb create mode 100644 poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200817.bb create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0001-Kconfig-fix-dependency-issue-when-building-in-tree-w.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0002-fix-Move-mmutrace.h-into-the-mmu-sub-directory-v5.9.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0003-fix-KVM-x86-mmu-Make-kvm_mmu_page-definition-and-acc.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0004-fix-ext4-limit-the-length-of-per-inode-prealloc-list.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-indicate-via-a-block-bitmap-read-is-prefetc.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0006-fix-removal-of-smp_-read_barrier_depends-v5.9.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0007-fix-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0008-fix-writeback-Fix-sync-livelock-due-to-b_dirty_time-.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0009-fix-version-ranges-for-ext4_discard_preallocations-a.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-modules/0010-Fix-system-call-filter-table.patch create mode 100644 poky/meta/recipes-kernel/lttng/lttng-platforms.inc delete mode 100644 poky/meta/recipes-sato/webkit/wpebackend-fdo_1.6.1.bb create mode 100644 poky/meta/recipes-sato/webkit/wpebackend-fdo_1.7.1.bb delete mode 100644 poky/meta/recipes-support/atk/at-spi2-core_2.36.0.bb create mode 100644 poky/meta/recipes-support/atk/at-spi2-core_2.36.1.bb delete mode 100644 poky/meta/recipes-support/boost/bjam-native_1.73.0.bb create mode 100644 poky/meta/recipes-support/boost/bjam-native_1.74.0.bb delete mode 100644 poky/meta/recipes-support/boost/boost-1.73.0.inc create mode 100644 poky/meta/recipes-support/boost/boost-1.74.0.inc delete mode 100644 poky/meta/recipes-support/boost/boost/0001-Make-index-detail-rtree-visitors-insert-base-class-p.patch delete mode 100644 poky/meta/recipes-support/boost/boost_1.73.0.bb create mode 100644 poky/meta/recipes-support/boost/boost_1.74.0.bb create mode 100644 poky/meta/recipes-support/debianutils/debianutils_4.11.1.bb delete mode 100644 poky/meta/recipes-support/debianutils/debianutils_4.11.bb delete mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_153.bb create mode 100644 poky/meta/recipes-support/diffoscope/diffoscope_160.bb delete mode 100644 
poky/meta/recipes-support/enchant/enchant2_2.2.8.bb create mode 100644 poky/meta/recipes-support/enchant/enchant2_2.2.9.bb delete mode 100644 poky/meta/recipes-support/gnupg/gnupg_2.2.21.bb create mode 100644 poky/meta/recipes-support/gnupg/gnupg_2.2.23.bb create mode 100644 poky/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch delete mode 100644 poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb create mode 100644 poky/meta/recipes-support/gpgme/gpgme_1.14.0.bb delete mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.10.bb create mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.11.bb delete mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng/0001-configure.ac-add-library-if-header-found.patch delete mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng/0002-Wrap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch delete mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.10.bb create mode 100644 poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.11.bb delete mode 100644 poky/meta/recipes-support/libcap/libcap_2.42.bb create mode 100644 poky/meta/recipes-support/libcap/libcap_2.43.bb delete mode 100644 poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb create mode 100644 poky/meta/recipes-support/libcheck/libcheck_0.15.2.bb delete mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch delete mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch delete mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error_1.38.bb create mode 100644 poky/meta/recipes-support/libgpg-error/libgpg-error_1.39.bb delete mode 100644 poky/meta/recipes-support/libmpc/libmpc_1.1.0.bb create mode 100644 poky/meta/recipes-support/libmpc/libmpc_1.2.0.bb delete mode 100644 poky/meta/recipes-support/libpsl/libpsl/0001-Makefile.am-use-PYTHON-when-invoking-psl-make-dafsa.patch delete mode 100644 poky/meta/recipes-support/libpsl/libpsl/0001-gtk-doc-do-not-include-tree_index.sgml.patch delete mode 100644 poky/meta/recipes-support/libpsl/libpsl_0.21.0.bb create mode 100644 poky/meta/recipes-support/libpsl/libpsl_0.21.1.bb delete mode 100644 poky/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb create mode 100644 poky/meta/recipes-support/p11-kit/p11-kit_0.23.21.bb create mode 100644 poky/meta/recipes-support/re2c/re2c_2.0.3.bb delete mode 100644 poky/meta/recipes-support/re2c/re2c_2.0.bb (limited to 'poky/meta/recipes-devtools/meson/meson.inc') diff --git a/poky/bitbake/bin/bitbake-server b/poky/bitbake/bin/bitbake-server new file mode 100755 index 000000000..ffbc7894e --- /dev/null +++ b/poky/bitbake/bin/bitbake-server @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2020 Richard Purdie +# + +import os +import sys +import warnings +import logging +sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) + +if sys.getfilesystemencoding() != "utf-8": + sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.") + +# Users shouldn't be running this code directly +if len(sys.argv) != 10 or not sys.argv[1].startswith("decafbad"): + print("bitbake-server is meant for internal execution by bitbake itself, please don't use it standalone.") + sys.exit(1) + +import bb.server.process + +lockfd = int(sys.argv[2]) 
+readypipeinfd = int(sys.argv[3]) +logfile = sys.argv[4] +lockname = sys.argv[5] +sockname = sys.argv[6] +timeout = sys.argv[7] +xmlrpcinterface = (sys.argv[8], int(sys.argv[9])) +if xmlrpcinterface[0] == "None": + xmlrpcinterface = (None, xmlrpcinterface[1]) +if timeout == "None": + timeout = None + +# Replace standard fds with our own +with open('/dev/null', 'r') as si: + os.dup2(si.fileno(), sys.stdin.fileno()) + +so = open(logfile, 'a+') +os.dup2(so.fileno(), sys.stdout.fileno()) +os.dup2(so.fileno(), sys.stderr.fileno()) + +# Have stdout and stderr be the same so log output matches chronologically +# and there aren't two seperate buffers +sys.stderr = sys.stdout + +logger = logging.getLogger("BitBake") +# Ensure logging messages get sent to the UI as events +handler = bb.event.LogHandler() +logger.addHandler(handler) + +bb.server.process.execServer(lockfd, readypipeinfd, lockname, sockname, timeout, xmlrpcinterface) + diff --git a/poky/bitbake/bin/bitbake-worker b/poky/bitbake/bin/bitbake-worker index 97cc0fd60..9334f11fb 100755 --- a/poky/bitbake/bin/bitbake-worker +++ b/poky/bitbake/bin/bitbake-worker @@ -413,9 +413,9 @@ class BitbakeWorker(object): def handle_workerdata(self, data): self.workerdata = pickle.loads(data) + bb.build.verboseShellLogging = self.workerdata["build_verbose_shell"] + bb.build.verboseStdoutLogging = self.workerdata["build_verbose_stdout"] bb.msg.loggerDefaultLogLevel = self.workerdata["logdefaultlevel"] - bb.msg.loggerDefaultVerbose = self.workerdata["logdefaultverbose"] - bb.msg.loggerVerboseLogs = self.workerdata["logdefaultverboselogs"] bb.msg.loggerDefaultDomains = self.workerdata["logdefaultdomain"] for mc in self.databuilder.mcdata: self.databuilder.mcdata[mc].setVar("PRSERV_HOST", self.workerdata["prhost"]) diff --git a/poky/bitbake/contrib/bbparse-torture.py b/poky/bitbake/contrib/bbparse-torture.py new file mode 100755 index 000000000..c25d547bb --- /dev/null +++ b/poky/bitbake/contrib/bbparse-torture.py @@ -0,0 +1,89 @@ +#! /usr/bin/env python3 +# +# Copyright (C) 2020 Joshua Watt +# +# SPDX-License-Identifier: MIT + +import argparse +import os +import random +import shutil +import signal +import subprocess +import sys +import time + + +def try_unlink(path): + try: + os.unlink(path) + except: + pass + + +def main(): + def cleanup(): + shutil.rmtree("tmp/cache", ignore_errors=True) + try_unlink("bitbake-cookerdaemon.log") + try_unlink("bitbake.sock") + try_unlink("bitbake.lock") + + parser = argparse.ArgumentParser( + description="Bitbake parser torture test", + epilog=""" + A torture test for bitbake's parser. Repeatedly interrupts parsing until + bitbake decides to deadlock. + """, + ) + + args = parser.parse_args() + + if not "BUILDDIR" in os.environ: + print( + "'BUILDDIR' not found in the environment. Did you initialize the build environment?" 
+ ) + return 1 + + os.chdir(os.environ["BUILDDIR"]) + + run_num = 0 + while True: + if run_num % 100 == 0: + print("Calibrating wait time...") + cleanup() + + start_time = time.monotonic() + r = subprocess.run(["bitbake", "-p"]) + max_wait_time = time.monotonic() - start_time + + if r.returncode != 0: + print("Calibration run exited with %d" % r.returncode) + return 1 + + print("Maximum wait time is %f seconds" % max_wait_time) + + run_num += 1 + wait_time = random.random() * max_wait_time + + print("Run #%d" % run_num) + print("Will sleep for %f seconds" % wait_time) + + cleanup() + with subprocess.Popen(["bitbake", "-p"]) as proc: + time.sleep(wait_time) + proc.send_signal(signal.SIGINT) + try: + proc.wait(45) + except subprocess.TimeoutExpired: + print("Run #%d: Waited too long. Possible deadlock!" % run_num) + proc.wait() + return 1 + + if proc.returncode == 0: + print("Exited successfully. Timeout too long?") + else: + print("Exited with %d" % proc.returncode) + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/poky/bitbake/doc/.gitignore b/poky/bitbake/doc/.gitignore new file mode 100644 index 000000000..69fa449dd --- /dev/null +++ b/poky/bitbake/doc/.gitignore @@ -0,0 +1 @@ +_build/ diff --git a/poky/bitbake/doc/Makefile.sphinx b/poky/bitbake/doc/Makefile.sphinx new file mode 100644 index 000000000..c663c2954 --- /dev/null +++ b/poky/bitbake/doc/Makefile.sphinx @@ -0,0 +1,31 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build +DESTDIR = final + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile.sphinx clean publish + +publish: Makefile.sphinx html singlehtml + rm -rf $(BUILDDIR)/$(DESTDIR)/ + mkdir -p $(BUILDDIR)/$(DESTDIR)/ + cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/ + cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html + sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html + +clean: + @rm -rf $(BUILDDIR) + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile.sphinx + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/poky/bitbake/doc/_templates/breadcrumbs.html b/poky/bitbake/doc/_templates/breadcrumbs.html new file mode 100644 index 000000000..eb6244b74 --- /dev/null +++ b/poky/bitbake/doc/_templates/breadcrumbs.html @@ -0,0 +1,14 @@ +{% extends "!breadcrumbs.html" %} + +{% block breadcrumbs %} +
+ {{ doctype or 'single' }} + {{ release }} +
+
»
+ {% for doc in parents %} +
{{ doc.title }} »
+ {% endfor %} +
{{ title }}
  • +{% endblock %} + diff --git a/poky/bitbake/doc/_templates/layout.html b/poky/bitbake/doc/_templates/layout.html new file mode 100644 index 000000000..308d5c7a2 --- /dev/null +++ b/poky/bitbake/doc/_templates/layout.html @@ -0,0 +1,7 @@ +{% extends "!layout.html" %} + +{% block extrabody %} +
    +
    +{% endblock %} + diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst new file mode 100644 index 000000000..019afd22f --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-execution.rst @@ -0,0 +1,733 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +========= +Execution +========= + +| + +The primary purpose for running BitBake is to produce some kind of +output such as a single installable package, a kernel, a software +development kit, or even a full, board-specific bootable Linux image, +complete with bootloader, kernel, and root filesystem. Of course, you +can execute the ``bitbake`` command with options that cause it to +execute single tasks, compile single recipe files, capture or clear +data, or simply return information about the execution environment. + +This chapter describes BitBake's execution process from start to finish +when you use it to create an image. The execution process is launched +using the following command form: :: + + $ bitbake target + +For information on +the BitBake command and its options, see ":ref:`The BitBake Command +`" section. + +.. note:: + + Prior to executing BitBake, you should take advantage of available + parallel thread execution on your build host by setting the + :term:`BB_NUMBER_THREADS` variable in + your project's ``local.conf`` configuration file. + + A common method to determine this value for your build host is to run + the following: :: + + $ grep processor /proc/cpuinfo + + This command returns + the number of processors, which takes into account hyper-threading. + Thus, a quad-core build host with hyper-threading most likely shows + eight processors, which is the value you would then assign to + ``BB_NUMBER_THREADS``. + + A possibly simpler solution is that some Linux distributions (e.g. + Debian and Ubuntu) provide the ``ncpus`` command. + +Parsing the Base Configuration Metadata +======================================= + +The first thing BitBake does is parse base configuration metadata. Base +configuration metadata consists of your project's ``bblayers.conf`` file +to determine what layers BitBake needs to recognize, all necessary +``layer.conf`` files (one from each layer), and ``bitbake.conf``. The +data itself is of various types: + +- **Recipes:** Details about particular pieces of software. + +- **Class Data:** An abstraction of common build information (e.g. how to + build a Linux kernel). + +- **Configuration Data:** Machine-specific settings, policy decisions, + and so forth. Configuration data acts as the glue to bind everything + together. + +The ``layer.conf`` files are used to construct key variables such as +:term:`BBPATH` and :term:`BBFILES`. +``BBPATH`` is used to search for configuration and class files under the +``conf`` and ``classes`` directories, respectively. ``BBFILES`` is used +to locate both recipe and recipe append files (``.bb`` and +``.bbappend``). If there is no ``bblayers.conf`` file, it is assumed the +user has set the ``BBPATH`` and ``BBFILES`` directly in the environment. + +Next, the ``bitbake.conf`` file is located using the ``BBPATH`` variable +that was just constructed. The ``bitbake.conf`` file may also include +other configuration files using the ``include`` or ``require`` +directives. 
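
As a rough illustration of the ``BBPATH`` search just described (a simplified sketch for this manual excerpt, not BitBake's actual implementation; the paths shown are made up), a configuration or class file is resolved by checking each ``BBPATH`` entry in turn: ::

    import os

    def find_in_bbpath(bbpath, relative_name):
        """Return the first match for a name such as 'conf/bitbake.conf' or
        'classes/base.bbclass' in a colon-separated BBPATH-style string."""
        for entry in bbpath.split(":"):
            candidate = os.path.join(entry, relative_name)
            if os.path.exists(candidate):
                return candidate
        return None

    # Hypothetical BBPATH with a build directory and a single layer on it:
    bbpath = "/home/user/build:/home/user/poky/meta"
    print(find_in_bbpath(bbpath, "conf/bitbake.conf"))
    print(find_in_bbpath(bbpath, "classes/base.bbclass"))

The first-match-wins nature of such a path search is also why the order of entries in ``BBPATH`` can matter when the same file name exists in more than one layer.
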
+ +Prior to parsing configuration files, BitBake looks at certain +variables, including: + +- :term:`BB_ENV_WHITELIST` +- :term:`BB_ENV_EXTRAWHITE` +- :term:`BB_PRESERVE_ENV` +- :term:`BB_ORIGENV` +- :term:`BITBAKE_UI` + +The first four variables in this list relate to how BitBake treats shell +environment variables during task execution. By default, BitBake cleans +the environment variables and provides tight control over the shell +execution environment. However, through the use of these first four +variables, you can apply your control regarding the environment +variables allowed to be used by BitBake in the shell during execution of +tasks. See the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:Passing Information Into the Build Task Environment`" +section and the information about these variables in the variable +glossary for more information on how they work and on how to use them. + +The base configuration metadata is global and therefore affects all +recipes and tasks that are executed. + +BitBake first searches the current working directory for an optional +``conf/bblayers.conf`` configuration file. This file is expected to +contain a :term:`BBLAYERS` variable that is a +space-delimited list of 'layer' directories. Recall that if BitBake +cannot find a ``bblayers.conf`` file, then it is assumed the user has +set the ``BBPATH`` and ``BBFILES`` variables directly in the +environment. + +For each directory (layer) in this list, a ``conf/layer.conf`` file is +located and parsed with the :term:`LAYERDIR` variable +being set to the directory where the layer was found. The idea is these +files automatically set up :term:`BBPATH` and other +variables correctly for a given build directory. + +BitBake then expects to find the ``conf/bitbake.conf`` file somewhere in +the user-specified ``BBPATH``. That configuration file generally has +include directives to pull in any other metadata such as files specific +to the architecture, the machine, the local environment, and so forth. + +Only variable definitions and include directives are allowed in BitBake +``.conf`` files. Some variables directly influence BitBake's behavior. +These variables might have been set from the environment depending on +the environment variables previously mentioned or set in the +configuration files. The ":ref:`bitbake-user-manual/bitbake-user-manual-ref-variables:Variables Glossary`" +chapter presents a full list of +variables. + +After parsing configuration files, BitBake uses its rudimentary +inheritance mechanism, which is through class files, to inherit some +standard classes. BitBake parses a class when the inherit directive +responsible for getting that class is encountered. + +The ``base.bbclass`` file is always included. Other classes that are +specified in the configuration using the +:term:`INHERIT` variable are also included. BitBake +searches for class files in a ``classes`` subdirectory under the paths +in ``BBPATH`` in the same way as configuration files. + +A good way to get an idea of the configuration files and the class files +used in your execution environment is to run the following BitBake +command: :: + + $ bitbake -e > mybb.log + +Examining the top of the ``mybb.log`` +shows you the many configuration files and class files used in your +execution environment. + +.. note:: + + You need to be aware of how BitBake parses curly braces. If a recipe + uses a closing curly brace within the function and the character has + no leading spaces, BitBake produces a parsing error. 
If you use a + pair of curly braces in a shell function, the closing curly brace + must not be located at the start of the line without leading spaces. + + Here is an example that causes BitBake to produce a parsing error: :: + + fakeroot create_shar() { + cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + usage() + { + echo "test" + ###### The following "}" at the start of the line causes a parsing error ###### + } + EOF + } + + Writing the recipe this way avoids the error: + fakeroot create_shar() { + cat << "EOF" > ${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.sh + usage() + { + echo "test" + ###### The following "}" with a leading space at the start of the line avoids the error ###### + } + EOF + } + +Locating and Parsing Recipes +============================ + +During the configuration phase, BitBake will have set +:term:`BBFILES`. BitBake now uses it to construct a +list of recipes to parse, along with any append files (``.bbappend``) to +apply. ``BBFILES`` is a space-separated list of available files and +supports wildcards. An example would be: :: + + BBFILES = "/path/to/bbfiles/*.bb /path/to/appends/*.bbappend" + +BitBake parses each +recipe and append file located with ``BBFILES`` and stores the values of +various variables into the datastore. + +.. note:: + + Append files are applied in the order they are encountered in BBFILES. + +For each file, a fresh copy of the base configuration is made, then the +recipe is parsed line by line. Any inherit statements cause BitBake to +find and then parse class files (``.bbclass``) using +:term:`BBPATH` as the search path. Finally, BitBake +parses in order any append files found in ``BBFILES``. + +One common convention is to use the recipe filename to define pieces of +metadata. For example, in ``bitbake.conf`` the recipe name and version +are used to set the variables :term:`PN` and +:term:`PV`: :: + + PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}" + PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}" + +In this example, a recipe called "something_1.2.3.bb" would set +``PN`` to "something" and ``PV`` to "1.2.3". + +By the time parsing is complete for a recipe, BitBake has a list of +tasks that the recipe defines and a set of data consisting of keys and +values as well as dependency information about the tasks. + +BitBake does not need all of this information. It only needs a small +subset of the information to make decisions about the recipe. +Consequently, BitBake caches the values in which it is interested and +does not store the rest of the information. Experience has shown it is +faster to re-parse the metadata than to try and write it out to the disk +and then reload it. + +Where possible, subsequent BitBake commands reuse this cache of recipe +information. The validity of this cache is determined by first computing +a checksum of the base configuration data (see +:term:`BB_HASHCONFIG_WHITELIST`) and +then checking if the checksum matches. If that checksum matches what is +in the cache and the recipe and class files have not changed, BitBake is +able to use the cache. BitBake then reloads the cached information about +the recipe instead of reparsing it from scratch. + +Recipe file collections exist to allow the user to have multiple +repositories of ``.bb`` files that contain the same exact package. For +example, one could easily use them to make one's own local copy of an +upstream repository, but with custom modifications that one does not +want upstream. 
Here is an example: :: + + BBFILES = "/stuff/openembedded/*/*.bb /stuff/openembedded.modified/*/*.bb" + BBFILE_COLLECTIONS = "upstream local" + BBFILE_PATTERN_upstream = "^/stuff/openembedded/" + BBFILE_PATTERN_local = "^/stuff/openembedded.modified/" + BBFILE_PRIORITY_upstream = "5" BBFILE_PRIORITY_local = "10" + +.. note:: + + The layers mechanism is now the preferred method of collecting code. + While the collections code remains, its main use is to set layer + priorities and to deal with overlap (conflicts) between layers. + +.. _bb-bitbake-providers: + +Providers +========= + +Assuming BitBake has been instructed to execute a target and that all +the recipe files have been parsed, BitBake starts to figure out how to +build the target. BitBake looks through the ``PROVIDES`` list for each +of the recipes. A ``PROVIDES`` list is the list of names by which the +recipe can be known. Each recipe's ``PROVIDES`` list is created +implicitly through the recipe's :term:`PN` variable and +explicitly through the recipe's :term:`PROVIDES` +variable, which is optional. + +When a recipe uses ``PROVIDES``, that recipe's functionality can be +found under an alternative name or names other than the implicit ``PN`` +name. As an example, suppose a recipe named ``keyboard_1.0.bb`` +contained the following: :: + + PROVIDES += "fullkeyboard" + +The ``PROVIDES`` +list for this recipe becomes "keyboard", which is implicit, and +"fullkeyboard", which is explicit. Consequently, the functionality found +in ``keyboard_1.0.bb`` can be found under two different names. + +.. _bb-bitbake-preferences: + +Preferences +=========== + +The ``PROVIDES`` list is only part of the solution for figuring out a +target's recipes. Because targets might have multiple providers, BitBake +needs to prioritize providers by determining provider preferences. + +A common example in which a target has multiple providers is +"virtual/kernel", which is on the ``PROVIDES`` list for each kernel +recipe. Each machine often selects the best kernel provider by using a +line similar to the following in the machine configuration file: :: + + PREFERRED_PROVIDER_virtual/kernel = "linux-yocto" + +The default :term:`PREFERRED_PROVIDER` is the provider +with the same name as the target. BitBake iterates through each target +it needs to build and resolves them and their dependencies using this +process. + +Understanding how providers are chosen is made complicated by the fact +that multiple versions might exist for a given provider. BitBake +defaults to the highest version of a provider. Version comparisons are +made using the same method as Debian. You can use the +:term:`PREFERRED_VERSION` variable to +specify a particular version. You can influence the order by using the +:term:`DEFAULT_PREFERENCE` variable. + +By default, files have a preference of "0". Setting +``DEFAULT_PREFERENCE`` to "-1" makes the recipe unlikely to be used +unless it is explicitly referenced. Setting ``DEFAULT_PREFERENCE`` to +"1" makes it likely the recipe is used. ``PREFERRED_VERSION`` overrides +any ``DEFAULT_PREFERENCE`` setting. ``DEFAULT_PREFERENCE`` is often used +to mark newer and more experimental recipe versions until they have +undergone sufficient testing to be considered stable. + +When there are multiple “versions” of a given recipe, BitBake defaults +to selecting the most recent version, unless otherwise specified. If the +recipe in question has a +:term:`DEFAULT_PREFERENCE` set lower than +the other recipes (default is 0), then it will not be selected. 
This +allows the person or persons maintaining the repository of recipe files +to specify their preference for the default selected version. +Additionally, the user can specify their preferred version. + +If the first recipe is named ``a_1.1.bb``, then the +:term:`PN` variable will be set to “a”, and the +:term:`PV` variable will be set to 1.1. + +Thus, if a recipe named ``a_1.2.bb`` exists, BitBake will choose 1.2 by +default. However, if you define the following variable in a ``.conf`` +file that BitBake parses, you can change that preference: :: + + PREFERRED_VERSION_a = "1.1" + +.. note:: + + It is common for a recipe to provide two versions -- a stable, + numbered (and preferred) version, and a version that is automatically + checked out from a source code repository that is considered more + "bleeding edge" but can be selected only explicitly. + + For example, in the OpenEmbedded codebase, there is a standard, + versioned recipe file for BusyBox, ``busybox_1.22.1.bb``, but there + is also a Git-based version, ``busybox_git.bb``, which explicitly + contains the line :: + + DEFAULT_PREFERENCE = "-1" + + to ensure that the + numbered, stable version is always preferred unless the developer + selects otherwise. + +.. _bb-bitbake-dependencies: + +Dependencies +============ + +Each target BitBake builds consists of multiple tasks such as ``fetch``, +``unpack``, ``patch``, ``configure``, and ``compile``. For best +performance on multi-core systems, BitBake considers each task as an +independent entity with its own set of dependencies. + +Dependencies are defined through several variables. You can find +information about variables BitBake uses in the +:doc:`bitbake-user-manual-ref-variables` near the end of this manual. At a +basic level, it is sufficient to know that BitBake uses the +:term:`DEPENDS` and +:term:`RDEPENDS` variables when calculating +dependencies. + +For more information on how BitBake handles dependencies, see the +:ref:`bitbake-user-manual/bitbake-user-manual-metadata:Dependencies` +section. + +.. _ref-bitbake-tasklist: + +The Task List +============= + +Based on the generated list of providers and the dependency information, +BitBake can now calculate exactly what tasks it needs to run and in what +order it needs to run them. The +:ref:`bitbake-user-manual/bitbake-user-manual-execution:executing tasks` +section has more information on how BitBake chooses which task to +execute next. + +The build now starts with BitBake forking off threads up to the limit +set in the :term:`BB_NUMBER_THREADS` +variable. BitBake continues to fork threads as long as there are tasks +ready to run, those tasks have all their dependencies met, and the +thread threshold has not been exceeded. + +It is worth noting that you can greatly speed up the build time by +properly setting the ``BB_NUMBER_THREADS`` variable. + +As each task completes, a timestamp is written to the directory +specified by the :term:`STAMP` variable. On subsequent +runs, BitBake looks in the build directory within ``tmp/stamps`` and +does not rerun tasks that are already completed unless a timestamp is +found to be invalid. Currently, invalid timestamps are only considered +on a per recipe file basis. So, for example, if the configure stamp has +a timestamp greater than the compile timestamp for a given target, then +the compile task would rerun. Running the compile task again, however, +has no effect on other providers that depend on that target. + +The exact format of the stamps is partly configurable. 
In modern +versions of BitBake, a hash is appended to the stamp so that if the +configuration changes, the stamp becomes invalid and the task is +automatically rerun. This hash, or signature used, is governed by the +signature policy that is configured (see the +:ref:`bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)` +section for information). It is also +possible to append extra metadata to the stamp using the +``[stamp-extra-info]`` task flag. For example, OpenEmbedded uses this +flag to make some tasks machine-specific. + +.. note:: + + Some tasks are marked as "nostamp" tasks. No timestamp file is + created when these tasks are run. Consequently, "nostamp" tasks are + always rerun. + +For more information on tasks, see the +:ref:`bitbake-user-manual/bitbake-user-manual-metadata:tasks` section. + +Executing Tasks +=============== + +Tasks can be either a shell task or a Python task. For shell tasks, +BitBake writes a shell script to +``${``\ :term:`T`\ ``}/run.do_taskname.pid`` and then +executes the script. The generated shell script contains all the +exported variables, and the shell functions with all variables expanded. +Output from the shell script goes to the file +``${T}/log.do_taskname.pid``. Looking at the expanded shell functions in +the run file and the output in the log files is a useful debugging +technique. + +For Python tasks, BitBake executes the task internally and logs +information to the controlling terminal. Future versions of BitBake will +write the functions to files similar to the way shell tasks are handled. +Logging will be handled in a way similar to shell tasks as well. + +The order in which BitBake runs the tasks is controlled by its task +scheduler. It is possible to configure the scheduler and define custom +implementations for specific use cases. For more information, see these +variables that control the behavior: + +- :term:`BB_SCHEDULER` + +- :term:`BB_SCHEDULERS` + +It is possible to have functions run before and after a task's main +function. This is done using the ``[prefuncs]`` and ``[postfuncs]`` +flags of the task that lists the functions to run. + +.. _checksums: + +Checksums (Signatures) +====================== + +A checksum is a unique signature of a task's inputs. The signature of a +task can be used to determine if a task needs to be run. Because it is a +change in a task's inputs that triggers running the task, BitBake needs +to detect all the inputs to a given task. For shell tasks, this turns +out to be fairly easy because BitBake generates a "run" shell script for +each task and it is possible to create a checksum that gives you a good +idea of when the task's data changes. + +To complicate the problem, some things should not be included in the +checksum. First, there is the actual specific build path of a given task +- the working directory. It does not matter if the working directory +changes because it should not affect the output for target packages. The +simplistic approach for excluding the working directory is to set it to +some fixed value and create the checksum for the "run" script. BitBake +goes one step better and uses the +:term:`BB_HASHBASE_WHITELIST` variable +to define a list of variables that should never be included when +generating the signatures. + +Another problem results from the "run" scripts containing functions that +might or might not get called. The incremental build solution contains +code that figures out dependencies between shell functions. 
This code is +used to prune the "run" scripts down to the minimum set, thereby +alleviating this problem and making the "run" scripts much more readable +as a bonus. + +So far we have solutions for shell scripts. What about Python tasks? The +same approach applies even though these tasks are more difficult. The +process needs to figure out what variables a Python function accesses +and what functions it calls. Again, the incremental build solution +contains code that first figures out the variable and function +dependencies, and then creates a checksum for the data used as the input +to the task. + +Like the working directory case, situations exist where dependencies +should be ignored. For these cases, you can instruct the build process +to ignore a dependency by using a line like the following: :: + + PACKAGE_ARCHS[vardepsexclude] = "MACHINE" + +This example ensures that the +``PACKAGE_ARCHS`` variable does not depend on the value of ``MACHINE``, +even if it does reference it. + +Equally, there are cases where we need to add dependencies BitBake is +not able to find. You can accomplish this by using a line like the +following: :: + + PACKAGE_ARCHS[vardeps] = "MACHINE" + +This example explicitly +adds the ``MACHINE`` variable as a dependency for ``PACKAGE_ARCHS``. + +Consider a case with in-line Python, for example, where BitBake is not +able to figure out dependencies. When running in debug mode (i.e. using +``-DDD``), BitBake produces output when it discovers something for which +it cannot figure out dependencies. + +Thus far, this section has limited discussion to the direct inputs into +a task. Information based on direct inputs is referred to as the +"basehash" in the code. However, there is still the question of a task's +indirect inputs - the things that were already built and present in the +build directory. The checksum (or signature) for a particular task needs +to add the hashes of all the tasks on which the particular task depends. +Choosing which dependencies to add is a policy decision. However, the +effect is to generate a master checksum that combines the basehash and +the hashes of the task's dependencies. + +At the code level, there are a variety of ways both the basehash and the +dependent task hashes can be influenced. Within the BitBake +configuration file, we can give BitBake some extra information to help +it construct the basehash. The following statement effectively results +in a list of global variable dependency excludes - variables never +included in any checksum. This example uses variables from OpenEmbedded +to help illustrate the concept: :: + + BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \ + SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL \ + USER FILESPATH STAGING_DIR_HOST STAGING_DIR_TARGET COREBASE PRSERV_HOST \ + PRSERV_DUMPDIR PRSERV_DUMPFILE PRSERV_LOCKDOWN PARALLEL_MAKE \ + CCACHE_DIR EXTERNAL_TOOLCHAIN CCACHE CCACHE_DISABLE LICENSE_PATH SDKPKGSUFFIX" + +The previous example excludes the work directory, which is part of +``TMPDIR``. + +The rules for deciding which hashes of dependent tasks to include +through dependency chains are more complex and are generally +accomplished with a Python function. The code in +``meta/lib/oe/sstatesig.py`` shows two examples of this and also +illustrates how you can insert your own policy into the system if so +desired. This file defines the two basic signature generators +OpenEmbedded-Core uses: "OEBasic" and "OEBasicHash". 
By default, there +is a dummy "noop" signature handler enabled in BitBake. This means that +behavior is unchanged from previous versions. ``OE-Core`` uses the +"OEBasicHash" signature handler by default through this setting in the +``bitbake.conf`` file: :: + + BB_SIGNATURE_HANDLER ?= "OEBasicHash" + +The "OEBasicHash" ``BB_SIGNATURE_HANDLER`` is the same as the "OEBasic" +version but adds the task hash to the stamp files. This results in any +metadata change that changes the task hash, automatically causing the +task to be run again. This removes the need to bump +:term:`PR` values, and changes to metadata automatically +ripple across the build. + +It is also worth noting that the end result of these signature +generators is to make some dependency and hash information available to +the build. This information includes: + +- ``BB_BASEHASH_task-``\ *taskname*: The base hashes for each task in the + recipe. + +- ``BB_BASEHASH_``\ *filename:taskname*: The base hashes for each + dependent task. + +- ``BBHASHDEPS_``\ *filename:taskname*: The task dependencies for + each task. + +- ``BB_TASKHASH``: The hash of the currently running task. + +It is worth noting that BitBake's "-S" option lets you debug BitBake's +processing of signatures. The options passed to -S allow different +debugging modes to be used, either using BitBake's own debug functions +or possibly those defined in the metadata/signature handler itself. The +simplest parameter to pass is "none", which causes a set of signature +information to be written out into ``STAMPS_DIR`` corresponding to the +targets specified. The other currently available parameter is +"printdiff", which causes BitBake to try to establish the closest +signature match it can (e.g. in the sstate cache) and then run +``bitbake-diffsigs`` over the matches to determine the stamps and delta +where these two stamp trees diverge. + +.. note:: + + It is likely that future versions of BitBake will provide other + signature handlers triggered through additional "-S" parameters. + +You can find more information on checksum metadata in the +:ref:`bitbake-user-manual/bitbake-user-manual-metadata:task checksums and setscene` +section. + +Setscene +======== + +The setscene process enables BitBake to handle "pre-built" artifacts. +The ability to handle and reuse these artifacts allows BitBake the +luxury of not having to build something from scratch every time. +Instead, BitBake can use, when possible, existing build artifacts. + +BitBake needs to have reliable data indicating whether or not an +artifact is compatible. Signatures, described in the previous section, +provide an ideal way of representing whether an artifact is compatible. +If a signature is the same, an object can be reused. + +If an object can be reused, the problem then becomes how to replace a +given task or set of tasks with the pre-built artifact. BitBake solves +the problem with the "setscene" process. + +When BitBake is asked to build a given target, before building anything, +it first asks whether cached information is available for any of the +targets it's building, or any of the intermediate targets. If cached +information is available, BitBake uses this information instead of +running the main tasks. + +BitBake first calls the function defined by the +:term:`BB_HASHCHECK_FUNCTION` variable +with a list of tasks and corresponding hashes it wants to build. This +function is designed to be fast and returns a list of the tasks for +which it believes in can obtain artifacts. 
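
As a purely conceptual sketch of that idea (the function name, arguments, and cache layout below are invented for illustration and do not reflect the real ``BB_HASHCHECK_FUNCTION`` interface), such a check amounts to asking, for each task signature, whether a matching pre-built artifact already exists somewhere: ::

    import os

    def find_reusable_tasks(task_hashes, cache_dir):
        """Given a mapping of task name -> signature, return the tasks whose
        signature has a matching artifact file in cache_dir (a hypothetical
        one-file-per-signature layout)."""
        reusable = []
        for task, sig in task_hashes.items():
            if os.path.exists(os.path.join(cache_dir, sig + ".tar.zst")):
                reusable.append(task)
        return reusable

    # Only tasks returned here would be candidates for setscene execution.
    print(find_reusable_tasks({"keyboard:do_compile": "deadbeef"}, "/tmp/artifact-cache"))
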
+ +Next, for each of the tasks that were returned as possibilities, BitBake +executes a setscene version of the task that the possible artifact +covers. Setscene versions of a task have the string "_setscene" appended +to the task name. So, for example, the task with the name ``xxx`` has a +setscene task named ``xxx_setscene``. The setscene version of the task +executes and provides the necessary artifacts returning either success +or failure. + +As previously mentioned, an artifact can cover more than one task. For +example, it is pointless to obtain a compiler if you already have the +compiled binary. To handle this, BitBake calls the +:term:`BB_SETSCENE_DEPVALID` function for +each successful setscene task to know whether or not it needs to obtain +the dependencies of that task. + +Finally, after all the setscene tasks have executed, BitBake calls the +function listed in +:term:`BB_SETSCENE_VERIFY_FUNCTION2` +with the list of tasks BitBake thinks has been "covered". The metadata +can then ensure that this list is correct and can inform BitBake that it +wants specific tasks to be run regardless of the setscene result. + +You can find more information on setscene metadata in the +:ref:`bitbake-user-manual/bitbake-user-manual-metadata:task checksums and setscene` +section. + +Logging +======= + +In addition to the standard command line option to control how verbose +builds are when execute, bitbake also supports user defined +configuration of the `Python +logging `__ facilities +through the :term:`BB_LOGCONFIG` variable. This +variable defines a json or yaml `logging +configuration `__ +that will be intelligently merged into the default configuration. The +logging configuration is merged using the following rules: + +- The user defined configuration will completely replace the default + configuration if top level key ``bitbake_merge`` is set to the value + ``False``. In this case, all other rules are ignored. + +- The user configuration must have a top level ``version`` which must + match the value of the default configuration. + +- Any keys defined in the ``handlers``, ``formatters``, or ``filters``, + will be merged into the same section in the default configuration, + with the user specified keys taking replacing a default one if there + is a conflict. In practice, this means that if both the default + configuration and user configuration specify a handler named + ``myhandler``, the user defined one will replace the default. To + prevent the user from inadvertently replacing a default handler, + formatter, or filter, all of the default ones are named with a prefix + of "``BitBake.``" + +- If a logger is defined by the user with the key ``bitbake_merge`` set + to ``False``, that logger will be completely replaced by user + configuration. In this case, no other rules will apply to that + logger. + +- All user defined ``filter`` and ``handlers`` properties for a given + logger will be merged with corresponding properties from the default + logger. 
For example, if the user configuration adds a filter called + ``myFilter`` to the ``BitBake.SigGen``, and the default configuration + adds a filter called ``BitBake.defaultFilter``, both filters will be + applied to the logger + +As an example, consider the following user logging configuration file +which logs all Hash Equivalence related messages of VERBOSE or higher to +a file called ``hashequiv.log`` :: + + { + "version": 1, + "handlers": { + "autobuilderlog": { + "class": "logging.FileHandler", + "formatter": "logfileFormatter", + "level": "DEBUG", + "filename": "hashequiv.log", + "mode": "w" + } + }, + "formatters": { + "logfileFormatter": { + "format": "%(name)s: %(levelname)s: %(message)s" + } + }, + "loggers": { + "BitBake.SigGen.HashEquiv": { + "level": "VERBOSE", + "handlers": ["autobuilderlog"] + }, + "BitBake.RunQueue.HashEquiv": { + "level": "VERBOSE", + "handlers": ["autobuilderlog"] + } + } + } diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst new file mode 100644 index 000000000..f62ddffe8 --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-fetching.rst @@ -0,0 +1,652 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +===================== +File Download Support +===================== + +| + +BitBake's fetch module is a standalone piece of library code that deals +with the intricacies of downloading source code and files from remote +systems. Fetching source code is one of the cornerstones of building +software. As such, this module forms an important part of BitBake. + +The current fetch module is called "fetch2" and refers to the fact that +it is the second major version of the API. The original version is +obsolete and has been removed from the codebase. Thus, in all cases, +"fetch" refers to "fetch2" in this manual. + +The Download (Fetch) +==================== + +BitBake takes several steps when fetching source code or files. The +fetcher codebase deals with two distinct processes in order: obtaining +the files from somewhere (cached or otherwise) and then unpacking those +files into a specific location and perhaps in a specific way. Getting +and unpacking the files is often optionally followed by patching. +Patching, however, is not covered by this module. + +The code to execute the first part of this process, a fetch, looks +something like the following: :: + + src_uri = (d.getVar('SRC_URI') or "").split() + fetcher = bb.fetch2.Fetch(src_uri, d) + fetcher.download() + +This code sets up an instance of the fetch class. The instance uses a +space-separated list of URLs from the :term:`SRC_URI` +variable and then calls the ``download`` method to download the files. + +The instantiation of the fetch class is usually followed by: :: + + rootdir = l.getVar('WORKDIR') + fetcher.unpack(rootdir) + +This code unpacks the downloaded files to the specified by ``WORKDIR``. + +.. note:: + + For convenience, the naming in these examples matches the variables + used by OpenEmbedded. If you want to see the above code in action, + examine the OpenEmbedded class file ``base.bbclass`` + . + +The ``SRC_URI`` and ``WORKDIR`` variables are not hardcoded into the +fetcher, since those fetcher methods can be (and are) called with +different variable names. In OpenEmbedded for example, the shared state +(sstate) code uses the fetch module to fetch the sstate files. 
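
Putting the two fragments above together, a minimal fetch-and-unpack helper might look like the following sketch. It only uses the calls already shown in this section; the wrapper function itself is hypothetical and assumes it is handed a populated BitBake datastore ``d``: ::

    import bb.fetch2

    def fetch_and_unpack(d):
        # SRC_URI may be unset, so fall back to an empty URL list.
        src_uri = (d.getVar('SRC_URI') or "").split()
        if not src_uri:
            return

        fetcher = bb.fetch2.Fetch(src_uri, d)
        fetcher.download()            # premirrors, then SRC_URI, then mirrors

        # Unpack into the work directory; any variable holding a suitable
        # path could be used instead of WORKDIR.
        fetcher.unpack(d.getVar('WORKDIR'))
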
+ +When the ``download()`` method is called, BitBake tries to resolve the +URLs by looking for source files in a specific search order: + +- *Pre-mirror Sites:* BitBake first uses pre-mirrors to try and find + source files. These locations are defined using the + :term:`PREMIRRORS` variable. + +- *Source URI:* If pre-mirrors fail, BitBake uses the original URL (e.g + from ``SRC_URI``). + +- *Mirror Sites:* If fetch failures occur, BitBake next uses mirror + locations as defined by the :term:`MIRRORS` variable. + +For each URL passed to the fetcher, the fetcher calls the submodule that +handles that particular URL type. This behavior can be the source of +some confusion when you are providing URLs for the ``SRC_URI`` variable. +Consider the following two URLs: :: + + http://git.yoctoproject.org/git/poky;protocol=git + git://git.yoctoproject.org/git/poky;protocol=http + +In the former case, the URL is passed to the ``wget`` fetcher, which does not +understand "git". Therefore, the latter case is the correct form since the Git +fetcher does know how to use HTTP as a transport. + +Here are some examples that show commonly used mirror definitions: :: + + PREMIRRORS ?= "\ + bzr://.*/.\* http://somemirror.org/sources/ \\n \ + cvs://.*/.\* http://somemirror.org/sources/ \\n \ + git://.*/.\* http://somemirror.org/sources/ \\n \ + hg://.*/.\* http://somemirror.org/sources/ \\n \ + osc://.*/.\* http://somemirror.org/sources/ \\n \ + p4://.*/.\* http://somemirror.org/sources/ \\n \ + svn://.*/.\* http://somemirror.org/sources/ \\n" + + MIRRORS =+ "\ + ftp://.*/.\* http://somemirror.org/sources/ \\n \ + http://.*/.\* http://somemirror.org/sources/ \\n \ + https://.*/.\* http://somemirror.org/sources/ \\n" + +It is useful to note that BitBake +supports cross-URLs. It is possible to mirror a Git repository on an +HTTP server as a tarball. This is what the ``git://`` mapping in the +previous example does. + +Since network accesses are slow, BitBake maintains a cache of files +downloaded from the network. Any source files that are not local (i.e. +downloaded from the Internet) are placed into the download directory, +which is specified by the :term:`DL_DIR` variable. + +File integrity is of key importance for reproducing builds. For +non-local archive downloads, the fetcher code can verify SHA-256 and MD5 +checksums to ensure the archives have been downloaded correctly. You can +specify these checksums by using the ``SRC_URI`` variable with the +appropriate varflags as follows: :: + + SRC_URI[md5sum] = "value" + SRC_URI[sha256sum] = "value" + +You can also specify the checksums as +parameters on the ``SRC_URI`` as shown below: :: + + SRC_URI = "http://example.com/foobar.tar.bz2;md5sum=4a8e0f237e961fd7785d19d07fdb994d" + +If multiple URIs exist, you can specify the checksums either directly as +in the previous example, or you can name the URLs. The following syntax +shows how you name the URIs: :: + + SRC_URI = "http://example.com/foobar.tar.bz2;name=foo" + SRC_URI[foo.md5sum] = 4a8e0f237e961fd7785d19d07fdb994d + +After a file has been downloaded and +has had its checksum checked, a ".done" stamp is placed in ``DL_DIR``. +BitBake uses this stamp during subsequent builds to avoid downloading or +comparing a checksum for the file again. + +.. note:: + + It is assumed that local storage is safe from data corruption. If + this were not the case, there would be bigger issues to worry about. + +If :term:`BB_STRICT_CHECKSUM` is set, any +download without a checksum triggers an error message. 
The +:term:`BB_NO_NETWORK` variable can be used to +make any attempted network access a fatal error, which is useful for +checking that mirrors are complete as well as other things. + +.. _bb-the-unpack: + +The Unpack +========== + +The unpack process usually immediately follows the download. For all +URLs except Git URLs, BitBake uses the common ``unpack`` method. + +A number of parameters exist that you can specify within the URL to +govern the behavior of the unpack stage: + +- *unpack:* Controls whether the URL components are unpacked. If set to + "1", which is the default, the components are unpacked. If set to + "0", the unpack stage leaves the file alone. This parameter is useful + when you want an archive to be copied in and not be unpacked. + +- *dos:* Applies to ``.zip`` and ``.jar`` files and specifies whether + to use DOS line ending conversion on text files. + +- *basepath:* Instructs the unpack stage to strip the specified + directories from the source path when unpacking. + +- *subdir:* Unpacks the specific URL to the specified subdirectory + within the root directory. + +The unpack call automatically decompresses and extracts files with ".Z", +".z", ".gz", ".xz", ".zip", ".jar", ".ipk", ".rpm". ".srpm", ".deb" and +".bz2" extensions as well as various combinations of tarball extensions. + +As mentioned, the Git fetcher has its own unpack method that is +optimized to work with Git trees. Basically, this method works by +cloning the tree into the final directory. The process is completed +using references so that there is only one central copy of the Git +metadata needed. + +.. _bb-fetchers: + +Fetchers +======== + +As mentioned earlier, the URL prefix determines which fetcher submodule +BitBake uses. Each submodule can support different URL parameters, which +are described in the following sections. + +.. _local-file-fetcher: + +Local file fetcher (``file://``) +-------------------------------- + +This submodule handles URLs that begin with ``file://``. The filename +you specify within the URL can be either an absolute or relative path to +a file. If the filename is relative, the contents of the +:term:`FILESPATH` variable is used in the same way +``PATH`` is used to find executables. If the file cannot be found, it is +assumed that it is available in :term:`DL_DIR` by the +time the ``download()`` method is called. + +If you specify a directory, the entire directory is unpacked. + +Here are a couple of example URLs, the first relative and the second +absolute: :: + + SRC_URI = "file://relativefile.patch" + SRC_URI = "file:///Users/ich/very_important_software" + +.. _http-ftp-fetcher: + +HTTP/FTP wget fetcher (``http://``, ``ftp://``, ``https://``) +------------------------------------------------------------- + +This fetcher obtains files from web and FTP servers. Internally, the +fetcher uses the wget utility. + +The executable and parameters used are specified by the +``FETCHCMD_wget`` variable, which defaults to sensible values. The +fetcher supports a parameter "downloadfilename" that allows the name of +the downloaded file to be specified. Specifying the name of the +downloaded file is useful for avoiding collisions in +:term:`DL_DIR` when dealing with multiple files that +have the same name. + +Some example URLs are as follows: :: + + SRC_URI = "http://oe.handhelds.org/not_there.aac" + SRC_URI = "ftp://oe.handhelds.org/not_there_as_well.aac" + SRC_URI = "ftp://you@oe.handhelds.org/home/you/secret.plan" + +.. 
note::
+
+   Because URL parameters are delimited by semi-colons, this can
+   introduce ambiguity when parsing URLs that also contain semi-colons,
+   for example:
+   ::
+
+      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git;a=snapshot;h=a5dd47"
+
+
+   Such URLs should be modified by replacing semi-colons with '&'
+   characters:
+   ::
+
+      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47"
+
+
+   In most cases this should work. Treating semi-colons and '&' in
+   queries identically is recommended by the World Wide Web Consortium
+   (W3C). Note that due to the nature of the URL, you may have to
+   specify the name of the downloaded file as well:
+   ::
+
+      SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47;downloadfilename=myfile.bz2"
+
+
+.. _cvs-fetcher:
+
+CVS fetcher (``cvs://``)
+------------------------
+
+This submodule handles checking out files from the CVS version control
+system. You can configure it using a number of different variables:
+
+- :term:`FETCHCMD_cvs <FETCHCMD>`: The name of the executable to use when running
+  the ``cvs`` command. This name is usually "cvs".
+
+- :term:`SRCDATE`: The date to use when fetching the CVS source code. A
+  special value of "now" causes the checkout to be updated on every
+  build.
+
+- :term:`CVSDIR`: Specifies where a temporary
+  checkout is saved. The location is often ``DL_DIR/cvs``.
+
+- CVS_PROXY_HOST: The name to use as a "proxy=" parameter to the
+  ``cvs`` command.
+
+- CVS_PROXY_PORT: The port number to use as a "proxyport="
+  parameter to the ``cvs`` command.
+
+As well as the standard username and password URL syntax, you can also
+configure the fetcher with various URL parameters.
+
+The supported parameters are as follows:
+
+- *"method":* The protocol over which to communicate with the CVS
+  server. By default, this protocol is "pserver". If "method" is set to
+  "ext", BitBake examines the "rsh" parameter and sets ``CVS_RSH``. You
+  can use "dir" for local directories.
+
+- *"module":* Specifies the module to check out. You must supply this
+  parameter.
+
+- *"tag":* Describes which CVS TAG should be used for the checkout. By
+  default, the TAG is empty.
+
+- *"date":* Specifies a date. If no "date" is specified, the
+  :term:`SRCDATE` of the configuration is used to
+  checkout a specific date. The special value of "now" causes the
+  checkout to be updated on every build.
+
+- *"localdir":* Used to rename the module. Effectively, you are
+  renaming the output directory to which the module is unpacked. You
+  are forcing the module into a special directory relative to
+  :term:`CVSDIR`.
+
+- *"rsh":* Used in conjunction with the "method" parameter.
+
+- *"scmdata":* Causes the CVS metadata to be maintained in the tarball
+  the fetcher creates when set to "keep". The tarball is expanded into
+  the work directory. By default, the CVS metadata is removed.
+
+- *"fullpath":* Controls whether the resulting checkout is at the
+  module level, which is the default, or is at deeper paths.
+
+- *"norecurse":* Causes the fetcher to only checkout the specified
+  directory without recursing into any subdirectories.
+
+- *"port":* The port to which the CVS server connects.
+
+Some example URLs are as follows: ::
+
+   SRC_URI = "cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"
+   SRC_URI = "cvs://CVSROOT;module=mymodule;date=20060126;localdir=usethat"
+
+..
_svn-fetcher: + +Subversion (SVN) Fetcher (``svn://``) +------------------------------------- + +This fetcher submodule fetches code from the Subversion source control +system. The executable used is specified by ``FETCHCMD_svn``, which +defaults to "svn". The fetcher's temporary working directory is set by +:term:`SVNDIR`, which is usually ``DL_DIR/svn``. + +The supported parameters are as follows: + +- *"module":* The name of the svn module to checkout. You must provide + this parameter. You can think of this parameter as the top-level + directory of the repository data you want. + +- *"path_spec":* A specific directory in which to checkout the + specified svn module. + +- *"protocol":* The protocol to use, which defaults to "svn". If + "protocol" is set to "svn+ssh", the "ssh" parameter is also used. + +- *"rev":* The revision of the source code to checkout. + +- *"scmdata":* Causes the “.svn” directories to be available during + compile-time when set to "keep". By default, these directories are + removed. + +- *"ssh":* An optional parameter used when "protocol" is set to + "svn+ssh". You can use this parameter to specify the ssh program used + by svn. + +- *"transportuser":* When required, sets the username for the + transport. By default, this parameter is empty. The transport + username is different than the username used in the main URL, which + is passed to the subversion command. + +Following are three examples using svn: :: + + SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667" + SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh" + SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1" + +.. _git-fetcher: + +Git Fetcher (``git://``) +------------------------ + +This fetcher submodule fetches code from the Git source control system. +The fetcher works by creating a bare clone of the remote into +:term:`GITDIR`, which is usually ``DL_DIR/git2``. This +bare clone is then cloned into the work directory during the unpack +stage when a specific tree is checked out. This is done using alternates +and by reference to minimize the amount of duplicate data on the disk +and make the unpack process fast. The executable used can be set with +``FETCHCMD_git``. + +This fetcher supports the following parameters: + +- *"protocol":* The protocol used to fetch the files. The default is + "git" when a hostname is set. If a hostname is not set, the Git + protocol is "file". You can also use "http", "https", "ssh" and + "rsync". + +- *"nocheckout":* Tells the fetcher to not checkout source code when + unpacking when set to "1". Set this option for the URL where there is + a custom routine to checkout code. The default is "0". + +- *"rebaseable":* Indicates that the upstream Git repository can be + rebased. You should set this parameter to "1" if revisions can become + detached from branches. In this case, the source mirror tarball is + done per revision, which has a loss of efficiency. Rebasing the + upstream Git repository could cause the current revision to disappear + from the upstream repository. This option reminds the fetcher to + preserve the local cache carefully for future use. The default value + for this parameter is "0". + +- *"nobranch":* Tells the fetcher to not check the SHA validation for + the branch when set to "1". The default is "0". Set this option for + the recipe that refers to the commit that is valid for a tag instead + of the branch. 
+ +- *"bareclone":* Tells the fetcher to clone a bare clone into the + destination directory without checking out a working tree. Only the + raw Git metadata is provided. This parameter implies the "nocheckout" + parameter as well. + +- *"branch":* The branch(es) of the Git tree to clone. If unset, this + is assumed to be "master". The number of branch parameters much match + the number of name parameters. + +- *"rev":* The revision to use for the checkout. The default is + "master". + +- *"tag":* Specifies a tag to use for the checkout. To correctly + resolve tags, BitBake must access the network. For that reason, tags + are often not used. As far as Git is concerned, the "tag" parameter + behaves effectively the same as the "rev" parameter. + +- *"subpath":* Limits the checkout to a specific subpath of the tree. + By default, the whole tree is checked out. + +- *"destsuffix":* The name of the path in which to place the checkout. + By default, the path is ``git/``. + +- *"usehead":* Enables local ``git://`` URLs to use the current branch + HEAD as the revision for use with ``AUTOREV``. The "usehead" + parameter implies no branch and only works when the transfer protocol + is ``file://``. + +Here are some example URLs: :: + + SRC_URI = "git://git.oe.handhelds.org/git/vip.git;tag=version-1" + SRC_URI = "git://git.oe.handhelds.org/git/vip.git;protocol=http" + +.. _gitsm-fetcher: + +Git Submodule Fetcher (``gitsm://``) +------------------------------------ + +This fetcher submodule inherits from the :ref:`Git +fetcher` and extends that fetcher's behavior by fetching a +repository's submodules. :term:`SRC_URI` is passed to the Git fetcher as +described in the :ref:`bitbake-user-manual/bitbake-user-manual-fetching:git +fetcher (\`\`git://\`\`)` section. + +.. note:: + + You must clean a recipe when switching between '``git://``' and + '``gitsm://``' URLs. + + The Git Submodules fetcher is not a complete fetcher implementation. + The fetcher has known issues where it does not use the normal source + mirroring infrastructure properly. Further, the submodule sources it + fetches are not visible to the licensing and source archiving + infrastructures. + +.. _clearcase-fetcher: + +ClearCase Fetcher (``ccrc://``) +------------------------------- + +This fetcher submodule fetches code from a +`ClearCase `__ +repository. + +To use this fetcher, make sure your recipe has proper +:term:`SRC_URI`, :term:`SRCREV`, and +:term:`PV` settings. Here is an example: :: + + SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module" + SRCREV = "EXAMPLE_CLEARCASE_TAG" + PV = "${@d.getVar("SRCREV", False).replace("/", "+")}" + +The fetcher uses the ``rcleartool`` or +``cleartool`` remote client, depending on which one is available. + +Following are options for the ``SRC_URI`` statement: + +- *vob*: The name, which must include the prepending "/" character, + of the ClearCase VOB. This option is required. + +- *module*: The module, which must include the prepending "/" + character, in the selected VOB. + + .. note:: + + The module and vob options are combined to create the load rule in the + view config spec. As an example, consider the vob and module values from + the SRC_URI statement at the start of this section. Combining those values + results in the following: :: + + load /example_vob/example_module + +- *proto*: The protocol, which can be either ``http`` or ``https``. + +By default, the fetcher creates a configuration specification. 
If you +want this specification written to an area other than the default, use +the ``CCASE_CUSTOM_CONFIG_SPEC`` variable in your recipe to define where +the specification is written. + +.. note:: + + the SRCREV loses its functionality if you specify this variable. However, + SRCREV is still used to label the archive after a fetch even though it does + not define what is fetched. + +Here are a couple of other behaviors worth mentioning: + +- When using ``cleartool``, the login of ``cleartool`` is handled by + the system. The login require no special steps. + +- In order to use ``rcleartool`` with authenticated users, an + "rcleartool login" is necessary before using the fetcher. + +.. _perforce-fetcher: + +Perforce Fetcher (``p4://``) +---------------------------- + +This fetcher submodule fetches code from the +`Perforce `__ source control system. The +executable used is specified by ``FETCHCMD_p4``, which defaults to "p4". +The fetcher's temporary working directory is set by +:term:`P4DIR`, which defaults to "DL_DIR/p4". +The fetcher does not make use of a perforce client, instead it +relies on ``p4 files`` to retrieve a list of +files and ``p4 print`` to transfer the content +of those files locally. + +To use this fetcher, make sure your recipe has proper +:term:`SRC_URI`, :term:`SRCREV`, and +:term:`PV` values. The p4 executable is able to use the +config file defined by your system's ``P4CONFIG`` environment variable +in order to define the Perforce server URL and port, username, and +password if you do not wish to keep those values in a recipe itself. If +you choose not to use ``P4CONFIG``, or to explicitly set variables that +``P4CONFIG`` can contain, you can specify the ``P4PORT`` value, which is +the server's URL and port number, and you can specify a username and +password directly in your recipe within ``SRC_URI``. + +Here is an example that relies on ``P4CONFIG`` to specify the server URL +and port, username, and password, and fetches the Head Revision: :: + + SRC_URI = "p4://example-depot/main/source/..." + SRCREV = "${AUTOREV}" + PV = "p4-${SRCPV}" + S = "${WORKDIR}/p4" + +Here is an example that specifies the server URL and port, username, and +password, and fetches a Revision based on a Label: :: + + P4PORT = "tcp:p4server.example.net:1666" + SRC_URI = "p4://user:passwd@example-depot/main/source/..." + SRCREV = "release-1.0" + PV = "p4-${SRCPV}" + S = "${WORKDIR}/p4" + +.. note:: + + You should always set S to "${WORKDIR}/p4" in your recipe. + +By default, the fetcher strips the depot location from the local file paths. In +the above example, the content of ``example-depot/main/source/`` will be placed +in ``${WORKDIR}/p4``. For situations where preserving parts of the remote depot +paths locally is desirable, the fetcher supports two parameters: + +- *"module":* + The top-level depot location or directory to fetch. The value of this + parameter can also point to a single file within the depot, in which case + the local file path will include the module path. +- *"remotepath":* + When used with the value "``keep``", the fetcher will mirror the full depot + paths locally for the specified location, even in combination with the + ``module`` parameter. + +Here is an example use of the the ``module`` parameter: :: + + SRC_URI = "p4://user:passwd@example-depot/main;module=source/..." + +In this case, the content of the top-level directory ``source/`` will be fetched +to ``${P4DIR}``, including the directory itself. 
The top-level directory will +be accesible at ``${P4DIR}/source/``. + +Here is an example use of the the ``remotepath`` parameter: :: + + SRC_URI = "p4://user:passwd@example-depot/main;module=source/...;remotepath=keep" + +In this case, the content of the top-level directory ``source/`` will be fetched +to ``${P4DIR}``, but the complete depot paths will be mirrored locally. The +top-level directory will be accessible at +``${P4DIR}/example-depot/main/source/``. + +.. _repo-fetcher: + +Repo Fetcher (``repo://``) +-------------------------- + +This fetcher submodule fetches code from ``google-repo`` source control +system. The fetcher works by initiating and syncing sources of the +repository into :term:`REPODIR`, which is usually +``${DL_DIR}/repo``. + +This fetcher supports the following parameters: + +- *"protocol":* Protocol to fetch the repository manifest (default: + git). + +- *"branch":* Branch or tag of repository to get (default: master). + +- *"manifest":* Name of the manifest file (default: ``default.xml``). + +Here are some example URLs: :: + + SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml" + SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml" + +Other Fetchers +-------------- + +Fetch submodules also exist for the following: + +- Bazaar (``bzr://``) + +- Mercurial (``hg://``) + +- npm (``npm://``) + +- OSC (``osc://``) + +- Secure FTP (``sftp://``) + +- Secure Shell (``ssh://``) + +- Trees using Git Annex (``gitannex://``) + +No documentation currently exists for these lesser used fetcher +submodules. However, you might find the code helpful and readable. + +Auto Revisions +============== + +We need to document ``AUTOREV`` and ``SRCREV_FORMAT`` here. diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst new file mode 100644 index 000000000..e3fd32158 --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-hello.rst @@ -0,0 +1,415 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +=================== +Hello World Example +=================== + +BitBake Hello World +=================== + +The simplest example commonly used to demonstrate any new programming +language or tool is the "`Hello +World `__" example. +This appendix demonstrates, in tutorial form, Hello World within the +context of BitBake. The tutorial describes how to create a new project +and the applicable metadata files necessary to allow BitBake to build +it. + +Obtaining BitBake +================= + +See the :ref:`bitbake-user-manual/bitbake-user-manual-hello:obtaining bitbake` section for +information on how to obtain BitBake. Once you have the source code on +your machine, the BitBake directory appears as follows: :: + + $ ls -al + total 100 + drwxrwxr-x. 9 wmat wmat 4096 Jan 31 13:44 . + drwxrwxr-x. 3 wmat wmat 4096 Feb 4 10:45 .. + -rw-rw-r--. 1 wmat wmat 365 Nov 26 04:55 AUTHORS + drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 bin + drwxrwxr-x. 4 wmat wmat 4096 Jan 31 13:44 build + -rw-rw-r--. 1 wmat wmat 16501 Nov 26 04:55 ChangeLog + drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 classes + drwxrwxr-x. 2 wmat wmat 4096 Nov 26 04:55 conf + drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 contrib + -rw-rw-r--. 1 wmat wmat 17987 Nov 26 04:55 COPYING + drwxrwxr-x. 3 wmat wmat 4096 Nov 26 04:55 doc + -rw-rw-r--. 1 wmat wmat 69 Nov 26 04:55 .gitignore + -rw-rw-r--. 1 wmat wmat 849 Nov 26 04:55 HEADER + drwxrwxr-x. 
5 wmat wmat 4096 Jan 31 13:44 lib
+   -rw-rw-r--. 1 wmat wmat 195 Nov 26 04:55 MANIFEST.in
+   -rw-rw-r--. 1 wmat wmat 2887 Nov 26 04:55 TODO
+
+At this point, you should have BitBake cloned to a directory that
+matches the previous listing except for dates and user names.
+
+Setting Up the BitBake Environment
+==================================
+
+First, you need to be sure that you can run BitBake. Set your working
+directory to where your local BitBake files are and run the following
+command: ::
+
+   $ ./bin/bitbake --version
+   BitBake Build Tool Core version 1.23.0, bitbake version 1.23.0
+
+The console output tells you what version
+you are running.
+
+The recommended method to run BitBake is from a directory of your
+choice. To be able to run BitBake from any directory, you need to add
+the executable binary to your shell's environment
+``PATH`` variable. First, look at your current ``PATH`` variable by
+entering the following: ::
+
+   $ echo $PATH
+
+Next, add the directory location
+for the BitBake binary to the ``PATH``. Here is an example that adds the
+``/home/scott-lenovo/bitbake/bin`` directory to the front of the
+``PATH`` variable: ::
+
+   $ export PATH=/home/scott-lenovo/bitbake/bin:$PATH
+
+You should now be able to enter the ``bitbake`` command from the command
+line while working from any directory.
+
+The Hello World Example
+=======================
+
+The overall goal of this exercise is to build a complete "Hello World"
+example utilizing task and layer concepts. Because this is how modern
+projects such as OpenEmbedded and the Yocto Project utilize BitBake, the
+example provides an excellent starting point for understanding BitBake.
+
+To help you understand how to use BitBake to build targets, the example
+starts with nothing but the ``bitbake`` command, which causes BitBake to
+fail and report problems. The example progresses by adding pieces to the
+build to eventually conclude with a working, minimal "Hello World"
+example.
+
+While every attempt is made to explain what is happening during the
+example, the descriptions cannot cover everything. You can find further
+information throughout this manual. Also, you can actively participate
+in the :oe_lists:`/g/bitbake-devel`
+discussion mailing list about the BitBake build tool.
+
+.. note::
+
+   This example was inspired by and drew heavily from
+   `Mailing List post - The BitBake equivalent of "Hello, World!"
+   `_.
+
+As stated earlier, the goal of this example is to eventually compile
+"Hello World". However, it is unknown what BitBake needs and what you
+have to provide in order to achieve that goal. Recall that BitBake
+utilizes three types of metadata files:
+:ref:`bitbake-user-manual/bitbake-user-manual-intro:configuration files`,
+:ref:`bitbake-user-manual/bitbake-user-manual-intro:classes`, and
+:ref:`bitbake-user-manual/bitbake-user-manual-intro:recipes`.
+But where do they go? How does BitBake find
+them? BitBake's error messaging helps you answer these types of
+questions and helps you better understand exactly what is going on.
+
+Following is the complete "Hello World" example.
+
+#. **Create a Project Directory:** First, set up a directory for the
+   "Hello World" project. Here is how you can do so in your home
+   directory: ::
+
+      $ mkdir ~/hello
+      $ cd ~/hello
+
+   This is the directory that
+   BitBake will use to do all of its work. You can use this directory
+   to keep all the metafiles needed by BitBake. Having a project
+   directory is a good way to isolate your project.
+
+#.
**Run BitBake:** At this point, you have nothing but a project + directory. Run the ``bitbake`` command and see what it does: :: + + $ bitbake + The BBPATH variable is not set and bitbake did not + find a conf/bblayers.conf file in the expected location. + Maybe you accidentally invoked bitbake from the wrong directory? + DEBUG: Removed the following variables from the environment: + GNOME_DESKTOP_SESSION_ID, XDG_CURRENT_DESKTOP, + GNOME_KEYRING_CONTROL, DISPLAY, SSH_AGENT_PID, LANG, no_proxy, + XDG_SESSION_PATH, XAUTHORITY, SESSION_MANAGER, SHLVL, + MANDATORY_PATH, COMPIZ_CONFIG_PROFILE, WINDOWID, EDITOR, + GPG_AGENT_INFO, SSH_AUTH_SOCK, GDMSESSION, GNOME_KEYRING_PID, + XDG_SEAT_PATH, XDG_CONFIG_DIRS, LESSOPEN, DBUS_SESSION_BUS_ADDRESS, + _, XDG_SESSION_COOKIE, DESKTOP_SESSION, LESSCLOSE, DEFAULTS_PATH, + UBUNTU_MENUPROXY, OLDPWD, XDG_DATA_DIRS, COLORTERM, LS_COLORS + + The majority of this output is specific to environment variables that + are not directly relevant to BitBake. However, the very first + message regarding the ``BBPATH`` variable and the + ``conf/bblayers.conf`` file is relevant. + + When you run BitBake, it begins looking for metadata files. The + :term:`BBPATH` variable is what tells BitBake where + to look for those files. ``BBPATH`` is not set and you need to set + it. Without ``BBPATH``, BitBake cannot find any configuration files + (``.conf``) or recipe files (``.bb``) at all. BitBake also cannot + find the ``bitbake.conf`` file. + +#. **Setting BBPATH:** For this example, you can set ``BBPATH`` in + the same manner that you set ``PATH`` earlier in the appendix. You + should realize, though, that it is much more flexible to set the + ``BBPATH`` variable up in a configuration file for each project. + + From your shell, enter the following commands to set and export the + ``BBPATH`` variable: :: + + $ BBPATH="projectdirectory" + $ export BBPATH + + Use your actual project directory in the command. BitBake uses that + directory to find the metadata it needs for your project. + + .. note:: + + When specifying your project directory, do not use the tilde + ("~") character as BitBake does not expand that character as the + shell would. + +#. **Run BitBake:** Now that you have ``BBPATH`` defined, run the + ``bitbake`` command again: :: + + $ bitbake + ERROR: Traceback (most recent call last): + File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped + return func(fn, *args) + File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 173, in parse_config_file + return bb.parse.handle(fn, data, include) + File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 99, in handle + return h['handle'](fn, data, include) + File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 120, in handle + abs_fn = resolve_file(fn, data) + File "/home/scott-lenovo/bitbake/lib/bb/parse/__init__.py", line 117, in resolve_file + raise IOError("file %s not found in %s" % (fn, bbpath)) + IOError: file conf/bitbake.conf not found in /home/scott-lenovo/hello + + ERROR: Unable to parse conf/bitbake.conf: file conf/bitbake.conf not found in /home/scott-lenovo/hello + + This sample output shows that BitBake could not find the + ``conf/bitbake.conf`` file in the project directory. This file is + the first thing BitBake must find in order to build a target. And, + since the project directory for this example is empty, you need to + provide a ``conf/bitbake.conf`` file. + +#. 
**Creating conf/bitbake.conf:** The ``conf/bitbake.conf`` includes + a number of configuration variables BitBake uses for metadata and + recipe files. For this example, you need to create the file in your + project directory and define some key BitBake variables. For more + information on the ``bitbake.conf`` file, see + http://git.openembedded.org/bitbake/tree/conf/bitbake.conf. + + Use the following commands to create the ``conf`` directory in the + project directory: :: + + $ mkdir conf + + From within the ``conf`` directory, + use some editor to create the ``bitbake.conf`` so that it contains + the following: :: + + PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}" + + TMPDIR = "${TOPDIR}/tmp" + CACHE = "${TMPDIR}/cache" + STAMP = "${TMPDIR}/${PN}/stamps" + T = "${TMPDIR}/${PN}/work" + B = "${TMPDIR}/${PN}" + + .. note:: + + Without a value for PN , the variables STAMP , T , and B , prevent more + than one recipe from working. You can fix this by either setting PN to + have a value similar to what OpenEmbedded and BitBake use in the default + bitbake.conf file (see previous example). Or, by manually updating each + recipe to set PN . You will also need to include PN as part of the STAMP + , T , and B variable definitions in the local.conf file. + + The ``TMPDIR`` variable establishes a directory that BitBake uses + for build output and intermediate files other than the cached + information used by the + :ref:`bitbake-user-manual/bitbake-user-manual-execution:setscene` + process. Here, the ``TMPDIR`` directory is set to ``hello/tmp``. + + .. tip:: + + You can always safely delete the tmp directory in order to rebuild a + BitBake target. The build process creates the directory for you when you + run BitBake. + + For information about each of the other variables defined in this + example, check :term:`PN`, :term:`TOPDIR`, :term:`CACHE`, :term:`STAMP`, + :term:`T` or :term:`B` to take you to the definitions in the + glossary. + +#. **Run BitBake:** After making sure that the ``conf/bitbake.conf`` file + exists, you can run the ``bitbake`` command again: :: + + $ bitbake + ERROR: Traceback (most recent call last): + File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped + return func(fn, *args) + File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit + bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data) + File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit + include(fn, file, lineno, d, "inherit") + File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include + raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno) + ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass + + ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass + + In the sample output, + BitBake could not find the ``classes/base.bbclass`` file. You need + to create that file next. + +#. **Creating classes/base.bbclass:** BitBake uses class files to + provide common code and functionality. The minimally required class + for BitBake is the ``classes/base.bbclass`` file. The ``base`` class + is implicitly inherited by every recipe. BitBake looks for the class + in the ``classes`` directory of the project (i.e ``hello/classes`` + in this example). 
+
+   Create the ``classes`` directory as follows: ::
+
+      $ cd $HOME/hello
+      $ mkdir classes
+
+   Move to the ``classes`` directory and then create the
+   ``base.bbclass`` file by inserting this single line: ::
+
+      addtask build
+
+   The minimal task that BitBake runs is the ``do_build`` task. This is
+   all the example needs in order to build the project. Of course, the
+   ``base.bbclass`` can have much more depending on which build
+   environments BitBake is supporting.
+
+#. **Run BitBake:** After making sure that the ``classes/base.bbclass``
+   file exists, you can run the ``bitbake`` command again: ::
+
+      $ bitbake
+      Nothing to do.  Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.
+
+   BitBake is finally reporting
+   no errors. However, you can see that it really does not have
+   anything to do. You need to create a recipe that gives BitBake
+   something to do.
+
+#. **Creating a Layer:** While it is not really necessary for such a
+   small example, it is good practice to create a layer in which to
+   keep your code separate from the general metadata used by BitBake.
+   Thus, this example creates and uses a layer called "mylayer".
+
+   .. note::
+
+      You can find additional information on layers in the
+      ":ref:`bitbake-user-manual/bitbake-user-manual-intro:Layers`" section.
+
+   Minimally, you need a recipe file and a layer configuration file in
+   your layer. The configuration file needs to be in the ``conf``
+   directory inside the layer. Use these commands to set up the layer
+   and the ``conf`` directory: ::
+
+      $ cd $HOME
+      $ mkdir mylayer
+      $ cd mylayer
+      $ mkdir conf
+
+   Move to the ``conf`` directory and create a ``layer.conf`` file that has the
+   following: ::
+
+      BBPATH .= ":${LAYERDIR}"
+      BBFILES += "${LAYERDIR}/*.bb"
+      BBFILE_COLLECTIONS += "mylayer"
+      BBFILE_PATTERN_mylayer := "^${LAYERDIR_RE}/"
+
+   For information on these variables, click on :term:`BBFILES`,
+   :term:`LAYERDIR`, :term:`BBFILE_COLLECTIONS` or :term:`BBFILE_PATTERN_mylayer <BBFILE_PATTERN>`
+   to go to the definitions in the glossary.
+
+   You need to create the recipe file next. Inside your layer at the
+   top-level, use an editor and create a recipe file named
+   ``printhello.bb`` that has the following: ::
+
+      DESCRIPTION = "Prints Hello World"
+      PN = 'printhello'
+      PV = '1'
+
+      python do_build() {
+         bb.plain("********************");
+         bb.plain("*                  *");
+         bb.plain("*  Hello, World!   *");
+         bb.plain("*                  *");
+         bb.plain("********************");
+      }
+
+   The recipe file simply provides
+   a description of the recipe, the name, version, and the ``do_build``
+   task, which prints out "Hello World" to the console. For more
+   information on :term:`DESCRIPTION`, :term:`PN` or :term:`PV`
+   follow the links to the glossary.
+
+#. **Run BitBake With a Target:** Now that a BitBake target exists, run
+   the command and provide that target: ::
+
+      $ cd $HOME/hello
+      $ bitbake printhello
+      ERROR: no recipe files to build, check your BBPATH and BBFILES?
+
+      Summary: There was 1 ERROR message shown, returning a non-zero exit code.
+
+   We have created the layer with the recipe and
+   the layer configuration file but it still seems that BitBake cannot
+   find the recipe. BitBake needs a ``conf/bblayers.conf`` that lists
+   the layers for the project. Without this file, BitBake cannot find
+   the recipe.
+
+#. **Creating conf/bblayers.conf:** BitBake uses the
+   ``conf/bblayers.conf`` file to locate layers needed for the project.
+   This file must reside in the ``conf`` directory of the project (i.e.
+   ``hello/conf`` for this example).
+ + Set your working directory to the ``hello/conf`` directory and then + create the ``bblayers.conf`` file so that it contains the following: :: + + BBLAYERS ?= " \ + /home//mylayer \ + " + + You need to provide your own information for ``you`` in the file. + +#. **Run BitBake With a Target:** Now that you have supplied the + ``bblayers.conf`` file, run the ``bitbake`` command and provide the + target: :: + + $ bitbake printhello + Parsing recipes: 100% |##################################################################################| + Time: 00:00:00 + Parsing of 1 .bb files complete (0 cached, 1 parsed). 1 targets, 0 skipped, 0 masked, 0 errors. + NOTE: Resolving any missing task queue dependencies + NOTE: Preparing RunQueue + NOTE: Executing RunQueue Tasks + ******************** + * * + * Hello, World! * + * * + ******************** + NOTE: Tasks Summary: Attempted 1 tasks of which 0 didn't need to be rerun and all succeeded. + + .. note:: + + After the first execution, re-running bitbake printhello again will not + result in a BitBake run that prints the same console output. The reason + for this is that the first time the printhello.bb recipe's do_build task + executes successfully, BitBake writes a stamp file for the task. Thus, + the next time you attempt to run the task using that same bitbake + command, BitBake notices the stamp and therefore determines that the task + does not need to be re-run. If you delete the tmp directory or run + bitbake -c clean printhello and then re-run the build, the "Hello, + World!" message will be printed again. diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst new file mode 100644 index 000000000..77dc9668a --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-intro.rst @@ -0,0 +1,651 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +======== +Overview +======== + +| + +Welcome to the BitBake User Manual. This manual provides information on +the BitBake tool. The information attempts to be as independent as +possible regarding systems that use BitBake, such as OpenEmbedded and +the Yocto Project. In some cases, scenarios or examples within the +context of a build system are used in the manual to help with +understanding. For these cases, the manual clearly states the context. + +.. _intro: + +Introduction +============ + +Fundamentally, BitBake is a generic task execution engine that allows +shell and Python tasks to be run efficiently and in parallel while +working within complex inter-task dependency constraints. One of +BitBake's main users, OpenEmbedded, takes this core and builds embedded +Linux software stacks using a task-oriented approach. + +Conceptually, BitBake is similar to GNU Make in some regards but has +significant differences: + +- BitBake executes tasks according to provided metadata that builds up + the tasks. Metadata is stored in recipe (``.bb``) and related recipe + "append" (``.bbappend``) files, configuration (``.conf``) and + underlying include (``.inc``) files, and in class (``.bbclass``) + files. The metadata provides BitBake with instructions on what tasks + to run and the dependencies between those tasks. + +- BitBake includes a fetcher library for obtaining source code from + various places such as local files, source control systems, or + websites. + +- The instructions for each unit to be built (e.g. 
a piece of software) + are known as "recipe" files and contain all the information about the + unit (dependencies, source file locations, checksums, description and + so on). + +- BitBake includes a client/server abstraction and can be used from a + command line or used as a service over XML-RPC and has several + different user interfaces. + +History and Goals +================= + +BitBake was originally a part of the OpenEmbedded project. It was +inspired by the Portage package management system used by the Gentoo +Linux distribution. On December 7, 2004, OpenEmbedded project team +member Chris Larson split the project into two distinct pieces: + +- BitBake, a generic task executor + +- OpenEmbedded, a metadata set utilized by BitBake + +Today, BitBake is the primary basis of the +`OpenEmbedded `__ project, which is being +used to build and maintain Linux distributions such as the `Angstrom +Distribution `__, and which is +also being used as the build tool for Linux projects such as the `Yocto +Project `__. + +Prior to BitBake, no other build tool adequately met the needs of an +aspiring embedded Linux distribution. All of the build systems used by +traditional desktop Linux distributions lacked important functionality, +and none of the ad hoc Buildroot-based systems, prevalent in the +embedded space, were scalable or maintainable. + +Some important original goals for BitBake were: + +- Handle cross-compilation. + +- Handle inter-package dependencies (build time on target architecture, + build time on native architecture, and runtime). + +- Support running any number of tasks within a given package, + including, but not limited to, fetching upstream sources, unpacking + them, patching them, configuring them, and so forth. + +- Be Linux distribution agnostic for both build and target systems. + +- Be architecture agnostic. + +- Support multiple build and target operating systems (e.g. Cygwin, the + BSDs, and so forth). + +- Be self-contained, rather than tightly integrated into the build + machine's root filesystem. + +- Handle conditional metadata on the target architecture, operating + system, distribution, and machine. + +- Be easy to use the tools to supply local metadata and packages + against which to operate. + +- Be easy to use BitBake to collaborate between multiple projects for + their builds. + +- Provide an inheritance mechanism to share common metadata between + many packages. + +Over time it became apparent that some further requirements were +necessary: + +- Handle variants of a base recipe (e.g. native, sdk, and multilib). + +- Split metadata into layers and allow layers to enhance or override + other layers. + +- Allow representation of a given set of input variables to a task as a + checksum. Based on that checksum, allow acceleration of builds with + prebuilt components. + +BitBake satisfies all the original requirements and many more with +extensions being made to the basic functionality to reflect the +additional requirements. Flexibility and power have always been the +priorities. BitBake is highly extensible and supports embedded Python +code and execution of any arbitrary tasks. + +.. _Concepts: + +Concepts +======== + +BitBake is a program written in the Python language. At the highest +level, BitBake interprets metadata, decides what tasks are required to +run, and executes those tasks. Similar to GNU Make, BitBake controls how +software is built. GNU Make achieves its control through "makefiles", +while BitBake uses "recipes". 
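+
+To make the comparison concrete, here is a purely illustrative recipe
+fragment. The recipe name, task name, and message are hypothetical and
+assume a configuration (such as the one built up in the Hello World
+appendix) where ``PN`` and ``PV`` are derived from the file name: ::
+
+    # example_0.1.bb, an illustrative recipe defining a single Python task
+    DESCRIPTION = "Minimal recipe sketch"
+
+    python do_report() {
+        # Print the recipe name and version that BitBake resolved
+        bb.plain("Building %s version %s" % (d.getVar('PN'), d.getVar('PV')))
+    }
+    addtask report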
+ +BitBake extends the capabilities of a simple tool like GNU Make by +allowing for the definition of much more complex tasks, such as +assembling entire embedded Linux distributions. + +The remainder of this section introduces several concepts that should be +understood in order to better leverage the power of BitBake. + +Recipes +------- + +BitBake Recipes, which are denoted by the file extension ``.bb``, are +the most basic metadata files. These recipe files provide BitBake with +the following: + +- Descriptive information about the package (author, homepage, license, + and so on) + +- The version of the recipe + +- Existing dependencies (both build and runtime dependencies) + +- Where the source code resides and how to fetch it + +- Whether the source code requires any patches, where to find them, and + how to apply them + +- How to configure and compile the source code + +- How to assemble the generated artifacts into one or more installable + packages + +- Where on the target machine to install the package or packages + created + +Within the context of BitBake, or any project utilizing BitBake as its +build system, files with the ``.bb`` extension are referred to as +recipes. + +.. note:: + + The term "package" is also commonly used to describe recipes. + However, since the same word is used to describe packaged output from + a project, it is best to maintain a single descriptive term - + "recipes". Put another way, a single "recipe" file is quite capable + of generating a number of related but separately installable + "packages". In fact, that ability is fairly common. + +Configuration Files +------------------- + +Configuration files, which are denoted by the ``.conf`` extension, +define various configuration variables that govern the project's build +process. These files fall into several areas that define machine +configuration, distribution configuration, possible compiler tuning, +general common configuration, and user configuration. The main +configuration file is the sample ``bitbake.conf`` file, which is located +within the BitBake source tree ``conf`` directory. + +Classes +------- + +Class files, which are denoted by the ``.bbclass`` extension, contain +information that is useful to share between metadata files. The BitBake +source tree currently comes with one class metadata file called +``base.bbclass``. You can find this file in the ``classes`` directory. +The ``base.bbclass`` class files is special since it is always included +automatically for all recipes and classes. This class contains +definitions for standard basic tasks such as fetching, unpacking, +configuring (empty by default), compiling (runs any Makefile present), +installing (empty by default) and packaging (empty by default). These +tasks are often overridden or extended by other classes added during the +project development process. + +Layers +------ + +Layers allow you to isolate different types of customizations from each +other. While you might find it tempting to keep everything in one layer +when working on a single project, the more modular your metadata, the +easier it is to cope with future changes. + +To illustrate how you can use layers to keep things modular, consider +customizations you might make to support a specific target machine. +These types of customizations typically reside in a special layer, +rather than a general layer, called a Board Support Package (BSP) layer. 
+Furthermore, the machine customizations should be isolated from recipes +and metadata that support a new GUI environment, for example. This +situation gives you a couple of layers: one for the machine +configurations and one for the GUI environment. It is important to +understand, however, that the BSP layer can still make machine-specific +additions to recipes within the GUI environment layer without polluting +the GUI layer itself with those machine-specific changes. You can +accomplish this through a recipe that is a BitBake append +(``.bbappend``) file. + +.. _append-bbappend-files: + +Append Files +------------ + +Append files, which are files that have the ``.bbappend`` file +extension, extend or override information in an existing recipe file. + +BitBake expects every append file to have a corresponding recipe file. +Furthermore, the append file and corresponding recipe file must use the +same root filename. The filenames can differ only in the file type +suffix used (e.g. ``formfactor_0.0.bb`` and +``formfactor_0.0.bbappend``). + +Information in append files extends or overrides the information in the +underlying, similarly-named recipe files. + +When you name an append file, you can use the "``%``" wildcard character +to allow for matching recipe names. For example, suppose you have an +append file named as follows: :: + + busybox_1.21.%.bbappend + +That append file +would match any ``busybox_1.21.``\ x\ ``.bb`` version of the recipe. So, +the append file would match the following recipe names: :: + + busybox_1.21.1.bb + busybox_1.21.2.bb + busybox_1.21.3.bb + +.. note:: + + The use of the " % " character is limited in that it only works directly in + front of the .bbappend portion of the append file's name. You cannot use the + wildcard character in any other location of the name. + +If the ``busybox`` recipe was updated to ``busybox_1.3.0.bb``, the +append name would not match. However, if you named the append file +``busybox_1.%.bbappend``, then you would have a match. + +In the most general case, you could name the append file something as +simple as ``busybox_%.bbappend`` to be entirely version independent. + +Obtaining BitBake +================= + +You can obtain BitBake several different ways: + +- **Cloning BitBake:** Using Git to clone the BitBake source code + repository is the recommended method for obtaining BitBake. Cloning + the repository makes it easy to get bug fixes and have access to + stable branches and the master branch. Once you have cloned BitBake, + you should use the latest stable branch for development since the + master branch is for BitBake development and might contain less + stable changes. + + You usually need a version of BitBake that matches the metadata you + are using. The metadata is generally backwards compatible but not + forward compatible. + + Here is an example that clones the BitBake repository: :: + + $ git clone git://git.openembedded.org/bitbake + + This command clones the BitBake + Git repository into a directory called ``bitbake``. Alternatively, + you can designate a directory after the ``git clone`` command if you + want to call the new directory something other than ``bitbake``. 
Here + is an example that names the directory ``bbdev``: :: + + $ git clone git://git.openembedded.org/bitbake bbdev + +- **Installation using your Distribution Package Management System:** + This method is not recommended because the BitBake version that is + provided by your distribution, in most cases, is several releases + behind a snapshot of the BitBake repository. + +- **Taking a snapshot of BitBake:** Downloading a snapshot of BitBake + from the source code repository gives you access to a known branch or + release of BitBake. + + .. note:: + + Cloning the Git repository, as described earlier, is the preferred + method for getting BitBake. Cloning the repository makes it easier + to update as patches are added to the stable branches. + + The following example downloads a snapshot of BitBake version 1.17.0: :: + + $ wget http://git.openembedded.org/bitbake/snapshot/bitbake-1.17.0.tar.gz + $ tar zxpvf bitbake-1.17.0.tar.gz + + After extraction of the tarball using + the tar utility, you have a directory entitled ``bitbake-1.17.0``. + +- **Using the BitBake that Comes With Your Build Checkout:** A final + possibility for getting a copy of BitBake is that it already comes + with your checkout of a larger BitBake-based build system, such as + Poky. Rather than manually checking out individual layers and gluing + them together yourself, you can check out an entire build system. The + checkout will already include a version of BitBake that has been + thoroughly tested for compatibility with the other components. For + information on how to check out a particular BitBake-based build + system, consult that build system's supporting documentation. + +.. _bitbake-user-manual-command: + +The BitBake Command +=================== + +The ``bitbake`` command is the primary interface to the BitBake tool. +This section presents the BitBake command syntax and provides several +execution examples. + +Usage and syntax +---------------- + +Following is the usage and syntax for BitBake: :: + + $ bitbake -h + Usage: bitbake [options] [recipename/target recipe:do_task ...] + + Executes the specified task (default is 'build') for a given set of target recipes (.bb files). + It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which + will provide the layer, BBFILES and other configuration information. + + Options: + --version show program's version number and exit + -h, --help show this help message and exit + -b BUILDFILE, --buildfile=BUILDFILE + Execute tasks from a specific .bb recipe directly. + WARNING: Does not handle any dependencies from other + recipes. + -k, --continue Continue as much as possible after an error. While the + target that failed and anything depending on it cannot + be built, as much as possible will be built before + stopping. + -f, --force Force the specified targets/task to run (invalidating + any existing stamp file). + -c CMD, --cmd=CMD Specify the task to execute. The exact options + available depend on the metadata. Some examples might + be 'compile' or 'populate_sysroot' or 'listtasks' may + give a list of the tasks available. + -C INVALIDATE_STAMP, --clear-stamp=INVALIDATE_STAMP + Invalidate the stamp for the specified task such as + 'compile' and then run the default task for the + specified target(s). + -r PREFILE, --read=PREFILE + Read the specified file before bitbake.conf. + -R POSTFILE, --postread=POSTFILE + Read the specified file after bitbake.conf. + -v, --verbose Enable tracing of shell tasks (with 'set -x'). Also + print bb.note(...) 
messages to stdout (in addition to + writing them to ${T}/log.do_<task>). + -D, --debug Increase the debug level. You can specify this more + than once. -D sets the debug level to 1, where only + bb.debug(1, ...) messages are printed to stdout; -DD + sets the debug level to 2, where both bb.debug(1, ...) + and bb.debug(2, ...) messages are printed; etc. + Without -D, no debug messages are printed. Note that + -D only affects output to stdout. All debug messages + are written to ${T}/log.do_taskname, regardless of the + debug level. + -q, --quiet Output less log message data to the terminal. You can + specify this more than once. + -n, --dry-run Don't execute, just go through the motions. + -S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER + Dump out the signature construction information, with + no task execution. The SIGNATURE_HANDLER parameter is + passed to the handler. Two common values are none and + printdiff but the handler may define more/less. none + means only dump the signature, printdiff means compare + the dumped signature with the cached one. + -p, --parse-only Quit after parsing the BB recipes. + -s, --show-versions Show current and preferred versions of all recipes. + -e, --environment Show the global or per-recipe environment complete + with information about where variables were + set/changed. + -g, --graphviz Save dependency tree information for the specified + targets in the dot syntax. + -I EXTRA_ASSUME_PROVIDED, --ignore-deps=EXTRA_ASSUME_PROVIDED + Assume these dependencies don't exist and are already + provided (equivalent to ASSUME_PROVIDED). Useful to + make dependency graphs more appealing + -l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS + Show debug logging for the specified logging domains + -P, --profile Profile the command and save reports. + -u UI, --ui=UI The user interface to use (knotty, ncurses or taskexp + - default knotty). + --token=XMLRPCTOKEN Specify the connection token to be used when + connecting to a remote server. + --revisions-changed Set the exit code depending on whether upstream + floating revisions have changed or not. + --server-only Run bitbake without a UI, only starting a server + (cooker) process. + -B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind + to. + -T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT + Set timeout to unload bitbake server due to + inactivity, set to -1 means no unload, default: + Environment variable BB_SERVER_TIMEOUT. + --no-setscene Do not run any setscene tasks. sstate will be ignored + and everything needed, built. + --setscene-only Only run setscene tasks, don't run any real tasks. + --remote-server=REMOTE_SERVER + Connect to the specified server. + -m, --kill-server Terminate any running bitbake server. + --observe-only Connect to a server as an observing-only client. + --status-only Check the status of the remote bitbake server. + -w WRITEEVENTLOG, --write-log=WRITEEVENTLOG + Writes the event log of the build to a bitbake event + json file. Use '' (empty string) to assign the name + automatically. + --runall=RUNALL Run the specified task for any recipe in the taskgraph + of the specified target (even if it wouldn't otherwise + have run). + --runonly=RUNONLY Run only the specified task within the taskgraph of + the specified targets (and any task dependencies those + tasks may have). + +.. _bitbake-examples: + +Examples +-------- + +This section presents some examples showing how to use BitBake. + +.. 
_example-executing-a-task-against-a-single-recipe: + +Executing a Task Against a Single Recipe +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Executing tasks for a single recipe file is relatively simple. You +specify the file in question, and BitBake parses it and executes the +specified task. If you do not specify a task, BitBake executes the +default task, which is "build”. BitBake obeys inter-task dependencies +when doing so. + +The following command runs the build task, which is the default task, on +the ``foo_1.0.bb`` recipe file: :: + + $ bitbake -b foo_1.0.bb + +The following command runs the clean task on the ``foo.bb`` recipe file: :: + + $ bitbake -b foo.bb -c clean + +.. note:: + + The "-b" option explicitly does not handle recipe dependencies. Other + than for debugging purposes, it is instead recommended that you use + the syntax presented in the next section. + +Executing Tasks Against a Set of Recipe Files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +There are a number of additional complexities introduced when one wants +to manage multiple ``.bb`` files. Clearly there needs to be a way to +tell BitBake what files are available and, of those, which you want to +execute. There also needs to be a way for each recipe to express its +dependencies, both for build-time and runtime. There must be a way for +you to express recipe preferences when multiple recipes provide the same +functionality, or when there are multiple versions of a recipe. + +The ``bitbake`` command, when not using "--buildfile" or "-b" only +accepts a "PROVIDES". You cannot provide anything else. By default, a +recipe file generally "PROVIDES" its "packagename" as shown in the +following example: :: + + $ bitbake foo + +This next example "PROVIDES" the +package name and also uses the "-c" option to tell BitBake to just +execute the ``do_clean`` task: :: + + $ bitbake -c clean foo + +Executing a List of Task and Recipe Combinations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The BitBake command line supports specifying different tasks for +individual targets when you specify multiple targets. For example, +suppose you had two targets (or recipes) ``myfirstrecipe`` and +``mysecondrecipe`` and you needed BitBake to run ``taskA`` for the first +recipe and ``taskB`` for the second recipe: :: + + $ bitbake myfirstrecipe:do_taskA mysecondrecipe:do_taskB + +Generating Dependency Graphs +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BitBake is able to generate dependency graphs using the ``dot`` syntax. +You can convert these graphs into images using the ``dot`` tool from +`Graphviz `__. + +When you generate a dependency graph, BitBake writes two files to the +current working directory: + +- ``task-depends.dot``: Shows dependencies between tasks. These + dependencies match BitBake's internal task execution list. + +- ``pn-buildlist``: Shows a simple list of targets that are to be + built. + +To stop depending on common depends, use the "-I" depend option and +BitBake omits them from the graph. Leaving this information out can +produce more readable graphs. This way, you can remove from the graph +``DEPENDS`` from inherited classes such as ``base.bbclass``. + +Here are two examples that create dependency graphs. 
The second example +omits depends common in OpenEmbedded from the graph: :: + + $ bitbake -g foo + + $ bitbake -g -I virtual/kernel -I eglibc foo + +Executing a Multiple Configuration Build +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BitBake is able to build multiple images or packages using a single +command where the different targets require different configurations +(multiple configuration builds). Each target, in this scenario, is +referred to as a "multiconfig". + +To accomplish a multiple configuration build, you must define each +target's configuration separately using a parallel configuration file in +the build directory. The location for these multiconfig configuration +files is specific. They must reside in the current build directory in a +sub-directory of ``conf`` named ``multiconfig``. Following is an example +for two separate targets: + +.. image:: figures/bb_multiconfig_files.png + :align: center + +The reason for this required file hierarchy is because the ``BBPATH`` +variable is not constructed until the layers are parsed. Consequently, +using the configuration file as a pre-configuration file is not possible +unless it is located in the current working directory. + +Minimally, each configuration file must define the machine and the +temporary directory BitBake uses for the build. Suggested practice +dictates that you do not overlap the temporary directories used during +the builds. + +Aside from separate configuration files for each target, you must also +enable BitBake to perform multiple configuration builds. Enabling is +accomplished by setting the +:term:`BBMULTICONFIG` variable in the +``local.conf`` configuration file. As an example, suppose you had +configuration files for ``target1`` and ``target2`` defined in the build +directory. The following statement in the ``local.conf`` file both +enables BitBake to perform multiple configuration builds and specifies +the two extra multiconfigs: :: + + BBMULTICONFIG = "target1 target2" + +Once the target configuration files are in place and BitBake has been +enabled to perform multiple configuration builds, use the following +command form to start the builds: :: + + $ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ] + +Here is an example for two extra multiconfigs: ``target1`` and ``target2``: :: + + $ bitbake mc::target mc:target1:target mc:target2:target + +.. _bb-enabling-multiple-configuration-build-dependencies: + +Enabling Multiple Configuration Build Dependencies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes dependencies can exist between targets (multiconfigs) in a +multiple configuration build. For example, suppose that in order to +build an image for a particular architecture, the root filesystem of +another build for a different architecture needs to exist. In other +words, the image for the first multiconfig depends on the root +filesystem of the second multiconfig. This dependency is essentially +that the task in the recipe that builds one multiconfig is dependent on +the completion of the task in the recipe that builds another +multiconfig. 
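+
+Both multiconfigs in such a scenario are still defined by the per-target
+configuration files described earlier. As a minimal sketch, a hypothetical
+``conf/multiconfig/target1.conf`` might contain nothing more than the
+machine and the temporary directory, here using the ``MACHINE`` and
+``TMPDIR`` variables of an OpenEmbedded-style setup: ::
+
+   MACHINE = "qemuarm"
+   TMPDIR = "${TOPDIR}/tmp-target1"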
+ +To enable dependencies in a multiple configuration build, you must +declare the dependencies in the recipe using the following statement +form: :: + + task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend" + +To better show how to use this statement, consider an example with two +multiconfigs: ``target1`` and ``target2``: :: + + image_task[mcdepends] = "mc:target1:target2:image2:rootfs_task" + +In this example, the +``from_multiconfig`` is "target1" and the ``to_multiconfig`` is "target2". The +task on which the image whose recipe contains image_task depends on the +completion of the rootfs_task used to build out image2, which is +associated with the "target2" multiconfig. + +Once you set up this dependency, you can build the "target1" multiconfig +using a BitBake command as follows: :: + + $ bitbake mc:target1:image1 + +This command executes all the tasks needed to create ``image1`` for the "target1" +multiconfig. Because of the dependency, BitBake also executes through +the ``rootfs_task`` for the "target2" multiconfig build. + +Having a recipe depend on the root filesystem of another build might not +seem that useful. Consider this change to the statement in the image1 +recipe: :: + + image_task[mcdepends] = "mc:target1:target2:image2:image_task" + +In this case, BitBake must create ``image2`` for the "target2" build since +the "target1" build depends on it. + +Because "target1" and "target2" are enabled for multiple configuration +builds and have separate configuration files, BitBake places the +artifacts for each build in the respective temporary build directories. diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst new file mode 100644 index 000000000..ec5d08590 --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-metadata.rst @@ -0,0 +1,1969 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +==================== +Syntax and Operators +==================== + +| + +BitBake files have their own syntax. The syntax has similarities to +several other languages but also has some unique features. This section +describes the available syntax and operators as well as provides +examples. + +Basic Syntax +============ + +This section provides some basic syntax examples. + +Basic Variable Setting +---------------------- + +The following example sets ``VARIABLE`` to "value". This assignment +occurs immediately as the statement is parsed. It is a "hard" +assignment. :: + + VARIABLE = "value" + +As expected, if you include leading or +trailing spaces as part of an assignment, the spaces are retained: :: + + VARIABLE = " value" + VARIABLE = "value " + +Setting ``VARIABLE`` to "" sets +it to an empty string, while setting the variable to " " sets it to a +blank space (i.e. these are not the same values). :: + + VARIABLE = "" + VARIABLE = " " + +You can use single quotes instead of double quotes when setting a +variable's value. Doing so allows you to use values that contain the +double quote character: :: + + VARIABLE = 'I have a " in my value' + +.. note:: + + Unlike in Bourne shells, single quotes work identically to double + quotes in all other ways. They do not suppress variable expansions. + +Modifying Existing Variables +---------------------------- + +Sometimes you need to modify existing variables. 
Following are some +cases where you might find you want to modify an existing variable: + +- Customize a recipe that uses the variable. + +- Change a variable's default value used in a ``*.bbclass`` file. + +- Change the variable in a ``*.bbappend`` file to override the variable + in the original recipe. + +- Change the variable in a configuration file so that the value + overrides an existing configuration. + +Changing a variable value can sometimes depend on how the value was +originally assigned and also on the desired intent of the change. In +particular, when you append a value to a variable that has a default +value, the resulting value might not be what you expect. In this case, +the value you provide might replace the value rather than append to the +default value. + +If after you have changed a variable's value and something unexplained +occurs, you can use BitBake to check the actual value of the suspect +variable. You can make these checks for both configuration and recipe +level changes: + +- For configuration changes, use the following: :: + + $ bitbake -e + + This + command displays variable values after the configuration files (i.e. + ``local.conf``, ``bblayers.conf``, ``bitbake.conf`` and so forth) + have been parsed. + + .. note:: + + Variables that are exported to the environment are preceded by the + string "export" in the command's output. + +- For recipe changes, use the following: :: + + $ bitbake recipe -e \| grep VARIABLE=" + + This command checks to see if the variable actually makes + it into a specific recipe. + +Line Joining +------------ + +Outside of :ref:`functions `, +BitBake joins any line ending in +a backslash character ("\") with the following line before parsing +statements. The most common use for the "\" character is to split +variable assignments over multiple lines, as in the following example: :: + + FOO = "bar \ + baz \ + qaz" + +Both the "\" character and the newline +character that follow it are removed when joining lines. Thus, no +newline characters end up in the value of ``FOO``. + +Consider this additional example where the two assignments both assign +"barbaz" to ``FOO``: :: + + FOO = "barbaz" + FOO = "bar\ + baz" + +.. note:: + + BitBake does not interpret escape sequences like "\n" in variable + values. For these to have an effect, the value must be passed to some + utility that interprets escape sequences, such as + ``printf`` or ``echo -n``. + +Variable Expansion +------------------ + +Variables can reference the contents of other variables using a syntax +that is similar to variable expansion in Bourne shells. The following +assignments result in A containing "aval" and B evaluating to +"preavalpost". :: + + A = "aval" + B = "pre${A}post" + +.. note:: + + Unlike in Bourne shells, the curly braces are mandatory: Only ``${FOO}`` and not + ``$FOO`` is recognized as an expansion of ``FOO``. + +The "=" operator does not immediately expand variable references in the +right-hand side. Instead, expansion is deferred until the variable +assigned to is actually used. The result depends on the current values +of the referenced variables. The following example should clarify this +behavior: :: + + A = "${B} baz" + B = "${C} bar" + C = "foo" + *At this point, ${A} equals "foo bar baz"* + C = "qux" + *At this point, ${A} equals "qux bar baz"* + B = "norf" + *At this point, ${A} equals "norf baz"\* + +Contrast this behavior with the +:ref:`bitbake-user-manual/bitbake-user-manual-metadata:immediate variable +expansion (:=)` operator. 
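+
+The same deferral can be observed from Python code: ``d.getVar()`` expands
+the value at the time of the call, whereas passing ``False`` as the second
+argument returns the stored, unexpanded string. Here is a minimal sketch
+that assumes the assignments from the example above: ::
+
+   python () {
+       # Prints the expanded value, "norf baz".
+       bb.plain(d.getVar('A'))
+       # Prints the raw, unexpanded value, "${B} baz".
+       bb.plain(d.getVar('A', False))
+   }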
+ +If the variable expansion syntax is used on a variable that does not +exist, the string is kept as is. For example, given the following +assignment, ``BAR`` expands to the literal string "${FOO}" as long as +``FOO`` does not exist. :: + + BAR = "${FOO}" + +Setting a default value (?=) +---------------------------- + +You can use the "?=" operator to achieve a "softer" assignment for a +variable. This type of assignment allows you to define a variable if it +is undefined when the statement is parsed, but to leave the value alone +if the variable has a value. Here is an example: :: + + A ?= "aval" + +If ``A`` is +set at the time this statement is parsed, the variable retains its +value. However, if ``A`` is not set, the variable is set to "aval". + +.. note:: + + This assignment is immediate. Consequently, if multiple "?=" + assignments to a single variable exist, the first of those ends up + getting used. + +Setting a weak default value (??=) +---------------------------------- + +It is possible to use a "weaker" assignment than in the previous section +by using the "??=" operator. This assignment behaves identical to "?=" +except that the assignment is made at the end of the parsing process +rather than immediately. Consequently, when multiple "??=" assignments +exist, the last one is used. Also, any "=" or "?=" assignment will +override the value set with "??=". Here is an example: :: + + A ??= "somevalue" + A ??= "someothervalue" + +If ``A`` is set before the above statements are +parsed, the variable retains its value. If ``A`` is not set, the +variable is set to "someothervalue". + +Again, this assignment is a "lazy" or "weak" assignment because it does +not occur until the end of the parsing process. + +Immediate variable expansion (:=) +--------------------------------- + +The ":=" operator results in a variable's contents being expanded +immediately, rather than when the variable is actually used: :: + + T = "123" + A := "test ${T}" + T = "456" + B := "${T} ${C}" + C = "cval" + C := "${C}append" + +In this example, ``A`` contains "test 123", even though the final value +of ``T`` is "456". The variable ``B`` will end up containing "456 +cvalappend". This is because references to undefined variables are +preserved as is during (immediate)expansion. This is in contrast to GNU +Make, where undefined variables expand to nothing. The variable ``C`` +contains "cvalappend" since ``${C}`` immediately expands to "cval". + +.. _appending-and-prepending: + +Appending (+=) and prepending (=+) With Spaces +---------------------------------------------- + +Appending and prepending values is common and can be accomplished using +the "+=" and "=+" operators. These operators insert a space between the +current value and prepended or appended value. + +These operators take immediate effect during parsing. Here are some +examples: :: + + B = "bval" + B += "additionaldata" + C = "cval" + C =+ "test" + +The variable ``B`` contains "bval additionaldata" and ``C`` contains "test +cval". + +.. _appending-and-prepending-without-spaces: + +Appending (.=) and Prepending (=.) Without Spaces +------------------------------------------------- + +If you want to append or prepend values without an inserted space, use +the ".=" and "=." operators. + +These operators take immediate effect during parsing. Here are some +examples: :: + + B = "bval" + B .= "additionaldata" + C = "cval" + C =. "test" + +The variable ``B`` contains "bvaladditionaldata" and ``C`` contains +"testcval". 
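+
+As a quick recap of the four operators covered in this section and the
+previous one, here is a small sketch; the comments show the values that
+``bitbake -e`` would report: ::
+
+   # A becomes "1 2" (a space is inserted)
+   A = "1"
+   A += "2"
+
+   # B becomes "12" (no space)
+   B = "1"
+   B .= "2"
+
+   # C becomes "1 2" (prepended with a space)
+   C = "2"
+   C =+ "1"
+
+   # D becomes "12" (prepended, no space)
+   D = "2"
+   D =. "1"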
+ +Appending and Prepending (Override Style Syntax) +------------------------------------------------ + +You can also append and prepend a variable's value using an override +style syntax. When you use this syntax, no spaces are inserted. + +These operators differ from the ":=", ".=", "=.", "+=", and "=+" +operators in that their effects are applied at variable expansion time +rather than being immediately applied. Here are some examples: :: + + B = "bval" + B_append = " additional data" + C = "cval" + C_prepend = "additional data " + D = "dval" + D_append = "additional data" + +The variable ``B`` +becomes "bval additional data" and ``C`` becomes "additional data cval". +The variable ``D`` becomes "dvaladditional data". + +.. note:: + + You must control all spacing when you use the override syntax. + +It is also possible to append and prepend to shell functions and +BitBake-style Python functions. See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:shell functions`" and ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:bitbake-style python functions`" +sections for examples. + +.. _removing-override-style-syntax: + +Removal (Override Style Syntax) +------------------------------- + +You can remove values from lists using the removal override style +syntax. Specifying a value for removal causes all occurrences of that +value to be removed from the variable. + +When you use this syntax, BitBake expects one or more strings. +Surrounding spaces and spacing are preserved. Here is an example: :: + + FOO = "123 456 789 123456 123 456 123 456" + FOO_remove = "123" + FOO_remove = "456" + FOO2 = " abc def ghi abcdef abc def abc def def" + FOO2_remove = "\ + def \ + abc \ + ghi \ + " + +The variable ``FOO`` becomes +" 789 123456 " and ``FOO2`` becomes " abcdef ". + +Like "_append" and "_prepend", "_remove" is applied at variable +expansion time. + +Override Style Operation Advantages +----------------------------------- + +An advantage of the override style operations "_append", "_prepend", and +"_remove" as compared to the "+=" and "=+" operators is that the +override style operators provide guaranteed operations. For example, +consider a class ``foo.bbclass`` that needs to add the value "val" to +the variable ``FOO``, and a recipe that uses ``foo.bbclass`` as follows: :: + + inherit foo + FOO = "initial" + +If ``foo.bbclass`` uses the "+=" operator, +as follows, then the final value of ``FOO`` will be "initial", which is +not what is desired: :: + + FOO += "val" + +If, on the other hand, ``foo.bbclass`` +uses the "_append" operator, then the final value of ``FOO`` will be +"initial val", as intended: :: + + FOO_append = " val" + +.. note:: + + It is never necessary to use "+=" together with "_append". The following + sequence of assignments appends "barbaz" to FOO: :: + + FOO_append = "bar" + FOO_append = "baz" + + + The only effect of changing the second assignment in the previous + example to use "+=" would be to add a space before "baz" in the + appended value (due to how the "+=" operator works). + +Another advantage of the override style operations is that you can +combine them with other overrides as described in the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax (overrides)`" section. + +Variable Flag Syntax +-------------------- + +Variable flags are BitBake's implementation of variable properties or +attributes. It is a way of tagging extra information onto a variable. 
+You can find more out about variable flags in general in the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section. + +You can define, append, and prepend values to variable flags. All the +standard syntax operations previously mentioned work for variable flags +except for override style syntax (i.e. "_prepend", "_append", and +"_remove"). + +Here are some examples showing how to set variable flags: :: + + FOO[a] = "abc" + FOO[b] = "123" + FOO[a] += "456" + +The variable ``FOO`` has two flags: +``[a]`` and ``[b]``. The flags are immediately set to "abc" and "123", +respectively. The ``[a]`` flag becomes "abc 456". + +No need exists to pre-define variable flags. You can simply start using +them. One extremely common application is to attach some brief +documentation to a BitBake variable as follows: :: + + CACHE[doc] = "The directory holding the cache of the metadata." + +Inline Python Variable Expansion +-------------------------------- + +You can use inline Python variable expansion to set variables. Here is +an example: :: + + DATE = "${@time.strftime('%Y%m%d',time.gmtime())}" + +This example results in the ``DATE`` variable being set to the current date. + +Probably the most common use of this feature is to extract the value of +variables from BitBake's internal data dictionary, ``d``. The following +lines select the values of a package name and its version number, +respectively: :: + + PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}" + PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}" + +.. note:: + + Inline Python expressions work just like variable expansions insofar as the + "=" and ":=" operators are concerned. Given the following assignment, foo() + is called each time FOO is expanded: :: + + FOO = "${@foo()}" + + Contrast this with the following immediate assignment, where foo() is only + called once, while the assignment is parsed: :: + + FOO := "${@foo()}" + +For a different way to set variables with Python code during parsing, +see the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:anonymous python functions`" section. + +Unsetting variables +------------------- + +It is possible to completely remove a variable or a variable flag from +BitBake's internal data dictionary by using the "unset" keyword. Here is +an example: :: + + unset DATE + unset do_fetch[noexec] + +These two statements remove the ``DATE`` and the ``do_fetch[noexec]`` flag. + +Providing Pathnames +------------------- + +When specifying pathnames for use with BitBake, do not use the tilde +("~") character as a shortcut for your home directory. Doing so might +cause BitBake to not recognize the path since BitBake does not expand +this character in the same way a shell would. + +Instead, provide a fuller path as the following example illustrates: :: + + BBLAYERS ?= " \ + /home/scott-lenovo/LayerA \ + " + +Exporting Variables to the Environment +====================================== + +You can export variables to the environment of running tasks by using +the ``export`` keyword. For example, in the following example, the +``do_foo`` task prints "value from the environment" when run: :: + + export ENV_VARIABLE + ENV_VARIABLE = "value from the environment" + + do_foo() { + bbplain "$ENV_VARIABLE" + } + +.. note:: + + BitBake does not expand ``$ENV_VARIABLE`` in this case because it lacks the + obligatory ``{}`` . Rather, ``$ENV_VARIABLE`` is expanded by the shell. 
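+
+For contrast, here is a sketch of what happens without the ``export``
+keyword. The variable (an arbitrary example name) still exists in the
+datastore, but it is never placed into the task's environment, so the shell
+expansion yields an empty string: ::
+
+   OTHER_VARIABLE = "datastore only"
+
+   do_bar() {
+       # Prints an empty line: OTHER_VARIABLE was not exported, so the
+       # shell finds nothing in the environment, and BitBake does not
+       # substitute $OTHER_VARIABLE because the curly braces are missing.
+       bbplain "$OTHER_VARIABLE"
+   }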
+ +It does not matter whether ``export ENV_VARIABLE`` appears before or +after assignments to ``ENV_VARIABLE``. + +It is also possible to combine ``export`` with setting a value for the +variable. Here is an example: :: + + export ENV_VARIABLE = "variable-value" + +In the output of ``bitbake -e``, variables that are exported to the +environment are preceded by "export". + +Among the variables commonly exported to the environment are ``CC`` and +``CFLAGS``, which are picked up by many build systems. + +Conditional Syntax (Overrides) +============================== + +BitBake uses :term:`OVERRIDES` to control what +variables are overridden after BitBake parses recipes and configuration +files. This section describes how you can use ``OVERRIDES`` as +conditional metadata, talks about key expansion in relationship to +``OVERRIDES``, and provides some examples to help with understanding. + +Conditional Metadata +-------------------- + +You can use ``OVERRIDES`` to conditionally select a specific version of +a variable and to conditionally append or prepend the value of a +variable. + +.. note:: + + Overrides can only use lower-case characters. Additionally, + underscores are not permitted in override names as they are used to + separate overrides from each other and from the variable name. + +- *Selecting a Variable:* The ``OVERRIDES`` variable is a + colon-character-separated list that contains items for which you want + to satisfy conditions. Thus, if you have a variable that is + conditional on “arm”, and “arm” is in ``OVERRIDES``, then the + “arm”-specific version of the variable is used rather than the + non-conditional version. Here is an example: :: + + OVERRIDES = "architecture:os:machine" + TEST = "default" + TEST_os = "osspecific" + TEST_nooverride = "othercondvalue" + + In this example, the ``OVERRIDES`` + variable lists three overrides: "architecture", "os", and "machine". + The variable ``TEST`` by itself has a default value of "default". You + select the os-specific version of the ``TEST`` variable by appending + the "os" override to the variable (i.e. ``TEST_os``). + + To better understand this, consider a practical example that assumes + an OpenEmbedded metadata-based Linux kernel recipe file. The + following lines from the recipe file first set the kernel branch + variable ``KBRANCH`` to a default value, then conditionally override + that value based on the architecture of the build: :: + + KBRANCH = "standard/base" + KBRANCH_qemuarm = "standard/arm-versatile-926ejs" + KBRANCH_qemumips = "standard/mti-malta32" + KBRANCH_qemuppc = "standard/qemuppc" + KBRANCH_qemux86 = "standard/common-pc/base" + KBRANCH_qemux86-64 = "standard/common-pc-64/base" + KBRANCH_qemumips64 = "standard/mti-malta64" + +- *Appending and Prepending:* BitBake also supports append and prepend + operations to variable values based on whether a specific item is + listed in ``OVERRIDES``. Here is an example: :: + + DEPENDS = "glibc ncurses" + OVERRIDES = "machine:local" + DEPENDS_append_machine = "libmad" + + In this example, ``DEPENDS`` becomes "glibc ncurses libmad". 
+ + Again, using an OpenEmbedded metadata-based kernel recipe file as an + example, the following lines will conditionally append to the + ``KERNEL_FEATURES`` variable based on the architecture: :: + + KERNEL_FEATURES_append = " ${KERNEL_EXTRA_FEATURES}" + KERNEL_FEATURES_append_qemux86=" cfg/sound.scc cfg/paravirt_kvm.scc" + KERNEL_FEATURES_append_qemux86-64=" cfg/sound.scc cfg/paravirt_kvm.scc" + +- *Setting a Variable for a Single Task:* BitBake supports setting a + variable just for the duration of a single task. Here is an example: :: + + FOO_task-configure = "val 1" + FOO_task-compile = "val 2" + + In the + previous example, ``FOO`` has the value "val 1" while the + ``do_configure`` task is executed, and the value "val 2" while the + ``do_compile`` task is executed. + + Internally, this is implemented by prepending the task (e.g. + "task-compile:") to the value of + :term:`OVERRIDES` for the local datastore of the + ``do_compile`` task. + + You can also use this syntax with other combinations (e.g. + "``_prepend``") as shown in the following example: :: + + EXTRA_OEMAKE_prepend_task-compile = "${PARALLEL_MAKE} " + +Key Expansion +------------- + +Key expansion happens when the BitBake datastore is finalized. To better +understand this, consider the following example: :: + + A${B} = "X" + B = "2" + A2 = "Y" + +In this case, after all the parsing is complete, BitBake expands +``${B}`` into "2". This expansion causes ``A2``, which was set to "Y" +before the expansion, to become "X". + +.. _variable-interaction-worked-examples: + +Examples +-------- + +Despite the previous explanations that show the different forms of +variable definitions, it can be hard to work out exactly what happens +when variable operators, conditional overrides, and unconditional +overrides are combined. This section presents some common scenarios +along with explanations for variable interactions that typically confuse +users. + +There is often confusion concerning the order in which overrides and +various "append" operators take effect. Recall that an append or prepend +operation using "_append" and "_prepend" does not result in an immediate +assignment as would "+=", ".=", "=+", or "=.". Consider the following +example: :: + + OVERRIDES = "foo" + A = "Z" + A_foo_append = "X" + +For this case, +``A`` is unconditionally set to "Z" and "X" is unconditionally and +immediately appended to the variable ``A_foo``. Because overrides have +not been applied yet, ``A_foo`` is set to "X" due to the append and +``A`` simply equals "Z". + +Applying overrides, however, changes things. Since "foo" is listed in +``OVERRIDES``, the conditional variable ``A`` is replaced with the "foo" +version, which is equal to "X". So effectively, ``A_foo`` replaces +``A``. + +This next example changes the order of the override and the append: :: + + OVERRIDES = "foo" + A = "Z" + A_append_foo = "X" + +For this case, before +overrides are handled, ``A`` is set to "Z" and ``A_append_foo`` is set +to "X". Once the override for "foo" is applied, however, ``A`` gets +appended with "X". Consequently, ``A`` becomes "ZX". Notice that spaces +are not appended. + +This next example has the order of the appends and overrides reversed +back as in the first example: :: + + OVERRIDES = "foo" + A = "Y" + A_foo_append = "Z" + A_foo_append = "X" + +For this case, before any overrides are resolved, +``A`` is set to "Y" using an immediate assignment. 
After this immediate +assignment, ``A_foo`` is set to "Z", and then further appended with "X" +leaving the variable set to "ZX". Finally, applying the override for +"foo" results in the conditional variable ``A`` becoming "ZX" (i.e. +``A`` is replaced with ``A_foo``). + +This final example mixes in some varying operators: :: + + A = "1" + A_append = "2" + A_append = "3" + A += "4" + A .= "5" + +For this case, the type of append +operators are affecting the order of assignments as BitBake passes +through the code multiple times. Initially, ``A`` is set to "1 45" +because of the three statements that use immediate operators. After +these assignments are made, BitBake applies the "_append" operations. +Those operations result in ``A`` becoming "1 4523". + +Sharing Functionality +===================== + +BitBake allows for metadata sharing through include files (``.inc``) and +class files (``.bbclass``). For example, suppose you have a piece of +common functionality such as a task definition that you want to share +between more than one recipe. In this case, creating a ``.bbclass`` file +that contains the common functionality and then using the ``inherit`` +directive in your recipes to inherit the class would be a common way to +share the task. + +This section presents the mechanisms BitBake provides to allow you to +share functionality between recipes. Specifically, the mechanisms +include ``include``, ``inherit``, ``INHERIT``, and ``require`` +directives. + +Locating Include and Class Files +-------------------------------- + +BitBake uses the :term:`BBPATH` variable to locate +needed include and class files. Additionally, BitBake searches the +current directory for ``include`` and ``require`` directives. + +.. note:: + + The BBPATH variable is analogous to the environment variable PATH . + +In order for include and class files to be found by BitBake, they need +to be located in a "classes" subdirectory that can be found in +``BBPATH``. + +``inherit`` Directive +--------------------- + +When writing a recipe or class file, you can use the ``inherit`` +directive to inherit the functionality of a class (``.bbclass``). +BitBake only supports this directive when used within recipe and class +files (i.e. ``.bb`` and ``.bbclass``). + +The ``inherit`` directive is a rudimentary means of specifying +functionality contained in class files that your recipes require. For +example, you can easily abstract out the tasks involved in building a +package that uses Autoconf and Automake and put those tasks into a class +file and then have your recipe inherit that class file. + +As an example, your recipes could use the following directive to inherit +an ``autotools.bbclass`` file. The class file would contain common +functionality for using Autotools that could be shared across recipes: :: + + inherit autotools + +In this case, BitBake would search for the directory +``classes/autotools.bbclass`` in ``BBPATH``. + +.. note:: + + You can override any values and functions of the inherited class + within your recipe by doing so after the "inherit" statement. + +If you want to use the directive to inherit multiple classes, separate +them with spaces. The following example shows how to inherit both the +``buildhistory`` and ``rm_work`` classes: :: + + inherit buildhistory rm_work + +An advantage with the inherit directive as compared to both the +:ref:`include ` and :ref:`require ` +directives is that you can inherit class files conditionally. 
You can +accomplish this by using a variable expression after the ``inherit`` +statement. Here is an example: :: + + inherit ${VARNAME} + +If ``VARNAME`` is +going to be set, it needs to be set before the ``inherit`` statement is +parsed. One way to achieve a conditional inherit in this case is to use +overrides: :: + + VARIABLE = "" + VARIABLE_someoverride = "myclass" + +Another method is by using anonymous Python. Here is an example: :: + + python () { + if condition == value: + d.setVar('VARIABLE', 'myclass') + else: + d.setVar('VARIABLE', '') + } + +Alternatively, you could use an in-line Python expression in the +following form: :: + + inherit ${@'classname' if condition else ''} + inherit ${@functionname(params)} + +In all cases, if the expression evaluates to an +empty string, the statement does not trigger a syntax error because it +becomes a no-op. + +``include`` Directive +--------------------- + +BitBake understands the ``include`` directive. This directive causes +BitBake to parse whatever file you specify, and to insert that file at +that location. The directive is much like its equivalent in Make except +that if the path specified on the include line is a relative path, +BitBake locates the first file it can find within ``BBPATH``. + +The include directive is a more generic method of including +functionality as compared to the :ref:`inherit ` +directive, which is restricted to class (i.e. ``.bbclass``) files. The +include directive is applicable for any other kind of shared or +encapsulated functionality or configuration that does not suit a +``.bbclass`` file. + +As an example, suppose you needed a recipe to include some self-test +definitions: :: + + include test_defs.inc + +.. note:: + + The include directive does not produce an error when the file cannot be + found. Consequently, it is recommended that if the file you are including is + expected to exist, you should use :ref:`require ` instead + of include . Doing so makes sure that an error is produced if the file cannot + be found. + +.. _require-inclusion: + +``require`` Directive +--------------------- + +BitBake understands the ``require`` directive. This directive behaves +just like the ``include`` directive with the exception that BitBake +raises a parsing error if the file to be included cannot be found. Thus, +any file you require is inserted into the file that is being parsed at +the location of the directive. + +The require directive, like the include directive previously described, +is a more generic method of including functionality as compared to the +:ref:`inherit ` directive, which is restricted to class +(i.e. ``.bbclass``) files. The require directive is applicable for any +other kind of shared or encapsulated functionality or configuration that +does not suit a ``.bbclass`` file. + +Similar to how BitBake handles :ref:`include `, if +the path specified on the require line is a relative path, BitBake +locates the first file it can find within ``BBPATH``. + +As an example, suppose you have two versions of a recipe (e.g. +``foo_1.2.2.bb`` and ``foo_2.0.0.bb``) where each version contains some +identical functionality that could be shared. You could create an +include file named ``foo.inc`` that contains the common definitions +needed to build "foo". You need to be sure ``foo.inc`` is located in the +same directory as your two recipe files as well. 
Once these conditions +are set up, you can share the functionality using a ``require`` +directive from within each recipe: :: + + require foo.inc + +``INHERIT`` Configuration Directive +----------------------------------- + +When creating a configuration file (``.conf``), you can use the +:term:`INHERIT` configuration directive to inherit a +class. BitBake only supports this directive when used within a +configuration file. + +As an example, suppose you needed to inherit a class file called +``abc.bbclass`` from a configuration file as follows: :: + + INHERIT += "abc" + +This configuration directive causes the named class to be inherited at +the point of the directive during parsing. As with the ``inherit`` +directive, the ``.bbclass`` file must be located in a "classes" +subdirectory in one of the directories specified in ``BBPATH``. + +.. note:: + + Because .conf files are parsed first during BitBake's execution, using + INHERIT to inherit a class effectively inherits the class globally (i.e. for + all recipes). + +If you want to use the directive to inherit multiple classes, you can +provide them on the same line in the ``local.conf`` file. Use spaces to +separate the classes. The following example shows how to inherit both +the ``autotools`` and ``pkgconfig`` classes: :: + + INHERIT += "autotools pkgconfig" + +Functions +========= + +As with most languages, functions are the building blocks that are used +to build up operations into tasks. BitBake supports these types of +functions: + +- *Shell Functions:* Functions written in shell script and executed + either directly as functions, tasks, or both. They can also be called + by other shell functions. + +- *BitBake-Style Python Functions:* Functions written in Python and + executed by BitBake or other Python functions using + ``bb.build.exec_func()``. + +- *Python Functions:* Functions written in Python and executed by + Python. + +- *Anonymous Python Functions:* Python functions executed automatically + during parsing. + +Regardless of the type of function, you can only define them in class +(``.bbclass``) and recipe (``.bb`` or ``.inc``) files. + +Shell Functions +--------------- + +Functions written in shell script and executed either directly as +functions, tasks, or both. They can also be called by other shell +functions. Here is an example shell function definition: :: + + some_function () { + echo "Hello World" + } + +When you create these types of functions in +your recipe or class files, you need to follow the shell programming +rules. The scripts are executed by ``/bin/sh``, which may not be a bash +shell but might be something such as ``dash``. You should not use +Bash-specific script (bashisms). + +Overrides and override-style operators like ``_append`` and ``_prepend`` +can also be applied to shell functions. Most commonly, this application +would be used in a ``.bbappend`` file to modify functions in the main +recipe. It can also be used to modify functions inherited from classes. + +As an example, consider the following: :: + + do_foo() { + bbplain first + fn + } + + fn_prepend() { + bbplain second + } + + fn() { + bbplain third + } + + do_foo_append() { + bbplain fourth + } + +Running ``do_foo`` prints the following: :: + + recipename do_foo: first + recipename do_foo: second + recipename do_foo: third + recipename do_foo: fourth + +.. note:: + + Overrides and override-style operators can be applied to any shell + function, not just :ref:`tasks `. 
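+
+For instance, a machine override can swap in a different implementation of a
+helper function entirely. Here is a brief sketch that assumes "qemuarm" is
+listed in :term:`OVERRIDES`: ::
+
+   fn() {
+       bbplain "default implementation"
+   }
+
+   # Used in place of the version above when "qemuarm" is in OVERRIDES.
+   fn_qemuarm() {
+       bbplain "qemuarm-specific implementation"
+   }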
+ +You can use the ``bitbake -e`` recipename command to view the final +assembled function after all overrides have been applied. + +BitBake-Style Python Functions +------------------------------ + +These functions are written in Python and executed by BitBake or other +Python functions using ``bb.build.exec_func()``. + +An example BitBake function is: :: + + python some_python_function () { + d.setVar("TEXT", "Hello World") + print d.getVar("TEXT") + } + +Because the +Python "bb" and "os" modules are already imported, you do not need to +import these modules. Also in these types of functions, the datastore +("d") is a global variable and is always automatically available. + +.. note:: + + Variable expressions (e.g. ``${X}`` ) are no longer expanded within Python + functions. This behavior is intentional in order to allow you to freely set + variable values to expandable expressions without having them expanded + prematurely. If you do wish to expand a variable within a Python function, + use ``d.getVar("X")`` . Or, for more complicated expressions, use ``d.expand()``. + +Similar to shell functions, you can also apply overrides and +override-style operators to BitBake-style Python functions. + +As an example, consider the following: :: + + python do_foo_prepend() { + bb.plain("first") + } + + python do_foo() { + bb.plain("second") + } + + python do_foo_append() { + bb.plain("third") + } + +Running ``do_foo`` prints the following: :: + + recipename do_foo: first + recipename do_foo: second + recipename do_foo: third + +You can use the ``bitbake -e`` recipename command to view +the final assembled function after all overrides have been applied. + +Python Functions +---------------- + +These functions are written in Python and are executed by other Python +code. Examples of Python functions are utility functions that you intend +to call from in-line Python or from within other Python functions. Here +is an example: :: + + def get_depends(d): + if d.getVar('SOMECONDITION'): + return "dependencywithcond" + else: + return "dependency" + + SOMECONDITION = "1" + DEPENDS = "${@get_depends(d)}" + +This would result in ``DEPENDS`` containing ``dependencywithcond``. + +Here are some things to know about Python functions: + +- Python functions can take parameters. + +- The BitBake datastore is not automatically available. Consequently, + you must pass it in as a parameter to the function. + +- The "bb" and "os" Python modules are automatically available. You do + not need to import them. + +BitBake-Style Python Functions Versus Python Functions +------------------------------------------------------ + +Following are some important differences between BitBake-style Python +functions and regular Python functions defined with "def": + +- Only BitBake-style Python functions can be :ref:`tasks `. + +- Overrides and override-style operators can only be applied to + BitBake-style Python functions. + +- Only regular Python functions can take arguments and return values. + +- :ref:`Variable flags ` such as + ``[dirs]``, ``[cleandirs]``, and ``[lockfiles]`` can be used on BitBake-style + Python functions, but not on regular Python functions. + +- BitBake-style Python functions generate a separate + ``${``\ :term:`T`\ ``}/run.``\ function-name\ ``.``\ pid + script that is executed to run the function, and also generate a log + file in ``${T}/log.``\ function-name\ ``.``\ pid if they are executed + as tasks. + + Regular Python functions execute "inline" and do not generate any + files in ``${T}``. 
+ +- Regular Python functions are called with the usual Python syntax. + BitBake-style Python functions are usually tasks and are called + directly by BitBake, but can also be called manually from Python code + by using the ``bb.build.exec_func()`` function. Here is an example: :: + + bb.build.exec_func("my_bitbake_style_function", d) + + .. note:: + + ``bb.build.exec_func()`` can also be used to run shell functions from Python + code. If you want to run a shell function before a Python function within + the same task, then you can use a parent helper Python function that + starts by running the shell function with ``bb.build.exec_func()`` and then + runs the Python code. + + To detect errors from functions executed with + ``bb.build.exec_func()``, you can catch the ``bb.build.FuncFailed`` + exception. + + .. note:: + + Functions in metadata (recipes and classes) should not themselves raise + ``bb.build.FuncFailed``. Rather, ``bb.build.FuncFailed`` should be viewed as a + general indicator that the called function failed by raising an + exception. For example, an exception raised by ``bb.fatal()`` will be caught + inside ``bb.build.exec_func()``, and a ``bb.build.FuncFailed`` will be raised in + response. + +Due to their simplicity, you should prefer regular Python functions over +BitBake-style Python functions unless you need a feature specific to +BitBake-style Python functions. Regular Python functions in metadata are +a more recent invention than BitBake-style Python functions, and older +code tends to use ``bb.build.exec_func()`` more often. + +Anonymous Python Functions +-------------------------- + +Sometimes it is useful to set variables or perform other operations +programmatically during parsing. To do this, you can define special +Python functions, called anonymous Python functions, that run at the end +of parsing. For example, the following conditionally sets a variable +based on the value of another variable: :: + + python () { + if d.getVar('SOMEVAR') == 'value': + d.setVar('ANOTHERVAR', 'value2') + } + +An equivalent way to mark a function as an anonymous function is to give it +the name "__anonymous", rather than no name. + +Anonymous Python functions always run at the end of parsing, regardless +of where they are defined. If a recipe contains many anonymous +functions, they run in the same order as they are defined within the +recipe. As an example, consider the following snippet: :: + + python () { + d.setVar('FOO', 'foo 2') + } + + FOO = "foo 1" + + python () { + d.appendVar('BAR',' bar 2') + } + + BAR = "bar 1" + +The previous example is conceptually +equivalent to the following snippet: :: + + FOO = "foo 1" + BAR = "bar 1" + FOO = "foo 2" + BAR += "bar 2" + +``FOO`` ends up with the value "foo 2", and +``BAR`` with the value "bar 1 bar 2". Just as in the second snippet, the +values set for the variables within the anonymous functions become +available to tasks, which always run after parsing. + +Overrides and override-style operators such as "``_append``" are applied +before anonymous functions run. In the following example, ``FOO`` ends +up with the value "foo from anonymous": :: + + FOO = "foo" + FOO_append = " from outside" + + python () { + d.setVar("FOO", "foo from anonymous") + } + +For methods +you can use with anonymous Python functions, see the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:functions you can call from within python`" +section. 
For a different method to run Python code during parsing, see +the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inline python variable expansion`" section. + +Flexible Inheritance for Class Functions +---------------------------------------- + +Through coding techniques and the use of ``EXPORT_FUNCTIONS``, BitBake +supports exporting a function from a class such that the class function +appears as the default implementation of the function, but can still be +called if a recipe inheriting the class needs to define its own version +of the function. + +To understand the benefits of this feature, consider the basic scenario +where a class defines a task function and your recipe inherits the +class. In this basic scenario, your recipe inherits the task function as +defined in the class. If desired, your recipe can add to the start and +end of the function by using the "_prepend" or "_append" operations +respectively, or it can redefine the function completely. However, if it +redefines the function, there is no means for it to call the class +version of the function. ``EXPORT_FUNCTIONS`` provides a mechanism that +enables the recipe's version of the function to call the original +version of the function. + +To make use of this technique, you need the following things in place: + +- The class needs to define the function as follows: :: + + classname_functionname + + For example, if you have a class file + ``bar.bbclass`` and a function named ``do_foo``, the class must + define the function as follows: :: + + bar_do_foo + +- The class needs to contain the ``EXPORT_FUNCTIONS`` statement as + follows: :: + + EXPORT_FUNCTIONS functionname + + For example, continuing with + the same example, the statement in the ``bar.bbclass`` would be as + follows: :: + + EXPORT_FUNCTIONS do_foo + +- You need to call the function appropriately from within your recipe. + Continuing with the same example, if your recipe needs to call the + class version of the function, it should call ``bar_do_foo``. + Assuming ``do_foo`` was a shell function and ``EXPORT_FUNCTIONS`` was + used as above, the recipe's function could conditionally call the + class version of the function as follows: :: + + do_foo() { + if [ somecondition ] ; then + bar_do_foo + else + # Do something else + fi + } + + To call your modified version of the function as defined in your recipe, + call it as ``do_foo``. + +With these conditions met, your single recipe can freely choose between +the original function as defined in the class file and the modified +function in your recipe. If you do not set up these conditions, you are +limited to using one function or the other. + +Tasks +===== + +Tasks are BitBake execution units that make up the steps that BitBake +can run for a given recipe. Tasks are only supported in recipes and +classes (i.e. in ``.bb`` files and files included or inherited from +``.bb`` files). By convention, tasks have names that start with "do\_". + +Promoting a Function to a Task +------------------------------ + +Tasks are either :ref:`shell functions ` or +:ref:`BitBake-style Python functions ` +that have been promoted to tasks by using the ``addtask`` command. The +``addtask`` command can also optionally describe dependencies between +the task and other tasks. 
Here is an example that shows how to define a +task and declare some dependencies: :: + + python do_printdate () { + import time + print time.strftime('%Y%m%d', time.gmtime()) + } + addtask printdate after do_fetch before do_build + +The first argument to ``addtask`` is the name +of the function to promote to a task. If the name does not start with +"do\_", "do\_" is implicitly added, which enforces the convention that all +task names start with "do\_". + +In the previous example, the ``do_printdate`` task becomes a dependency +of the ``do_build`` task, which is the default task (i.e. the task run +by the ``bitbake`` command unless another task is specified explicitly). +Additionally, the ``do_printdate`` task becomes dependent upon the +``do_fetch`` task. Running the ``do_build`` task results in the +``do_printdate`` task running first. + +.. note:: + + If you try out the previous example, you might see that the + ``do_printdate`` + task is only run the first time you build the recipe with the + ``bitbake`` + command. This is because BitBake considers the task "up-to-date" + after that initial run. If you want to force the task to always be + rerun for experimentation purposes, you can make BitBake always + consider the task "out-of-date" by using the + :ref:`[nostamp] ` + variable flag, as follows: :: + + do_printdate[nostamp] = "1" + + You can also explicitly run the task and provide the + -f option as follows: :: + + $ bitbake recipe -c printdate -f + + When manually selecting a task to run with the bitbake ``recipe + -c task`` command, you can omit the "do\_" prefix as part of the task + name. + +You might wonder about the practical effects of using ``addtask`` +without specifying any dependencies as is done in the following example: :: + + addtask printdate + +In this example, assuming dependencies have not been +added through some other means, the only way to run the task is by +explicitly selecting it with ``bitbake`` recipe ``-c printdate``. You +can use the ``do_listtasks`` task to list all tasks defined in a recipe +as shown in the following example: :: + + $ bitbake recipe -c listtasks + +For more information on task dependencies, see the +":ref:`bitbake-user-manual/bitbake-user-manual-execution:dependencies`" section. + +See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section for information +on variable flags you can use with tasks. + +Deleting a Task +--------------- + +As well as being able to add tasks, you can delete them. Simply use the +``deltask`` command to delete a task. For example, to delete the example +task used in the previous sections, you would use: :: + + deltask printdate + +If you delete a task using the ``deltask`` command and the task has +dependencies, the dependencies are not reconnected. For example, suppose +you have three tasks named ``do_a``, ``do_b``, and ``do_c``. +Furthermore, ``do_c`` is dependent on ``do_b``, which in turn is +dependent on ``do_a``. Given this scenario, if you use ``deltask`` to +delete ``do_b``, the implicit dependency relationship between ``do_c`` +and ``do_a`` through ``do_b`` no longer exists, and ``do_c`` +dependencies are not updated to include ``do_a``. Thus, ``do_c`` is free +to run before ``do_a``. 
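+
+Here is a brief sketch of that scenario, assuming the three task functions
+are defined elsewhere in the recipe: ::
+
+   addtask do_a
+   addtask do_b after do_a
+   addtask do_c after do_b
+
+   # After this, do_c no longer depends, even indirectly, on do_a.
+   deltask do_b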
+ +If you want dependencies such as these to remain intact, use the +``[noexec]`` varflag to disable the task instead of using the +``deltask`` command to delete it: :: + + do_b[noexec] = "1" + +Passing Information Into the Build Task Environment +--------------------------------------------------- + +When running a task, BitBake tightly controls the shell execution +environment of the build tasks to make sure unwanted contamination from +the build machine cannot influence the build. + +.. note:: + + By default, BitBake cleans the environment to include only those + things exported or listed in its whitelist to ensure that the build + environment is reproducible and consistent. You can prevent this + "cleaning" by setting the :term:`BB_PRESERVE_ENV` variable. + +Consequently, if you do want something to get passed into the build task +environment, you must take these two steps: + +#. Tell BitBake to load what you want from the environment into the + datastore. You can do so through the + :term:`BB_ENV_WHITELIST` and + :term:`BB_ENV_EXTRAWHITE` variables. For + example, assume you want to prevent the build system from accessing + your ``$HOME/.ccache`` directory. The following command "whitelists" + the environment variable ``CCACHE_DIR`` causing BitBake to allow that + variable into the datastore: :: + + export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE CCACHE_DIR" + +#. Tell BitBake to export what you have loaded into the datastore to the + task environment of every running task. Loading something from the + environment into the datastore (previous step) only makes it + available in the datastore. To export it to the task environment of + every running task, use a command similar to the following in your + local configuration file ``local.conf`` or your distribution + configuration file: :: + + export CCACHE_DIR + + .. note:: + + A side effect of the previous steps is that BitBake records the + variable as a dependency of the build process in things like the + setscene checksums. If doing so results in unnecessary rebuilds of + tasks, you can whitelist the variable so that the setscene code + ignores the dependency when it creates checksums. + +Sometimes, it is useful to be able to obtain information from the +original execution environment. BitBake saves a copy of the original +environment into a special variable named :term:`BB_ORIGENV`. + +The ``BB_ORIGENV`` variable returns a datastore object that can be +queried using the standard datastore operators such as +``getVar(, False)``. The datastore object is useful, for example, to +find the original ``DISPLAY`` variable. Here is an example: :: + + origenv = d.getVar("BB_ORIGENV", False) + bar = origenv.getVar("BAR", False) + +The previous example returns ``BAR`` from the original execution +environment. + +Variable Flags +============== + +Variable flags (varflags) help control a task's functionality and +dependencies. BitBake reads and writes varflags to the datastore using +the following command forms: :: + + variable = d.getVarFlags("variable") + self.d.setVarFlags("FOO", {"func": True}) + +When working with varflags, the same syntax, with the exception of +overrides, applies. In other words, you can set, append, and prepend +varflags just like variables. See the +":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flag syntax`" section for details. + +BitBake has a defined set of varflags available for recipes and classes. 
+Tasks support a number of these flags which control various
+functionality of the task:
+
+- ``[cleandirs]``: Empty directories that should be created before
+ the task runs. Directories that already exist are removed and
+ recreated to empty them.
+
+- ``[depends]``: Controls inter-task dependencies. See the
+ :term:`DEPENDS` variable and the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inter-task
+ dependencies`" section for more information.
+
+- ``[deptask]``: Controls task build-time dependencies. See the
+ :term:`DEPENDS` variable and the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:build dependencies`" section for more information.
+
+- ``[dirs]``: Directories that should be created before the task
+ runs. Directories that already exist are left as is. The last
+ directory listed is used as the current working directory for the
+ task.
+
+- ``[lockfiles]``: Specifies one or more lockfiles to lock while the
+ task executes. Only one task may hold a lockfile, and any task that
+ attempts to lock an already locked file will block until the lock is
+ released. You can use this variable flag to accomplish mutual
+ exclusion.
+
+- ``[noexec]``: When set to "1", marks the task as being empty, with
+ no execution required. You can use the ``[noexec]`` flag to set up
+ tasks as dependency placeholders, or to disable tasks defined
+ elsewhere that are not needed in a particular recipe.
+
+- ``[nostamp]``: When set to "1", tells BitBake to not generate a
+ stamp file for a task, which implies the task should always be
+ executed.
+
+ .. caution::
+
+ Any task that depends (possibly indirectly) on a ``[nostamp]`` task will
+ always be executed as well. This can cause unnecessary rebuilding if you
+ are not careful.
+
+- ``[number_threads]``: Limits tasks to a specific number of
+ simultaneous threads during execution. This varflag is useful when
+ your build host has a large number of cores but certain tasks need to
+ be rate-limited due to various kinds of resource constraints (e.g. to
+ avoid network throttling). ``number_threads`` works similarly to the
+ :term:`BB_NUMBER_THREADS` variable but is task-specific.
+
+ Set the value globally. For example, the following makes sure the
+ ``do_fetch`` task uses no more than two simultaneous execution
+ threads: ::
+
+ do_fetch[number_threads] = "2"
+
+ .. warning::
+
+ - Setting the varflag in individual recipes rather than globally
+ can result in unpredictable behavior.
+
+ - Setting the varflag to a value greater than the value used in
+ the ``BB_NUMBER_THREADS`` variable causes ``number_threads`` to
+ have no effect.
+
+- ``[postfuncs]``: List of functions to call after the completion of
+ the task.
+
+- ``[prefuncs]``: List of functions to call before the task executes.
+
+- ``[rdepends]``: Controls inter-task runtime dependencies. See the
+ :term:`RDEPENDS` variable, the
+ :term:`RRECOMMENDS` variable, and the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:inter-task dependencies`" section for
+ more information.
+
+- ``[rdeptask]``: Controls task runtime dependencies. See the
+ :term:`RDEPENDS` variable, the
+ :term:`RRECOMMENDS` variable, and the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:runtime dependencies`" section for more
+ information.
+
+- ``[recideptask]``: When set in conjunction with ``recrdeptask``,
+ specifies a task that should be inspected for additional
+ dependencies.
+
+- ``[recrdeptask]``: Controls task recursive runtime dependencies.
+ See the :term:`RDEPENDS` variable, the + :term:`RRECOMMENDS` variable, and the + ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:recursive dependencies`" section for + more information. + +- ``[stamp-extra-info]``: Extra stamp information to append to the + task's stamp. As an example, OpenEmbedded uses this flag to allow + machine-specific tasks. + +- ``[umask]``: The umask to run the task under. + +Several varflags are useful for controlling how signatures are +calculated for variables. For more information on this process, see the +":ref:`bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)`" section. + +- ``[vardeps]``: Specifies a space-separated list of additional + variables to add to a variable's dependencies for the purposes of + calculating its signature. Adding variables to this list is useful, + for example, when a function refers to a variable in a manner that + does not allow BitBake to automatically determine that the variable + is referred to. + +- ``[vardepsexclude]``: Specifies a space-separated list of variables + that should be excluded from a variable's dependencies for the + purposes of calculating its signature. + +- ``[vardepvalue]``: If set, instructs BitBake to ignore the actual + value of the variable and instead use the specified value when + calculating the variable's signature. + +- ``[vardepvalueexclude]``: Specifies a pipe-separated list of + strings to exclude from the variable's value when calculating the + variable's signature. + +Events +====== + +BitBake allows installation of event handlers within recipe and class +files. Events are triggered at certain points during operation, such as +the beginning of operation against a given recipe (i.e. ``*.bb``), the +start of a given task, a task failure, a task success, and so forth. The +intent is to make it easy to do things like email notification on build +failures. + +Following is an example event handler that prints the name of the event +and the content of the ``FILE`` variable: :: + + addhandler myclass_eventhandler + python myclass_eventhandler() { + from bb.event import getName + print("The name of the Event is %s" % getName(e)) + print("The file we run for is %s" % d.getVar('FILE')) + } + myclass_eventhandler[eventmask] = "bb.event.BuildStarted + bb.event.BuildCompleted" + +In the previous example, an eventmask has been +set so that the handler only sees the "BuildStarted" and +"BuildCompleted" events. This event handler gets called every time an +event matching the eventmask is triggered. A global variable "e" is +defined, which represents the current event. With the ``getName(e)`` +method, you can get the name of the triggered event. The global +datastore is available as "d". In legacy code, you might see "e.data" +used to get the datastore. However, realize that "e.data" is deprecated +and you should use "d" going forward. + +The context of the datastore is appropriate to the event in question. +For example, "BuildStarted" and "BuildCompleted" events run before any +tasks are executed so would be in the global configuration datastore +namespace. No recipe-specific metadata exists in that namespace. The +"BuildStarted" and "BuildCompleted" events also run in the main +cooker/server process rather than any worker context. Thus, any changes +made to the datastore would be seen by other cooker/server events within +the current build but not seen outside of that build or in any worker +context. 
Task events run in the actual tasks in question and consequently
+have recipe-specific and task-specific contents. These events run in the
+worker context and are discarded at the end of task execution.
+
+During a standard build, the following common events might occur. The
+following events are the most common kinds of events that most metadata
+might have an interest in viewing:
+
+- ``bb.event.ConfigParsed()``: Fired when the base configuration, which
+ consists of ``bitbake.conf``, ``base.bbclass`` and any global
+ ``INHERIT`` statements, has been parsed. You can see multiple such
+ events when each of the workers parse the base configuration or if
+ the server changes configuration and reparses. Any given datastore
+ only has one such event executed against it, however. If
+ :term:`BB_INVALIDCONF` is set in the datastore by the event
+ handler, the configuration is reparsed and a new event triggered,
+ allowing the metadata to update configuration.
+
+- ``bb.event.HeartbeatEvent()``: Fires at regular time intervals of one
+ second. You can configure the interval time using the
+ ``BB_HEARTBEAT_EVENT`` variable. The event's "time" attribute is the
+ ``time.time()`` value when the event is triggered. This event is
+ useful for activities such as system state monitoring.
+
+- ``bb.event.ParseStarted()``: Fired when BitBake is about to start
+ parsing recipes. This event's "total" attribute represents the number
+ of recipes BitBake plans to parse.
+
+- ``bb.event.ParseProgress()``: Fired as parsing progresses. This
+ event's "current" attribute gives the number of recipes parsed so
+ far, and the event also carries the "total" attribute.
+
+- ``bb.event.ParseCompleted()``: Fired when parsing is complete. This
+ event's "cached", "parsed", "skipped", "virtuals", "masked", and
+ "errors" attributes provide statistics for the parsing results.
+
+- ``bb.event.BuildStarted()``: Fired when a new build starts. BitBake
+ fires multiple "BuildStarted" events (one per configuration) when
+ multiple configuration (multiconfig) is enabled.
+
+- ``bb.build.TaskStarted()``: Fired when a task starts. This event's
+ "taskfile" attribute points to the recipe from which the task
+ originates. The "taskname" attribute, which is the task's name,
+ includes the ``do_`` prefix, and the "logfile" attribute points to
+ where the task's output is stored. Finally, the "time" attribute is
+ the task's execution start time.
+
+- ``bb.build.TaskInvalid()``: Fired if BitBake tries to execute a task
+ that does not exist.
+
+- ``bb.build.TaskFailedSilent()``: Fired for setscene tasks that fail
+ and should not be presented to the user verbosely.
+
+- ``bb.build.TaskFailed()``: Fired for normal tasks that fail.
+
+- ``bb.build.TaskSucceeded()``: Fired when a task successfully
+ completes.
+
+- ``bb.event.BuildCompleted()``: Fired when a build finishes.
+
+- ``bb.cooker.CookerExit()``: Fired when the BitBake server/cooker
+ shuts down. This event is usually only seen by the UIs as a sign they
+ should also shutdown.
+
+This next list of example events occurs based on specific requests to the
+server.
These events are often used to communicate larger pieces of +information from the BitBake server to other parts of BitBake such as +user interfaces: + +- ``bb.event.TreeDataPreparationStarted()`` +- ``bb.event.TreeDataPreparationProgress()`` +- ``bb.event.TreeDataPreparationCompleted()`` +- ``bb.event.DepTreeGenerated()`` +- ``bb.event.CoreBaseFilesFound()`` +- ``bb.event.ConfigFilePathFound()`` +- ``bb.event.FilesMatchingFound()`` +- ``bb.event.ConfigFilesFound()`` +- ``bb.event.TargetsTreeGenerated()`` + +.. _variants-class-extension-mechanism: + +Variants - Class Extension Mechanism +==================================== + +BitBake supports two features that facilitate creating from a single +recipe file multiple incarnations of that recipe file where all +incarnations are buildable. These features are enabled through the +:term:`BBCLASSEXTEND` and :term:`BBVERSIONS` variables. + +.. note:: + + The mechanism for this class extension is extremely specific to the + implementation. Usually, the recipe's :term:`PROVIDES` , :term:`PN` , and + :term:`DEPENDS` variables would need to be modified by the extension + class. For specific examples, see the OE-Core native , nativesdk , and + multilib classes. + +- ``BBCLASSEXTEND``: This variable is a space separated list of + classes used to "extend" the recipe for each variant. Here is an + example that results in a second incarnation of the current recipe + being available. This second incarnation will have the "native" class + inherited. :: + + BBCLASSEXTEND = "native" + +- ``BBVERSIONS``: This variable allows a single recipe to build + multiple versions of a project from a single recipe file. You can + also specify conditional metadata (using the + :term:`OVERRIDES` mechanism) for a single + version, or an optionally named range of versions. Here is an + example: :: + + BBVERSIONS = "1.0 2.0 git" + SRC_URI_git = "git://someurl/somepath.git" + + BBVERSIONS = "1.0.[0-6]:1.0.0+ 1.0.[7-9]:1.0.7+" + SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;patch=1" + + The name of the range defaults to the original version of the recipe. For + example, in OpenEmbedded, the recipe file ``foo_1.0.0+.bb`` creates a default + name range of ``1.0.0+``. This is useful because the range name is not only + placed into overrides, but it is also made available for the metadata to use + in the variable that defines the base recipe versions for use in ``file://`` + search paths (:term:`FILESPATH`). + +Dependencies +============ + +To allow for efficient parallel processing, BitBake handles dependencies +at the task level. Dependencies can exist both between tasks within a +single recipe and between tasks in different recipes. Following are +examples of each: + +- For tasks within a single recipe, a recipe's ``do_configure`` task + might need to complete before its ``do_compile`` task can run. + +- For tasks in different recipes, one recipe's ``do_configure`` task + might require another recipe's ``do_populate_sysroot`` task to finish + first such that the libraries and headers provided by the other + recipe are available. + +This section describes several ways to declare dependencies. Remember, +even though dependencies are declared in different ways, they are all +simply dependencies between tasks. + +.. _dependencies-internal-to-the-bb-file: + +Dependencies Internal to the ``.bb`` File +----------------------------------------- + +BitBake uses the ``addtask`` directive to manage dependencies that are +internal to a given recipe file. 
You can use the ``addtask`` directive
+to indicate when a task is dependent on other tasks or when other tasks
+depend on that task. Here is an example: ::
+
+ addtask printdate after do_fetch before do_build
+
+In this example, the ``do_printdate`` task
+depends on the completion of the ``do_fetch`` task, and the ``do_build``
+task depends on the completion of the ``do_printdate`` task.
+
+.. note::
+
+ For a task to run, it must be a direct or indirect dependency of some
+ other task that is scheduled to run.
+
+ For illustration, here are some examples:
+
+ - The directive ``addtask mytask before do_configure`` causes
+ ``do_mytask`` to run before ``do_configure`` runs. Be aware that
+ ``do_mytask`` still only runs if its :ref:`input
+ checksum <bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)>` has changed since the last time it was
+ run. Changes to the input checksum of ``do_mytask`` also
+ indirectly cause ``do_configure`` to run.
+
+ - The directive ``addtask mytask after do_configure`` by itself
+ never causes ``do_mytask`` to run. ``do_mytask`` can still be run
+ manually as follows: ::
+
+ $ bitbake recipe -c mytask
+
+ Declaring ``do_mytask`` as a dependency of some other task that is
+ scheduled to run also causes it to run. Regardless, the task runs after
+ ``do_configure``.
+
+Build Dependencies
+------------------
+
+BitBake uses the :term:`DEPENDS` variable to manage
+build time dependencies. The ``[deptask]`` varflag for tasks signifies
+the task of each item listed in ``DEPENDS`` that must complete before
+that task can be executed. Here is an example: ::
+
+ do_configure[deptask] = "do_populate_sysroot"
+
+In this example, the ``do_populate_sysroot`` task
+of each item in ``DEPENDS`` must complete before ``do_configure`` can
+execute.
+
+Runtime Dependencies
+--------------------
+
+BitBake uses the :term:`PACKAGES`, :term:`RDEPENDS`, and :term:`RRECOMMENDS`
+variables to manage runtime dependencies.
+
+The ``PACKAGES`` variable lists runtime packages. Each of those packages
+can have ``RDEPENDS`` and ``RRECOMMENDS`` runtime dependencies. The
+``[rdeptask]`` flag for tasks is used to signify the task of each item
+listed as a runtime dependency which must have completed before that
+task can be executed. ::
+
+ do_package_qa[rdeptask] = "do_packagedata"
+
+In the previous
+example, the ``do_packagedata`` task of each item in ``RDEPENDS`` must
+have completed before ``do_package_qa`` can execute.
+Although ``RDEPENDS`` contains entries from the
+runtime dependency namespace, BitBake knows how to map them back
+to the build-time dependency namespace, in which the tasks are defined.
+
+Recursive Dependencies
+----------------------
+
+BitBake uses the ``[recrdeptask]`` flag to manage recursive task
+dependencies. BitBake looks through the build-time and runtime
+dependencies of the current recipe, looks through the task's inter-task
+dependencies, and then adds dependencies for the listed task. Once
+BitBake has accomplished this, it recursively works through the
+dependencies of those tasks. Iterative passes continue until all
+dependencies are discovered and added.
+
+The ``[recrdeptask]`` flag is most commonly used in high-level recipes
+that need to wait for some task to finish "globally". For example,
+``image.bbclass`` has the following: ::
+
+ do_rootfs[recrdeptask] += "do_packagedata"
+
+This statement says that the ``do_packagedata`` task of
+the current recipe and all recipes reachable (by way of dependencies)
+from the image recipe must run before the ``do_rootfs`` task can run.
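+
+As another illustration, a recipe could define its own (hypothetical)
+source-archiving task and make it wait for the ``do_fetch`` task of every
+recipe reachable from the current one through build-time and runtime
+dependencies: ::
+
+ do_archive_sources[recrdeptask] = "do_fetch"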
+ +BitBake allows a task to recursively depend on itself by +referencing itself in the task list: :: + + do_a[recrdeptask] = "do_a do_b" + +In the same way as before, this means that the ``do_a`` +and ``do_b`` tasks of the current recipe and all +recipes reachable (by way of dependencies) from the recipe +must run before the ``do_a`` task can run. In this +case BitBake will ignore the current recipe's ``do_a`` +task circular dependency on itself. + +Inter-Task Dependencies +----------------------- + +BitBake uses the ``[depends]`` flag in a more generic form to manage +inter-task dependencies. This more generic form allows for +inter-dependency checks for specific tasks rather than checks for the +data in ``DEPENDS``. Here is an example: :: + + do_patch[depends] = "quilt-native:do_populate_sysroot" + +In this example, the ``do_populate_sysroot`` task of the target ``quilt-native`` +must have completed before the ``do_patch`` task can execute. + +The ``[rdepends]`` flag works in a similar way but takes targets in the +runtime namespace instead of the build-time dependency namespace. + +Functions You Can Call From Within Python +========================================= + +BitBake provides many functions you can call from within Python +functions. This section lists the most commonly used functions, and +mentions where to find others. + +Functions for Accessing Datastore Variables +------------------------------------------- + +It is often necessary to access variables in the BitBake datastore using +Python functions. The BitBake datastore has an API that allows you this +access. Here is a list of available operations: + +.. list-table:: + :widths: auto + :header-rows: 1 + + * - *Operation* + - *Description* + * - ``d.getVar("X", expand)`` + - Returns the value of variable "X". Using "expand=True" expands the + value. Returns "None" if the variable "X" does not exist. + * - ``d.setVar("X", "value")`` + - Sets the variable "X" to "value" + * - ``d.appendVar("X", "value")`` + - Adds "value" to the end of the variable "X". Acts like ``d.setVar("X", + "value")`` if the variable "X" does not exist. + * - ``d.prependVar("X", "value")`` + - Adds "value" to the start of the variable "X". Acts like + ``d.setVar("X","value")`` if the variable "X" does not exist. + * - ``d.delVar("X")`` + - Deletes the variable "X" from the datastore. Does nothing if the variable + "X" does not exist. + * - ``d.renameVar("X", "Y")`` + - Renames the variable "X" to "Y". Does nothing if the variable "X" does + not exist. + * - ``d.getVarFlag("X", flag, expand)`` + - Returns the value of variable "X". Using "expand=True" expands the + value. Returns "None" if either the variable "X" or the named flag does + not exist. + * - ``d.setVarFlag("X", flag, "value")`` + - Sets the named flag for variable "X" to "value". + * - ``d.appendVarFlag("X", flag, "value")`` + - Appends "value" to the named flag on the variable "X". Acts like + ``d.setVarFlag("X", flag, "value")`` if the named flag does not exist. + * - ``d.prependVarFlag("X", flag, "value")`` + - Prepends "value" to the named flag on the variable "X". Acts like + ``d.setVarFlag("X", flag, "value")`` if the named flag does not exist. + * - ``d.delVarFlag("X", flag)`` + - Deletes the named flag on the variable "X" from the datastore. + * - ``d.setVarFlags("X", flagsdict)`` + - Sets the flags specified in the ``flagsdict()`` + parameter. ``setVarFlags`` does not clear previous flags. Think of this + operation as ``addVarFlags``. 
+ * - ``d.getVarFlags("X")`` + - Returns a ``flagsdict`` of the flags for the variable "X". Returns "None" + if the variable "X" does not exist. + * - ``d.delVarFlags("X")`` + - Deletes all the flags for the variable "X". Does nothing if the variable + "X" does not exist. + * - ``d.expand(expression)`` + - Expands variable references in the specified string + expression. References to variables that do not exist are left as is. For + example, ``d.expand("foo ${X}")`` expands to the literal string "foo + ${X}" if the variable "X" does not exist. + +Other Functions +--------------- + +You can find many other functions that can be called from Python by +looking at the source code of the ``bb`` module, which is in +``bitbake/lib/bb``. For example, ``bitbake/lib/bb/utils.py`` includes +the commonly used functions ``bb.utils.contains()`` and +``bb.utils.mkdirhier()``, which come with docstrings. + +Task Checksums and Setscene +=========================== + +BitBake uses checksums (or signatures) along with the setscene to +determine if a task needs to be run. This section describes the process. +To help understand how BitBake does this, the section assumes an +OpenEmbedded metadata-based example. + +These checksums are stored in :term:`STAMP`. You can +examine the checksums using the following BitBake command: :: + + $ bitbake-dumpsigs + +This command returns the signature data in a readable +format that allows you to examine the inputs used when the OpenEmbedded +build system generates signatures. For example, using +``bitbake-dumpsigs`` allows you to examine the ``do_compile`` task's +“sigdata” for a C application (e.g. ``bash``). Running the command also +reveals that the “CC” variable is part of the inputs that are hashed. +Any changes to this variable would invalidate the stamp and cause the +``do_compile`` task to run. + +The following list describes related variables: + +- :term:`BB_HASHCHECK_FUNCTION`: + Specifies the name of the function to call during the "setscene" part + of the task's execution in order to validate the list of task hashes. + +- :term:`BB_SETSCENE_DEPVALID`: + Specifies a function BitBake calls that determines whether BitBake + requires a setscene dependency to be met. + +- :term:`BB_SETSCENE_VERIFY_FUNCTION2`: + Specifies a function to call that verifies the list of planned task + execution before the main task execution happens. + +- :term:`BB_STAMP_POLICY`: Defines the mode + for comparing timestamps of stamp files. + +- :term:`BB_STAMP_WHITELIST`: Lists stamp + files that are looked at when the stamp policy is "whitelist". + +- :term:`BB_TASKHASH`: Within an executing task, + this variable holds the hash of the task as returned by the currently + enabled signature generator. + +- :term:`STAMP`: The base path to create stamp files. + +- :term:`STAMPCLEAN`: Again, the base path to + create stamp files but can use wildcards for matching a range of + files for clean operations. + +Wildcard Support in Variables +============================= + +Support for wildcard use in variables varies depending on the context in +which it is used. For example, some variables and file names allow +limited use of wildcards through the "``%``" and "``*``" characters. +Other variables or names support Python's +`glob `_ syntax, +`fnmatch `_ +syntax, or +`Regular Expression (re) `_ +syntax. + +For variables that have wildcard suport, the documentation describes +which form of wildcard, its use, and its limitations. 
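+
+As a minimal illustration, the :term:`PREFERRED_VERSION` variable
+(described in the glossary) accepts the "``%``" character at the end of a
+version string, where it matches any remaining characters of the recipe
+version: ::
+
+ PREFERRED_VERSION_linux-yocto = "5.4%"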
diff --git a/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst new file mode 100644 index 000000000..fb1b353c8 --- /dev/null +++ b/poky/bitbake/doc/bitbake-user-manual/bitbake-user-manual-ref-variables.rst @@ -0,0 +1,1372 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +================== +Variables Glossary +================== + +| + +This chapter lists common variables used by BitBake and gives an +overview of their function and contents. + +.. note:: + + Following are some points regarding the variables listed in this + glossary: + + - The variables listed in this glossary are specific to BitBake. + Consequently, the descriptions are limited to that context. + + - Also, variables exist in other systems that use BitBake (e.g. The + Yocto Project and OpenEmbedded) that have names identical to those + found in this glossary. For such cases, the variables in those + systems extend the functionality of the variable as it is + described here in this glossary. + + - Finally, there are variables mentioned in this glossary that do + not appear in the BitBake glossary. These other variables are + variables used in systems that use BitBake. + +.. glossary:: + + ASSUME_PROVIDED + Lists recipe names (:term:`PN` values) BitBake does not + attempt to build. Instead, BitBake assumes these recipes have already + been built. + + In OpenEmbedded-Core, ``ASSUME_PROVIDED`` mostly specifies native + tools that should not be built. An example is ``git-native``, which + when specified allows for the Git binary from the host to be used + rather than building ``git-native``. + + B + The directory in which BitBake executes functions during a recipe's + build process. + + BB_ALLOWED_NETWORKS + Specifies a space-delimited list of hosts that the fetcher is allowed + to use to obtain the required source code. Following are + considerations surrounding this variable: + + - This host list is only used if + :term:`BB_NO_NETWORK` is either not set or + set to "0". + + - Limited support for the "``*``" wildcard character for matching + against the beginning of host names exists. For example, the + following setting matches ``git.gnu.org``, ``ftp.gnu.org``, and + ``foo.git.gnu.org``. :: + + BB_ALLOWED_NETWORKS = "\*.gnu.org" + + .. important:: + + The use of the "``*``" character only works at the beginning of + a host name and it must be isolated from the remainder of the + host name. You cannot use the wildcard character in any other + location of the name or combined with the front part of the + name. + + For example, ``*.foo.bar`` is supported, while ``*aa.foo.bar`` + is not. + + - Mirrors not in the host list are skipped and logged in debug. + + - Attempts to access networks not in the host list cause a failure. + + Using ``BB_ALLOWED_NETWORKS`` in conjunction with + :term:`PREMIRRORS` is very useful. Adding the + host you want to use to ``PREMIRRORS`` results in the source code + being fetched from an allowed location and avoids raising an error + when a host that is not allowed is in a + :term:`SRC_URI` statement. This is because the + fetcher does not attempt to use the host listed in ``SRC_URI`` after + a successful fetch from the ``PREMIRRORS`` occurs. + + BB_CONSOLELOG + Specifies the path to a log file into which BitBake's user interface + writes output during the build. + + BB_CURRENTTASK + Contains the name of the currently running task. The name does not + include the ``do_`` prefix. 
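+
+ For example, a Python task defined in a recipe could log its own name
+ from the datastore (a minimal sketch; the task name is made up): ::
+
+ python do_report_current () {
+     bb.note("Executing do_%s" % d.getVar("BB_CURRENTTASK"))
+ }
+ addtask report_current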
+
+ BB_DANGLINGAPPENDS_WARNONLY
+ Defines how BitBake handles situations where an append file
+ (``.bbappend``) has no corresponding recipe file (``.bb``). This
+ condition often occurs when layers get out of sync (e.g. ``oe-core``
+ bumps a recipe version and the old recipe no longer exists and the
+ other layer has not been updated to the new version of the recipe
+ yet).
+
+ The default fatal behavior is safest because it is the sane reaction
+ given something is out of sync. It is important to realize when your
+ changes are no longer being applied.
+
+ BB_DEFAULT_TASK
+ The default task to use when none is specified (e.g. with the ``-c``
+ command line option). The task name specified should not include the
+ ``do_`` prefix.
+
+ BB_DISKMON_DIRS
+ Monitors disk space and available inodes during the build and allows
+ you to control the build based on these parameters.
+
+ Disk space monitoring is disabled by default. When setting this
+ variable, use the following form: ::
+
+ BB_DISKMON_DIRS = "<action>,<dir>,<threshold> [...]"
+
+ where:
+
+ <action> is:
+ ABORT: Immediately abort the build when
+ a threshold is broken.
+ STOPTASKS: Stop the build after the currently
+ executing tasks have finished when
+ a threshold is broken.
+ WARN: Issue a warning but continue the
+ build when a threshold is broken.
+ Subsequent warnings are issued as
+ defined by the
+ BB_DISKMON_WARNINTERVAL variable,
+ which must be defined.
+
+ <dir> is:
+ Any directory you choose. You can specify one or
+ more directories to monitor by separating the
+ groupings with a space. If two directories are
+ on the same device, only the first directory
+ is monitored.
+
+ <threshold> is:
+ Either the minimum available disk space,
+ the minimum number of free inodes, or
+ both. You must specify at least one. To
+ omit one or the other, simply omit the value.
+ Specify the threshold using G, M, K for Gbytes,
+ Mbytes, and Kbytes, respectively. If you do
+ not specify G, M, or K, Kbytes is assumed by
+ default. Do not use GB, MB, or KB.
+
+ Here are some examples: ::
+
+ BB_DISKMON_DIRS = "ABORT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K"
+ BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G"
+ BB_DISKMON_DIRS = "ABORT,${TMPDIR},,100K"
+
+ The first example works only if you also set the
+ :term:`BB_DISKMON_WARNINTERVAL`
+ variable. This example causes the build system to immediately abort
+ when either the disk space in ``${TMPDIR}`` drops below 1 Gbyte or
+ the available free inodes drops below 100 Kbytes. Because two
+ directories are provided with the variable, the build system also
+ issues a warning when the disk space in the ``${SSTATE_DIR}``
+ directory drops below 1 Gbyte or the number of free inodes drops
+ below 100 Kbytes. Subsequent warnings are issued during intervals as
+ defined by the ``BB_DISKMON_WARNINTERVAL`` variable.
+
+ The second example stops the build after all currently executing
+ tasks complete when the minimum disk space in the ``${TMPDIR}``
+ directory drops below 1 Gbyte. No disk monitoring occurs for the free
+ inodes in this case.
+
+ The final example immediately aborts the build when the number of
+ free inodes in the ``${TMPDIR}`` directory drops below 100 Kbytes. No
+ disk space monitoring for the directory itself occurs in this case.
+
+ BB_DISKMON_WARNINTERVAL
+ Defines the disk space and free inode warning intervals.
+
+ If you are going to use the ``BB_DISKMON_WARNINTERVAL`` variable, you
+ must also use the :term:`BB_DISKMON_DIRS`
+ variable and define its action as "WARN".
During the build,
+ subsequent warnings are issued each time disk space or number of free
+ inodes further reduces by the respective interval.
+
+ If you do not provide a ``BB_DISKMON_WARNINTERVAL`` variable and you
+ do use ``BB_DISKMON_DIRS`` with the "WARN" action, the disk
+ monitoring interval defaults to the following: ::
+
+ BB_DISKMON_WARNINTERVAL = "50M,5K"
+
+ When specifying the variable in your configuration file, use the
+ following form: ::
+
+ BB_DISKMON_WARNINTERVAL = "<disk_space_interval>,<disk_inode_interval>"
+
+ where:
+
+ <disk_space_interval> is:
+ An interval of memory expressed in either
+ G, M, or K for Gbytes, Mbytes, or Kbytes,
+ respectively. You cannot use GB, MB, or KB.
+
+ <disk_inode_interval> is:
+ An interval of free inodes expressed in either
+ G, M, or K for Gbytes, Mbytes, or Kbytes,
+ respectively. You cannot use GB, MB, or KB.
+
+ Here is an example: ::
+
+ BB_DISKMON_DIRS = "WARN,${SSTATE_DIR},1G,100K"
+ BB_DISKMON_WARNINTERVAL = "50M,5K"
+
+ These variables cause BitBake to
+ issue subsequent warnings each time the available disk space further
+ reduces by 50 Mbytes or the number of free inodes further reduces by
+ 5 Kbytes in the ``${SSTATE_DIR}`` directory. Subsequent warnings
+ based on the interval occur each time a respective interval is
+ reached beyond the initial warning (i.e. 1 Gbytes and 100 Kbytes).
+
+ BB_ENV_WHITELIST
+ Specifies the internal whitelist of variables to allow through from
+ the external environment into BitBake's datastore. If the value of
+ this variable is not specified (which is the default), the following
+ list is used: :term:`BBPATH`, :term:`BB_PRESERVE_ENV`,
+ :term:`BB_ENV_WHITELIST`, and :term:`BB_ENV_EXTRAWHITE`.
+
+ .. note::
+
+ You must set this variable in the external environment in order
+ for it to work.
+
+ BB_ENV_EXTRAWHITE
+ Specifies an additional set of variables to allow through (whitelist)
+ from the external environment into BitBake's datastore. This list of
+ variables is on top of the internal list set in
+ :term:`BB_ENV_WHITELIST`.
+
+ .. note::
+
+ You must set this variable in the external environment in order
+ for it to work.
+
+ BB_FETCH_PREMIRRORONLY
+ When set to "1", causes BitBake's fetcher module to only search
+ :term:`PREMIRRORS` for files. BitBake will not
+ search the main :term:`SRC_URI` or
+ :term:`MIRRORS`.
+
+ BB_FILENAME
+ Contains the filename of the recipe that owns the currently running
+ task. For example, if the ``do_fetch`` task that resides in the
+ ``my-recipe.bb`` is executing, the ``BB_FILENAME`` variable contains
+ "/foo/path/my-recipe.bb".
+
+ BBFILES_DYNAMIC
+ Activates content depending on presence of identified layers. You
+ identify the layers by the collections that the layers define.
+
+ Use the ``BBFILES_DYNAMIC`` variable to avoid ``.bbappend`` files whose
+ corresponding ``.bb`` file is in a layer that attempts to modify other
+ layers through ``.bbappend`` but does not want to introduce a hard
+ dependency on those other layers.
+
+ Additionally you can prefix the rule with "!" to add ``.bbappend`` and
+ ``.bb`` files in case a layer is not present. Use this to avoid a hard
+ dependency on those other layers.
+
+ Use the following form for ``BBFILES_DYNAMIC``: ::
+
+ collection_name:filename_pattern
+
+ The following example identifies two collection names and two filename
+ patterns: ::
+
+ BBFILES_DYNAMIC += "\
+ clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
+ core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
+ "
+
+ When the collection name is prefixed with "!"
it will add the file pattern in case
+ the layer is absent: ::
+
+ BBFILES_DYNAMIC += "\
+ !clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
+ "
+
+ This next example shows an error message that occurs because invalid
+ entries are found, which cause parsing to abort: ::
+
+ ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
+ /work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
+ /work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
+
+ BB_GENERATE_MIRROR_TARBALLS
+ Causes tarballs of the Git repositories, including the Git metadata,
+ to be placed in the :term:`DL_DIR` directory. Anyone
+ wishing to create a source mirror would want to enable this variable.
+
+ For performance reasons, creating and placing tarballs of the Git
+ repositories is not the default action by BitBake. ::
+
+ BB_GENERATE_MIRROR_TARBALLS = "1"
+
+ BB_HASHCONFIG_WHITELIST
+ Lists variables that are excluded from the base configuration checksum,
+ which is used to determine if the cache can be reused.
+
+ One of the ways BitBake determines whether to re-parse the main
+ metadata is through checksums of the variables in the datastore of
+ the base configuration data. There are variables that you typically
+ want to exclude when checking whether or not to re-parse and thus
+ rebuild the cache. As an example, you would usually exclude ``TIME``
+ and ``DATE`` because these variables are always changing. If you did
+ not exclude them, BitBake would never reuse the cache.
+
+ BB_HASHBASE_WHITELIST
+ Lists variables that are excluded from checksum and dependency data.
+ Variables that are excluded can therefore change without affecting
+ the checksum mechanism. A common example would be the variable for
+ the path of the build. BitBake's output should not (and usually does
+ not) depend on the directory in which it was built.
+
+ BB_HASHCHECK_FUNCTION
+ Specifies the name of the function to call during the "setscene" part
+ of the task's execution in order to validate the list of task hashes.
+ The function returns the list of setscene tasks that should be
+ executed.
+
+ At this point in the execution of the code, the objective is to
+ quickly verify if a given setscene function is likely to work or not.
+ It's easier to check the list of setscene functions in one pass than
+ to call many individual tasks. The returned list need not be
+ completely accurate. A given setscene task can still later fail.
+ However, the more accurate the data returned, the more efficient the
+ build will be.
+
+ BB_INVALIDCONF
+ Used in combination with the ``ConfigParsed`` event to trigger
+ re-parsing the base metadata (i.e. all the recipes). The
+ ``ConfigParsed`` event can set the variable to trigger the re-parse.
+ You must be careful to avoid recursive loops with this functionality.
+
+ BB_LOGCONFIG
+ Specifies the name of a config file that contains the user logging
+ configuration. See
+ :ref:`bitbake-user-manual/bitbake-user-manual-execution:logging`
+ for additional information.
+
+ BB_LOGFMT
+ Specifies the name of the log files saved into
+ ``${``\ :term:`T`\ ``}``. By default, the ``BB_LOGFMT``
+ variable is undefined and the log file names get created using the
+ following form: ::
+
+ log.{task}.{pid}
+
+ If you want to force log files to take a specific name, you can set this
+ variable in a configuration file.
+
+ BB_NICE_LEVEL
+ Allows BitBake to run at a specific priority (i.e. nice level).
+ System permissions usually mean that BitBake can reduce its priority
+ but not raise it again.
See :term:`BB_TASK_NICE_LEVEL` for + additional information. + + BB_NO_NETWORK + Disables network access in the BitBake fetcher modules. With this + access disabled, any command that attempts to access the network + becomes an error. + + Disabling network access is useful for testing source mirrors, + running builds when not connected to the Internet, and when operating + in certain kinds of firewall environments. + + BB_NUMBER_THREADS + The maximum number of tasks BitBake should run in parallel at any one + time. If your host development system supports multiple cores, a good + rule of thumb is to set this variable to twice the number of cores. + + BB_NUMBER_PARSE_THREADS + Sets the number of threads BitBake uses when parsing. By default, the + number of threads is equal to the number of cores on the system. + + BB_ORIGENV + Contains a copy of the original external environment in which BitBake + was run. The copy is taken before any whitelisted variable values are + filtered into BitBake's datastore. + + .. note:: + + The contents of this variable is a datastore object that can be + queried using the normal datastore operations. + + BB_PRESERVE_ENV + Disables whitelisting and instead allows all variables through from + the external environment into BitBake's datastore. + + .. note:: + + You must set this variable in the external environment in order + for it to work. + + BB_RUNFMT + Specifies the name of the executable script files (i.e. run files) + saved into ``${``\ :term:`T`\ ``}``. By default, the + ``BB_RUNFMT`` variable is undefined and the run file names get + created using the following form: :: + + run.{task}.{pid} + + If you want to force run files to take a specific name, you can set this + variable in a configuration file. + + BB_RUNTASK + Contains the name of the currently executing task. The value includes + the "do\_" prefix. For example, if the currently executing task is + ``do_config``, the value is "do_config". + + BB_SCHEDULER + Selects the name of the scheduler to use for the scheduling of + BitBake tasks. Three options exist: + + - *basic* - The basic framework from which everything derives. Using + this option causes tasks to be ordered numerically as they are + parsed. + + - *speed* - Executes tasks first that have more tasks depending on + them. The "speed" option is the default. + + - *completion* - Causes the scheduler to try to complete a given + recipe once its build has started. + + BB_SCHEDULERS + Defines custom schedulers to import. Custom schedulers need to be + derived from the ``RunQueueScheduler`` class. + + For information how to select a scheduler, see the + :term:`BB_SCHEDULER` variable. + + BB_SETSCENE_DEPVALID + Specifies a function BitBake calls that determines whether BitBake + requires a setscene dependency to be met. + + When running a setscene task, BitBake needs to know which + dependencies of that setscene task also need to be run. Whether + dependencies also need to be run is highly dependent on the metadata. + The function specified by this variable returns a "True" or "False" + depending on whether the dependency needs to be met. + + BB_SETSCENE_VERIFY_FUNCTION2 + Specifies a function to call that verifies the list of planned task + execution before the main task execution happens. The function is + called once BitBake has a list of setscene tasks that have run and + either succeeded or failed. + + The function allows for a task list check to see if they make sense. 
+ Even if BitBake was planning to skip a task, the returned value of + the function can force BitBake to run the task, which is necessary + under certain metadata defined circumstances. + + BB_SIGNATURE_EXCLUDE_FLAGS + Lists variable flags (varflags) that can be safely excluded from + checksum and dependency data for keys in the datastore. When + generating checksum or dependency data for keys in the datastore, the + flags set against that key are normally included in the checksum. + + For more information on varflags, see the + ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" + section. + + BB_SIGNATURE_HANDLER + Defines the name of the signature handler BitBake uses. The signature + handler defines the way stamp files are created and handled, if and + how the signature is incorporated into the stamps, and how the + signature itself is generated. + + A new signature handler can be added by injecting a class derived + from the ``SignatureGenerator`` class into the global namespace. + + BB_SRCREV_POLICY + Defines the behavior of the fetcher when it interacts with source + control systems and dynamic source revisions. The + ``BB_SRCREV_POLICY`` variable is useful when working without a + network. + + The variable can be set using one of two policies: + + - *cache* - Retains the value the system obtained previously rather + than querying the source control system each time. + + - *clear* - Queries the source controls system every time. With this + policy, there is no cache. The "clear" policy is the default. + + BB_STAMP_POLICY + Defines the mode used for how timestamps of stamp files are compared. + You can set the variable to one of the following modes: + + - *perfile* - Timestamp comparisons are only made between timestamps + of a specific recipe. This is the default mode. + + - *full* - Timestamp comparisons are made for all dependencies. + + - *whitelist* - Identical to "full" mode except timestamp + comparisons are made for recipes listed in the + :term:`BB_STAMP_WHITELIST` variable. + + .. note:: + + Stamp policies are largely obsolete with the introduction of + setscene tasks. + + BB_STAMP_WHITELIST + Lists files whose stamp file timestamps are compared when the stamp + policy mode is set to "whitelist". For information on stamp policies, + see the :term:`BB_STAMP_POLICY` variable. + + BB_STRICT_CHECKSUM + Sets a more strict checksum mechanism for non-local URLs. Setting + this variable to a value causes BitBake to report an error if it + encounters a non-local URL that does not have at least one checksum + specified. + + BB_TASK_IONICE_LEVEL + Allows adjustment of a task's Input/Output priority. During + Autobuilder testing, random failures can occur for tasks due to I/O + starvation. These failures occur during various QEMU runtime + timeouts. You can use the ``BB_TASK_IONICE_LEVEL`` variable to adjust + the I/O priority of these tasks. + + .. note:: + + This variable works similarly to the :term:`BB_TASK_NICE_LEVEL` + variable except with a task's I/O priorities. + + Set the variable as follows: :: + + BB_TASK_IONICE_LEVEL = "class.prio" + + For *class*, the default value is "2", which is a best effort. You can use + "1" for realtime and "3" for idle. If you want to use realtime, you + must have superuser privileges. + + For *prio*, you can use any value from "0", which is the highest + priority, to "7", which is the lowest. The default value is "4". You + do not need any special privileges to use this range of priority + values. + + .. 
note:: + + In order for your I/O priority settings to take effect, you need the + Completely Fair Queuing (CFQ) Scheduler selected for the backing block + device. To select the scheduler, use the following command form where + device is the device (e.g. sda, sdb, and so forth): :: + + $ sudo sh -c “echo cfq > /sys/block/device/queu/scheduler + + BB_TASK_NICE_LEVEL + Allows specific tasks to change their priority (i.e. nice level). + + You can use this variable in combination with task overrides to raise + or lower priorities of specific tasks. For example, on the `Yocto + Project `__ autobuilder, QEMU emulation + in images is given a higher priority as compared to build tasks to + ensure that images do not suffer timeouts on loaded systems. + + BB_TASKHASH + Within an executing task, this variable holds the hash of the task as + returned by the currently enabled signature generator. + + BB_VERBOSE_LOGS + Controls how verbose BitBake is during builds. If set, shell scripts + echo commands and shell script output appears on standard out + (stdout). + + BB_WORKERCONTEXT + Specifies if the current context is executing a task. BitBake sets + this variable to "1" when a task is being executed. The value is not + set when the task is in server context during parsing or event + handling. + + BBCLASSEXTEND + Allows you to extend a recipe so that it builds variants of the + software. Some examples of these variants for recipes from the + OpenEmbedded-Core metadata are "natives" such as ``quilt-native``, + which is a copy of Quilt built to run on the build system; "crosses" + such as ``gcc-cross``, which is a compiler built to run on the build + machine but produces binaries that run on the target ``MACHINE``; + "nativesdk", which targets the SDK machine instead of ``MACHINE``; + and "mulitlibs" in the form "``multilib:``\ multilib_name". + + To build a different variant of the recipe with a minimal amount of + code, it usually is as simple as adding the variable to your recipe. + Here are two examples. The "native" variants are from the + OpenEmbedded-Core metadata: :: + + BBCLASSEXTEND =+ "native nativesdk" + BBCLASSEXTEND =+ "multilib:multilib_name" + + .. note:: + + Internally, the ``BBCLASSEXTEND`` mechanism generates recipe + variants by rewriting variable values and applying overrides such + as ``_class-native``. For example, to generate a native version of + a recipe, a :term:`DEPENDS` on "foo" is + rewritten to a ``DEPENDS`` on "foo-native". + + Even when using ``BBCLASSEXTEND``, the recipe is only parsed once. + Parsing once adds some limitations. For example, it is not + possible to include a different file depending on the variant, + since ``include`` statements are processed when the recipe is + parsed. + + BBDEBUG + Sets the BitBake debug output level to a specific value as + incremented by the ``-D`` command line option. + + .. note:: + + You must set this variable in the external environment in order + for it to work. + + BBFILE_COLLECTIONS + Lists the names of configured layers. These names are used to find + the other ``BBFILE_*`` variables. Typically, each layer appends its + name to this variable in its ``conf/layer.conf`` file. + + BBFILE_PATTERN + Variable that expands to match files from + :term:`BBFILES` in a particular layer. This + variable is used in the ``conf/layer.conf`` file and must be suffixed + with the name of the specific layer (e.g. + ``BBFILE_PATTERN_emenlow``). + + BBFILE_PRIORITY + Assigns the priority for recipe files in each layer. 
+ + This variable is useful in situations where the same recipe appears + in more than one layer. Setting this variable allows you to + prioritize a layer against other layers that contain the same recipe + - effectively letting you control the precedence for the multiple + layers. The precedence established through this variable stands + regardless of a recipe's version (:term:`PV` variable). + For example, a layer that has a recipe with a higher ``PV`` value but + for which the ``BBFILE_PRIORITY`` is set to have a lower precedence + still has a lower precedence. + + A larger value for the ``BBFILE_PRIORITY`` variable results in a + higher precedence. For example, the value 6 has a higher precedence + than the value 5. If not specified, the ``BBFILE_PRIORITY`` variable + is set based on layer dependencies (see the ``LAYERDEPENDS`` variable + for more information. The default priority, if unspecified for a + layer with no dependencies, is the lowest defined priority + 1 (or 1 + if no priorities are defined). + + .. tip:: + + You can use the command bitbake-layers show-layers to list all + configured layers along with their priorities. + + BBFILES + A space-separated list of recipe files BitBake uses to build + software. + + When specifying recipe files, you can pattern match using Python's + `glob `_ syntax. + For details on the syntax, see the documentation by following the + previous link. + + BBINCLUDED + Contains a space-separated list of all of all files that BitBake's + parser included during parsing of the current file. + + BBINCLUDELOGS + If set to a value, enables printing the task log when reporting a + failed task. + + BBINCLUDELOGS_LINES + If :term:`BBINCLUDELOGS` is set, specifies + the maximum number of lines from the task log file to print when + reporting a failed task. If you do not set ``BBINCLUDELOGS_LINES``, + the entire log is printed. + + BBLAYERS + Lists the layers to enable during the build. This variable is defined + in the ``bblayers.conf`` configuration file in the build directory. + Here is an example: :: + + BBLAYERS = " \ + /home/scottrif/poky/meta \ + /home/scottrif/poky/meta-yocto \ + /home/scottrif/poky/meta-yocto-bsp \ + /home/scottrif/poky/meta-mykernel \ + " + + This example enables four layers, one of which is a custom, user-defined + layer named ``meta-mykernel``. + + BBLAYERS_FETCH_DIR + Sets the base location where layers are stored. This setting is used + in conjunction with ``bitbake-layers layerindex-fetch`` and tells + ``bitbake-layers`` where to place the fetched layers. + + BBMASK + Prevents BitBake from processing recipes and recipe append files. + + You can use the ``BBMASK`` variable to "hide" these ``.bb`` and + ``.bbappend`` files. BitBake ignores any recipe or recipe append + files that match any of the expressions. It is as if BitBake does not + see them at all. Consequently, matching files are not parsed or + otherwise used by BitBake. + + The values you provide are passed to Python's regular expression + compiler. Consequently, the syntax follows Python's Regular + Expression (re) syntax. The expressions are compared against the full + paths to the files. For complete syntax information, see Python's + documentation at http://docs.python.org/3/library/re.html. 
+
+ The following example uses a complete regular expression to tell
+ BitBake to ignore all recipe and recipe append files in the
+ ``meta-ti/recipes-misc/`` directory: ::
+
+ BBMASK = "meta-ti/recipes-misc/"
+
+ If you want to mask out multiple directories or recipes, you can
+ specify multiple regular expression fragments. This next example
+ masks out multiple directories and individual recipes: ::
+
+ BBMASK += "/meta-ti/recipes-misc/ meta-ti/recipes-ti/packagegroup/"
+ BBMASK += "/meta-oe/recipes-support/"
+ BBMASK += "/meta-foo/.*/openldap"
+ BBMASK += "opencv.*\.bbappend"
+ BBMASK += "lzma"
+
+ .. note::
+
+ When specifying a directory name, use the trailing slash character
+ to ensure you match just that directory name.
+
+ BBMULTICONFIG
+ Enables BitBake to perform multiple configuration builds and lists
+ each separate configuration (multiconfig). You can use this variable
+ to cause BitBake to build multiple targets where each target has a
+ separate configuration. Define ``BBMULTICONFIG`` in your
+ ``conf/local.conf`` configuration file.
+
+ As an example, the following line specifies three multiconfigs, each
+ having a separate configuration file: ::
+
+ BBMULTICONFIG = "configA configB configC"
+
+ Each configuration file you use must reside in the
+ build directory within a directory named ``conf/multiconfig`` (e.g.
+ build_directory\ ``/conf/multiconfig/configA.conf``).
+
+ For information on how to use ``BBMULTICONFIG`` in an environment
+ that supports building targets with multiple configurations, see the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-intro:executing a multiple configuration build`"
+ section.
+
+ BBPATH
+ Used by BitBake to locate class (``.bbclass``) and configuration
+ (``.conf``) files. This variable is analogous to the ``PATH``
+ variable.
+
+ If you run BitBake from a directory outside of the build directory,
+ you must be sure to set ``BBPATH`` to point to the build directory.
+ Set the variable as you would any environment variable and then run
+ BitBake: ::
+
+ $ BBPATH="build_directory"
+ $ export BBPATH
+ $ bitbake target
+
+ BBSERVER
+ Points to the server that runs memory-resident BitBake. The variable
+ is only used when you employ memory-resident BitBake.
+
+ BBTARGETS
+ Allows you to use a configuration file to add to the list of
+ command-line target recipes you want to build.
+
+ BBVERSIONS
+ Allows a single recipe to build multiple versions of a project from a
+ single recipe file. You are also able to specify conditional metadata
+ using the :term:`OVERRIDES` mechanism for a
+ single version or for an optionally named range of versions.
+
+ For more information on ``BBVERSIONS``, see the
+ ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variants - class extension mechanism`"
+ section.
+
+ BITBAKE_UI
+ Used to specify the UI module to use when running BitBake. Using this
+ variable is equivalent to using the ``-u`` command-line option.
+
+ .. note::
+
+ You must set this variable in the external environment in order
+ for it to work.
+
+ BUILDNAME
+ A name assigned to the build. The name defaults to a datetime stamp
+ of when the build was started but can be defined by the metadata.
+
+ BZRDIR
+ The directory in which files checked out of a Bazaar system are
+ stored.
+
+ CACHE
+ Specifies the directory BitBake uses to store a cache of the metadata
+ so it does not need to be parsed every time BitBake is started.
+
+ CVSDIR
+ The directory in which files checked out under the CVS system are
+ stored.
+ + DEFAULT_PREFERENCE + Specifies a weak bias for recipe selection priority. + + The most common usage of this is variable is to set it to "-1" within + a recipe for a development version of a piece of software. Using the + variable in this way causes the stable version of the recipe to build + by default in the absence of ``PREFERRED_VERSION`` being used to + build the development version. + + .. note:: + + The bias provided by DEFAULT_PREFERENCE is weak and is overridden by + :term:`BBFILE_PRIORITY` if that variable is different between two + layers that contain different versions of the same recipe. + + DEPENDS + Lists a recipe's build-time dependencies (i.e. other recipe files). + + Consider this simple example for two recipes named "a" and "b" that + produce similarly named packages. In this example, the ``DEPENDS`` + statement appears in the "a" recipe: :: + + DEPENDS = "b" + + Here, the dependency is such that the ``do_configure`` task for recipe "a" + depends on the ``do_populate_sysroot`` task of recipe "b". This means + anything that recipe "b" puts into sysroot is available when recipe "a" is + configuring itself. + + For information on runtime dependencies, see the :term:`RDEPENDS` + variable. + + DESCRIPTION + A long description for the recipe. + + DL_DIR + The central download directory used by the build process to store + downloads. By default, ``DL_DIR`` gets files suitable for mirroring for + everything except Git repositories. If you want tarballs of Git + repositories, use the :term:`BB_GENERATE_MIRROR_TARBALLS` variable. + + EXCLUDE_FROM_WORLD + Directs BitBake to exclude a recipe from world builds (i.e. + ``bitbake world``). During world builds, BitBake locates, parses and + builds all recipes found in every layer exposed in the + ``bblayers.conf`` configuration file. + + To exclude a recipe from a world build using this variable, set the + variable to "1" in the recipe. + + .. note:: + + Recipes added to ``EXCLUDE_FROM_WORLD`` may still be built during a world + build in order to satisfy dependencies of other recipes. Adding a + recipe to ``EXCLUDE_FROM_WORLD`` only ensures that the recipe is not + explicitly added to the list of build targets in a world build. + + FAKEROOT + Contains the command to use when running a shell script in a fakeroot + environment. The ``FAKEROOT`` variable is obsolete and has been + replaced by the other ``FAKEROOT*`` variables. See these entries in + the glossary for more information. + + FAKEROOTBASEENV + Lists environment variables to set when executing the command defined + by :term:`FAKEROOTCMD` that starts the + bitbake-worker process in the fakeroot environment. + + FAKEROOTCMD + Contains the command that starts the bitbake-worker process in the + fakeroot environment. + + FAKEROOTDIRS + Lists directories to create before running a task in the fakeroot + environment. + + FAKEROOTENV + Lists environment variables to set when running a task in the + fakeroot environment. For additional information on environment + variables and the fakeroot environment, see the + :term:`FAKEROOTBASEENV` variable. + + FAKEROOTNOENV + Lists environment variables to set when running a task that is not in + the fakeroot environment. For additional information on environment + variables and the fakeroot environment, see the + :term:`FAKEROOTENV` variable. + + FETCHCMD + Defines the command the BitBake fetcher module executes when running + fetch operations. You need to use an override suffix when you use the + variable (e.g. 
``FETCHCMD_git`` or ``FETCHCMD_svn``). + + FILE + Points at the current file. BitBake sets this variable during the + parsing process to identify the file being parsed. BitBake also sets + this variable when a recipe is being executed to identify the recipe + file. + + FILESPATH + Specifies directories BitBake uses when searching for patches and + files. The "local" fetcher module uses these directories when + handling ``file://`` URLs. The variable behaves like a shell ``PATH`` + environment variable. The value is a colon-separated list of + directories that are searched left-to-right in order. + + GITDIR + The directory in which a local copy of a Git repository is stored + when it is cloned. + + HGDIR + The directory in which files checked out of a Mercurial system are + stored. + + HOMEPAGE + Website where more information about the software the recipe is + building can be found. + + INHERIT + Causes the named class or classes to be inherited globally. Anonymous + functions in the class or classes are not executed for the base + configuration and in each individual recipe. The OpenEmbedded build + system ignores changes to ``INHERIT`` in individual recipes. + + For more information on ``INHERIT``, see the + ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:\`\`inherit\`\` configuration directive`" + section. + + LAYERDEPENDS + Lists the layers, separated by spaces, upon which this recipe + depends. Optionally, you can specify a specific layer version for a + dependency by adding it to the end of the layer name with a colon, + (e.g. "anotherlayer:3" to be compared against + :term:`LAYERVERSION`\ ``_anotherlayer`` in + this case). BitBake produces an error if any dependency is missing or + the version numbers do not match exactly (if specified). + + You use this variable in the ``conf/layer.conf`` file. You must also + use the specific layer name as a suffix to the variable (e.g. + ``LAYERDEPENDS_mylayer``). + + LAYERDIR + When used inside the ``layer.conf`` configuration file, this variable + provides the path of the current layer. This variable is not + available outside of ``layer.conf`` and references are expanded + immediately when parsing of the file completes. + + LAYERDIR_RE + When used inside the ``layer.conf`` configuration file, this variable + provides the path of the current layer, escaped for use in a regular + expression (:term:`BBFILE_PATTERN`). This + variable is not available outside of ``layer.conf`` and references + are expanded immediately when parsing of the file completes. + + LAYERVERSION + Optionally specifies the version of a layer as a single number. You + can use this variable within + :term:`LAYERDEPENDS` for another layer in + order to depend on a specific version of the layer. + + You use this variable in the ``conf/layer.conf`` file. You must also + use the specific layer name as a suffix to the variable (e.g. + ``LAYERDEPENDS_mylayer``). + + LICENSE + The list of source licenses for the recipe. + + MIRRORS + Specifies additional paths from which BitBake gets source code. When + the build system searches for source code, it first tries the local + download directory. If that location fails, the build system tries + locations defined by :term:`PREMIRRORS`, the + upstream source, and then locations specified by ``MIRRORS`` in that + order. + + MULTI_PROVIDER_WHITELIST + Allows you to suppress BitBake warnings caused when building two + separate recipes that provide the same output. 
+
+      BitBake normally issues a warning when building two different recipes
+      where each provides the same output. This scenario is usually
+      something the user does not want. However, cases do exist where it
+      makes sense, particularly in the ``virtual/*`` namespace. You can use
+      this variable to suppress BitBake's warnings.
+
+      To use the variable, list provider names (e.g. recipe names,
+      ``virtual/kernel``, and so forth).
+
+   OVERRIDES
+      BitBake uses ``OVERRIDES`` to control what variables are overridden
+      after BitBake parses recipes and configuration files.
+
+      Following is a simple example that uses an overrides list based on
+      machine architectures: ::
+
+         OVERRIDES = "arm:x86:mips:powerpc"
+
+      You can find information on how to use ``OVERRIDES`` in the
+      ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax
+      (overrides)`" section.
+
+   P4DIR
+      The directory in which a local copy of a Perforce depot is stored
+      when it is fetched.
+
+   PACKAGES
+      The list of packages the recipe creates.
+
+   PACKAGES_DYNAMIC
+      A promise that your recipe satisfies runtime dependencies for
+      optional modules that are found in other recipes.
+      ``PACKAGES_DYNAMIC`` does not actually satisfy the dependencies, it
+      only states that they should be satisfied. For example, if a hard,
+      runtime dependency (:term:`RDEPENDS`) of another
+      package is satisfied during the build through the
+      ``PACKAGES_DYNAMIC`` variable, but a package with the module name is
+      never actually produced, then the other package will be broken.
+
+   PE
+      The epoch of the recipe. By default, this variable is unset. The
+      variable is used to make upgrades possible when the versioning scheme
+      changes in some backwards incompatible way.
+
+   PERSISTENT_DIR
+      Specifies the directory BitBake uses to store data that should be
+      preserved between builds. In particular, the data stored is the data
+      that uses BitBake's persistent data API and the data used by the PR
+      Server and PR Service.
+
+   PF
+      Specifies the recipe or package name and includes all version and
+      revision numbers (i.e. ``eglibc-2.13-r20+svnr15508/`` and
+      ``bash-4.2-r1/``).
+
+   PN
+      The recipe name.
+
+   PR
+      The revision of the recipe.
+
+   PREFERRED_PROVIDER
+      Determines which recipe should be given preference when multiple
+      recipes provide the same item. You should always suffix the variable
+      with the name of the provided item, and you should set it to the
+      :term:`PN` of the recipe to which you want to give
+      precedence. Some examples: ::
+
+         PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
+         PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86"
+         PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
+
+   PREFERRED_PROVIDERS
+      Determines which recipe should be given preference for cases where
+      multiple recipes provide the same item. Functionally,
+      ``PREFERRED_PROVIDERS`` is identical to
+      :term:`PREFERRED_PROVIDER`. However, the ``PREFERRED_PROVIDERS`` variable
+      lets you define preferences for multiple situations using the following
+      form: ::
+
+         PREFERRED_PROVIDERS = "xxx:yyy aaa:bbb ..."
+
+      This form is a convenient replacement for the following: ::
+
+         PREFERRED_PROVIDER_xxx = "yyy"
+         PREFERRED_PROVIDER_aaa = "bbb"
+
+   PREFERRED_VERSION
+      If there are multiple versions of recipes available, this variable
+      determines which recipe should be given preference. You must always
+      suffix the variable with the :term:`PN` you want to
+      select, and you should set :term:`PV` accordingly for
+      precedence.
+ + The ``PREFERRED_VERSION`` variable supports limited wildcard use + through the "``%``" character. You can use the character to match any + number of characters, which can be useful when specifying versions + that contain long revision numbers that potentially change. Here are + two examples: :: + + PREFERRED_VERSION_python = "2.7.3" + PREFERRED_VERSION_linux-yocto = "4.12%" + + .. important:: + + The use of the " % " character is limited in that it only works at the + end of the string. You cannot use the wildcard character in any other + location of the string. + + PREMIRRORS + Specifies additional paths from which BitBake gets source code. When + the build system searches for source code, it first tries the local + download directory. If that location fails, the build system tries + locations defined by ``PREMIRRORS``, the upstream source, and then + locations specified by :term:`MIRRORS` in that order. + + Typically, you would add a specific server for the build system to + attempt before any others by adding something like the following to + your configuration: :: + + PREMIRRORS_prepend = "\ + git://.*/.* http://www.yoctoproject.org/sources/ \n \ + ftp://.*/.* http://www.yoctoproject.org/sources/ \n \ + http://.*/.* http://www.yoctoproject.org/sources/ \n \ + https://.*/.* http://www.yoctoproject.org/sources/ \n" + + These changes cause the build system to intercept Git, FTP, HTTP, and + HTTPS requests and direct them to the ``http://`` sources mirror. You can + use ``file://`` URLs to point to local directories or network shares as + well. + + PROVIDES + A list of aliases by which a particular recipe can be known. By + default, a recipe's own ``PN`` is implicitly already in its + ``PROVIDES`` list. If a recipe uses ``PROVIDES``, the additional + aliases are synonyms for the recipe and can be useful satisfying + dependencies of other recipes during the build as specified by + ``DEPENDS``. + + Consider the following example ``PROVIDES`` statement from a recipe + file ``libav_0.8.11.bb``: :: + + PROVIDES += "libpostproc" + + The ``PROVIDES`` statement results in the "libav" recipe also being known + as "libpostproc". + + In addition to providing recipes under alternate names, the + ``PROVIDES`` mechanism is also used to implement virtual targets. A + virtual target is a name that corresponds to some particular + functionality (e.g. a Linux kernel). Recipes that provide the + functionality in question list the virtual target in ``PROVIDES``. + Recipes that depend on the functionality in question can include the + virtual target in :term:`DEPENDS` to leave the + choice of provider open. + + Conventionally, virtual targets have names on the form + "virtual/function" (e.g. "virtual/kernel"). The slash is simply part + of the name and has no syntactical significance. + + PRSERV_HOST + The network based :term:`PR` service host and port. + + Following is an example of how the ``PRSERV_HOST`` variable is set: :: + + PRSERV_HOST = "localhost:0" + + You must set the variable if you want to automatically start a local PR + service. You can set ``PRSERV_HOST`` to other values to use a remote PR + service. + + PV + The version of the recipe. + + RDEPENDS + Lists a package's runtime dependencies (i.e. other packages) that + must be installed in order for the built package to run correctly. If + a package in this list cannot be found during the build, you will get + a build error. 
+ + Because the ``RDEPENDS`` variable applies to packages being built, + you should always use the variable in a form with an attached package + name. For example, suppose you are building a development package + that depends on the ``perl`` package. In this case, you would use the + following ``RDEPENDS`` statement: :: + + RDEPENDS_${PN}-dev += "perl" + + In the example, the development package depends on the ``perl`` package. + Thus, the ``RDEPENDS`` variable has the ``${PN}-dev`` package name as part + of the variable. + + BitBake supports specifying versioned dependencies. Although the + syntax varies depending on the packaging format, BitBake hides these + differences from you. Here is the general syntax to specify versions + with the ``RDEPENDS`` variable: :: + + RDEPENDS_${PN} = "package (operator version)" + + For ``operator``, you can specify the following: :: + + = + < + > + <= + >= + + For example, the following sets up a dependency on version 1.2 or + greater of the package ``foo``: :: + + RDEPENDS_${PN} = "foo (>= 1.2)" + + For information on build-time dependencies, see the :term:`DEPENDS` + variable. + + REPODIR + The directory in which a local copy of a ``google-repo`` directory is + stored when it is synced. + + RPROVIDES + A list of package name aliases that a package also provides. These + aliases are useful for satisfying runtime dependencies of other + packages both during the build and on the target (as specified by + ``RDEPENDS``). + + As with all package-controlling variables, you must always use the + variable in conjunction with a package name override. Here is an + example: :: + + RPROVIDES_${PN} = "widget-abi-2" + + RRECOMMENDS + A list of packages that extends the usability of a package being + built. The package being built does not depend on this list of + packages in order to successfully build, but needs them for the + extended usability. To specify runtime dependencies for packages, see + the ``RDEPENDS`` variable. + + BitBake supports specifying versioned recommends. Although the syntax + varies depending on the packaging format, BitBake hides these + differences from you. Here is the general syntax to specify versions + with the ``RRECOMMENDS`` variable: :: + + RRECOMMENDS_${PN} = "package (operator version)" + + For ``operator``, you can specify the following: :: + + = + < + > + <= + >= + + For example, the following sets up a recommend on version + 1.2 or greater of the package ``foo``: :: + + RRECOMMENDS_${PN} = "foo (>= 1.2)" + + SECTION + The section in which packages should be categorized. + + SRC_URI + The list of source files - local or remote. This variable tells + BitBake which bits to pull for the build and how to pull them. For + example, if the recipe or append file needs to fetch a single tarball + from the Internet, the recipe or append file uses a ``SRC_URI`` entry + that specifies that tarball. On the other hand, if the recipe or + append file needs to fetch a tarball and include a custom file, the + recipe or append file needs an ``SRC_URI`` variable that specifies + all those sources. + + The following list explains the available URI protocols: + + - ``file://`` : Fetches files, which are usually files shipped + with the metadata, from the local machine. The path is relative to + the :term:`FILESPATH` variable. + + - ``bzr://`` : Fetches files from a Bazaar revision control + repository. + + - ``git://`` : Fetches files from a Git revision control + repository. 
+
+      - ``osc://`` : Fetches files from an OSC (OpenSUSE Build service)
+        revision control repository.
+
+      - ``repo://`` : Fetches files from a repo (Git) repository.
+
+      - ``http://`` : Fetches files from the Internet using HTTP.
+
+      - ``https://`` : Fetches files from the Internet using HTTPS.
+
+      - ``ftp://`` : Fetches files from the Internet using FTP.
+
+      - ``cvs://`` : Fetches files from a CVS revision control
+        repository.
+
+      - ``hg://`` : Fetches files from a Mercurial (``hg``) revision
+        control repository.
+
+      - ``p4://`` : Fetches files from a Perforce (``p4``) revision
+        control repository.
+
+      - ``ssh://`` : Fetches files from a secure shell.
+
+      - ``svn://`` : Fetches files from a Subversion (``svn``) revision
+        control repository.
+
+      Here are some additional options worth mentioning:
+
+      - ``unpack`` : Controls whether or not to unpack the file if it is
+        an archive. The default action is to unpack the file.
+
+      - ``subdir`` : Places the file (or extracts its contents) into the
+        specified subdirectory. This option is useful for unusual tarballs
+        or other archives that do not have their files already in a
+        subdirectory within the archive.
+
+      - ``name`` : Specifies a name to be used for association with
+        ``SRC_URI`` checksums when you have more than one file specified
+        in ``SRC_URI``.
+
+      - ``downloadfilename`` : Specifies the filename used when storing
+        the downloaded file.
+
+   SRCDATE
+      The date of the source code used to build the package. This variable
+      applies only if the source was fetched from a Source Code Manager
+      (SCM).
+
+   SRCREV
+      The revision of the source code used to build the package. This
+      variable applies only when using Subversion, Git, Mercurial and
+      Bazaar. If you want to build a fixed revision and you want to avoid
+      performing a query on the remote repository every time BitBake parses
+      your recipe, you should specify a ``SRCREV`` that is a full revision
+      identifier and not just a tag.
+
+   SRCREV_FORMAT
+      Helps construct valid :term:`SRCREV` values when
+      multiple source controlled URLs are used in
+      :term:`SRC_URI`.
+
+      The system needs help constructing these values under these
+      circumstances. Each component in the ``SRC_URI`` is assigned a name
+      and these are referenced in the ``SRCREV_FORMAT`` variable. Consider
+      an example with URLs named "machine" and "meta". In this case,
+      ``SRCREV_FORMAT`` could look like "machine_meta" and those names
+      would have the SCM versions substituted into each position. Only one
+      ``AUTOINC`` placeholder is added, if needed, and it is placed at the
+      start of the returned string.
+
+   STAMP
+      Specifies the base path used to create recipe stamp files. The path
+      to an actual stamp file is constructed by evaluating this string and
+      then appending additional information.
+
+   STAMPCLEAN
+      Specifies the base path used to create recipe stamp files. Unlike the
+      :term:`STAMP` variable, ``STAMPCLEAN`` can contain
+      wildcards to match the range of files a clean operation should
+      remove. BitBake uses a clean operation to remove any other stamps it
+      should be removing when creating a new stamp.
+
+   SUMMARY
+      A short summary for the recipe, which is 72 characters or less.
+
+   SVNDIR
+      The directory in which files checked out of a Subversion system are
+      stored.
+
+   T
+      Points to a directory where BitBake places temporary files, which
+      consist mostly of task logs and scripts, when building a particular
+      recipe.
+
+   TOPDIR
+      Points to the build directory.
BitBake automatically sets this + variable. diff --git a/poky/bitbake/doc/conf.py b/poky/bitbake/doc/conf.py new file mode 100644 index 000000000..197f2757a --- /dev/null +++ b/poky/bitbake/doc/conf.py @@ -0,0 +1,94 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +import datetime + +current_version = "dev" + +# String used in sidebar +version = 'Version: ' + current_version +if current_version == 'dev': + version = 'Version: Current Development' +# Version seen in documentation_options.js and hence in js switchers code +release = current_version + +# -- Project information ----------------------------------------------------- + +project = 'Bitbake' +copyright = '2004-%s, Richard Purdie, Chris Larson, and Phil Blundell' \ + % datetime.datetime.now().year +author = 'Richard Purdie, Chris Larson, and Phil Blundell' + +# external links and substitutions +extlinks = { + 'yocto_docs': ('https://docs.yoctoproject.org%s', None), + 'oe_lists': ('https://lists.openembedded.org%s', None), +} + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.extlinks', +] +autosectionlabel_prefix_document = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# master document name. The default changed from contents to index. so better +# set it ourselves. +master_doc = 'index' + +# create substitution for project configuration variables +rst_prolog = """ +.. |project_name| replace:: %s +.. |copyright| replace:: %s +.. |author| replace:: %s +""" % (project, copyright, author) + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+html_static_path = ['sphinx-static'] + +# Add customm CSS and JS files +html_css_files = ['theme_overrides.css'] +html_js_files = ['switchers.js'] + +# Hide 'Created using Sphinx' text +html_show_sphinx = False + +# Add 'Last updated' on each page +html_last_updated_fmt = '%b %d, %Y' + +# Remove the trailing 'dot' in section numbers +html_secnumber_suffix = " " diff --git a/poky/bitbake/doc/genindex.rst b/poky/bitbake/doc/genindex.rst new file mode 100644 index 000000000..a4af06f65 --- /dev/null +++ b/poky/bitbake/doc/genindex.rst @@ -0,0 +1,3 @@ +===== +Index +===== diff --git a/poky/bitbake/doc/index.rst b/poky/bitbake/doc/index.rst new file mode 100644 index 000000000..3ff8b1580 --- /dev/null +++ b/poky/bitbake/doc/index.rst @@ -0,0 +1,38 @@ +.. SPDX-License-Identifier: CC-BY-2.5 + +=================== +BitBake User Manual +=================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + bitbake-user-manual/bitbake-user-manual-intro + bitbake-user-manual/bitbake-user-manual-execution + bitbake-user-manual/bitbake-user-manual-metadata + bitbake-user-manual/bitbake-user-manual-fetching + bitbake-user-manual/bitbake-user-manual-ref-variables + bitbake-user-manual/bitbake-user-manual-hello + +.. toctree:: + :maxdepth: 1 + :hidden: + + genindex + releases + +---- + +.. include:: + +| BitBake Community +| Copyright |copy| |copyright| +| + +This work is licensed under the Creative Commons Attribution License. To view a +copy of this license, visit http://creativecommons.org/licenses/by/2.5/ or send +a letter to Creative Commons, 444 Castro Street, Suite 900, Mountain View, +California 94041, USA. diff --git a/poky/bitbake/doc/releases.rst b/poky/bitbake/doc/releases.rst new file mode 100644 index 000000000..d68d71599 --- /dev/null +++ b/poky/bitbake/doc/releases.rst @@ -0,0 +1,130 @@ +.. 
SPDX-License-Identifier: CC-BY-2.5 + +========================= + Current Release Manuals +========================= + +**************************** +3.1 'dunfell' Release Series +**************************** + +- :yocto_docs:`3.1 BitBake User Manual ` +- :yocto_docs:`3.1.1 BitBake User Manual ` +- :yocto_docs:`3.1.2 BitBake User Manual ` + +========================== + Previous Release Manuals +========================== + +************************* +3.0 'zeus' Release Series +************************* + +- :yocto_docs:`3.0 BitBake User Manual ` +- :yocto_docs:`3.0.1 BitBake User Manual ` +- :yocto_docs:`3.0.2 BitBake User Manual ` +- :yocto_docs:`3.0.3 BitBake User Manual ` + +**************************** +2.7 'warrior' Release Series +**************************** + +- :yocto_docs:`2.7 BitBake User Manual ` +- :yocto_docs:`2.7.1 BitBake User Manual ` +- :yocto_docs:`2.7.2 BitBake User Manual ` +- :yocto_docs:`2.7.3 BitBake User Manual ` +- :yocto_docs:`2.7.4 BitBake User Manual ` + +************************* +2.6 'thud' Release Series +************************* + +- :yocto_docs:`2.6 BitBake User Manual ` +- :yocto_docs:`2.6.1 BitBake User Manual ` +- :yocto_docs:`2.6.2 BitBake User Manual ` +- :yocto_docs:`2.6.3 BitBake User Manual ` +- :yocto_docs:`2.6.4 BitBake User Manual ` + +************************* +2.5 'sumo' Release Series +************************* + +- :yocto_docs:`2.5 BitBake User Manual ` +- :yocto_docs:`2.5.1 BitBake User Manual ` +- :yocto_docs:`2.5.2 BitBake User Manual ` +- :yocto_docs:`2.5.3 BitBake User Manual ` + +************************** +2.4 'rocko' Release Series +************************** + +- :yocto_docs:`2.4 BitBake User Manual ` +- :yocto_docs:`2.4.1 BitBake User Manual ` +- :yocto_docs:`2.4.2 BitBake User Manual ` +- :yocto_docs:`2.4.3 BitBake User Manual ` +- :yocto_docs:`2.4.4 BitBake User Manual ` + +************************* +2.3 'pyro' Release Series +************************* + +- :yocto_docs:`2.3 BitBake User Manual ` +- :yocto_docs:`2.3.1 BitBake User Manual ` +- :yocto_docs:`2.3.2 BitBake User Manual ` +- :yocto_docs:`2.3.3 BitBake User Manual ` +- :yocto_docs:`2.3.4 BitBake User Manual ` + +************************** +2.2 'morty' Release Series +************************** + +- :yocto_docs:`2.2 BitBake User Manual ` +- :yocto_docs:`2.2.1 BitBake User Manual ` +- :yocto_docs:`2.2.2 BitBake User Manual ` +- :yocto_docs:`2.2.3 BitBake User Manual ` + +**************************** +2.1 'krogoth' Release Series +**************************** + +- :yocto_docs:`2.1 BitBake User Manual ` +- :yocto_docs:`2.1.1 BitBake User Manual ` +- :yocto_docs:`2.1.2 BitBake User Manual ` +- :yocto_docs:`2.1.3 BitBake User Manual ` + +*************************** +2.0 'jethro' Release Series +*************************** + +- :yocto_docs:`1.9 BitBake User Manual ` +- :yocto_docs:`2.0 BitBake User Manual ` +- :yocto_docs:`2.0.1 BitBake User Manual ` +- :yocto_docs:`2.0.2 BitBake User Manual ` +- :yocto_docs:`2.0.3 BitBake User Manual ` + +************************* +1.8 'fido' Release Series +************************* + +- :yocto_docs:`1.8 BitBake User Manual ` +- :yocto_docs:`1.8.1 BitBake User Manual ` +- :yocto_docs:`1.8.2 BitBake User Manual ` + +************************** +1.7 'dizzy' Release Series +************************** + +- :yocto_docs:`1.7 BitBake User Manual ` +- :yocto_docs:`1.7.1 BitBake User Manual ` +- :yocto_docs:`1.7.2 BitBake User Manual ` +- :yocto_docs:`1.7.3 BitBake User Manual ` + +************************** +1.6 'daisy' Release Series 
+************************** + +- :yocto_docs:`1.6 BitBake User Manual ` +- :yocto_docs:`1.6.1 BitBake User Manual ` +- :yocto_docs:`1.6.2 BitBake User Manual ` +- :yocto_docs:`1.6.3 BitBake User Manual ` + diff --git a/poky/bitbake/doc/sphinx-static/switchers.js b/poky/bitbake/doc/sphinx-static/switchers.js new file mode 100644 index 000000000..32113cfa9 --- /dev/null +++ b/poky/bitbake/doc/sphinx-static/switchers.js @@ -0,0 +1,233 @@ +(function() { + 'use strict'; + + var all_versions = { + 'dev': 'dev (3.2)', + '3.1.2': '3.1.2', + '3.0.3': '3.0.3', + '2.7.4': '2.7.4', + }; + + var all_doctypes = { + 'single': 'Individual Webpages', + 'mega': "All-in-one 'Mega' Manual", + }; + + // Simple version comparision + // Return 1 if a > b + // Return -1 if a < b + // Return 0 if a == b + function ver_compare(a, b) { + if (a == "dev") { + return 1; + } + + if (a === b) { + return 0; + } + + var a_components = a.split("."); + var b_components = b.split("."); + + var len = Math.min(a_components.length, b_components.length); + + // loop while the components are equal + for (var i = 0; i < len; i++) { + // A bigger than B + if (parseInt(a_components[i]) > parseInt(b_components[i])) { + return 1; + } + + // B bigger than A + if (parseInt(a_components[i]) < parseInt(b_components[i])) { + return -1; + } + } + + // If one's a prefix of the other, the longer one is greater. + if (a_components.length > b_components.length) { + return 1; + } + + if (a_components.length < b_components.length) { + return -1; + } + + // Otherwise they are the same. + return 0; + } + + function build_version_select(current_series, current_version) { + var buf = [''); + return buf.join(''); + } + + function build_doctype_select(current_doctype) { + var buf = [''); + return buf.join(''); + } + + function navigate_to_first_existing(urls) { + // Navigate to the first existing URL in urls. 
+ var url = urls.shift(); + + // Web browsers won't redirect file:// urls to file urls using ajax but + // its useful for local testing + if (url.startsWith("file://")) { + window.location.href = url; + return; + } + + if (urls.length == 0) { + window.location.href = url; + return; + } + $.ajax({ + url: url, + success: function() { + window.location.href = url; + }, + error: function() { + navigate_to_first_existing(urls); + } + }); + } + + function get_docroot_url() { + var url = window.location.href; + var root = DOCUMENTATION_OPTIONS.URL_ROOT; + + var urlarray = url.split('/'); + // Trim off anything after '/' + urlarray.pop(); + var depth = (root.match(/\.\.\//g) || []).length; + for (var i = 0; i < depth; i++) { + urlarray.pop(); + } + + return urlarray.join('/') + '/'; + } + + function on_version_switch() { + var selected_version = $(this).children('option:selected').attr('value'); + var url = window.location.href; + var current_version = DOCUMENTATION_OPTIONS.VERSION; + var docroot = get_docroot_url() + + var new_versionpath = selected_version + '/'; + if (selected_version == "dev") + new_versionpath = ''; + + // dev versions have no version prefix + if (current_version == "dev") { + var new_url = docroot + new_versionpath + url.replace(docroot, ""); + var fallback_url = docroot + new_versionpath; + } else { + var new_url = url.replace('/' + current_version + '/', '/' + new_versionpath); + var fallback_url = new_url.replace(url.replace(docroot, ""), ""); + } + + console.log(get_docroot_url()) + console.log(url + " to url " + new_url); + console.log(url + " to fallback " + fallback_url); + + if (new_url != url) { + navigate_to_first_existing([ + new_url, + fallback_url, + 'https://www.yoctoproject.org/docs/', + ]); + } + } + + function on_doctype_switch() { + var selected_doctype = $(this).children('option:selected').attr('value'); + var url = window.location.href; + if (selected_doctype == 'mega') { + var docroot = get_docroot_url() + var current_version = DOCUMENTATION_OPTIONS.VERSION; + // Assume manuals before 3.2 are using old docbook mega-manual + if (ver_compare(current_version, "3.2") < 0) { + var new_url = docroot + "mega-manual/mega-manual.html"; + } else { + var new_url = docroot + "singleindex.html"; + } + } else { + var new_url = url.replace("singleindex.html", "index.html") + } + + if (new_url != url) { + navigate_to_first_existing([ + new_url, + 'https://www.yoctoproject.org/docs/', + ]); + } + } + + // Returns the current doctype based upon the url + function doctype_segment_from_url(url) { + if (url.includes("singleindex") || url.includes("mega-manual")) + return "mega"; + return "single"; + } + + $(document).ready(function() { + var release = DOCUMENTATION_OPTIONS.VERSION; + var current_doctype = doctype_segment_from_url(window.location.href); + var current_series = release.substr(0, 3); + var version_select = build_version_select(current_series, release); + + $('.version_switcher_placeholder').html(version_select); + $('.version_switcher_placeholder select').bind('change', on_version_switch); + + var doctype_select = build_doctype_select(current_doctype); + + $('.doctype_switcher_placeholder').html(doctype_select); + $('.doctype_switcher_placeholder select').bind('change', on_doctype_switch); + + if (ver_compare(release, "3.1") < 0) { + $('#outdated-warning').html('Version ' + release + ' of the project is now considered obsolete, please select and use a more recent version'); + $('#outdated-warning').css('padding', '.5em'); + } else if (release != "dev") { + 
$.each(all_versions, function(version, title) { + var series = version.substr(0, 3); + if (series == current_series && version != release) { + $('#outdated-warning').html('This document is for outdated version ' + release + ', you should select the latest release version in this series, ' + version + '.'); + $('#outdated-warning').css('padding', '.5em'); + } + }); + } + }); +})(); diff --git a/poky/bitbake/doc/sphinx-static/theme_overrides.css b/poky/bitbake/doc/sphinx-static/theme_overrides.css new file mode 100644 index 000000000..4fd76a152 --- /dev/null +++ b/poky/bitbake/doc/sphinx-static/theme_overrides.css @@ -0,0 +1,164 @@ +/* + SPDX-License-Identifier: CC-BY-2.0-UK +*/ + +body { + font-family: Verdana, Sans, sans-serif; + + min-width: 640px; + margin: 0em auto; + color: #333; +} + +h1,h2,h3,h4,h5,h6,h7 { + font-family: Arial, Sans; + color: #00557D; + clear: both; +} + +h1 { + font-size: 2em; + text-align: left; + padding: 0em 0em 0em 0em; + margin: 2em 0em 0em 0em; +} + +h2.subtitle { + margin: 0.10em 0em 3.0em 0em; + padding: 0em 0em 0em 0em; + font-size: 1.8em; + padding-left: 20%; + font-weight: normal; + font-style: italic; +} + +h2 { + margin: 2em 0em 0.66em 0em; + padding: 0.5em 0em 0em 0em; + font-size: 1.5em; + font-weight: bold; +} + +h3.subtitle { + margin: 0em 0em 1em 0em; + padding: 0em 0em 0em 0em; + font-size: 142.14%; + text-align: right; +} + +h3 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 140%; + font-weight: bold; +} + +h4 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 120%; + font-weight: bold; +} + +h5 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 110%; + font-weight: bold; +} + +h6 { + margin: 1em 0em 0em 0em; + padding: 1em 0em 0em 0em; + font-size: 110%; + font-weight: bold; +} + +em { + font-weight: bold; +} + +.pre { + font-size: medium; + font-family: Courier, monospace; +} + +.wy-nav-content a { + text-decoration: underline; + color: #444; + background: transparent; +} + +.wy-nav-content a:hover { + text-decoration: underline; + background-color: #dedede; +} + +.wy-nav-content a:visited { + color: #444; +} + +[alt='Permalink'] { color: #eee; } +[alt='Permalink']:hover { color: black; } + +@media screen { + /* content column + * + * RTD theme's default is 800px as max width for the content, but we have + * tables with tons of columns, which need the full width of the view-port. 
+ */ + + .wy-nav-content{max-width: none; } + + /* inline literal: drop the borderbox, padding and red color */ + code, .rst-content tt, .rst-content code { + color: inherit; + border: none; + padding: unset; + background: inherit; + font-size: 85%; + } + + .rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal { + color: inherit; + } + + /* Admonition should be gray, not blue or green */ + .rst-content .note .admonition-title, + .rst-content .tip .admonition-title, + .rst-content .warning .admonition-title, + .rst-content .caution .admonition-title, + .rst-content .important .admonition-title { + background: #f0f0f2; + color: #00557D; + + } + + .rst-content .note, + .rst-content .tip, + .rst-content .important, + .rst-content .warning, + .rst-content .caution { + background: #f0f0f2; + } + + /* Remove the icon in front of note/tip element, and before the logo */ + .icon-home:before, .rst-content .admonition-title:before { + display: none + } + + /* a custom informalexample container is used in some doc */ + .informalexample { + border: 1px solid; + border-color: #aaa; + margin: 1em 0em; + padding: 1em; + page-break-inside: avoid; + } + + /* Remove the blue background in the top left corner, around the logo */ + .wy-side-nav-search { + background: inherit; + } + +} diff --git a/poky/bitbake/lib/bb/COW.py b/poky/bitbake/lib/bb/COW.py index bc20ce38e..23c22b65e 100644 --- a/poky/bitbake/lib/bb/COW.py +++ b/poky/bitbake/lib/bb/COW.py @@ -3,13 +3,14 @@ # # Copyright (C) 2006 Tim Ansell # -#Please Note: +# Please Note: # Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. # Assign a file to __warn__ to get warnings about slow operations. # import copy + ImmutableTypes = ( bool, complex, @@ -22,9 +23,11 @@ ImmutableTypes = ( MUTABLE = "__mutable__" + class COWMeta(type): pass + class COWDictMeta(COWMeta): __warn__ = False __hasmutable__ = False @@ -33,12 +36,15 @@ class COWDictMeta(COWMeta): def __str__(cls): # FIXME: I have magic numbers! return "" % (cls.__count__, len(cls.__dict__) - 3) + __repr__ = __str__ def cow(cls): class C(cls): __count__ = cls.__count__ + 1 + return C + copy = cow __call__ = cow @@ -70,8 +76,9 @@ class COWDictMeta(COWMeta): return value __getmarker__ = [] + def __getreadonly__(cls, key, default=__getmarker__): - """\ + """ Get a value (even if mutable) which you promise not to change. """ return cls.__getitem__(key, default, True) @@ -138,24 +145,29 @@ class COWDictMeta(COWMeta): def iterkeys(cls): return cls.iter("keys") + def itervalues(cls, readonly=False): if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: - print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + print("Warning: If you aren't going to change any of the values call with True.", file=cls.__warn__) return cls.iter("values", readonly) + def iteritems(cls, readonly=False): if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: - print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + print("Warning: If you aren't going to change any of the values call with True.", file=cls.__warn__) return cls.iter("items", readonly) + class COWSetMeta(COWDictMeta): def __str__(cls): # FIXME: I have magic numbers! 
- return "" % (cls.__count__, len(cls.__dict__) -3) + return "" % (cls.__count__, len(cls.__dict__) - 3) + __repr__ = __str__ def cow(cls): class C(cls): __count__ = cls.__count__ + 1 + return C def add(cls, value): @@ -173,131 +185,11 @@ class COWSetMeta(COWDictMeta): def iteritems(cls): raise TypeError("sets don't have 'items'") + # These are the actual classes you use! -class COWDictBase(object, metaclass = COWDictMeta): +class COWDictBase(metaclass=COWDictMeta): __count__ = 0 -class COWSetBase(object, metaclass = COWSetMeta): - __count__ = 0 -if __name__ == "__main__": - import sys - COWDictBase.__warn__ = sys.stderr - a = COWDictBase() - print("a", a) - - a['a'] = 'a' - a['b'] = 'b' - a['dict'] = {} - - b = a.copy() - print("b", b) - b['c'] = 'b' - - print() - - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(): - print(x) - print() - - b['dict']['a'] = 'b' - b['a'] = 'c' - - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(): - print(x) - print() - - try: - b['dict2'] - except KeyError as e: - print("Okay!") - - a['set'] = COWSetBase() - a['set'].add("o1") - a['set'].add("o1") - a['set'].add("o2") - - print("a", a) - for x in a['set'].itervalues(): - print(x) - print("--") - print("b", b) - for x in b['set'].itervalues(): - print(x) - print() - - b['set'].add('o3') - - print("a", a) - for x in a['set'].itervalues(): - print(x) - print("--") - print("b", b) - for x in b['set'].itervalues(): - print(x) - print() - - a['set2'] = set() - a['set2'].add("o1") - a['set2'].add("o1") - a['set2'].add("o2") - - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(readonly=True): - print(x) - print() - - del b['b'] - try: - print(b['b']) - except KeyError: - print("Yay! 
deleted key raises error") - - if 'b' in b: - print("Boo!") - else: - print("Yay - has_key with delete works!") - - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(readonly=True): - print(x) - print() - - b.__revertitem__('b') - - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(readonly=True): - print(x) - print() - - b.__revertitem__('dict') - print("a", a) - for x in a.iteritems(): - print(x) - print("--") - print("b", b) - for x in b.iteritems(readonly=True): - print(x) - print() +class COWSetBase(metaclass=COWSetMeta): + __count__ = 0 diff --git a/poky/bitbake/lib/bb/__init__.py b/poky/bitbake/lib/bb/__init__.py index 2c94e10c8..888dd5ccc 100644 --- a/poky/bitbake/lib/bb/__init__.py +++ b/poky/bitbake/lib/bb/__init__.py @@ -93,7 +93,7 @@ class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin): def __repr__(self): logger = self.logger - level = getLevelName(logger.getEffectiveLevel()) + level = logger.getLevelName(logger.getEffectiveLevel()) return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level) logging.LoggerAdapter = BBLoggerAdapter diff --git a/poky/bitbake/lib/bb/build.py b/poky/bitbake/lib/bb/build.py index 94f9cb371..974d2ff06 100644 --- a/poky/bitbake/lib/bb/build.py +++ b/poky/bitbake/lib/bb/build.py @@ -29,6 +29,9 @@ from bb import data, event, utils bblogger = logging.getLogger('BitBake') logger = logging.getLogger('BitBake.Build') +verboseShellLogging = False +verboseStdoutLogging = False + __mtime_cache = {} def cached_mtime_noerror(f): @@ -413,7 +416,7 @@ def exec_func_shell(func, d, runfile, cwd=None): bb.data.emit_func(func, script, d) - if bb.msg.loggerVerboseLogs: + if verboseShellLogging or bb.utils.to_boolean(d.getVar("BB_VERBOSE_LOGS", False)): script.write("set -x\n") if cwd: script.write("cd '%s'\n" % cwd) @@ -433,7 +436,7 @@ exit $ret if fakerootcmd: cmd = [fakerootcmd, runfile] - if bb.msg.loggerDefaultVerbose: + if verboseStdoutLogging: logfile = LogTee(logger, StdoutNoopContextManager()) else: logfile = StdoutNoopContextManager() diff --git a/poky/bitbake/lib/bb/cache.py b/poky/bitbake/lib/bb/cache.py index b819a0c2d..9e0c931a0 100644 --- a/poky/bitbake/lib/bb/cache.py +++ b/poky/bitbake/lib/bb/cache.py @@ -636,7 +636,7 @@ class Cache(NoCache): # Have to be careful about spaces and colons in filenames flist = self.filelist_regex.split(fl) for f in flist: - if not f or "*" in f: + if not f: continue f, exist = f.split(":") if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)): diff --git a/poky/bitbake/lib/bb/command.py b/poky/bitbake/lib/bb/command.py index 4d152ff4c..f8c6a03bb 100644 --- a/poky/bitbake/lib/bb/command.py +++ b/poky/bitbake/lib/bb/command.py @@ -54,13 +54,20 @@ class Command: self.cooker = cooker self.cmds_sync = CommandsSync() self.cmds_async = CommandsAsync() - self.remotedatastores = bb.remotedata.RemoteDatastores(cooker) + self.remotedatastores = None # FIXME Add lock for this self.currentAsyncCommand = None def runCommand(self, commandline, ro_only = False): command = commandline.pop(0) + + # Ensure cooker is ready for commands + if command != "updateConfig" and command != "setFeatures": + self.cooker.init_configdata() + if not self.remotedatastores: + self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) + if hasattr(CommandsSync, command): # Can run synchronous commands straight away command_method = getattr(self.cmds_sync, command) @@ -136,7 +143,8 @@ class 
Command: self.cooker.finishcommand() def reset(self): - self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) + if self.remotedatastores: + self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker) class CommandsSync: """ diff --git a/poky/bitbake/lib/bb/compat.py b/poky/bitbake/lib/bb/compat.py deleted file mode 100644 index 49356681a..000000000 --- a/poky/bitbake/lib/bb/compat.py +++ /dev/null @@ -1,10 +0,0 @@ -# -# SPDX-License-Identifier: GPL-2.0-only -# - -"""Code pulled from future python versions, here for compatibility""" - -from collections import MutableMapping, KeysView, ValuesView, ItemsView, OrderedDict -from functools import total_ordering - - diff --git a/poky/bitbake/lib/bb/cooker.py b/poky/bitbake/lib/bb/cooker.py index 912360546..5442f7d22 100644 --- a/poky/bitbake/lib/bb/cooker.py +++ b/poky/bitbake/lib/bb/cooker.py @@ -148,15 +148,16 @@ class BBCooker: Manages one bitbake build run """ - def __init__(self, configuration, featureSet=None, idleCallBackRegister=None): + def __init__(self, featureSet=None, idleCallBackRegister=None): self.recipecaches = None + self.eventlog = None self.skiplist = {} self.featureset = CookerFeatures() if featureSet: for f in featureSet: self.featureset.setFeature(f) - self.configuration = configuration + self.configuration = bb.cookerdata.CookerConfiguration() self.idleCallBackRegister = idleCallBackRegister @@ -194,18 +195,6 @@ class BBCooker: self.hashserv = None self.hashservaddr = None - self.initConfigurationData() - - bb.debug(1, "BBCooker parsed base configuration %s" % time.time()) - sys.stdout.flush() - - # we log all events to a file if so directed - if self.configuration.writeeventlog: - # register the log file writer as UI Handler - writer = EventWriter(self, self.configuration.writeeventlog) - EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event']) - bb.event.register_UIHhandler(EventLogWriteHandler(writer)) - self.inotify_modified_files = [] def _process_inotify_updates(server, cooker, abort): @@ -239,6 +228,13 @@ class BBCooker: bb.debug(1, "BBCooker startup complete %s" % time.time()) sys.stdout.flush() + def init_configdata(self): + if not hasattr(self, "data"): + self.initConfigurationData() + bb.debug(1, "BBCooker parsed base configuration %s" % time.time()) + sys.stdout.flush() + self.handlePRServ() + def process_inotify_updates(self): for n in [self.confignotifier, self.notifier]: if n.check_events(timeout=0): @@ -324,7 +320,7 @@ class BBCooker: for feature in features: self.featureset.setFeature(feature) bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset))) - if (original_featureset != list(self.featureset)) and self.state != state.error: + if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"): self.reset() def initConfigurationData(self): @@ -356,7 +352,7 @@ class BBCooker: self.caches_array.append(getattr(module, cache_name)) except ImportError as exc: logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc)) - sys.exit("FATAL: Failed to import extra cache class '%s'." 
% cache_name) + raise bb.BBHandledException() self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False) self.databuilder.parseBaseConfiguration() @@ -413,11 +409,6 @@ class BBCooker: self.data.disableTracking() def parseConfiguration(self): - # Set log file verbosity - verboselogs = bb.utils.to_boolean(self.data.getVar("BB_VERBOSE_LOGS", False)) - if verboselogs: - bb.msg.loggerVerboseLogs = True - # Change nice level if we're asked to nice = self.data.getVar("BB_NICE_LEVEL") if nice: @@ -451,7 +442,28 @@ class BBCooker: logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o])) clean = False - setattr(self.configuration, o, options[o]) + if hasattr(self.configuration, o): + setattr(self.configuration, o, options[o]) + + if self.configuration.writeeventlog: + if self.eventlog and self.eventlog[0] != self.configuration.writeeventlog: + bb.event.unregister_UIHhandler(self.eventlog[1]) + if not self.eventlog or self.eventlog[0] != self.configuration.writeeventlog: + # we log all events to a file if so directed + # register the log file writer as UI Handler + writer = EventWriter(self, self.configuration.writeeventlog) + EventLogWriteHandler = namedtuple('EventLogWriteHandler', ['event']) + self.eventlog = (self.configuration.writeeventlog, bb.event.register_UIHhandler(EventLogWriteHandler(writer))) + + bb.msg.loggerDefaultLogLevel = self.configuration.default_loglevel + bb.msg.loggerDefaultDomains = self.configuration.debug_domains + + if hasattr(self, "data"): + origenv = bb.data.init() + for k in environment: + origenv.setVar(k, environment[k]) + self.data.setVar("BB_ORIGENV", origenv) + for k in bb.utils.approved_variables(): if k in environment and k not in self.configuration.env: logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k])) @@ -467,6 +479,10 @@ class BBCooker: logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k])) self.configuration.env[k] = environment[k] clean = False + + # Now update all the variables not in the datastore to match + self.configuration.env = environment + if not clean: logger.debug(1, "Base environment change, triggering reparse") self.reset() @@ -1111,7 +1127,7 @@ class BBCooker: from bb import shell except ImportError: parselog.exception("Interactive mode not available") - sys.exit(1) + raise bb.BBHandledException() else: shell.start( self ) @@ -1547,6 +1563,7 @@ class BBCooker: if self.state in (state.shutdown, state.forceshutdown, state.error): if hasattr(self.parser, 'shutdown'): self.parser.shutdown(clean=False, force = True) + self.parser.final_cleanup() raise bb.BBHandledException() if self.state != state.parsing: @@ -1654,12 +1671,10 @@ class BBCooker: return pkgs_to_build def pre_serve(self): - # We now are in our own process so we can call this here. 
- # PRServ exits if its parent process exits - self.handlePRServ() return def post_serve(self): + self.shutdown(force=True) prserv.serv.auto_shutdown() if self.hashserv: self.hashserv.process.terminate() @@ -1674,6 +1689,7 @@ class BBCooker: if self.parser: self.parser.shutdown(clean=not force, force=force) + self.parser.final_cleanup() def finishcommand(self): self.state = state.initial @@ -1687,8 +1703,9 @@ class BBCooker: self.finishcommand() self.extraconfigdata = {} self.command.reset() - self.databuilder.reset() - self.data = self.databuilder.data + if hasattr(self, "data"): + self.databuilder.reset() + self.data = self.databuilder.data self.parsecache_valid = False self.baseconfig_valid = False @@ -1745,10 +1762,10 @@ class CookerCollectFiles(object): collectlog.debug(1, "collecting .bb files") files = (config.getVar( "BBFILES") or "").split() - config.setVar("BBFILES", " ".join(files)) # Sort files by priority files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem)[0] ) + config.setVar("BBFILES_PRIORITIZED", " ".join(files)) if not len(files): files = self.get_bbfiles() @@ -1977,7 +1994,8 @@ class Parser(multiprocessing.Process): except queue.Empty: pass else: - self.results.cancel_join_thread() + self.results.close() + self.results.join_thread() break if pending: @@ -1986,6 +2004,8 @@ class Parser(multiprocessing.Process): try: job = self.jobs.pop() except IndexError: + self.results.close() + self.results.join_thread() break result = self.parse(*job) # Clear the siggen cache after parsing to control memory usage, its huge @@ -2063,6 +2083,7 @@ class CookerParser(object): self.start() self.haveshutdown = False + self.syncthread = None def start(self): self.results = self.load_cached() @@ -2070,6 +2091,9 @@ class CookerParser(object): if self.toparse: bb.event.fire(bb.event.ParseStarted(self.toparse), self.cfgdata) def init(): + signal.signal(signal.SIGTERM, signal.SIG_DFL) + signal.signal(signal.SIGHUP, signal.SIG_DFL) + signal.signal(signal.SIGINT, signal.SIG_IGN) bb.utils.set_process_name(multiprocessing.current_process().name) multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, exitpriority=1) multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, exitpriority=1) @@ -2103,12 +2127,9 @@ class CookerParser(object): self.total) bb.event.fire(event, self.cfgdata) - for process in self.processes: - self.parser_quit.put(None) - else: - self.parser_quit.cancel_join_thread() - for process in self.processes: - self.parser_quit.put(None) + + for process in self.processes: + self.parser_quit.put(None) # Cleanup the queue before call process.join(), otherwise there might be # deadlocks. 
@@ -2125,13 +2146,17 @@ class CookerParser(object): else: process.join() + self.parser_quit.close() + # Allow data left in the cancel queue to be discarded + self.parser_quit.cancel_join_thread() + def sync_caches(): for c in self.bb_caches.values(): c.sync() - sync = threading.Thread(target=sync_caches) + sync = threading.Thread(target=sync_caches, name="SyncThread") + self.syncthread = sync sync.start() - multiprocessing.util.Finalize(None, sync.join, exitpriority=-100) bb.codeparser.parser_cache_savemerge() bb.fetch.fetcher_parse_done() if self.cooker.configuration.profile: @@ -2145,6 +2170,10 @@ class CookerParser(object): bb.utils.process_profilelog(profiles, pout = pout) print("Processed parsing statistics saved to %s" % (pout)) + def final_cleanup(self): + if self.syncthread: + self.syncthread.join() + def load_cached(self): for mc, cache, filename, appends in self.fromcache: cached, infos = cache.load(filename, appends) diff --git a/poky/bitbake/lib/bb/cookerdata.py b/poky/bitbake/lib/bb/cookerdata.py index b86e7d446..91cc4347f 100644 --- a/poky/bitbake/lib/bb/cookerdata.py +++ b/poky/bitbake/lib/bb/cookerdata.py @@ -58,11 +58,18 @@ class ConfigParameters(object): def updateToServer(self, server, environment): options = {} for o in ["abort", "force", "invalidate_stamp", - "verbose", "debug", "dry_run", "dump_signatures", - "debug_domains", "extra_assume_provided", "profile", - "prefile", "postfile", "server_timeout"]: + "dry_run", "dump_signatures", + "extra_assume_provided", "profile", + "prefile", "postfile", "server_timeout", + "nosetscene", "setsceneonly", "skipsetscene", + "runall", "runonly", "writeeventlog"]: options[o] = getattr(self.options, o) + options['build_verbose_shell'] = self.options.verbose + options['build_verbose_stdout'] = self.options.verbose + options['default_loglevel'] = bb.msg.loggerDefaultLogLevel + options['debug_domains'] = bb.msg.loggerDefaultDomains + ret, error = server.runCommand(["updateConfig", options, environment, sys.argv]) if error: raise Exception("Unable to update the server configuration with local parameters: %s" % error) @@ -111,11 +118,11 @@ class CookerConfiguration(object): """ def __init__(self): - self.debug_domains = [] + self.debug_domains = bb.msg.loggerDefaultDomains + self.default_loglevel = bb.msg.loggerDefaultLogLevel self.extra_assume_provided = [] self.prefile = [] self.postfile = [] - self.debug = 0 self.cmd = None self.abort = True self.force = False @@ -125,24 +132,17 @@ class CookerConfiguration(object): self.skipsetscene = False self.invalidate_stamp = False self.dump_signatures = [] + self.build_verbose_shell = False + self.build_verbose_stdout = False self.dry_run = False self.tracking = False - self.xmlrpcinterface = [] - self.server_timeout = None self.writeeventlog = False - self.server_only = False self.limited_deps = False self.runall = [] self.runonly = [] self.env = {} - def setConfigParameters(self, parameters): - for key in self.__dict__.keys(): - if key in parameters.options.__dict__: - setattr(self, key, parameters.options.__dict__[key]) - self.env = parameters.environment.copy() - def __getstate__(self): state = {} for key in self.__dict__.keys(): @@ -164,7 +164,7 @@ def catch_parse_error(func): import traceback parselog.critical(traceback.format_exc()) parselog.critical("Unable to parse %s: %s" % (fn, exc)) - sys.exit(1) + raise bb.BBHandledException() except bb.data_smart.ExpansionError as exc: import traceback @@ -176,10 +176,10 @@ def catch_parse_error(func): if not fn.startswith(bbdir): break 
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb)) - sys.exit(1) + raise bb.BBHandledException() except bb.parse.ParseError as exc: parselog.critical(str(exc)) - sys.exit(1) + raise bb.BBHandledException() return wrapped @catch_parse_error @@ -300,13 +300,13 @@ class CookerDataBuilder(object): self.data_hash = data_hash.hexdigest() except (SyntaxError, bb.BBHandledException): - raise bb.BBHandledException + raise bb.BBHandledException() except bb.data_smart.ExpansionError as e: logger.error(str(e)) - raise bb.BBHandledException + raise bb.BBHandledException() except Exception: logger.exception("Error parsing configuration files") - raise bb.BBHandledException + raise bb.BBHandledException() # Create a copy so we can reset at a later date when UIs disconnect self.origdata = self.data @@ -355,7 +355,7 @@ class CookerDataBuilder(object): for layer in broken_layers: parselog.critical(" %s", layer) parselog.critical("Please check BBLAYERS in %s" % (layerconf)) - sys.exit(1) + raise bb.BBHandledException() for layer in layers: parselog.debug(2, "Adding layer %s", layer) @@ -427,7 +427,7 @@ class CookerDataBuilder(object): handlerfn = data.getVarFlag(var, "filename", False) if not handlerfn: parselog.critical("Undefined event handler function '%s'" % var) - sys.exit(1) + raise bb.BBHandledException() handlerln = int(data.getVarFlag(var, "lineno", False)) bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln) diff --git a/poky/bitbake/lib/bb/daemonize.py b/poky/bitbake/lib/bb/daemonize.py index f01e6ec7c..c187fcfc6 100644 --- a/poky/bitbake/lib/bb/daemonize.py +++ b/poky/bitbake/lib/bb/daemonize.py @@ -14,6 +14,8 @@ import sys import io import traceback +import bb + def createDaemon(function, logfile): """ Detach a process from the controlling terminal and run it in the diff --git a/poky/bitbake/lib/bb/data_smart.py b/poky/bitbake/lib/bb/data_smart.py index 7f1b6dcb4..c559102cf 100644 --- a/poky/bitbake/lib/bb/data_smart.py +++ b/poky/bitbake/lib/bb/data_smart.py @@ -189,7 +189,7 @@ class IncludeHistory(object): if self.current.parent: self.current = self.current.parent else: - bb.warn("Include log: Tried to finish '%s' at top level." % filename) + bb.warn("Include log: Tried to finish '%s' at top level." % self.filename) return False def emit(self, o, level = 0): diff --git a/poky/bitbake/lib/bb/event.py b/poky/bitbake/lib/bb/event.py index 0e6d9b296..694b47052 100644 --- a/poky/bitbake/lib/bb/event.py +++ b/poky/bitbake/lib/bb/event.py @@ -10,17 +10,17 @@ BitBake build tools. # SPDX-License-Identifier: GPL-2.0-only # -import sys -import pickle -import logging -import atexit -import traceback import ast +import atexit +import collections +import logging +import pickle +import sys import threading +import traceback -import bb.utils -import bb.compat import bb.exceptions +import bb.utils # This is the pid for which we should generate the event. This is set when # the runqueue forks off. 
@@ -56,7 +56,7 @@ def set_class_handlers(h): _handlers = h def clean_class_handlers(): - return bb.compat.OrderedDict() + return collections.OrderedDict() # Internal _handlers = clean_class_handlers() diff --git a/poky/bitbake/lib/bb/fetch2/__init__.py b/poky/bitbake/lib/bb/fetch2/__init__.py index 756f60212..7ec1fea5d 100644 --- a/poky/bitbake/lib/bb/fetch2/__init__.py +++ b/poky/bitbake/lib/bb/fetch2/__init__.py @@ -1195,8 +1195,6 @@ def get_checksum_file_list(d): paths = ud.method.localpaths(ud, d) for f in paths: pth = ud.decodedurl - if '*' in pth: - f = os.path.join(os.path.abspath(f), pth) if f.startswith(dl_dir): # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else if os.path.exists(f): @@ -1365,9 +1363,6 @@ class FetchMethod(object): # We cannot compute checksums for directories if os.path.isdir(urldata.localpath): return False - if urldata.localpath.find("*") != -1: - return False - return True def recommends_checksum(self, urldata): @@ -1430,11 +1425,6 @@ class FetchMethod(object): iterate = False file = urldata.localpath - # Localpath can't deal with 'dir/*' entries, so it converts them to '.', - # but it must be corrected back for local files copying - if urldata.basename == '*' and file.endswith('/.'): - file = '%s/%s' % (file.rstrip('/.'), urldata.path) - try: unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) except ValueError as exc: @@ -1613,8 +1603,6 @@ class FetchMethod(object): """ if os.path.exists(ud.localpath): return True - if ud.localpath.find("*") != -1: - return True return False def implicit_urldata(self, ud, d): diff --git a/poky/bitbake/lib/bb/fetch2/local.py b/poky/bitbake/lib/bb/fetch2/local.py index 01d9ff9f8..25d4557db 100644 --- a/poky/bitbake/lib/bb/fetch2/local.py +++ b/poky/bitbake/lib/bb/fetch2/local.py @@ -17,7 +17,7 @@ import os import urllib.request, urllib.parse, urllib.error import bb import bb.utils -from bb.fetch2 import FetchMethod, FetchError +from bb.fetch2 import FetchMethod, FetchError, ParameterError from bb.fetch2 import logger class Local(FetchMethod): @@ -33,6 +33,8 @@ class Local(FetchMethod): ud.basename = os.path.basename(ud.decodedurl) ud.basepath = ud.decodedurl ud.needdonestamp = False + if "*" in ud.decodedurl: + raise bb.fetch2.ParameterError("file:// urls using globbing are no longer supported. 
Please place the files in a directory and reference that instead.", ud.url) return def localpath(self, urldata, d): @@ -55,12 +57,6 @@ class Local(FetchMethod): logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":")))) newpath, hist = bb.utils.which(filespath, path, history=True) searched.extend(hist) - if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1: - # For expressions using '*', best we can do is take the first directory in FILESPATH that exists - newpath, hist = bb.utils.which(filespath, ".", history=True) - searched.extend(hist) - logger.debug(2, "Searching for %s in path: %s" % (path, newpath)) - return searched if not os.path.exists(newpath): dldirfile = os.path.join(d.getVar("DL_DIR"), path) logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path)) @@ -70,8 +66,6 @@ class Local(FetchMethod): return searched def need_update(self, ud, d): - if ud.url.find("*") != -1: - return False if os.path.exists(ud.localpath): return False return True @@ -95,9 +89,6 @@ class Local(FetchMethod): """ Check the status of the url """ - if urldata.localpath.find("*") != -1: - logger.info("URL %s looks like a glob and was therefore not checked.", urldata.url) - return True if os.path.exists(urldata.localpath): return True return False diff --git a/poky/bitbake/lib/bb/fetch2/osc.py b/poky/bitbake/lib/bb/fetch2/osc.py index 8f091efd0..3a6cd2951 100644 --- a/poky/bitbake/lib/bb/fetch2/osc.py +++ b/poky/bitbake/lib/bb/fetch2/osc.py @@ -8,12 +8,15 @@ Based on the svn "Fetch" implementation. """ import logging +import os import bb from bb.fetch2 import FetchMethod from bb.fetch2 import FetchError from bb.fetch2 import MissingParameterError from bb.fetch2 import runfetchcmd +logger = logging.getLogger(__name__) + class Osc(FetchMethod): """Class to fetch a module or modules from Opensuse build server repositories.""" diff --git a/poky/bitbake/lib/bb/fetch2/ssh.py b/poky/bitbake/lib/bb/fetch2/ssh.py index 5e982ecf3..2c8557e1f 100644 --- a/poky/bitbake/lib/bb/fetch2/ssh.py +++ b/poky/bitbake/lib/bb/fetch2/ssh.py @@ -31,8 +31,7 @@ IETF secsh internet draft: # import re, os -from bb.fetch2 import FetchMethod -from bb.fetch2 import runfetchcmd +from bb.fetch2 import check_network_access, FetchMethod, ParameterError, runfetchcmd __pattern__ = re.compile(r''' @@ -65,7 +64,7 @@ class SSH(FetchMethod): def urldata_init(self, urldata, d): if 'protocol' in urldata.parm and urldata.parm['protocol'] == 'git': - raise bb.fetch2.ParameterError( + raise ParameterError( "Invalid protocol - if you wish to fetch from a git " + "repository using ssh, you need to use " + "git:// prefix with protocol=ssh", urldata.url) @@ -105,7 +104,7 @@ class SSH(FetchMethod): dldir ) - bb.fetch2.check_network_access(d, cmd, urldata.url) + check_network_access(d, cmd, urldata.url) runfetchcmd(cmd, d) diff --git a/poky/bitbake/lib/bb/fetch2/wget.py b/poky/bitbake/lib/bb/fetch2/wget.py index f7d1de26b..e6d9f528d 100644 --- a/poky/bitbake/lib/bb/fetch2/wget.py +++ b/poky/bitbake/lib/bb/fetch2/wget.py @@ -208,10 +208,7 @@ class Wget(FetchMethod): fetch.connection_cache.remove_connection(h.host, h.port) raise urllib.error.URLError(err) else: - try: - r = h.getresponse(buffering=True) - except TypeError: # buffering kw not supported - r = h.getresponse() + r = h.getresponse() # Pick apart the HTTPResponse object to get the addinfourl # object initialized properly. 
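The fetch2 and local fetcher hunks above remove the scattered wildcard special cases and instead reject globbing file:// URLs up front in urldata_init(). A minimal sketch of the resulting behaviour follows; this is an editorial illustration, not part of the patch, and assumes a datastore "d" with FILESPATH/DL_DIR already configured by the caller:

    # Editorial sketch, not part of the patch: shows the behaviour change
    # introduced by the fetch2/local.py hunks above. The names "d" and
    # "fetch_directory" are illustrative only.
    import bb.fetch2

    def fetch_directory(d):
        try:
            # Old-style wildcard URL: now rejected when the FetchData is set up
            fetcher = bb.fetch2.Fetch(["file://dir/*"], d)
            fetcher.download()
        except bb.fetch2.ParameterError:
            # New behaviour: reference the directory itself instead of globbing
            fetcher = bb.fetch2.Fetch(["file://dir"], d)
            fetcher.download()

In recipe terms this amounts to replacing SRC_URI entries of the form "file://dir/*" with "file://dir", which is what the updated test_local_wildcard test later in this patch asserts via assertRaises(ParameterError).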
diff --git a/poky/bitbake/lib/bb/main.py b/poky/bitbake/lib/bb/main.py index af2880f8d..7990195ea 100755 --- a/poky/bitbake/lib/bb/main.py +++ b/poky/bitbake/lib/bb/main.py @@ -344,8 +344,6 @@ def bitbake_main(configParams, configuration): except: pass - configuration.setConfigParameters(configParams) - if configParams.server_only and configParams.remote_server: raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" % ("the BBSERVER environment variable" if "BBSERVER" in os.environ \ @@ -357,13 +355,13 @@ def bitbake_main(configParams, configuration): if "BBDEBUG" in os.environ: level = int(os.environ["BBDEBUG"]) - if level > configuration.debug: - configuration.debug = level + if level > configParams.debug: + configParams.debug = level - bb.msg.init_msgconfig(configParams.verbose, configuration.debug, - configuration.debug_domains) + bb.msg.init_msgconfig(configParams.verbose, configParams.debug, + configParams.debug_domains) - server_connection, ui_module = setup_bitbake(configParams, configuration) + server_connection, ui_module = setup_bitbake(configParams) # No server connection if server_connection is None: if configParams.status_only: @@ -390,7 +388,7 @@ def bitbake_main(configParams, configuration): return 1 -def setup_bitbake(configParams, configuration, extrafeatures=None): +def setup_bitbake(configParams, extrafeatures=None): # Ensure logging messages get sent to the UI as events handler = bb.event.LogHandler() if not configParams.status_only: @@ -431,11 +429,11 @@ def setup_bitbake(configParams, configuration, extrafeatures=None): logger.info("bitbake server is not running.") lock.close() return None, None - # we start a server with a given configuration + # we start a server with a given featureset logger.info("Starting bitbake server...") # Clear the event queue since we already displayed messages bb.event.ui_queue = [] - server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset) + server = bb.server.process.BitBakeServer(lock, sockname, featureset, configParams.server_timeout, configParams.xmlrpcinterface) else: logger.info("Reconnecting to bitbake server...") diff --git a/poky/bitbake/lib/bb/msg.py b/poky/bitbake/lib/bb/msg.py index 2d88c4e72..6f17b6acc 100644 --- a/poky/bitbake/lib/bb/msg.py +++ b/poky/bitbake/lib/bb/msg.py @@ -14,6 +14,7 @@ import sys import copy import logging import logging.config +import os from itertools import groupby import bb import bb.event @@ -146,18 +147,12 @@ class LogFilterLTLevel(logging.Filter): # loggerDefaultLogLevel = BBLogFormatter.NOTE -loggerDefaultVerbose = False -loggerVerboseLogs = False loggerDefaultDomains = {} def init_msgconfig(verbose, debug, debug_domains=None): """ Set default verbosity and debug levels config the logger """ - bb.msg.loggerDefaultVerbose = verbose - if verbose: - bb.msg.loggerVerboseLogs = True - if debug: bb.msg.loggerDefaultLogLevel = BBLogFormatter.DEBUG - debug + 1 elif verbose: diff --git a/poky/bitbake/lib/bb/namedtuple_with_abc.py b/poky/bitbake/lib/bb/namedtuple_with_abc.py index 646aed6ff..e46dbf084 100644 --- a/poky/bitbake/lib/bb/namedtuple_with_abc.py +++ b/poky/bitbake/lib/bb/namedtuple_with_abc.py @@ -61,17 +61,9 @@ class _NamedTupleABCMeta(ABCMeta): return ABCMeta.__new__(mcls, name, bases, namespace) -exec( - # Python 2.x metaclass declaration syntax - """class _NamedTupleABC(object): - '''The abstract base class + mix-in for named tuples.''' - __metaclass__ = _NamedTupleABCMeta - _fields = abstractproperty()""" if version_info[0] < 3 else - # 
Python 3.x metaclass declaration syntax - """class _NamedTupleABC(metaclass=_NamedTupleABCMeta): - '''The abstract base class + mix-in for named tuples.''' - _fields = abstractproperty()""" -) +class _NamedTupleABC(metaclass=_NamedTupleABCMeta): + '''The abstract base class + mix-in for named tuples.''' + _fields = abstractproperty() _namedtuple.abc = _NamedTupleABC diff --git a/poky/bitbake/lib/bb/persist_data.py b/poky/bitbake/lib/bb/persist_data.py index 7357ab2d4..5f4fbe350 100644 --- a/poky/bitbake/lib/bb/persist_data.py +++ b/poky/bitbake/lib/bb/persist_data.py @@ -12,14 +12,14 @@ currently, providing a key/value store accessed by 'domain'. # import collections +import contextlib +import functools import logging import os.path +import sqlite3 import sys import warnings -from bb.compat import total_ordering from collections import Mapping -import sqlite3 -import contextlib sqlversion = sqlite3.sqlite_version_info if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3): @@ -28,7 +28,7 @@ if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3): logger = logging.getLogger("BitBake.PersistData") -@total_ordering +@functools.total_ordering class SQLTable(collections.MutableMapping): class _Decorators(object): @staticmethod diff --git a/poky/bitbake/lib/bb/process.py b/poky/bitbake/lib/bb/process.py index f36c929d2..7c3995cce 100644 --- a/poky/bitbake/lib/bb/process.py +++ b/poky/bitbake/lib/bb/process.py @@ -7,6 +7,7 @@ import signal import subprocess import errno import select +import bb logger = logging.getLogger('BitBake.Process') diff --git a/poky/bitbake/lib/bb/runqueue.py b/poky/bitbake/lib/bb/runqueue.py index 02a261e30..28bdadb45 100644 --- a/poky/bitbake/lib/bb/runqueue.py +++ b/poky/bitbake/lib/bb/runqueue.py @@ -376,7 +376,7 @@ class RunQueueData: self.stampwhitelist = cfgData.getVar("BB_STAMP_WHITELIST") or "" self.multi_provider_whitelist = (cfgData.getVar("MULTI_PROVIDER_WHITELIST") or "").split() - self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData) + self.setscenewhitelist = get_setscene_enforce_whitelist(cfgData, targets) self.setscenewhitelist_checked = False self.setscene_enforce = (cfgData.getVar('BB_SETSCENE_ENFORCE') == "1") self.init_progress_reporter = bb.progress.DummyMultiStageProcessProgressReporter() @@ -1263,8 +1263,8 @@ class RunQueue: "fakerootnoenv" : self.rqdata.dataCaches[mc].fakerootnoenv, "sigdata" : bb.parse.siggen.get_taskdata(), "logdefaultlevel" : bb.msg.loggerDefaultLogLevel, - "logdefaultverbose" : bb.msg.loggerDefaultVerbose, - "logdefaultverboselogs" : bb.msg.loggerVerboseLogs, + "build_verbose_shell" : self.cooker.configuration.build_verbose_shell, + "build_verbose_stdout" : self.cooker.configuration.build_verbose_stdout, "logdefaultdomain" : bb.msg.loggerDefaultDomains, "prhost" : self.cooker.prhost, "buildname" : self.cfgData.getVar("BUILDNAME"), @@ -2999,16 +2999,15 @@ class runQueuePipe(): print("Warning, worker left partial message: %s" % self.queue) self.input.close() -def get_setscene_enforce_whitelist(d): +def get_setscene_enforce_whitelist(d, targets): if d.getVar('BB_SETSCENE_ENFORCE') != '1': return None whitelist = (d.getVar("BB_SETSCENE_ENFORCE_WHITELIST") or "").split() outlist = [] for item in whitelist[:]: if item.startswith('%:'): - for target in sys.argv[1:]: - if not target.startswith('-'): - outlist.append(target.split(':')[0] + ':' + item.split(':')[1]) + for (mc, target, task, fn) in targets: + outlist.append(target + ':' + item.split(':')[1]) else: outlist.append(item) return outlist diff 
--git a/poky/bitbake/lib/bb/server/process.py b/poky/bitbake/lib/bb/server/process.py index 65e1eab52..c7cb34f0c 100644 --- a/poky/bitbake/lib/bb/server/process.py +++ b/poky/bitbake/lib/bb/server/process.py @@ -25,6 +25,7 @@ import subprocess import errno import re import datetime +import pickle import bb.server.xmlrpcserver from bb import daemonize from multiprocessing import queues @@ -34,11 +35,15 @@ logger = logging.getLogger('BitBake') class ProcessTimeout(SystemExit): pass +def serverlog(msg): + print(str(os.getpid()) + " " + datetime.datetime.now().strftime('%H:%M:%S.%f') + " " + msg) + sys.stdout.flush() + class ProcessServer(): profile_filename = "profile.log" profile_processed_filename = "profile.log.processed" - def __init__(self, lock, sock, sockname, server_timeout, xmlrpcinterface): + def __init__(self, lock, lockname, sock, sockname, server_timeout, xmlrpcinterface): self.command_channel = False self.command_channel_reply = False self.quit = False @@ -54,10 +59,12 @@ class ProcessServer(): self._idlefuns = {} self.bitbake_lock = lock + self.bitbake_lock_name = lockname self.sock = sock self.sockname = sockname self.server_timeout = server_timeout + self.timeout = self.server_timeout self.xmlrpcinterface = xmlrpcinterface def register_idle_function(self, function, data): @@ -70,22 +77,7 @@ class ProcessServer(): if self.xmlrpcinterface[0]: self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self) - print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port)) - - heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT') - if heartbeat_event: - try: - self.heartbeat_seconds = float(heartbeat_event) - except: - bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event) - - self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT') - try: - if self.timeout: - self.timeout = float(self.timeout) - except: - bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' 
% self.timeout) - + serverlog("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port)) try: self.bitbake_lock.seek(0) @@ -96,7 +88,7 @@ class ProcessServer(): self.bitbake_lock.write("%s\n" % (os.getpid())) self.bitbake_lock.flush() except Exception as e: - print("Error writing to lock file: %s" % str(e)) + serverlog("Error writing to lock file: %s" % str(e)) pass if self.cooker.configuration.profile: @@ -110,7 +102,7 @@ class ProcessServer(): prof.dump_stats("profile.log") bb.utils.process_profilelog("profile.log") - print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed") + serverlog("Raw profiling information saved to profile.log and processed statistics to profile.log.processed") else: ret = self.main() @@ -129,10 +121,11 @@ class ProcessServer(): fds = [self.sock] if self.xmlrpc: fds.append(self.xmlrpc) - print("Entering server connection loop") + seendata = False + serverlog("Entering server connection loop") def disconnect_client(self, fds): - print("Disconnecting Client") + serverlog("Disconnecting Client") if self.controllersock: fds.remove(self.controllersock) self.controllersock.close() @@ -150,12 +143,12 @@ class ProcessServer(): self.haveui = False ready = select.select(fds,[],[],0)[0] if newconnections: - print("Starting new client") + serverlog("Starting new client") conn = newconnections.pop(-1) fds.append(conn) self.controllersock = conn elif self.timeout is None and not ready: - print("No timeout, exiting.") + serverlog("No timeout, exiting.") self.quit = True self.lastui = time.time() @@ -164,17 +157,17 @@ class ProcessServer(): while select.select([self.sock],[],[],0)[0]: controllersock, address = self.sock.accept() if self.controllersock: - print("Queuing %s (%s)" % (str(ready), str(newconnections))) + serverlog("Queuing %s (%s)" % (str(ready), str(newconnections))) newconnections.append(controllersock) else: - print("Accepting %s (%s)" % (str(ready), str(newconnections))) + serverlog("Accepting %s (%s)" % (str(ready), str(newconnections))) self.controllersock = controllersock fds.append(controllersock) if self.controllersock in ready: try: - print("Processing Client") + serverlog("Processing Client") ui_fds = recvfds(self.controllersock, 3) - print("Connecting Client") + serverlog("Connecting Client") # Where to write events to writer = ConnectionWriter(ui_fds[0]) @@ -198,14 +191,14 @@ class ProcessServer(): if not self.timeout == -1.0 and not self.haveui and self.timeout and \ (self.lastui + self.timeout) < time.time(): - print("Server timeout, exiting.") + serverlog("Server timeout, exiting.") self.quit = True # If we don't see a UI connection within maxuiwait, its unlikely we're going to see # one. We have had issue with processes hanging indefinitely so timing out UI-less # servers is useful. 
if not self.hadanyui and not self.xmlrpc and not self.timeout and (self.lastui + self.maxuiwait) < time.time(): - print("No UI connection within max timeout, exiting to avoid infinite loop.") + serverlog("No UI connection within max timeout, exiting to avoid infinite loop.") self.quit = True if self.command_channel in ready: @@ -220,17 +213,37 @@ class ProcessServer(): self.quit = True continue try: - print("Running command %s" % command) + serverlog("Running command %s" % command) self.command_channel_reply.send(self.cooker.command.runCommand(command)) + serverlog("Command Completed") except Exception as e: logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e))) if self.xmlrpc in ready: self.xmlrpc.handle_requests() + if not seendata and hasattr(self.cooker, "data"): + heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT') + if heartbeat_event: + try: + self.heartbeat_seconds = float(heartbeat_event) + except: + bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event) + + self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT') + try: + if self.timeout: + self.timeout = float(self.timeout) + except: + bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout) + seendata = True + ready = self.idle_commands(.1, fds) - print("Exiting") + if len(threading.enumerate()) != 1: + serverlog("More than one thread left?: " + str(threading.enumerate())) + + serverlog("Exiting") # Remove the socket file so we don't get any more connections to avoid races try: os.unlink(self.sockname) @@ -253,39 +266,67 @@ class ProcessServer(): # Finally release the lockfile but warn about other processes holding it open lock = self.bitbake_lock - lockfile = lock.name + lockfile = self.bitbake_lock_name + + def get_lock_contents(lockfile): + try: + with open(lockfile, "r") as f: + return f.readlines() + except FileNotFoundError: + return None + + lockcontents = get_lock_contents(lockfile) + serverlog("Original lockfile contents: " + str(lockcontents)) + lock.close() lock = None while not lock: - with bb.utils.timeout(3): - lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True) - if lock: - # We hold the lock so we can remove the file (hide stale pid data) - # via unlockfile. - bb.utils.unlockfile(lock) - return - + i = 0 + lock = None + while not lock and i < 30: + lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=False) if not lock: - # Some systems may not have lsof available - procs = None + newlockcontents = get_lock_contents(lockfile) + if newlockcontents != lockcontents: + # A new server was started, the lockfile contents changed, we can exit + serverlog("Lockfile now contains different contents, exiting: " + str(newlockcontents)) + return + time.sleep(0.1) + i += 1 + if lock: + # We hold the lock so we can remove the file (hide stale pid data) + # via unlockfile. + bb.utils.unlockfile(lock) + serverlog("Exiting as we could obtain the lock") + return + + if not lock: + # Some systems may not have lsof available + procs = None + try: + procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + # File was deleted? 
+ continue + except OSError as e: + if e.errno != errno.ENOENT: + raise + if procs is None: + # Fall back to fuser if lsof is unavailable try: - procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT) + procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + # File was deleted? + continue except OSError as e: if e.errno != errno.ENOENT: raise - if procs is None: - # Fall back to fuser if lsof is unavailable - try: - procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock" - if procs: - msg += ":\n%s" % str(procs) - print(msg) + + msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock" + if procs: + msg += ":\n%s" % str(procs.decode("utf-8")) + serverlog(msg) def idle_commands(self, delay, fds=None): nextsleep = delay @@ -323,8 +364,9 @@ class ProcessServer(): self.next_heartbeat += self.heartbeat_seconds if self.next_heartbeat <= now: self.next_heartbeat = now + self.heartbeat_seconds - heartbeat = bb.event.HeartbeatEvent(now) - bb.event.fire(heartbeat, self.cooker.data) + if hasattr(self.cooker, "data"): + heartbeat = bb.event.HeartbeatEvent(now) + bb.event.fire(heartbeat, self.cooker.data) if nextsleep and now + nextsleep > self.next_heartbeat: # Shorten timeout so that we we wake up in time for # the heartbeat. @@ -353,7 +395,12 @@ class ServerCommunicator(): logger.info("No reply from server in 30s") if not self.recv.poll(30): raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server (60s)") - return self.recv.get() + ret, exc = self.recv.get() + # Should probably turn all exceptions in exc back into exceptions? 
+ # For now, at least handle BBHandledException + if exc and ("BBHandledException" in exc or "SystemExit" in exc): + raise bb.BBHandledException() + return ret, exc def updateFeatureSet(self, featureset): _, error = self.runCommand(["setFeatures", featureset]) @@ -386,39 +433,26 @@ class BitBakeProcessServerConnection(object): self.connection.recv.close() return +start_log_format = '--- Starting bitbake server pid %s at %s ---' +start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f' + class BitBakeServer(object): - start_log_format = '--- Starting bitbake server pid %s at %s ---' - start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f' - def __init__(self, lock, sockname, configuration, featureset): + def __init__(self, lock, sockname, featureset, server_timeout, xmlrpcinterface): - self.configuration = configuration + self.server_timeout = server_timeout + self.xmlrpcinterface = xmlrpcinterface self.featureset = featureset self.sockname = sockname self.bitbake_lock = lock self.readypipe, self.readypipein = os.pipe() - # Create server control socket - if os.path.exists(sockname): - os.unlink(sockname) - # Place the log in the builddirectory alongside the lock file logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log") + self.logfile = logfile - self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - # AF_UNIX has path length issues so chdir here to workaround - cwd = os.getcwd() - try: - os.chdir(os.path.dirname(sockname)) - self.sock.bind(os.path.basename(sockname)) - finally: - os.chdir(cwd) - self.sock.listen(1) - - os.set_inheritable(self.sock.fileno(), True) startdatetime = datetime.datetime.now() bb.daemonize.createDaemon(self._startServer, logfile) - self.sock.close() self.bitbake_lock.close() os.close(self.readypipein) @@ -437,7 +471,7 @@ class BitBakeServer(object): ready.close() bb.error("Unable to start bitbake server (%s)" % str(r)) if os.path.exists(logfile): - logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)')) + logstart_re = re.compile(start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)')) started = False lines = [] lastlines = [] @@ -447,9 +481,9 @@ class BitBakeServer(object): lines.append(line) else: lastlines.append(line) - res = logstart_re.match(line.rstrip()) + res = logstart_re.search(line.rstrip()) if res: - ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format) + ldatetime = datetime.datetime.strptime(res.group(2), start_log_datetime_format) if ldatetime >= startdatetime: started = True lines.append(line) @@ -470,28 +504,53 @@ class BitBakeServer(object): ready.close() def _startServer(self): - print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format))) - sys.stdout.flush() + os.close(self.readypipe) + os.set_inheritable(self.bitbake_lock.fileno(), True) + os.set_inheritable(self.readypipein, True) + serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server") + os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1])) - try: - server = ProcessServer(self.bitbake_lock, self.sock, self.sockname, self.configuration.server_timeout, self.configuration.xmlrpcinterface) - os.close(self.readypipe) - writer = ConnectionWriter(self.readypipein) - try: - self.cooker = 
bb.cooker.BBCooker(self.configuration, self.featureset, server.register_idle_function) - except bb.BBHandledException: - return None - writer.send("r") - writer.close() - server.cooker = self.cooker - print("Started bitbake server pid %d" % os.getpid()) - sys.stdout.flush() +def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface): + + import bb.cookerdata + import bb.cooker + + serverlog(start_log_format % (os.getpid(), datetime.datetime.now().strftime(start_log_datetime_format))) + + try: + bitbake_lock = os.fdopen(lockfd, "w") - server.run() + # Create server control socket + if os.path.exists(sockname): + os.unlink(sockname) + + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + # AF_UNIX has path length issues so chdir here to workaround + cwd = os.getcwd() + try: + os.chdir(os.path.dirname(sockname)) + sock.bind(os.path.basename(sockname)) finally: - # Flush any ,essages/errors to the logfile before exit - sys.stdout.flush() - sys.stderr.flush() + os.chdir(cwd) + sock.listen(1) + + server = ProcessServer(bitbake_lock, lockname, sock, sockname, server_timeout, xmlrpcinterface) + writer = ConnectionWriter(readypipeinfd) + try: + featureset = [] + cooker = bb.cooker.BBCooker(featureset, server.register_idle_function) + except bb.BBHandledException: + return None + writer.send("r") + writer.close() + server.cooker = cooker + serverlog("Started bitbake server pid %d" % os.getpid()) + + server.run() + finally: + # Flush any ,essages/errors to the logfile before exit + sys.stdout.flush() + sys.stderr.flush() def connectProcessServer(sockname, featureset): # Connect to socket diff --git a/poky/bitbake/lib/bb/siggen.py b/poky/bitbake/lib/bb/siggen.py index 4c63b0baa..ad49d1e2a 100644 --- a/poky/bitbake/lib/bb/siggen.py +++ b/poky/bitbake/lib/bb/siggen.py @@ -752,7 +752,7 @@ def clean_basepath(a): _, mc, a = a.split(":", 2) b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2] if a.startswith("virtual:"): - b = b + ":" + a.rsplit(":", 1)[0] + b = b + ":" + a.rsplit(":", 2)[0] if mc: b = b + ":mc:" + mc return b diff --git a/poky/bitbake/lib/bb/tests/cow.py b/poky/bitbake/lib/bb/tests/cow.py index bf6e79fce..75142649c 100644 --- a/poky/bitbake/lib/bb/tests/cow.py +++ b/poky/bitbake/lib/bb/tests/cow.py @@ -4,9 +4,17 @@ # SPDX-License-Identifier: GPL-2.0-only # # Copyright 2006 Holger Freyther +# Copyright (C) 2020 Agilent Technologies, Inc. 
# +import io +import re +import sys import unittest +import contextlib +import collections + +from bb.COW import COWDictBase, COWSetBase, COWDictMeta, COWSetMeta class COWTestCase(unittest.TestCase): @@ -14,11 +22,61 @@ class COWTestCase(unittest.TestCase): Test case for the COW module from mithro """ + def setUp(self): + self._track_warnings = False + self._warning_file = io.StringIO() + self._unhandled_warnings = collections.deque() + COWDictBase.__warn__ = self._warning_file + + def tearDown(self): + COWDictBase.__warn__ = sys.stderr + if self._track_warnings: + self._checkAllWarningsRead() + + def trackWarnings(self): + self._track_warnings = True + + def _collectWarnings(self): + self._warning_file.seek(0) + for warning in self._warning_file: + self._unhandled_warnings.append(warning.rstrip("\n")) + self._warning_file.truncate(0) + self._warning_file.seek(0) + + def _checkAllWarningsRead(self): + self._collectWarnings() + self.assertSequenceEqual(self._unhandled_warnings, []) + + @contextlib.contextmanager + def checkReportsWarning(self, expected_warning): + self._checkAllWarningsRead() + yield + self._collectWarnings() + warning = self._unhandled_warnings.popleft() + self.assertEqual(warning, expected_warning) + + def checkStrOutput(self, obj, expected_levels, expected_keys): + if obj.__class__ is COWDictMeta: + expected_class_name = "COWDict" + elif obj.__class__ is COWSetMeta: + expected_class_name = "COWSet" + else: + self.fail("obj is of unknown type {0}".format(type(obj))) + s = str(obj) + regex = re.compile(r"<(\w+) Level: (\d+) Current Keys: (\d+)>") + match = regex.match(s) + self.assertIsNotNone(match, "bad str output: '{0}'".format(s)) + class_name = match.group(1) + self.assertEqual(class_name, expected_class_name) + levels = int(match.group(2)) + self.assertEqual(levels, expected_levels, "wrong # levels in str: '{0}'".format(s)) + keys = int(match.group(3)) + self.assertEqual(keys, expected_keys, "wrong # keys in str: '{0}'".format(s)) + def testGetSet(self): """ Test and set """ - from bb.COW import COWDictBase a = COWDictBase.copy() self.assertEqual(False, 'a' in a) @@ -27,16 +85,14 @@ class COWTestCase(unittest.TestCase): a['b'] = 'b' self.assertEqual(True, 'a' in a) self.assertEqual(True, 'b' in a) - self.assertEqual('a', a['a'] ) - self.assertEqual('b', a['b'] ) + self.assertEqual('a', a['a']) + self.assertEqual('b', a['b']) def testCopyCopy(self): """ Test the copy of copies """ - from bb.COW import COWDictBase - # create two COW dict 'instances' b = COWDictBase.copy() c = COWDictBase.copy() @@ -94,30 +150,168 @@ class COWTestCase(unittest.TestCase): self.assertEqual(False, 'e' in b_2) def testCow(self): - from bb.COW import COWDictBase + self.trackWarnings() + c = COWDictBase.copy() c['123'] = 1027 c['other'] = 4711 - c['d'] = { 'abc' : 10, 'bcd' : 20 } + c['d'] = {'abc': 10, 'bcd': 20} copy = c.copy() self.assertEqual(1027, c['123']) self.assertEqual(4711, c['other']) - self.assertEqual({'abc':10, 'bcd':20}, c['d']) + self.assertEqual({'abc': 10, 'bcd': 20}, c['d']) self.assertEqual(1027, copy['123']) self.assertEqual(4711, copy['other']) - self.assertEqual({'abc':10, 'bcd':20}, copy['d']) + with self.checkReportsWarning("Warning: Doing a copy because d is a mutable type."): + self.assertEqual({'abc': 10, 'bcd': 20}, copy['d']) # cow it now copy['123'] = 1028 copy['other'] = 4712 copy['d']['abc'] = 20 - self.assertEqual(1027, c['123']) self.assertEqual(4711, c['other']) - self.assertEqual({'abc':10, 'bcd':20}, c['d']) + self.assertEqual({'abc': 10, 'bcd': 20}, 
c['d']) self.assertEqual(1028, copy['123']) self.assertEqual(4712, copy['other']) - self.assertEqual({'abc':20, 'bcd':20}, copy['d']) + self.assertEqual({'abc': 20, 'bcd': 20}, copy['d']) + + def testOriginalTestSuite(self): + # This test suite is a port of the original one from COW.py + self.trackWarnings() + + a = COWDictBase.copy() + self.checkStrOutput(a, 1, 0) + + a['a'] = 'a' + a['b'] = 'b' + a['dict'] = {} + self.checkStrOutput(a, 1, 4) # 4th member is dict__mutable__ + + b = a.copy() + self.checkStrOutput(b, 2, 0) + b['c'] = 'b' + self.checkStrOutput(b, 2, 1) + + with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): + self.assertListEqual(list(a.iteritems()), + [('a', 'a'), + ('b', 'b'), + ('dict', {}) + ]) + + with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): + b_gen = b.iteritems() + self.assertTupleEqual(next(b_gen), ('a', 'a')) + self.assertTupleEqual(next(b_gen), ('b', 'b')) + self.assertTupleEqual(next(b_gen), ('c', 'b')) + with self.checkReportsWarning("Warning: Doing a copy because dict is a mutable type."): + self.assertTupleEqual(next(b_gen), ('dict', {})) + with self.assertRaises(StopIteration): + next(b_gen) + + b['dict']['a'] = 'b' + b['a'] = 'c' + + self.checkStrOutput(a, 1, 4) + self.checkStrOutput(b, 2, 3) + + with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): + self.assertListEqual(list(a.iteritems()), + [('a', 'a'), + ('b', 'b'), + ('dict', {}) + ]) + + with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): + b_gen = b.iteritems() + self.assertTupleEqual(next(b_gen), ('a', 'c')) + self.assertTupleEqual(next(b_gen), ('b', 'b')) + self.assertTupleEqual(next(b_gen), ('c', 'b')) + self.assertTupleEqual(next(b_gen), ('dict', {'a': 'b'})) + with self.assertRaises(StopIteration): + next(b_gen) + + with self.assertRaises(KeyError): + print(b["dict2"]) + + a['set'] = COWSetBase() + a['set'].add("o1") + a['set'].add("o1") + a['set'].add("o2") + self.assertSetEqual(set(a['set'].itervalues()), {"o1", "o2"}) + self.assertSetEqual(set(b['set'].itervalues()), {"o1", "o2"}) + + b['set'].add('o3') + self.assertSetEqual(set(a['set'].itervalues()), {"o1", "o2"}) + self.assertSetEqual(set(b['set'].itervalues()), {"o1", "o2", "o3"}) + + a['set2'] = set() + a['set2'].add("o1") + a['set2'].add("o1") + a['set2'].add("o2") + + # We don't expect 'a' to change anymore + def check_a(): + with self.checkReportsWarning("Warning: If you aren't going to change any of the values call with True."): + a_gen = a.iteritems() + self.assertTupleEqual(next(a_gen), ('a', 'a')) + self.assertTupleEqual(next(a_gen), ('b', 'b')) + self.assertTupleEqual(next(a_gen), ('dict', {})) + self.assertTupleEqual(next(a_gen), ('set2', {'o1', 'o2'})) + a_sub_set = next(a_gen) + self.assertEqual(a_sub_set[0], 'set') + self.checkStrOutput(a_sub_set[1], 1, 2) + self.assertSetEqual(set(a_sub_set[1].itervalues()), {'o1', 'o2'}) + + check_a() + + b_gen = b.iteritems(readonly=True) + self.assertTupleEqual(next(b_gen), ('a', 'c')) + self.assertTupleEqual(next(b_gen), ('b', 'b')) + self.assertTupleEqual(next(b_gen), ('c', 'b')) + self.assertTupleEqual(next(b_gen), ('dict', {'a': 'b'})) + self.assertTupleEqual(next(b_gen), ('set2', {'o1', 'o2'})) + b_sub_set = next(b_gen) + self.assertEqual(b_sub_set[0], 'set') + self.checkStrOutput(b_sub_set[1], 2, 1) + self.assertSetEqual(set(b_sub_set[1].itervalues()), {'o1', 
'o2', 'o3'}) + + del b['b'] + with self.assertRaises(KeyError): + print(b['b']) + self.assertFalse('b' in b) + + check_a() + + b.__revertitem__('b') + check_a() + self.assertEqual(b['b'], 'b') + self.assertTrue('b' in b) + + b.__revertitem__('dict') + check_a() + + b_gen = b.iteritems(readonly=True) + self.assertTupleEqual(next(b_gen), ('a', 'c')) + self.assertTupleEqual(next(b_gen), ('b', 'b')) + self.assertTupleEqual(next(b_gen), ('c', 'b')) + self.assertTupleEqual(next(b_gen), ('dict', {})) + self.assertTupleEqual(next(b_gen), ('set2', {'o1', 'o2'})) + b_sub_set = next(b_gen) + self.assertEqual(b_sub_set[0], 'set') + self.checkStrOutput(b_sub_set[1], 2, 1) + self.assertSetEqual(set(b_sub_set[1].itervalues()), {'o1', 'o2', 'o3'}) + + self.checkStrOutput(a, 1, 6) + self.checkStrOutput(b, 2, 3) + + def testSetMethods(self): + s = COWSetBase() + with self.assertRaises(TypeError): + print(s.iteritems()) + with self.assertRaises(TypeError): + print(s.iterkeys()) diff --git a/poky/bitbake/lib/bb/tests/data.py b/poky/bitbake/lib/bb/tests/data.py index 5f195047d..1d4a64b10 100644 --- a/poky/bitbake/lib/bb/tests/data.py +++ b/poky/bitbake/lib/bb/tests/data.py @@ -12,6 +12,7 @@ import bb import bb.data import bb.parse import logging +import os class LogRecord(): def __enter__(self): diff --git a/poky/bitbake/lib/bb/tests/event.py b/poky/bitbake/lib/bb/tests/event.py index 9229b63d4..9ca7e9bc8 100644 --- a/poky/bitbake/lib/bb/tests/event.py +++ b/poky/bitbake/lib/bb/tests/event.py @@ -6,17 +6,18 @@ # SPDX-License-Identifier: GPL-2.0-only # -import unittest -import bb -import logging -import bb.compat -import bb.event +import collections import importlib +import logging +import pickle import threading import time -import pickle +import unittest from unittest.mock import Mock from unittest.mock import call + +import bb +import bb.event from bb.msg import BBLogFormatter @@ -75,7 +76,7 @@ class EventHandlingTest(unittest.TestCase): def _create_test_handlers(self): """ Method used to create a test handler ordered dictionary """ - test_handlers = bb.compat.OrderedDict() + test_handlers = collections.OrderedDict() test_handlers["handler1"] = self._test_process.handler1 test_handlers["handler2"] = self._test_process.handler2 return test_handlers @@ -96,7 +97,7 @@ class EventHandlingTest(unittest.TestCase): def test_clean_class_handlers(self): """ Test clean_class_handlers method """ - cleanDict = bb.compat.OrderedDict() + cleanDict = collections.OrderedDict() self.assertEqual(cleanDict, bb.event.clean_class_handlers()) diff --git a/poky/bitbake/lib/bb/tests/fetch.py b/poky/bitbake/lib/bb/tests/fetch.py index 29c96b2b4..0ecf044f3 100644 --- a/poky/bitbake/lib/bb/tests/fetch.py +++ b/poky/bitbake/lib/bb/tests/fetch.py @@ -602,8 +602,8 @@ class FetcherLocalTest(FetcherTest): self.assertEqual(tree, ['a', 'dir/c']) def test_local_wildcard(self): - tree = self.fetchUnpack(['file://a', 'file://dir/*']) - self.assertEqual(tree, ['a', 'dir/c', 'dir/d', 'dir/subdir/e']) + with self.assertRaises(bb.fetch2.ParameterError): + tree = self.fetchUnpack(['file://a', 'file://dir/*']) def test_local_dir(self): tree = self.fetchUnpack(['file://a', 'file://dir']) @@ -1156,7 +1156,8 @@ class FetchLatestVersionTest(FetcherTest): ("mtd-utils", "git://git.yoctoproject.org/mtd-utils.git", "ca39eb1d98e736109c64ff9c1aa2a6ecca222d8f", "") : "1.5.0", # version pattern "pkg_name-X.Y" - ("presentproto", "git://anongit.freedesktop.org/git/xorg/proto/presentproto", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "") + # mirror of 
git://anongit.freedesktop.org/git/xorg/proto/presentproto since network issues interfered with testing + ("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "") : "1.0", # version pattern "pkg_name-vX.Y.Z" ("dtc", "git://git.qemu.org/dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "") @@ -1170,7 +1171,8 @@ class FetchLatestVersionTest(FetcherTest): ("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https", "4ed19e11c2975105b71b956440acdb25d46a347d", "") : "20120614", # packages with a valid UPSTREAM_CHECK_GITTAGREGEX - ("xf86-video-omap", "git://anongit.freedesktop.org/xorg/driver/xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", "(?P(\d+\.(\d\.?)*))") + # mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing + ("xf86-video-omap", "git://git.yoctoproject.org/bbfetchtests-xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", "(?P(\d+\.(\d\.?)*))") : "0.4.3", ("build-appliance-image", "git://git.yoctoproject.org/poky", "b37dd451a52622d5b570183a81583cc34c2ff555", "(?P(([0-9][\.|_]?)+[0-9]))") : "11.0.0", @@ -1262,9 +1264,7 @@ class FetchLatestVersionTest(FetcherTest): class FetchCheckStatusTest(FetcherTest): - test_wget_uris = ["http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2", - "http://www.cups.org/", - "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz", + test_wget_uris = ["http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz", "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz", "http://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz", "https://yoctoproject.org/", diff --git a/poky/bitbake/lib/bb/tinfoil.py b/poky/bitbake/lib/bb/tinfoil.py index dccbe0ebb..2fb1bb7d2 100644 --- a/poky/bitbake/lib/bb/tinfoil.py +++ b/poky/bitbake/lib/bb/tinfoil.py @@ -22,7 +22,6 @@ import bb.taskdata import bb.utils import bb.command import bb.remotedata -from bb.cookerdata import CookerConfiguration from bb.main import setup_bitbake, BitBakeConfigParameters import bb.fetch2 @@ -381,18 +380,13 @@ class Tinfoil: if not config_params: config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet) - cookerconfig = CookerConfiguration() - cookerconfig.setConfigParameters(config_params) - if not config_only: # Disable local loggers because the UI module is going to set up its own for handler in self.localhandlers: self.logger.handlers.remove(handler) self.localhandlers = [] - self.server_connection, ui_module = setup_bitbake(config_params, - cookerconfig, - extrafeatures) + self.server_connection, ui_module = setup_bitbake(config_params, extrafeatures) self.ui_module = ui_module @@ -738,7 +732,7 @@ class Tinfoil: continue if helper.eventHandler(event): if isinstance(event, bb.build.TaskFailedSilent): - logger.warning("Logfile for failed setscene task is %s" % event.logfile) + self.logger.warning("Logfile for failed setscene task is %s" % event.logfile) elif isinstance(event, bb.build.TaskFailed): bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter) continue @@ -812,18 +806,22 @@ class Tinfoil: prepare() has been called, or use a with... block when you create the tinfoil object which will ensure that it gets called. 
""" - if self.server_connection: - self.run_command('clientComplete') - _server_connections.remove(self.server_connection) - bb.event.ui_queue = [] - self.server_connection.terminate() - self.server_connection = None - - # Restore logging handlers to how it looked when we started - if self.oldhandlers: - for handler in self.logger.handlers: - if handler not in self.oldhandlers: - self.logger.handlers.remove(handler) + try: + if self.server_connection: + try: + self.run_command('clientComplete') + finally: + _server_connections.remove(self.server_connection) + bb.event.ui_queue = [] + self.server_connection.terminate() + self.server_connection = None + + finally: + # Restore logging handlers to how it looked when we started + if self.oldhandlers: + for handler in self.logger.handlers: + if handler not in self.oldhandlers: + self.logger.handlers.remove(handler) def _reconvert_type(self, obj, origtypename): """ diff --git a/poky/bitbake/lib/bb/ui/knotty.py b/poky/bitbake/lib/bb/ui/knotty.py index 87e873d64..a91e4fd15 100644 --- a/poky/bitbake/lib/bb/ui/knotty.py +++ b/poky/bitbake/lib/bb/ui/knotty.py @@ -144,7 +144,7 @@ class TerminalFilter(object): pass if not cr: try: - cr = (env['LINES'], env['COLUMNS']) + cr = (os.environ['LINES'], os.environ['COLUMNS']) except: cr = (25, 80) return cr @@ -380,14 +380,27 @@ _evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.Lo "bb.event.BuildBase", "bb.build.TaskStarted", "bb.build.TaskSucceeded", "bb.build.TaskFailedSilent", "bb.build.TaskProgress", "bb.event.ProcessStarted", "bb.event.ProcessProgress", "bb.event.ProcessFinished"] +def drain_events_errorhandling(eventHandler): + # We don't have logging setup, we do need to show any events we see before exiting + event = True + logger = bb.msg.logger_create('bitbake', sys.stdout) + while event: + event = eventHandler.waitEvent(0) + if isinstance(event, logging.LogRecord): + logger.handle(event) + def main(server, eventHandler, params, tf = TerminalFilter): - if not params.observe_only: - params.updateToServer(server, os.environ.copy()) + try: + if not params.observe_only: + params.updateToServer(server, os.environ.copy()) - includelogs, loglines, consolelogfile, logconfigfile = _log_settings_from_server(server, params.observe_only) + includelogs, loglines, consolelogfile, logconfigfile = _log_settings_from_server(server, params.observe_only) - loglevel, _ = bb.msg.constructLogOptions() + loglevel, _ = bb.msg.constructLogOptions() + except bb.BBHandledException: + drain_events_errorhandling(eventHandler) + return 1 if params.options.quiet == 0: console_loglevel = loglevel diff --git a/poky/bitbake/lib/bb/ui/ncurses.py b/poky/bitbake/lib/bb/ui/ncurses.py index da4fbeabb..cf1c876a5 100644 --- a/poky/bitbake/lib/bb/ui/ncurses.py +++ b/poky/bitbake/lib/bb/ui/ncurses.py @@ -48,6 +48,8 @@ import bb import xmlrpc.client from bb.ui import uihelper +logger = logging.getLogger(__name__) + parsespin = itertools.cycle( r'|/-\\' ) X = 0 diff --git a/poky/bitbake/lib/bb/ui/uievent.py b/poky/bitbake/lib/bb/ui/uievent.py index 13d0d4a04..8607d0523 100644 --- a/poky/bitbake/lib/bb/ui/uievent.py +++ b/poky/bitbake/lib/bb/ui/uievent.py @@ -11,9 +11,13 @@ server and queue them for the UI to process. This process must be used to avoid client/server deadlocks. 
""" -import socket, threading, pickle, collections +import collections, logging, pickle, socket, threading from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler +import bb + +logger = logging.getLogger(__name__) + class BBUIEventQueue: def __init__(self, BBServer, clientinfo=("localhost, 0")): diff --git a/poky/bitbake/lib/bb/utils.py b/poky/bitbake/lib/bb/utils.py index 50032e50c..0b79f92e2 100644 --- a/poky/bitbake/lib/bb/utils.py +++ b/poky/bitbake/lib/bb/utils.py @@ -402,8 +402,8 @@ def better_exec(code, context, text = None, realfile = "", pythonexception (t, value, tb) = sys.exc_info() try: _print_exception(t, value, tb, realfile, text, context) - except Exception as e: - logger.error("Exception handler error: %s" % str(e)) + except Exception as e2: + logger.error("Exception handler error: %s" % str(e2)) e = bb.BBHandledException(e) raise e @@ -433,20 +433,6 @@ def fileslocked(files): for lock in locks: bb.utils.unlockfile(lock) -@contextmanager -def timeout(seconds): - def timeout_handler(signum, frame): - pass - - original_handler = signal.signal(signal.SIGALRM, timeout_handler) - - try: - signal.alarm(seconds) - yield - finally: - signal.alarm(0) - signal.signal(signal.SIGALRM, original_handler) - def lockfile(name, shared=False, retry=True, block=False): """ Use the specified file as a lock file, return when the lock has @@ -1085,21 +1071,20 @@ def process_profilelog(fn, pout = None): # Either call with a list of filenames and set pout or a filename and optionally pout. if not pout: pout = fn + '.processed' - pout = open(pout, 'w') - - import pstats - if isinstance(fn, list): - p = pstats.Stats(*fn, stream=pout) - else: - p = pstats.Stats(fn, stream=pout) - p.sort_stats('time') - p.print_stats() - p.print_callers() - p.sort_stats('cumulative') - p.print_stats() - pout.flush() - pout.close() + with open(pout, 'w') as pout: + import pstats + if isinstance(fn, list): + p = pstats.Stats(*fn, stream=pout) + else: + p = pstats.Stats(fn, stream=pout) + p.sort_stats('time') + p.print_stats() + p.print_callers() + p.sort_stats('cumulative') + p.print_stats() + + pout.flush() # # Was present to work around multiprocessing pool bugs in python < 2.7.3 @@ -1472,14 +1457,20 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None): return (notadded, notremoved) - -def get_file_layer(filename, d): - """Determine the collection (as defined by a layer's layer.conf file) containing the specified file""" +def get_collection_res(d): collections = (d.getVar('BBFILE_COLLECTIONS') or '').split() collection_res = {} for collection in collections: collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or '' + return collection_res + + +def get_file_layer(filename, d, collection_res={}): + """Determine the collection (as defined by a layer's layer.conf file) containing the specified file""" + if not collection_res: + collection_res = get_collection_res(d) + def path_to_layer(path): # Use longest path so we handle nested layers matchlen = 0 @@ -1491,12 +1482,13 @@ def get_file_layer(filename, d): return match result = None - bbfiles = (d.getVar('BBFILES') or '').split() + bbfiles = (d.getVar('BBFILES_PRIORITIZED') or '').split() bbfilesmatch = False for bbfilesentry in bbfiles: - if fnmatch.fnmatch(filename, bbfilesentry): + if fnmatch.fnmatchcase(filename, bbfilesentry): bbfilesmatch = True result = path_to_layer(bbfilesentry) + break if not bbfilesmatch: # Probably a bbclass diff --git a/poky/bitbake/lib/bblayers/query.py b/poky/bitbake/lib/bblayers/query.py 
index ee2db0efe..f5e3c8474 100644 --- a/poky/bitbake/lib/bblayers/query.py +++ b/poky/bitbake/lib/bblayers/query.py @@ -21,6 +21,10 @@ def plugin_init(plugins): class QueryPlugin(LayerPlugin): + def __init__(self): + super(QueryPlugin, self).__init__() + self.collection_res = {} + def do_show_layers(self, args): """show current configured layers.""" logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority")) @@ -222,7 +226,6 @@ skipped recipes will also be listed, with a " (skipped)" suffix. multilayer = True if prov[0] != pref[0]: same_ver = False - if (multilayer or not show_overlayed_only) and (same_ver or not show_same_ver_only): if not items_listed: logger.plain('=== %s ===' % title) @@ -243,8 +246,13 @@ skipped recipes will also be listed, with a " (skipped)" suffix. else: return '?' + def get_collection_res(self): + if not self.collection_res: + self.collection_res = bb.utils.get_collection_res(self.tinfoil.config_data) + return self.collection_res + def get_file_layerdir(self, filename): - layer = bb.utils.get_file_layer(filename, self.tinfoil.config_data) + layer = bb.utils.get_file_layer(filename, self.tinfoil.config_data, self.get_collection_res()) return self.bbfile_collections.get(layer, None) def remove_layer_prefix(self, f): diff --git a/poky/bitbake/lib/hashserv/tests.py b/poky/bitbake/lib/hashserv/tests.py index 6e8629507..b34c43687 100644 --- a/poky/bitbake/lib/hashserv/tests.py +++ b/poky/bitbake/lib/hashserv/tests.py @@ -9,6 +9,7 @@ from . import create_server, create_client import hashlib import logging import multiprocessing +import os import sys import tempfile import threading diff --git a/poky/bitbake/lib/layerindexlib/__init__.py b/poky/bitbake/lib/layerindexlib/__init__.py index 77196b408..45157b668 100644 --- a/poky/bitbake/lib/layerindexlib/__init__.py +++ b/poky/bitbake/lib/layerindexlib/__init__.py @@ -7,6 +7,7 @@ import datetime import logging import imp +import os from collections import OrderedDict from layerindexlib.plugin import LayerIndexPluginUrlError @@ -70,7 +71,7 @@ class LayerIndex(): if self.__class__ != newIndex.__class__ or \ other.__class__ != newIndex.__class__: - raise TypeException("Can not add different types.") + raise TypeError("Can not add different types.") for indexEnt in self.indexes: newIndex.indexes.append(indexEnt) @@ -266,8 +267,8 @@ will write out the individual elements split by layer and related components. 
logger.debug(1, "Store not implemented in %s" % plugin.type) pass else: - logger.debug(1, "No plugins support %s" % url) - raise LayerIndexException("No plugins support %s" % url) + logger.debug(1, "No plugins support %s" % indexURI) + raise LayerIndexException("No plugins support %s" % indexURI) def is_empty(self): @@ -657,7 +658,7 @@ class LayerIndexObj(): if obj.id in self._index[indexname]: if self._index[indexname][obj.id] == obj: continue - raise LayerIndexError('Conflict adding object %s(%s) to index' % (indexname, obj.id)) + raise LayerIndexException('Conflict adding object %s(%s) to index' % (indexname, obj.id)) self._index[indexname][obj.id] = obj def add_raw_element(self, indexname, objtype, rawobjs): @@ -842,11 +843,11 @@ class LayerIndexObj(): def _resolve_dependencies(layerbranches, ignores, dependencies, invalid): for layerbranch in layerbranches: - if ignores and layerBranch.layer.name in ignores: + if ignores and layerbranch.layer.name in ignores: continue - for layerdependency in layerbranch.index.layerDependencies_layerBranchId[layerBranch.id]: - deplayerbranch = layerDependency.dependency_layerBranch + for layerdependency in layerbranch.index.layerDependencies_layerBranchId[layerbranch.id]: + deplayerbranch = layerdependency.dependency_layerBranch if ignores and deplayerbranch.layer.name in ignores: continue diff --git a/poky/bitbake/lib/layerindexlib/cooker.py b/poky/bitbake/lib/layerindexlib/cooker.py index 65b23d087..21ec438a2 100644 --- a/poky/bitbake/lib/layerindexlib/cooker.py +++ b/poky/bitbake/lib/layerindexlib/cooker.py @@ -4,6 +4,7 @@ # import logging +import os from collections import defaultdict @@ -73,7 +74,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin): d = self.layerindex.data if not branches: - raise LayerIndexFetchError("No branches specified for _load_bblayers!") + raise layerindexlib.LayerIndexFetchError("No branches specified for _load_bblayers!") index = layerindexlib.LayerIndexObj() @@ -202,7 +203,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin): try: depDict = bb.utils.explode_dep_versions2(deps) except bb.utils.VersionStringException as vse: - bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse))) + bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (collection, str(vse))) for dep, oplist in list(depDict.items()): # We need to search ourselves, so use the _ version... @@ -268,7 +269,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin): layer = bb.utils.get_file_layer(realfn[0], self.config_data) - depBranchId = collection_layerbranch[layer] + depBranchId = collection[layer] recipeId += 1 recipe = layerindexlib.Recipe(index, None) diff --git a/poky/bitbake/lib/layerindexlib/restapi.py b/poky/bitbake/lib/layerindexlib/restapi.py index 21fd14414..7023f42f2 100644 --- a/poky/bitbake/lib/layerindexlib/restapi.py +++ b/poky/bitbake/lib/layerindexlib/restapi.py @@ -5,9 +5,13 @@ import logging import json +import os + from urllib.parse import unquote from urllib.parse import urlparse +import bb + import layerindexlib import layerindexlib.plugin @@ -163,7 +167,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin): parsed = _get_json_response(apiurl=up_stripped.geturl(), username=username, password=password, retry=False) logger.debug(1, "%s: retry successful.") else: - raise LayerIndexFetchError('%s: Connection reset by peer. Is there a firewall blocking your connection?' % apiurl) + raise layerindexlib.LayerIndexFetchError('%s: Connection reset by peer. Is there a firewall blocking your connection?' 
% apiurl) return parsed diff --git a/poky/bitbake/lib/layerindexlib/tests/restapi.py b/poky/bitbake/lib/layerindexlib/tests/restapi.py index e5ccafe5c..4646d01f9 100644 --- a/poky/bitbake/lib/layerindexlib/tests/restapi.py +++ b/poky/bitbake/lib/layerindexlib/tests/restapi.py @@ -112,7 +112,7 @@ class LayerIndexWebRestApiTest(LayersTest): break else: self.logger.debug(1, "meta-python was not found") - self.assetTrue(False) + raise self.failureException # Only check the first element... break diff --git a/poky/bitbake/lib/ply/lex.py b/poky/bitbake/lib/ply/lex.py index 267ec100f..182f2e837 100644 --- a/poky/bitbake/lib/ply/lex.py +++ b/poky/bitbake/lib/ply/lex.py @@ -705,11 +705,7 @@ class LexerReflect(object): # Sort the functions by line number for f in self.funcsym.values(): - if sys.version_info[0] < 3: - f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno)) - else: - # Python 3.0 - f.sort(key=lambda x: func_code(x[1]).co_firstlineno) + f.sort(key=lambda x: func_code(x[1]).co_firstlineno) # Sort the strings by regular expression length for s in self.strsym.values(): diff --git a/poky/bitbake/lib/ply/yacc.py b/poky/bitbake/lib/ply/yacc.py index 561784f2f..46e7dc96f 100644 --- a/poky/bitbake/lib/ply/yacc.py +++ b/poky/bitbake/lib/ply/yacc.py @@ -1205,7 +1205,7 @@ class Production(object): # Precompute the list of productions immediately following. Hack. Remove later try: - p.lr_after = Prodnames[p.prod[n+1]] + p.lr_after = self.Prodnames[p.prod[n+1]] except (IndexError,KeyError): p.lr_after = [] try: diff --git a/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py b/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py index 455c408e9..5c4ea7179 100644 --- a/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py +++ b/poky/bitbake/lib/toaster/tests/functional/functional_helpers.py @@ -75,7 +75,7 @@ class SeleniumFunctionalTestCase(SeleniumTestCaseBase): try: table_element = self.get_table_element(table_id) element = table_element.find_element_by_link_text(link_text) - except NoSuchElementException as e: + except self.NoSuchElementException: print('no element found') raise return element @@ -86,7 +86,7 @@ class SeleniumFunctionalTestCase(SeleniumTestCaseBase): element_xpath = "//*[@id='" + table_id + "']" try: element = self.driver.find_element_by_xpath(element_xpath) - except NoSuchElementException as e: + except self.NoSuchElementException: raise return element row = coordinate[0] @@ -96,7 +96,7 @@ class SeleniumFunctionalTestCase(SeleniumTestCaseBase): element_xpath = "//*[@id='" + table_id + "']/tbody/tr[" + str(row) + "]" try: element = self.driver.find_element_by_xpath(element_xpath) - except NoSuchElementException as e: + except self.NoSuchElementException: return False return element #now we are looking for an element with specified X and Y @@ -105,6 +105,6 @@ class SeleniumFunctionalTestCase(SeleniumTestCaseBase): element_xpath = "//*[@id='" + table_id + "']/tbody/tr[" + str(row) + "]/td[" + str(column) + "]" try: element = self.driver.find_element_by_xpath(element_xpath) - except NoSuchElementException as e: + except self.NoSuchElementException: return False return element diff --git a/poky/documentation/.gitignore b/poky/documentation/.gitignore new file mode 100644 index 000000000..69fa449dd --- /dev/null +++ b/poky/documentation/.gitignore @@ -0,0 +1 @@ +_build/ diff --git a/poky/documentation/Makefile.sphinx b/poky/documentation/Makefile.sphinx new file mode 100644 index 000000000..c663c2954 --- /dev/null +++ 
b/poky/documentation/Makefile.sphinx @@ -0,0 +1,31 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build +DESTDIR = final + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile.sphinx clean publish + +publish: Makefile.sphinx html singlehtml + rm -rf $(BUILDDIR)/$(DESTDIR)/ + mkdir -p $(BUILDDIR)/$(DESTDIR)/ + cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/ + cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html + sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html + +clean: + @rm -rf $(BUILDDIR) + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile.sphinx + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/poky/documentation/_templates/breadcrumbs.html b/poky/documentation/_templates/breadcrumbs.html new file mode 100644 index 000000000..eb6244b74 --- /dev/null +++ b/poky/documentation/_templates/breadcrumbs.html @@ -0,0 +1,14 @@ +{% extends "!breadcrumbs.html" %} + +{% block breadcrumbs %} +
+ {{ doctype or 'single' }} + {{ release }} +
+
»
+ {% for doc in parents %} +
{{ doc.title }} »
+ {% endfor %} +
{{ title }}
+{% endblock %} + diff --git a/poky/documentation/_templates/layout.html b/poky/documentation/_templates/layout.html new file mode 100644 index 000000000..308d5c7a2 --- /dev/null +++ b/poky/documentation/_templates/layout.html @@ -0,0 +1,7 @@ +{% extends "!layout.html" %} + +{% block extrabody %} +
    +
    +{% endblock %} + diff --git a/poky/documentation/adt-manual/adt-command.rst b/poky/documentation/adt-manual/adt-command.rst new file mode 100644 index 000000000..de854772b --- /dev/null +++ b/poky/documentation/adt-manual/adt-command.rst @@ -0,0 +1,180 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************** +Using the Command Line +********************** + +Recall that earlier the manual discussed how to use an existing +toolchain tarball that had been installed into the default installation +directory, ``/opt/poky/DISTRO``, which is outside of the :term:`Build Directory` +(see the section +"`Using a Cross-Toolchain +Tarball) <#using-an-existing-toolchain-tarball>`__". And, that sourcing +your architecture-specific environment setup script initializes a +suitable cross-toolchain development environment. + +During this setup, locations for the compiler, QEMU scripts, QEMU +binary, a special version of ``pkgconfig`` and other useful utilities +are added to the ``PATH`` variable. Also, variables to assist +``pkgconfig`` and ``autotools`` are also defined so that, for example, +``configure.sh`` can find pre-generated test results for tests that need +target hardware on which to run. You can see the "`Setting Up the +Cross-Development +Environment <#setting-up-the-cross-development-environment>`__" section +for the list of cross-toolchain environment variables established by the +script. + +Collectively, these conditions allow you to easily use the toolchain +outside of the OpenEmbedded build environment on both Autotools-based +projects and Makefile-based projects. This chapter provides information +for both these types of projects. + +Autotools-Based Projects +======================== + +Once you have a suitable cross-toolchain installed, it is very easy to +develop a project outside of the OpenEmbedded build system. This section +presents a simple "Helloworld" example that shows how to set up, +compile, and run the project. + +Creating and Running a Project Based on GNU Autotools +----------------------------------------------------- + +Follow these steps to create a simple Autotools-based project: + +1. *Create your directory:* Create a clean directory for your project + and then make that directory your working location: $ mkdir + $HOME/helloworld $ cd $HOME/helloworld + +2. *Populate the directory:* Create ``hello.c``, ``Makefile.am``, and + ``configure.in`` files as follows: + + - For ``hello.c``, include these lines: #include main() { + printf("Hello World!\n"); } + + - For ``Makefile.am``, include these lines: bin_PROGRAMS = hello + hello_SOURCES = hello.c + + - For ``configure.in``, include these lines: AC_INIT(hello.c) + AM_INIT_AUTOMAKE(hello,0.1) AC_PROG_CC AC_PROG_INSTALL + AC_OUTPUT(Makefile) + +3. *Source the cross-toolchain environment setup file:* Installation of + the cross-toolchain creates a cross-toolchain environment setup + script in the directory that the ADT was installed. Before you can + use the tools to develop your project, you must source this setup + script. The script begins with the string "environment-setup" and + contains the machine architecture, which is followed by the string + "poky-linux". Here is an example that sources a script from the + default ADT installation directory that uses the 32-bit Intel x86 + Architecture and the DISTRO_NAME Yocto Project release: $ source + /opt/poky/DISTRO/environment-setup-i586-poky-linux + +4. 
*Generate the local aclocal.m4 files and create the configure + script:* The following GNU Autotools generate the local + ``aclocal.m4`` files and create the configure script: $ aclocal $ + autoconf + +5. *Generate files needed by GNU coding standards:* GNU coding + standards require certain files in order for the project to be + compliant. This command creates those files: $ touch NEWS README + AUTHORS ChangeLog + +6. *Generate the configure file:* This command generates the + ``configure``: $ automake -a + +7. *Cross-compile the project:* This command compiles the project using + the cross-compiler. The + :term:`CONFIGURE_FLAGS` + environment variable provides the minimal arguments for GNU + configure: $ ./configure ${CONFIGURE_FLAGS} + +8. *Make and install the project:* These two commands generate and + install the project into the destination directory: $ make $ make + install DESTDIR=./tmp + +9. *Verify the installation:* This command is a simple way to verify + the installation of your project. Running the command prints the + architecture on which the binary file can run. This architecture + should be the same architecture that the installed cross-toolchain + supports. $ file ./tmp/usr/local/bin/hello + +10. *Execute your project:* To execute the project in the shell, simply + enter the name. You could also copy the binary to the actual target + hardware and run the project there as well: $ ./hello As expected, + the project displays the "Hello World!" message. + +Passing Host Options +-------------------- + +For an Autotools-based project, you can use the cross-toolchain by just +passing the appropriate host option to ``configure.sh``. The host option +you use is derived from the name of the environment setup script found +in the directory in which you installed the cross-toolchain. For +example, the host option for an ARM-based target that uses the GNU EABI +is ``armv5te-poky-linux-gnueabi``. You will notice that the name of the +script is ``environment-setup-armv5te-poky-linux-gnueabi``. Thus, the +following command works to update your project and rebuild it using the +appropriate cross-toolchain tools: $ ./configure +--host=armv5te-poky-linux-gnueabi \\ --with-libtool-sysroot=sysroot_dir + +.. note:: + + If the + configure + script results in problems recognizing the + --with-libtool-sysroot= + sysroot-dir + option, regenerate the script to enable the support by doing the + following and then run the script again: + :: + + $ libtoolize --automake + $ aclocal -I ${OECORE_NATIVE_SYSROOT}/usr/share/aclocal \ + [-I dir_containing_your_project-specific_m4_macros] + $ autoconf + $ autoheader + $ automake -a + + +Makefile-Based Projects +======================= + +For Makefile-based projects, the cross-toolchain environment variables +established by running the cross-toolchain environment setup script are +subject to general ``make`` rules. + +To illustrate this, consider the following four cross-toolchain +environment variables: +:term:`CC`\ =i586-poky-linux-gcc -m32 +-march=i586 --sysroot=/opt/poky/1.8/sysroots/i586-poky-linux +:term:`LD`\ =i586-poky-linux-ld +--sysroot=/opt/poky/1.8/sysroots/i586-poky-linux +:term:`CFLAGS`\ =-O2 -pipe -g +-feliminate-unused-debug-types +:term:`CXXFLAGS`\ =-O2 -pipe -g +-feliminate-unused-debug-types Now, consider the following three cases: + +- *Case 1 - No Variables Set in the ``Makefile``:* Because these + variables are not specifically set in the ``Makefile``, the variables + retain their values based on the environment. 
+ +- *Case 2 - Variables Set in the ``Makefile``:* Specifically setting + variables in the ``Makefile`` during the build results in the + environment settings of the variables being overwritten. + +- *Case 3 - Variables Set when the ``Makefile`` is Executed from the + Command Line:* Executing the ``Makefile`` from the command line + results in the variables being overwritten with command-line content + regardless of what is being set in the ``Makefile``. In this case, + environment variables are not considered unless you use the "-e" flag + during the build: $ make -e file If you use this flag, then the + environment values of the variables override any variables + specifically set in the ``Makefile``. + +.. note:: + + For the list of variables set up by the cross-toolchain environment + setup script, see the " + Setting Up the Cross-Development Environment + " section. diff --git a/poky/documentation/adt-manual/adt-intro.rst b/poky/documentation/adt-manual/adt-intro.rst new file mode 100644 index 000000000..5372f4f54 --- /dev/null +++ b/poky/documentation/adt-manual/adt-intro.rst @@ -0,0 +1,138 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************************************** +The Application Development Toolkit (ADT) +***************************************** + +Part of the Yocto Project development solution is an Application +Development Toolkit (ADT). The ADT provides you with a custom-built, +cross-development platform suited for developing a user-targeted product +application. + +Fundamentally, the ADT consists of the following: + +- An architecture-specific cross-toolchain and matching sysroot both + built by the :term:`OpenEmbedded Build System`. + The toolchain and + sysroot are based on a `Metadata <&YOCTO_DOCS_DEV_URL;#metadata>`__ + configuration and extensions, which allows you to cross-develop on + the host machine for the target hardware. + +- The Eclipse IDE Yocto Plug-in. + +- The Quick EMUlator (QEMU), which lets you simulate target hardware. + +- Various user-space tools that greatly enhance your application + development experience. + +The Cross-Development Toolchain +=============================== + +The `Cross-Development +Toolchain <&YOCTO_DOCS_DEV_URL;#cross-development-toolchain>`__ consists +of a cross-compiler, cross-linker, and cross-debugger that are used to +develop user-space applications for targeted hardware. This toolchain is +created either by running the ADT Installer script, a toolchain +installer script, or through a :term:`Build Directory` +that is based on +your Metadata configuration or extension for your targeted device. The +cross-toolchain works with a matching target sysroot. + +Sysroot +======= + +The matching target sysroot contains needed headers and libraries for +generating binaries that run on the target architecture. The sysroot is +based on the target root filesystem image that is built by the +OpenEmbedded build system and uses the same Metadata configuration used +to build the cross-toolchain. + +.. _eclipse-overview: + +Eclipse Yocto Plug-in +===================== + +The Eclipse IDE is a popular development environment and it fully +supports development using the Yocto Project. When you install and +configure the Eclipse Yocto Project Plug-in into the Eclipse IDE, you +maximize your Yocto Project experience. Installing and configuring the +Plug-in results in an environment that has extensions specifically +designed to let you more easily develop software. 
These extensions allow +for cross-compilation, deployment, and execution of your output into a +QEMU emulation session. You can also perform cross-debugging and +profiling. The environment also supports a suite of tools that allows +you to perform remote profiling, tracing, collection of power data, +collection of latency data, and collection of performance data. + +For information about the application development workflow that uses the +Eclipse IDE and for a detailed example of how to install and configure +the Eclipse Yocto Project Plug-in, see the "`Working Within +Eclipse <&YOCTO_DOCS_DEV_URL;#adt-eclipse>`__" section of the Yocto +Project Development Manual. + +The QEMU Emulator +================= + +The QEMU emulator allows you to simulate your hardware while running +your application or image. QEMU is made available a number of ways: + +- If you use the ADT Installer script to install ADT, you can specify + whether or not to install QEMU. + +- If you have cloned the ``poky`` Git repository to create a + :term:`Source Directory` and you have + sourced the environment setup script, QEMU is installed and + automatically available. + +- If you have downloaded a Yocto Project release and unpacked it to + create a :term:`Source Directory` + and you have sourced the environment setup script, QEMU is installed + and automatically available. + +- If you have installed the cross-toolchain tarball and you have + sourced the toolchain's setup environment script, QEMU is also + installed and automatically available. + +User-Space Tools +================ + +User-space tools are included as part of the Yocto Project. You will +find these tools helpful during development. The tools include +LatencyTOP, PowerTOP, OProfile, Perf, SystemTap, and Lttng-ust. These +tools are common development tools for the Linux platform. + +- *LatencyTOP:* LatencyTOP focuses on latency that causes skips in + audio, stutters in your desktop experience, or situations that + overload your server even when you have plenty of CPU power left. + +- *PowerTOP:* Helps you determine what software is using the most + power. You can find out more about PowerTOP at + https://01.org/powertop/. + +- *OProfile:* A system-wide profiler for Linux systems that is capable + of profiling all running code at low overhead. You can find out more + about OProfile at http://oprofile.sourceforge.net/about/. For + examples on how to setup and use this tool, see the + "`OProfile <&YOCTO_DOCS_PROF_URL;#profile-manual-oprofile>`__" + section in the Yocto Project Profiling and Tracing Manual. + +- *Perf:* Performance counters for Linux used to keep track of certain + types of hardware and software events. For more information on these + types of counters see https://perf.wiki.kernel.org/. For + examples on how to setup and use this tool, see the + "`perf <&YOCTO_DOCS_PROF_URL;#profile-manual-perf>`__" section in the + Yocto Project Profiling and Tracing Manual. + +- *SystemTap:* A free software infrastructure that simplifies + information gathering about a running Linux system. This information + helps you diagnose performance or functional problems. SystemTap is + not available as a user-space tool through the Eclipse IDE Yocto + Plug-in. See http://sourceware.org/systemtap for more + information on SystemTap. For examples on how to setup and use this + tool, see the + "`SystemTap <&YOCTO_DOCS_PROF_URL;#profile-manual-systemtap>`__" + section in the Yocto Project Profiling and Tracing Manual. 
+ +- *Lttng-ust:* A User-space Tracer designed to provide detailed + information on user-space activity. See http://lttng.org/ust + for more information on Lttng-ust. diff --git a/poky/documentation/adt-manual/adt-manual-intro.rst b/poky/documentation/adt-manual/adt-manual-intro.rst new file mode 100644 index 000000000..4e98da16d --- /dev/null +++ b/poky/documentation/adt-manual/adt-manual-intro.rst @@ -0,0 +1,24 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Introduction +************ + +Welcome to the Yocto Project Application Developer's Guide. This manual +provides information that lets you begin developing applications using +the Yocto Project. + +The Yocto Project provides an application development environment based +on an Application Development Toolkit (ADT) and the availability of +stand-alone cross-development toolchains and other tools. This manual +describes the ADT and how you can configure and install it, how to +access and use the cross-development toolchains, how to customize the +development packages installation, how to use command-line development +for both Autotools-based and Makefile-based projects, and an +introduction to the Eclipse IDE Yocto Plug-in. + +.. note:: + + The ADT is distribution-neutral and does not require the Yocto + Project reference distribution, which is called Poky. This manual, + however, uses examples that use the Poky distribution. diff --git a/poky/documentation/adt-manual/adt-manual.rst b/poky/documentation/adt-manual/adt-manual.rst new file mode 100644 index 000000000..695230c5c --- /dev/null +++ b/poky/documentation/adt-manual/adt-manual.rst @@ -0,0 +1,17 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +=========================================== +Yocto Project Application Developer's Guide +=========================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + adt-manual-intro + adt-intro + adt-prepare + adt-package + adt-command diff --git a/poky/documentation/adt-manual/adt-package.rst b/poky/documentation/adt-manual/adt-package.rst new file mode 100644 index 000000000..787d406e6 --- /dev/null +++ b/poky/documentation/adt-manual/adt-package.rst @@ -0,0 +1,70 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************************************************ +Optionally Customizing the Development Packages Installation +************************************************************ + +Because the Yocto Project is suited for embedded Linux development, it +is likely that you will need to customize your development packages +installation. For example, if you are developing a minimal image, then +you might not need certain packages (e.g. graphics support packages). +Thus, you would like to be able to remove those packages from your +target sysroot. + +Package Management Systems +========================== + +The OpenEmbedded build system supports the generation of sysroot files +using three different Package Management Systems (PMS): + +- *OPKG:* A less well known PMS whose use originated in the + OpenEmbedded and OpenWrt embedded Linux projects. This PMS works with + files packaged in an ``.ipk`` format. See + http://en.wikipedia.org/wiki/Opkg for more information about + OPKG. + +- *RPM:* A more widely known PMS intended for GNU/Linux distributions. + This PMS works with files packaged in an ``.rpm`` format. The build + system currently installs through this PMS by default. See + http://en.wikipedia.org/wiki/RPM_Package_Manager for more + information about RPM. 
+ +- *Debian:* The PMS for Debian-based systems is built on many PMS + tools. The lower-level PMS tool ``dpkg`` forms the base of the Debian + PMS. For information on dpkg see + http://en.wikipedia.org/wiki/Dpkg. + +Configuring the PMS +=================== + +Whichever PMS you are using, you need to be sure that the +:term:`PACKAGE_CLASSES` +variable in the ``conf/local.conf`` file is set to reflect that system. +The first value you choose for the variable specifies the package file +format for the root filesystem at sysroot. Additional values specify +additional formats for convenience or testing. See the +``conf/local.conf`` configuration file for details. + +.. note:: + + For build performance information related to the PMS, see the " + package.bbclass + " section in the Yocto Project Reference Manual. + +As an example, consider a scenario where you are using OPKG and you want +to add the ``libglade`` package to the target sysroot. + +First, you should generate the IPK file for the ``libglade`` package and +add it into a working ``opkg`` repository. Use these commands: $ bitbake +libglade $ bitbake package-index + +Next, source the cross-toolchain environment setup script found in the +:term:`Source Directory`. Follow +that by setting up the installation destination to point to your sysroot +as sysroot_dir. Finally, have an OPKG configuration file conf_file that +corresponds to the ``opkg`` repository you have just created. The +following command forms should now work: $ opkg-cl –f conf_file -o +sysroot_dir update $ opkg-cl –f cconf_file -o sysroot_dir \\ +--force-overwrite install libglade $ opkg-cl –f cconf_file -o +sysroot_dir \\ --force-overwrite install libglade-dbg $ opkg-cl –f +conf_file> -osysroot_dir> \\ --force-overwrite install libglade-dev diff --git a/poky/documentation/adt-manual/adt-prepare.rst b/poky/documentation/adt-manual/adt-prepare.rst new file mode 100644 index 000000000..9b6bd0514 --- /dev/null +++ b/poky/documentation/adt-manual/adt-prepare.rst @@ -0,0 +1,752 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************************* +Preparing for Application Development +************************************* + +In order to develop applications, you need set up your host development +system. Several ways exist that allow you to install cross-development +tools, QEMU, the Eclipse Yocto Plug-in, and other tools. This chapter +describes how to prepare for application development. + +.. _installing-the-adt: + +Installing the ADT and Toolchains +================================= + +The following list describes installation methods that set up varying +degrees of tool availability on your system. Regardless of the +installation method you choose, you must ``source`` the cross-toolchain +environment setup script, which establishes several key environment +variables, before you use a toolchain. See the "`Setting Up the +Cross-Development +Environment <#setting-up-the-cross-development-environment>`__" section +for more information. + +.. note:: + + Avoid mixing installation methods when installing toolchains for + different architectures. For example, avoid using the ADT Installer + to install some toolchains and then hand-installing cross-development + toolchains by running the toolchain installer for different + architectures. Mixing installation methods can result in situations + where the ADT Installer becomes unreliable and might not install the + toolchain. 
+ + If you must mix installation methods, you might avoid problems by + deleting ``/var/lib/opkg``, thus purging the ``opkg`` package + metadata. + +- *Use the ADT installer script:* This method is the recommended way to + install the ADT because it automates much of the process for you. For + example, you can configure the installation to install the QEMU + emulator and the user-space NFS, specify which root filesystem + profiles to download, and define the target sysroot location. + +- *Use an existing toolchain:* Using this method, you select and + download an architecture-specific toolchain installer and then run + the script to hand-install the toolchain. If you use this method, you + just get the cross-toolchain and QEMU - you do not get any of the + other mentioned benefits had you run the ADT Installer script. + +- *Use the toolchain from within the Build Directory:* If you already + have a :term:`Build Directory`, + you can build the cross-toolchain within the directory. However, like + the previous method mentioned, you only get the cross-toolchain and + QEMU - you do not get any of the other benefits without taking + separate steps. + +Using the ADT Installer +----------------------- + +To run the ADT Installer, you need to get the ADT Installer tarball, be +sure you have the necessary host development packages that support the +ADT Installer, and then run the ADT Installer Script. + +For a list of the host packages needed to support ADT installation and +use, see the "ADT Installer Extras" lists in the "`Required Packages for +the Host Development +System <&YOCTO_DOCS_REF_URL;#required-packages-for-the-host-development-system>`__" +section of the Yocto Project Reference Manual. + +Getting the ADT Installer Tarball +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ADT Installer is contained in the ADT Installer tarball. You can get +the tarball using either of these methods: + +- *Download the Tarball:* You can download the tarball from + ` <&YOCTO_ADTINSTALLER_DL_URL;>`__ into any directory. + +- *Build the Tarball:* You can use + :term:`BitBake` to generate the + tarball inside an existing :term:`Build Directory`. + + If you use BitBake to generate the ADT Installer tarball, you must + ``source`` the environment setup script + (````` <&YOCTO_DOCS_REF_URL;#structure-core-script>`__ or + ```oe-init-build-env-memres`` <&YOCTO_DOCS_REF_URL;#structure-memres-core-script>`__) + located in the Source Directory before running the ``bitbake`` + command that creates the tarball. + + The following example commands establish the + :term:`Source Directory`, check out the + current release branch, set up the build environment while also + creating the default Build Directory, and run the ``bitbake`` command + that results in the tarball + ``poky/build/tmp/deploy/sdk/adt_installer.tar.bz2``: + + .. note:: + + Before using BitBake to build the ADT tarball, be sure to make + sure your + local.conf + file is properly configured. See the " + User Configuration + " section in the Yocto Project Reference Manual for general + configuration information. + + $ cd ~ $ git clone git://git.yoctoproject.org/poky $ cd poky $ git + checkout -b DISTRO_NAME origin/DISTRO_NAME $ source OE_INIT_FILE $ + bitbake adt-installer + +Configuring and Running the ADT Installer Script +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Before running the ADT Installer script, you need to unpack the tarball. +You can unpack the tarball in any directory you wish. 
For example, this +command copies the ADT Installer tarball from where it was built into +the home directory and then unpacks the tarball into a top-level +directory named ``adt-installer``: $ cd ~ $ cp +poky/build/tmp/deploy/sdk/adt_installer.tar.bz2 $HOME $ tar -xjf +adt_installer.tar.bz2 Unpacking it creates the directory +``adt-installer``, which contains the ADT Installer script +(``adt_installer``) and its configuration file (``adt_installer.conf``). + +Before you run the script, however, you should examine the ADT Installer +configuration file and be sure you are going to get what you want. Your +configurations determine which kernel and filesystem image are +downloaded. + +The following list describes the configurations you can define for the +ADT Installer. For configuration values and restrictions, see the +comments in the ``adt-installer.conf`` file: + +- ``YOCTOADT_REPO``: This area includes the IPKG-based packages and the + root filesystem upon which the installation is based. If you want to + set up your own IPKG repository pointed to by ``YOCTOADT_REPO``, you + need to be sure that the directory structure follows the same layout + as the reference directory set up at + http://adtrepo.yoctoproject.org. Also, your repository needs + to be accessible through HTTP. + +- ``YOCTOADT_TARGETS``: The machine target architectures for which you + want to set up cross-development environments. + +- ``YOCTOADT_QEMU``: Indicates whether or not to install the emulator + QEMU. + +- ``YOCTOADT_NFS_UTIL``: Indicates whether or not to install user-mode + NFS. If you plan to use the Eclipse IDE Yocto plug-in against QEMU, + you should install NFS. + + .. note:: + + To boot QEMU images using our userspace NFS server, you need to be + running + portmap + or + rpcbind + . If you are running + rpcbind + , you will also need to add the + -i + option when + rpcbind + starts up. Please make sure you understand the security + implications of doing this. You might also have to modify your + firewall settings to allow NFS booting to work. + +- ``YOCTOADT_ROOTFS_``\ arch: The root filesystem images you want to + download from the ``YOCTOADT_IPKG_REPO`` repository. + +- ``YOCTOADT_TARGET_SYSROOT_IMAGE_``\ arch: The particular root + filesystem used to extract and create the target sysroot. The value + of this variable must have been specified with + ``YOCTOADT_ROOTFS_``\ arch. For example, if you downloaded both + ``minimal`` and ``sato-sdk`` images by setting + ``YOCTOADT_ROOTFS_``\ arch to "minimal sato-sdk", then + ``YOCTOADT_ROOTFS_``\ arch must be set to either "minimal" or + "sato-sdk". + +- ``YOCTOADT_TARGET_SYSROOT_LOC_``\ arch: The location on the + development host where the target sysroot is created. + +After you have configured the ``adt_installer.conf`` file, run the +installer using the following command: $ cd adt-installer $ +./adt_installer Once the installer begins to run, you are asked to enter +the location for cross-toolchain installation. The default location is +``/opt/poky/``\ release. After either accepting the default location or +selecting your own location, you are prompted to run the installation +script interactively or in silent mode. If you want to closely monitor +the installation, choose "I" for interactive mode rather than "S" for +silent mode. Follow the prompts from the script to complete the +installation. + +Once the installation completes, the ADT, which includes the +cross-toolchain, is installed in the selected installation directory. 
+You will notice environment setup files for the cross-toolchain in the +installation directory, and image tarballs in the ``adt-installer`` +directory according to your installer configurations, and the target +sysroot located according to the ``YOCTOADT_TARGET_SYSROOT_LOC_``\ arch +variable also in your configuration file. + +.. _using-an-existing-toolchain-tarball: + +Using a Cross-Toolchain Tarball +------------------------------- + +If you want to simply install a cross-toolchain by hand, you can do so +by running the toolchain installer. The installer includes the pre-built +cross-toolchain, the ``runqemu`` script, and support files. If you use +this method to install the cross-toolchain, you might still need to +install the target sysroot by installing and extracting it separately. +For information on how to install the sysroot, see the "`Extracting the +Root Filesystem <#extracting-the-root-filesystem>`__" section. + +Follow these steps: + +1. *Get your toolchain installer using one of the following methods:* + + - Go to ` <&YOCTO_TOOLCHAIN_DL_URL;>`__ and find the folder that + matches your host development system (i.e. ``i686`` for 32-bit + machines or ``x86_64`` for 64-bit machines). + + Go into that folder and download the toolchain installer whose + name includes the appropriate target architecture. The toolchains + provided by the Yocto Project are based off of the + ``core-image-sato`` image and contain libraries appropriate for + developing against that image. For example, if your host + development system is a 64-bit x86 system and you are going to use + your cross-toolchain for a 32-bit x86 target, go into the + ``x86_64`` folder and download the following installer: + poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh + + - Build your own toolchain installer. For cases where you cannot use + an installer from the download area, you can build your own as + described in the "`Optionally Building a Toolchain + Installer <#optionally-building-a-toolchain-installer>`__" + section. + +2. *Once you have the installer, run it to install the toolchain:* + + .. note:: + + You must change the permissions on the toolchain installer script + so that it is executable. + + The following command shows how to run the installer given a + toolchain tarball for a 64-bit x86 development host system and a + 32-bit x86 target architecture. The example assumes the toolchain + installer is located in ``~/Downloads/``. $ + ~/Downloads/poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh + The first thing the installer prompts you for is the directory into + which you want to install the toolchain. The default directory used + is ``/opt/poky/DISTRO``. If you do not have write permissions for the + directory into which you are installing the toolchain, the toolchain + installer notifies you and exits. Be sure you have write permissions + in the directory and run the installer again. + + When the script finishes, the cross-toolchain is installed. You will + notice environment setup files for the cross-toolchain in the + installation directory. + +.. _using-the-toolchain-from-within-the-build-tree: + +Using BitBake and the Build Directory +------------------------------------- + +A final way of making the cross-toolchain available is to use BitBake to +generate the toolchain within an existing :term:`Build Directory`. +This method does +not install the toolchain into the default ``/opt`` directory. 
As with +the previous method, if you need to install the target sysroot, you must +do that separately as well. + +Follow these steps to generate the toolchain into the Build Directory: + +1. *Set up the Build Environment:* Source the OpenEmbedded build + environment setup script (i.e. + ````` <&YOCTO_DOCS_REF_URL;#structure-core-script>`__ or + ```oe-init-build-env-memres`` <&YOCTO_DOCS_REF_URL;#structure-memres-core-script>`__) + located in the :term:`Source Directory`. + +2. *Check your Local Configuration File:* At this point, you should be + sure that the :term:`MACHINE` + variable in the ``local.conf`` file found in the ``conf`` directory + of the Build Directory is set for the target architecture. Comments + within the ``local.conf`` file list the values you can use for the + ``MACHINE`` variable. If you do not change the ``MACHINE`` variable, + the OpenEmbedded build system uses ``qemux86`` as the default target + machine when building the cross-toolchain. + + .. note:: + + You can populate the Build Directory with the cross-toolchains for + more than a single architecture. You just need to edit the + MACHINE + variable in the + local.conf + file and re-run the + bitbake + command. + +3. *Make Sure Your Layers are Enabled:* Examine the + ``conf/bblayers.conf`` file and make sure that you have enabled all + the compatible layers for your target machine. The OpenEmbedded build + system needs to be aware of each layer you want included when + building images and cross-toolchains. For information on how to + enable a layer, see the "`Enabling Your + Layer <&YOCTO_DOCS_DEV_URL;#enabling-your-layer>`__" section in the + Yocto Project Development Manual. + +4. *Generate the Cross-Toolchain:* Run ``bitbake meta-ide-support`` to + complete the cross-toolchain generation. Once the ``bitbake`` command + finishes, the cross-toolchain is generated and populated within the + Build Directory. You will notice environment setup files for the + cross-toolchain that contain the string "``environment-setup``" in + the Build Directory's ``tmp`` folder. + + Be aware that when you use this method to install the toolchain, you + still need to separately extract and install the sysroot filesystem. + For information on how to do this, see the "`Extracting the Root + Filesystem <#extracting-the-root-filesystem>`__" section. + +Setting Up the Cross-Development Environment +============================================ + +Before you can develop using the cross-toolchain, you need to set up the +cross-development environment by sourcing the toolchain's environment +setup script. If you used the ADT Installer or hand-installed +cross-toolchain, then you can find this script in the directory you +chose for installation. For this release, the default installation +directory is ````. If you installed the toolchain in the +:term:`Build Directory`, you can find the +environment setup script for the toolchain in the Build Directory's +``tmp`` directory. + +Be sure to run the environment setup script that matches the +architecture for which you are developing. Environment setup scripts +begin with the string "``environment-setup``" and include as part of +their name the architecture. 
For example, the toolchain environment +setup script for a 64-bit IA-based architecture installed in the default +installation directory would be the following: +YOCTO_ADTPATH_DIR/environment-setup-x86_64-poky-linux When you run the +setup script, many environment variables are defined: +:term:`SDKTARGETSYSROOT` - +The path to the sysroot used for cross-compilation +:term:`PKG_CONFIG_PATH` - The +path to the target pkg-config files +:term:`CONFIG_SITE` - A GNU +autoconf site file preconfigured for the target +:term:`CC` - The minimal command and +arguments to run the C compiler +:term:`CXX` - The minimal command and +arguments to run the C++ compiler +:term:`CPP` - The minimal command and +arguments to run the C preprocessor +:term:`AS` - The minimal command and +arguments to run the assembler :term:`LD` +- The minimal command and arguments to run the linker +:term:`GDB` - The minimal command and +arguments to run the GNU Debugger +:term:`STRIP` - The minimal command and +arguments to run 'strip', which strips symbols +:term:`RANLIB` - The minimal command +and arguments to run 'ranlib' +:term:`OBJCOPY` - The minimal command +and arguments to run 'objcopy' +:term:`OBJDUMP` - The minimal command +and arguments to run 'objdump' :term:`AR` +- The minimal command and arguments to run 'ar' +:term:`NM` - The minimal command and +arguments to run 'nm' +:term:`TARGET_PREFIX` - The +toolchain binary prefix for the target tools +:term:`CROSS_COMPILE` - The +toolchain binary prefix for the target tools +:term:`CONFIGURE_FLAGS` - The +minimal arguments for GNU configure +:term:`CFLAGS` - Suggested C flags +:term:`CXXFLAGS` - Suggested C++ +flags :term:`LDFLAGS` - Suggested +linker flags when you use CC to link +:term:`CPPFLAGS` - Suggested +preprocessor flags + +Securing Kernel and Filesystem Images +===================================== + +You will need to have a kernel and filesystem image to boot using your +hardware or the QEMU emulator. Furthermore, if you plan on booting your +image using NFS or you want to use the root filesystem as the target +sysroot, you need to extract the root filesystem. + +Getting the Images +------------------ + +To get the kernel and filesystem images, you either have to build them +or download pre-built versions. For an example of how to build these +images, see the "`Buiding +Images <&YOCTO_DOCS_QS_URL;#qs-buiding-images>`__" section of the Yocto +Project Quick Start. For an example of downloading pre-build versions, +see the "`Example Using Pre-Built Binaries and +QEMU <#using-pre-built>`__" section. + +The Yocto Project ships basic kernel and filesystem images for several +architectures (``x86``, ``x86-64``, ``mips``, ``powerpc``, and ``arm``) +that you can use unaltered in the QEMU emulator. These kernel images +reside in the release area - ` <&YOCTO_MACHINES_DL_URL;>`__ and are +ideal for experimentation using Yocto Project. For information on the +image types you can build using the OpenEmbedded build system, see the +":ref:`ref-manual/ref-images:Images`" chapter in the Yocto +Project Reference Manual. + +If you are planning on developing against your image and you are not +building or using one of the Yocto Project development images (e.g. +``core-image-*-dev``), you must be sure to include the development +packages as part of your image recipe. + +If you plan on remotely deploying and debugging your application from +within the Eclipse IDE, you must have an image that contains the Yocto +Target Communication Framework (TCF) agent (``tcf-agent``). 
You can do +this by including the ``eclipse-debug`` image feature. + +.. note:: + + See the " + Image Features + " section in the Yocto Project Reference Manual for information on + image features. + +To include the ``eclipse-debug`` image feature, modify your +``local.conf`` file in the :term:`Build Directory` +so that the +:term:`EXTRA_IMAGE_FEATURES` +variable includes the "eclipse-debug" feature. After modifying the +configuration file, you can rebuild the image. Once the image is +rebuilt, the ``tcf-agent`` will be included in the image and is launched +automatically after the boot. + +Extracting the Root Filesystem +------------------------------ + +If you install your toolchain by hand or build it using BitBake and you +need a root filesystem, you need to extract it separately. If you use +the ADT Installer to install the ADT, the root filesystem is +automatically extracted and installed. + +Here are some cases where you need to extract the root filesystem: + +- You want to boot the image using NFS. + +- You want to use the root filesystem as the target sysroot. For + example, the Eclipse IDE environment with the Eclipse Yocto Plug-in + installed allows you to use QEMU to boot under NFS. + +- You want to develop your target application using the root filesystem + as the target sysroot. + +To extract the root filesystem, first ``source`` the cross-development +environment setup script to establish necessary environment variables. +If you built the toolchain in the Build Directory, you will find the +toolchain environment script in the ``tmp`` directory. If you installed +the toolchain by hand, the environment setup script is located in +``/opt/poky/DISTRO``. + +After sourcing the environment script, use the ``runqemu-extract-sdk`` +command and provide the filesystem image. + +Following is an example. The second command sets up the environment. In +this case, the setup script is located in the ``/opt/poky/DISTRO`` +directory. The third command extracts the root filesystem from a +previously built filesystem that is located in the ``~/Downloads`` +directory. Furthermore, this command extracts the root filesystem into +the ``qemux86-sato`` directory: $ cd ~ $ source +/opt/poky/DISTRO/environment-setup-i586-poky-linux $ runqemu-extract-sdk +\\ ~/Downloads/core-image-sato-sdk-qemux86-2011091411831.rootfs.tar.bz2 +\\ $HOME/qemux86-sato You could now point to the target sysroot at +``qemux86-sato``. + +Optionally Building a Toolchain Installer +========================================= + +As an alternative to locating and downloading a toolchain installer, you +can build the toolchain installer if you have a :term:`Build Directory`. + +.. note:: + + Although not the preferred method, it is also possible to use + bitbake meta-toolchain + to build the toolchain installer. If you do use this method, you must + separately install and extract the target sysroot. For information on + how to install the sysroot, see the " + Extracting the Root Filesystem + " section. + +To build the toolchain installer and populate the SDK image, use the +following command: $ bitbake image -c populate_sdk The command results +in a toolchain installer that contains the sysroot that matches your +target root filesystem. + +Another powerful feature is that the toolchain is completely +self-contained. The binaries are linked against their own copy of +``libc``, which results in no dependencies on the target system. 
To +achieve this, the pointer to the dynamic loader is configured at install +time since that path cannot be dynamically altered. This is the reason +for a wrapper around the ``populate_sdk`` archive. + +Another feature is that only one set of cross-canadian toolchain +binaries are produced per architecture. This feature takes advantage of +the fact that the target hardware can be passed to ``gcc`` as a set of +compiler options. Those options are set up by the environment script and +contained in variables such as :term:`CC` +and :term:`LD`. This reduces the space +needed for the tools. Understand, however, that a sysroot is still +needed for every target since those binaries are target-specific. + +Remember, before using any BitBake command, you must source the build +environment setup script (i.e. +````` <&YOCTO_DOCS_REF_URL;#structure-core-script>`__ or +```oe-init-build-env-memres`` <&YOCTO_DOCS_REF_URL;#structure-memres-core-script>`__) +located in the Source Directory and you must make sure your +``conf/local.conf`` variables are correct. In particular, you need to be +sure the :term:`MACHINE` variable +matches the architecture for which you are building and that the +:term:`SDKMACHINE` variable is +correctly set if you are building a toolchain designed to run on an +architecture that differs from your current development host machine +(i.e. the build machine). + +When the ``bitbake`` command completes, the toolchain installer will be +in ``tmp/deploy/sdk`` in the Build Directory. + +.. note:: + + By default, this toolchain does not build static binaries. If you + want to use the toolchain to build these types of libraries, you need + to be sure your image has the appropriate static development + libraries. Use the + IMAGE_INSTALL + variable inside your + local.conf + file to install the appropriate library packages. Following is an + example using + glibc + static development libraries: + :: + + IMAGE_INSTALL_append = " glibc-staticdev" + + +Optionally Using an External Toolchain +====================================== + +You might want to use an external toolchain as part of your development. +If this is the case, the fundamental steps you need to accomplish are as +follows: + +- Understand where the installed toolchain resides. For cases where you + need to build the external toolchain, you would need to take separate + steps to build and install the toolchain. + +- Make sure you add the layer that contains the toolchain to your + ``bblayers.conf`` file through the + :term:`BBLAYERS` variable. + +- Set the + :term:`EXTERNAL_TOOLCHAIN` + variable in your ``local.conf`` file to the location in which you + installed the toolchain. + +A good example of an external toolchain used with the Yocto Project is +Mentor Graphics Sourcery G++ Toolchain. You can see information on how +to use that particular layer in the ``README`` file at +http://github.com/MentorEmbedded/meta-sourcery/. You can find +further information by reading about the +:term:`TCMODE` variable in the Yocto +Project Reference Manual's variable glossary. + +.. _using-pre-built: + +Example Using Pre-Built Binaries and QEMU +========================================= + +If hardware, libraries and services are stable, you can get started by +using a pre-built binary of the filesystem image, kernel, and toolchain +and run it using the QEMU emulator. This scenario is useful for +developing application software. 
+ +|Using a Pre-Built Image| + +For this scenario, you need to do several things: + +- Install the appropriate stand-alone toolchain tarball. + +- Download the pre-built image that will boot with QEMU. You need to be + sure to get the QEMU image that matches your target machine's + architecture (e.g. x86, ARM, etc.). + +- Download the filesystem image for your target machine's architecture. + +- Set up the environment to emulate the hardware and then start the + QEMU emulator. + +Installing the Toolchain +------------------------ + +You can download a tarball installer, which includes the pre-built +toolchain, the ``runqemu`` script, and support files from the +appropriate directory under ` <&YOCTO_TOOLCHAIN_DL_URL;>`__. Toolchains +are available for 32-bit and 64-bit x86 development systems from the +``i686`` and ``x86_64`` directories, respectively. The toolchains the +Yocto Project provides are based off the ``core-image-sato`` image and +contain libraries appropriate for developing against that image. Each +type of development system supports five or more target architectures. + +The names of the tarball installer scripts are such that a string +representing the host system appears first in the filename and then is +immediately followed by a string representing the target architecture. + +:: + + poky-glibc-host_system-image_type-arch-toolchain-release_version.sh + + Where: + host_system is a string representing your development system: + + i686 or x86_64. + + image_type is a string representing the image you wish to + develop a Software Development Toolkit (SDK) for use against. + The Yocto Project builds toolchain installers using the + following BitBake command: + + bitbake core-image-sato -c populate_sdk + + arch is a string representing the tuned target architecture: + + i586, x86_64, powerpc, mips, armv7a or armv5te + + release_version is a string representing the release number of the + Yocto Project: + + DISTRO, DISTRO+snapshot + + +For example, the following toolchain installer is for a 64-bit +development host system and a i586-tuned target architecture based off +the SDK for ``core-image-sato``: +poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh + +Toolchains are self-contained and by default are installed into +``/opt/poky``. However, when you run the toolchain installer, you can +choose an installation directory. + +The following command shows how to run the installer given a toolchain +tarball for a 64-bit x86 development host system and a 32-bit x86 target +architecture. You must change the permissions on the toolchain installer +script so that it is executable. + +The example assumes the toolchain installer is located in +``~/Downloads/``. + +.. note:: + + If you do not have write permissions for the directory into which you + are installing the toolchain, the toolchain installer notifies you + and exits. Be sure you have write permissions in the directory and + run the installer again. + +$ ~/Downloads/poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh + +For more information on how to install tarballs, see the "`Using a +Cross-Toolchain +Tarball <&YOCTO_DOCS_ADT_URL;#using-an-existing-toolchain-tarball>`__" +and "`Using BitBake and the Build +Directory <&YOCTO_DOCS_ADT_URL;#using-the-toolchain-from-within-the-build-tree>`__" +sections in the Yocto Project Application Developer's Guide. 
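As a quick recap, the hand-installation step can be as short as the following minimal sketch. It reuses the DISTRO placeholder names and the default installation directory described in this section; substitute the installer you actually downloaded and whatever directory you accept at the prompt:

::

   $ cd ~/Downloads
   $ chmod +x poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh
   $ ./poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh
   # answer the prompts; the default installation directory is /opt/poky/DISTRO
   $ source /opt/poky/DISTRO/environment-setup-i586-poky-linux

Once the environment setup script has been sourced, the cross-toolchain is on your PATH and ready for the build steps described earlier in this guide.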
+ +Downloading the Pre-Built Linux Kernel +-------------------------------------- + +You can download the pre-built Linux kernel suitable for running in the +QEMU emulator from ` <&YOCTO_QEMU_DL_URL;>`__. Be sure to use the kernel +that matches the architecture you want to simulate. Download areas exist +for the five supported machine architectures: ``qemuarm``, ``qemumips``, +``qemuppc``, ``qemux86``, and ``qemux86-64``. + +Most kernel files have one of the following forms: \*zImage-qemuarch.bin +vmlinux-qemuarch.bin Where: arch is a string representing the target +architecture: x86, x86-64, ppc, mips, or arm. + +You can learn more about downloading a Yocto Project kernel in the +"`Yocto Project Kernel <&YOCTO_DOCS_DEV_URL;#local-kernel-files>`__" +bulleted item in the Yocto Project Development Manual. + +Downloading the Filesystem +-------------------------- + +You can also download the filesystem image suitable for your target +architecture from ` <&YOCTO_QEMU_DL_URL;>`__. Again, be sure to use the +filesystem that matches the architecture you want to simulate. + +The filesystem image has two tarball forms: ``ext3`` and ``tar``. You +must use the ``ext3`` form when booting an image using the QEMU +emulator. The ``tar`` form can be flattened out in your host development +system and used for build purposes with the Yocto Project. +core-image-profile-qemuarch.ext3 core-image-profile-qemuarch.tar.bz2 +Where: profile is the filesystem image's profile: lsb, lsb-dev, lsb-sdk, +lsb-qt3, minimal, minimal-dev, sato, sato-dev, or sato-sdk. For +information on these types of image profiles, see the +":ref:`ref-manual/ref-images:Images`" chapter in the Yocto +Project Reference Manual. arch is a string representing the target +architecture: x86, x86-64, ppc, mips, or arm. + +Setting Up the Environment and Starting the QEMU Emulator +--------------------------------------------------------- + +Before you start the QEMU emulator, you need to set up the emulation +environment. The following command form sets up the emulation +environment. $ source +YOCTO_ADTPATH_DIR/environment-setup-arch-poky-linux-if Where: arch is a +string representing the target architecture: i586, x86_64, ppc603e, +mips, or armv5te. if is a string representing an embedded application +binary interface. Not all setup scripts include this string. + +Finally, this command form invokes the QEMU emulator $ runqemu qemuarch +kernel-image filesystem-image Where: qemuarch is a string representing +the target architecture: qemux86, qemux86-64, qemuppc, qemumips, or +qemuarm. kernel-image is the architecture-specific kernel image. +filesystem-image is the .ext3 filesystem image. + +Continuing with the example, the following two commands setup the +emulation environment and launch QEMU. This example assumes the root +filesystem (``.ext3`` file) and the pre-built kernel image file both +reside in your home directory. The kernel and filesystem are for a +32-bit target architecture. $ cd $HOME $ source +YOCTO_ADTPATH_DIR/environment-setup-i586-poky-linux $ runqemu qemux86 +bzImage-qemux86.bin \\ core-image-sato-qemux86.ext3 + +The environment in which QEMU launches varies depending on the +filesystem image and on the target architecture. For example, if you +source the environment for the ARM target architecture and then boot the +minimal QEMU image, the emulator comes up in a new shell in command-line +mode. However, if you boot the SDK image, QEMU comes up with a GUI. + +.. 
note:: + + Booting the PPC image results in QEMU launching in the same shell in + command-line mode. + +.. |Using a Pre-Built Image| image:: figures/using-a-pre-built-image.png diff --git a/poky/documentation/adt-manual/adt-prepare.xml b/poky/documentation/adt-manual/adt-prepare.xml index 684eb75c5..2dc984325 100644 --- a/poky/documentation/adt-manual/adt-prepare.xml +++ b/poky/documentation/adt-manual/adt-prepare.xml @@ -232,7 +232,7 @@ own location, you are prompted to run the installation script interactively or in silent mode. If you want to closely monitor the installation, - choose “I” for interactive mode rather than “S” for silent mode. + choose "I" for interactive mode rather than "S" for silent mode. Follow the prompts from the script to complete the installation. @@ -765,7 +765,7 @@ Install the appropriate stand-alone toolchain tarball. Download the pre-built image that will boot with QEMU. - You need to be sure to get the QEMU image that matches your target machine’s + You need to be sure to get the QEMU image that matches your target machine's architecture (e.g. x86, ARM, etc.). Download the filesystem image for your target machine's architecture. diff --git a/poky/documentation/boilerplate.rst b/poky/documentation/boilerplate.rst new file mode 100644 index 000000000..ddffdac24 --- /dev/null +++ b/poky/documentation/boilerplate.rst @@ -0,0 +1,18 @@ +.. include:: +.. include:: + +---- + +| |project_name| +| + +Permission is granted to copy, distribute and/or modify this document under the +terms of the `Creative Commons Attribution-Share Alike 2.0 UK: England & Wales +`_ as published by Creative +Commons. + +To report any inaccuracies or problems with this (or any other Yocto Project) +manual, or to send additions or changes, please send email/patches to the Yocto +Project documentation mailing list at ``docs@lists.yoctoproject.org`` or +log into the freenode ``#yocto`` channel. + diff --git a/poky/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst b/poky/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst new file mode 100644 index 000000000..7e24b9e68 --- /dev/null +++ b/poky/documentation/brief-yoctoprojectqs/brief-yoctoprojectqs.rst @@ -0,0 +1,430 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +========================= +Yocto Project Quick Build +========================= + +Welcome! +======== + +This short document steps you through the process for a typical +image build using the Yocto Project. The document also introduces how to +configure a build for specific hardware. You will use Yocto Project to +build a reference embedded OS called Poky. + +.. note:: + + - The examples in this paper assume you are using a native Linux + system running a recent Ubuntu Linux distribution. If the machine + you want to use Yocto Project on to build an image + (:term:`Build Host`) is not + a native Linux system, you can still perform these steps by using + CROss PlatformS (CROPS) and setting up a Poky container. See the + :ref:`dev-manual/dev-manual-start:setting up to use cross platforms (crops)` + section + in the Yocto Project Development Tasks Manual for more + information. + + - You may use Windows Subsystem For Linux v2 to set up a build host + using Windows 10. + + .. note:: + + The Yocto Project is not compatible with WSLv1, it is + compatible but not officially supported nor validated with + WSLv2, if you still decide to use WSL please upgrade to WSLv2. 
+ + See the :ref:`dev-manual/dev-manual-start:setting up to use windows + subsystem for linux (wslv2)` section in the Yocto Project Development + Tasks Manual for more information. + +If you want more conceptual or background information on the Yocto +Project, see the :doc:`../overview-manual/overview-manual`. + +Compatible Linux Distribution +============================= + +Make sure your :term:`Build Host` meets the +following requirements: + +- 50 Gbytes of free disk space + +- Runs a supported Linux distribution (i.e. recent releases of Fedora, + openSUSE, CentOS, Debian, or Ubuntu). For a list of Linux + distributions that support the Yocto Project, see the + :ref:`ref-manual/ref-system-requirements:supported linux distributions` + section in the Yocto Project Reference Manual. For detailed + information on preparing your build host, see the + :ref:`dev-manual/dev-manual-start:preparing the build host` + section in the Yocto Project Development Tasks Manual. + +- + + - Git 1.8.3.1 or greater + - tar 1.28 or greater + - Python 3.5.0 or greater. + - gcc 5.0 or greater. + +If your build host does not meet any of these three listed version +requirements, you can take steps to prepare the system so that you +can still use the Yocto Project. See the +:ref:`ref-manual/ref-system-requirements:required git, tar, python and gcc versions` +section in the Yocto Project Reference Manual for information. + +Build Host Packages +=================== + +You must install essential host packages on your build host. The +following command installs the host packages based on an Ubuntu +distribution: + +.. code-block:: shell + + $ sudo apt-get install &UBUNTU_HOST_PACKAGES_ESSENTIAL; + +.. note:: + + For host package requirements on all supported Linux distributions, + see the :ref:`ref-manual/ref-system-requirements:required packages for the build host` + section in the Yocto Project Reference Manual. + +Use Git to Clone Poky +===================== + +Once you complete the setup instructions for your machine, you need to +get a copy of the Poky repository on your build host. Use the following +commands to clone the Poky repository. + +.. code-block:: shell + + $ git clone git://git.yoctoproject.org/poky + Cloning into 'poky'... + remote: Counting + objects: 432160, done. remote: Compressing objects: 100% + (102056/102056), done. remote: Total 432160 (delta 323116), reused + 432037 (delta 323000) Receiving objects: 100% (432160/432160), 153.81 MiB | 8.54 MiB/s, done. + Resolving deltas: 100% (323116/323116), done. + Checking connectivity... done. + +Move to the ``poky`` directory and take a look at the tags: + +.. code-block:: shell + + $ cd poky + $ git fetch --tags + $ git tag + 1.1_M1.final + 1.1_M1.rc1 + 1.1_M1.rc2 + 1.1_M2.final + 1.1_M2.rc1 + . + . + . + yocto-2.5 + yocto-2.5.1 + yocto-2.5.2 + yocto-2.6 + yocto-2.6.1 + yocto-2.6.2 + yocto-2.7 + yocto_1.5_M5.rc8 + +For this example, check out the branch based on the +``&DISTRO_REL_TAG;`` release: + +.. code-block:: shell + + $ git checkout tags/&DISTRO_REL_TAG; -b my-&DISTRO_REL_TAG; + Switched to a new branch 'my-&DISTRO_REL_TAG;' + +The previous Git checkout command creates a local branch named +``my-&DISTRO_REL_TAG;``. The files available to you in that branch exactly +match the repository's files in the ``&DISTRO_NAME_NO_CAP;`` development +branch at the time of the Yocto Project &DISTRO_REL_TAG; release. 
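+
+If you want to confirm that your new local branch is based on the
+release you intended, standard Git commands are enough. The following is
+a minimal sketch; the exact tag and branch names reported depend on the
+release you checked out:
+
+.. code-block:: shell
+
+   $ cd ~/poky
+   # Show the tag the current checkout corresponds to
+   $ git describe --tags
+   # Show the local branch you are currently on
+   $ git status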
+ +For more options and information about accessing Yocto Project related +repositories, see the +:ref:`dev-manual/dev-manual-start:locating yocto project source files` +section in the Yocto Project Development Tasks Manual. + +Building Your Image +=================== + +Use the following steps to build your image. The build process creates +an entire Linux distribution, including the toolchain, from source. + +.. note:: + + - If you are working behind a firewall and your build host is not + set up for proxies, you could encounter problems with the build + process when fetching source code (e.g. fetcher failures or Git + failures). + + - If you do not know your proxy settings, consult your local network + infrastructure resources and get that information. A good starting + point could also be to check your web browser settings. Finally, + you can find more information on the + ":yocto_wiki:`Working Behind a Network Proxy `" + page of the Yocto Project Wiki. + +#. **Initialize the Build Environment:** From within the ``poky`` + directory, run the :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\`` + environment + setup script to define Yocto Project's build environment on your + build host. + + .. code-block:: shell + + $ cd ~/poky + $ source &OE_INIT_FILE; + You had no conf/local.conf file. This configuration file has therefore been + created for you with some default values. You may wish to edit it to, for + example, select a different MACHINE (target hardware). See conf/local.conf + for more information as common configuration options are commented. + + You had no conf/bblayers.conf file. This configuration file has therefore + been created for you with some default values. To add additional metadata + layers into your configuration please add entries to conf/bblayers.conf. + + The Yocto Project has extensive documentation about OE including a reference + manual which can be found at: + http://yoctoproject.org/documentation + + For more information about OpenEmbedded see their website: + http://www.openembedded.org/ + + ### Shell environment set up for builds. ### + + You can now run 'bitbake ' + + Common targets are: + core-image-minimal + core-image-sato + meta-toolchain + meta-ide-support + + You can also run generated qemu images with a command like 'runqemu qemux86-64' + + Among other things, the script creates the :term:`Build Directory`, which is + ``build`` in this case and is located in the :term:`Source Directory`. After + the script runs, your current working directory is set to the Build + Directory. Later, when the build completes, the Build Directory contains all the + files created during the build. + +#. **Examine Your Local Configuration File:** When you set up the build + environment, a local configuration file named ``local.conf`` becomes + available in a ``conf`` subdirectory of the Build Directory. For this + example, the defaults are set to build for a ``qemux86`` target, + which is suitable for emulation. The package manager used is set to + the RPM package manager. + + .. tip:: + + You can significantly speed up your build and guard against fetcher + failures by using mirrors. 
To use mirrors, add these lines to your + local.conf file in the Build directory: :: + + SSTATE_MIRRORS = "\ + file://.* http://sstate.yoctoproject.org/dev/PATH;downloadfilename=PATH \n \ + file://.* http://sstate.yoctoproject.org/&YOCTO_DOC_VERSION_MINUS_ONE;/PATH;downloadfilename=PATH \n \ + file://.* http://sstate.yoctoproject.org/&YOCTO_DOC_VERSION;/PATH;downloadfilename=PATH \n \ + " + + + The previous examples showed how to add sstate paths for Yocto Project + &YOCTO_DOC_VERSION_MINUS_ONE;, &YOCTO_DOC_VERSION;, and a development + area. For a complete index of sstate locations, see http://sstate.yoctoproject.org/. + +#. **Start the Build:** Continue with the following command to build an OS + image for the target, which is ``core-image-sato`` in this example: + + .. code-block:: shell + + $ bitbake core-image-sato + + For information on using the ``bitbake`` command, see the + :ref:`usingpoky-components-bitbake` section in the Yocto Project Overview and + Concepts Manual, or see the ":ref:`BitBake Command + `" section in the BitBake User Manual. + +#. **Simulate Your Image Using QEMU:** Once this particular image is + built, you can start QEMU, which is a Quick EMUlator that ships with + the Yocto Project: + + .. code-block:: shell + + $ runqemu qemux86-64 + + If you want to learn more about running QEMU, see the + :ref:`dev-manual/dev-manual-qemu:using the quick emulator (qemu)` chapter in + the Yocto Project Development Tasks Manual. + +#. **Exit QEMU:** Exit QEMU by either clicking on the shutdown icon or by typing + ``Ctrl-C`` in the QEMU transcript window from which you evoked QEMU. + +Customizing Your Build for Specific Hardware +============================================ + +So far, all you have done is quickly built an image suitable for +emulation only. This section shows you how to customize your build for +specific hardware by adding a hardware layer into the Yocto Project +development environment. + +In general, layers are repositories that contain related sets of +instructions and configurations that tell the Yocto Project what to do. +Isolating related metadata into functionally specific layers facilitates +modular development and makes it easier to reuse the layer metadata. + +.. note:: + + By convention, layer names start with the string "meta-". + +Follow these steps to add a hardware layer: + +#. **Find a Layer:** Lots of hardware layers exist. The Yocto Project + :yocto_git:`Source Repositories <>` has many hardware layers. + This example adds the + `meta-altera `__ hardware layer. + +#. **Clone the Layer:** Use Git to make a local copy of the layer on your + machine. You can put the copy in the top level of the copy of the + Poky repository created earlier: + + .. code-block:: shell + + $ cd ~/poky + $ git clone https://github.com/kraj/meta-altera.git + Cloning into 'meta-altera'... + remote: Counting objects: 25170, done. + remote: Compressing objects: 100% (350/350), done. + remote: Total 25170 (delta 645), reused 719 (delta 538), pack-reused 24219 + Receiving objects: 100% (25170/25170), 41.02 MiB | 1.64 MiB/s, done. + Resolving deltas: 100% (13385/13385), done. + Checking connectivity... done. + + The hardware layer now exists + with other layers inside the Poky reference repository on your build + host as ``meta-altera`` and contains all the metadata needed to + support hardware from Altera, which is owned by Intel. + + .. note:: + + It is recommended for layers to have a branch per Yocto Project release. 
+ Please make sure to checkout the layer branch supporting the Yocto Project + release you're using. + +#. **Change the Configuration to Build for a Specific Machine:** The + :term:`MACHINE` variable in the + ``local.conf`` file specifies the machine for the build. For this + example, set the ``MACHINE`` variable to ``cyclone5``. These + configurations are used: + https://github.com/kraj/meta-altera/blob/master/conf/machine/cyclone5.conf. + + .. note:: + + See the "Examine Your Local Configuration File" step earlier for more + information on configuring the build. + +#. **Add Your Layer to the Layer Configuration File:** Before you can use + a layer during a build, you must add it to your ``bblayers.conf`` + file, which is found in the + :term:`Build Directory` ``conf`` + directory. + + Use the ``bitbake-layers add-layer`` command to add the layer to the + configuration file: + + .. code-block:: shell + + $ cd ~/poky/build + $ bitbake-layers add-layer ../meta-altera + NOTE: Starting bitbake server... + Parsing recipes: 100% |##################################################################| Time: 0:00:32 + Parsing of 918 .bb files complete (0 cached, 918 parsed). 1401 targets, + 123 skipped, 0 masked, 0 errors. + + You can find + more information on adding layers in the + :ref:`dev-manual/dev-manual-common-tasks:adding a layer using the \`\`bitbake-layers\`\` script` + section. + +Completing these steps has added the ``meta-altera`` layer to your Yocto +Project development environment and configured it to build for the +``cyclone5`` machine. + +.. note:: + + The previous steps are for demonstration purposes only. If you were + to attempt to build an image for the ``cyclone5`` machine, you should + read the Altera ``README``. + +Creating Your Own General Layer +=============================== + +Maybe you have an application or specific set of behaviors you need to +isolate. You can create your own general layer using the +``bitbake-layers create-layer`` command. The tool automates layer +creation by setting up a subdirectory with a ``layer.conf`` +configuration file, a ``recipes-example`` subdirectory that contains an +``example.bb`` recipe, a licensing file, and a ``README``. + +The following commands run the tool to create a layer named +``meta-mylayer`` in the ``poky`` directory: + +.. code-block:: shell + + $ cd ~/poky + $ bitbake-layers create-layer meta-mylayer + NOTE: Starting bitbake server... + Add your new layer with 'bitbake-layers add-layer meta-mylayer' + +For more information +on layers and how to create them, see the +:ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script` +section in the Yocto Project Development Tasks Manual. + +Where To Go Next +================ + +Now that you have experienced using the Yocto Project, you might be +asking yourself "What now?". The Yocto Project has many sources of +information including the website, wiki pages, and user manuals: + +- **Website:** The :yocto_home:`Yocto Project Website <>` provides + background information, the latest builds, breaking news, full + development documentation, and access to a rich Yocto Project + Development Community into which you can tap. + +- **Developer Screencast:** The `Getting Started with the Yocto Project - + New Developer Screencast Tutorial `__ + provides a 30-minute video created for users unfamiliar with the + Yocto Project but familiar with Linux build hosts. 
While this + screencast is somewhat dated, the introductory and fundamental + concepts are useful for the beginner. + +- **Yocto Project Overview and Concepts Manual:** The + :doc:`../overview-manual/overview-manual` is a great + place to start to learn about the Yocto Project. This manual + introduces you to the Yocto Project and its development environment. + The manual also provides conceptual information for various aspects + of the Yocto Project. + +- **Yocto Project Wiki:** The :yocto_wiki:`Yocto Project Wiki <>` + provides additional information on where to go next when ramping up + with the Yocto Project, release information, project planning, and QA + information. + +- **Yocto Project Mailing Lists:** Related mailing lists provide a forum + for discussion, patch submission and announcements. Several mailing + lists exist and are grouped according to areas of concern. See the + :ref:`ref-manual/resources:mailing lists` + section in the Yocto Project Reference Manual for a complete list of + Yocto Project mailing lists. + +- **Comprehensive List of Links and Other Documentation:** The + :ref:`ref-manual/resources:links and related documentation` + section in the Yocto Project Reference Manual provides a + comprehensive list of all related links and other user documentation. + +.. include:: /boilerplate.rst diff --git a/poky/documentation/bsp-guide/bsp-guide.rst b/poky/documentation/bsp-guide/bsp-guide.rst new file mode 100644 index 000000000..435a399d5 --- /dev/null +++ b/poky/documentation/bsp-guide/bsp-guide.rst @@ -0,0 +1,16 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +===================================================== +Yocto Project Board Support Package Developer's Guide +===================================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + bsp + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/bsp-guide/bsp.rst b/poky/documentation/bsp-guide/bsp.rst new file mode 100644 index 000000000..024a240c2 --- /dev/null +++ b/poky/documentation/bsp-guide/bsp.rst @@ -0,0 +1,1527 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************************************ +Board Support Packages (BSP) - Developer's Guide +************************************************ + +A Board Support Package (BSP) is a collection of information that +defines how to support a particular hardware device, set of devices, or +hardware platform. The BSP includes information about the hardware +features present on the device and kernel configuration information +along with any additional hardware drivers required. The BSP also lists +any additional software components required in addition to a generic +Linux software stack for both essential and optional platform features. + +This guide presents information about BSP layers, defines a structure +for components so that BSPs follow a commonly understood layout, +discusses how to customize a recipe for a BSP, addresses BSP licensing, +and provides information that shows you how to create a BSP +Layer using the :ref:`bitbake-layers ` +tool. + +BSP Layers +========== + +A BSP consists of a file structure inside a base directory. +Collectively, you can think of the base directory, its file structure, +and the contents as a BSP layer. 
Although not a strict requirement, BSP +layers in the Yocto Project use the following well-established naming +convention: :: + + meta-bsp_root_name + +The string "meta-" is prepended to the +machine or platform name, which is bsp_root_name in the above form. + +.. note:: + + Because the BSP layer naming convention is well-established, it is + advisable to follow it when creating layers. Technically speaking, a + BSP layer name does not need to start with + meta-. However, various scripts and tools in the Yocto Project development + environment assume this convention. + +To help understand the BSP layer concept, consider the BSPs that the +Yocto Project supports and provides with each release. You can see the +layers in the +:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories` +through +a web interface at :yocto_git:`/`. If you go to that interface, +you will find a list of repositories under "Yocto Metadata Layers". + +.. note:: + + Layers that are no longer actively supported as part of the Yocto + Project appear under the heading "Yocto Metadata Layer Archive." + +Each repository is a BSP layer supported by the Yocto Project (e.g. +``meta-raspberrypi`` and ``meta-intel``). Each of these layers is a +repository unto itself and clicking on the layer name displays two URLs +from which you can clone the layer's repository to your local system. +Here is an example that clones the Raspberry Pi BSP layer: :: + + $ git clone git://git.yoctoproject.org/meta-raspberrypi + +In addition to BSP layers, the ``meta-yocto-bsp`` layer is part of the +shipped ``poky`` repository. The ``meta-yocto-bsp`` layer maintains +several "reference" BSPs including the ARM-based Beaglebone, MIPS-based +EdgeRouter, and generic versions of both 32-bit and 64-bit IA machines. + +For information on typical BSP development workflow, see the +:ref:`bsp-guide/bsp:developing a board support package (bsp)` +section. For more +information on how to set up a local copy of source files from a Git +repository, see the +:ref:`dev-manual/dev-manual-start:locating yocto project source files` +section in the Yocto Project Development Tasks Manual. + +The BSP layer's base directory (``meta-bsp_root_name``) is the root +directory of that Layer. This directory is what you add to the +:term:`BBLAYERS` variable in the +``conf/bblayers.conf`` file found in your +:term:`Build Directory`, which is +established after you run the OpenEmbedded build environment setup +script (i.e. :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\`` ). +Adding the root directory allows the :term:`OpenEmbedded Build System` +to recognize the BSP +layer and from it build an image. Here is an example: :: + + BBLAYERS ?= " \ + /usr/local/src/yocto/meta \ + /usr/local/src/yocto/meta-poky \ + /usr/local/src/yocto/meta-yocto-bsp \ + /usr/local/src/yocto/meta-mylayer \ + " + +.. note:: + + Ordering and ``BBFILE_PRIORITY`` for the layers listed in BBLAYERS matter. For + example, if multiple layers define a machine configuration, the OpenEmbedded + build system uses the last layer searched given similar layer priorities. The + build system works from the top-down through the layers listed in ``BBLAYERS``. + +Some BSPs require or depend on additional layers beyond the BSP's root +layer in order to be functional. In this case, you need to specify these +layers in the ``README`` "Dependencies" section of the BSP's root layer. 
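+
+For example, the "Dependencies" section of a hypothetical BSP ``README``
+might read as follows (the repository URI, layer name, and branch shown
+here are placeholders only): ::
+
+   Dependencies
+   ============
+
+   This layer depends on:
+
+     URI: git://git.openembedded.org/meta-openembedded
+     layers: meta-oe
+     branch: &DISTRO_NAME_NO_CAP;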
+Additionally, if any build instructions exist for the BSP, you must add +them to the "Dependencies" section. + +Some layers function as a layer to hold other BSP layers. These layers +are knows as ":term:`container layers `". An example of +this type of layer is OpenEmbedded's +`meta-openembedded `__ +layer. The ``meta-openembedded`` layer contains many ``meta-*`` layers. +In cases like this, you need to include the names of the actual layers +you want to work with, such as: :: + + BBLAYERS ?= " \ + /usr/local/src/yocto/meta \ + /usr/local/src/yocto/meta-poky \ + /usr/local/src/yocto/meta-yocto-bsp \ + /usr/local/src/yocto/meta-mylayer \ + .../meta-openembedded/meta-oe \ + .../meta-openembedded/meta-perl \ + .../meta-openembedded/meta-networking \ + " + +and so on. + +For more information on layers, see the +":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section of the Yocto Project Development Tasks Manual. + +Preparing Your Build Host to Work With BSP Layers +================================================= + +This section describes how to get your build host ready to work with BSP +layers. Once you have the host set up, you can create the layer as +described in the +":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`" +section. + +.. note:: + + For structural information on BSPs, see the Example Filesystem Layout + section. + +#. *Set Up the Build Environment:* Be sure you are set up to use BitBake + in a shell. See the ":ref:`dev-manual/dev-manual-start:preparing the build host`" + section in the Yocto Project Development Tasks Manual for information on how + to get a build host ready that is either a native Linux machine or a machine + that uses CROPS. + +#. *Clone the ``poky`` Repository:* You need to have a local copy of the + Yocto Project :term:`Source Directory` (i.e. a local + ``poky`` repository). See the + "ref:`dev-manual/dev-manual-start:cloning the ``poky`` repository`" and + possibly the + ":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" or + ":ref:`dev-manual/dev-manual-start:checking out by tag in poky`" + sections + all in the Yocto Project Development Tasks Manual for information on + how to clone the ``poky`` repository and check out the appropriate + branch for your work. + +#. *Determine the BSP Layer You Want:* The Yocto Project supports many + BSPs, which are maintained in their own layers or in layers designed + to contain several BSPs. To get an idea of machine support through + BSP layers, you can look at the `index of + machines <&YOCTO_RELEASE_DL_URL;/machines>`__ for the release. + +#. *Optionally Clone the ``meta-intel`` BSP Layer:* If your hardware is + based on current Intel CPUs and devices, you can leverage this BSP + layer. For details on the ``meta-intel`` BSP layer, see the layer's + `README `__ + file. + + #. *Navigate to Your Source Directory:* Typically, you set up the + ``meta-intel`` Git repository inside the :term:`Source Directory` (e.g. + ``poky``). :: + + $ cd /home/you/poky + + #. *Clone the Layer:* :: + + $ git clone git://git.yoctoproject.org/meta-intel.git + Cloning into 'meta-intel'... + remote: Counting objects: 15585, done. + remote: Compressing objects: 100% (5056/5056), done. + remote: Total 15585 (delta 9123), reused 15329 (delta 8867) + Receiving objects: 100% (15585/15585), 4.51 MiB | 3.19 MiB/s, done. + Resolving deltas: 100% (9123/9123), done. + Checking connectivity... done. + + #. 
*Check Out the Proper Branch:* The branch you check out for + ``meta-intel`` must match the same branch you are using for the + Yocto Project release (e.g. &DISTRO_NAME_NO_CAP;): :: + + $ cd meta-intel + $ git checkout -b &DISTRO_NAME_NO_CAP; remotes/origin/&DISTRO_NAME_NO_CAP; + Branch &DISTRO_NAME_NO_CAP; set up to track remote branch + &DISTRO_NAME_NO_CAP; from origin. + Switched to a new branch '&DISTRO_NAME_NO_CAP;' + + .. note:: + + To see the available branch names in a cloned repository, use the ``git + branch -al`` command. See the + ":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" + section in the Yocto Project Development Tasks Manual for more + information. + +#. *Optionally Set Up an Alternative BSP Layer:* If your hardware can be + more closely leveraged to an existing BSP not within the + ``meta-intel`` BSP layer, you can clone that BSP layer. + + The process is identical to the process used for the ``meta-intel`` + layer except for the layer's name. For example, if you determine that + your hardware most closely matches the ``meta-raspberrypi``, clone + that layer: :: + + $ git clone git://git.yoctoproject.org/meta-raspberrypi + Cloning into 'meta-raspberrypi'... + remote: Counting objects: 4743, done. + remote: Compressing objects: 100% (2185/2185), done. + remote: Total 4743 (delta 2447), reused 4496 (delta 2258) + Receiving objects: 100% (4743/4743), 1.18 MiB | 0 bytes/s, done. + Resolving deltas: 100% (2447/2447), done. + Checking connectivity... done. + +#. *Initialize the Build Environment:* While in the root directory of + the Source Directory (i.e. ``poky``), run the + :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\`` environment + setup script to define the OpenEmbedded build environment on your + build host. :: + + $ source &OE_INIT_FILE; + + Among other things, the script creates the :term:`Build Directory`, which is + ``build`` in this case and is located in the :term:`Source Directory`. After + the script runs, your current working directory is set to the ``build`` + directory. + +.. _bsp-filelayout: + +Example Filesystem Layout +========================= + +Defining a common BSP directory structure allows end-users to understand +and become familiar with that standard. A common format also encourages +standardization of software support for hardware. + +The proposed form described in this section does have elements that are +specific to the OpenEmbedded build system. It is intended that +developers can use this structure with other build systems besides the +OpenEmbedded build system. It is also intended that it will be be simple +to extract information and convert it to other formats if required. The +OpenEmbedded build system, through its standard :ref:`layers mechanism +`, can +directly accept the format described as a layer. The BSP layer captures +all the hardware-specific details in one place using a standard format, +which is useful for any person wishing to use the hardware platform +regardless of the build system they are using. + +The BSP specification does not include a build system or other tools - +the specification is concerned with the hardware-specific components +only. At the end-distribution point, you can ship the BSP layer combined +with a build system and other tools. Realize that it is important to +maintain the distinction that the BSP layer, a build system, and tools +are separate components that could be combined in certain end products. 
+ +Before looking at the recommended form for the directory structure +inside a BSP layer, you should be aware that some requirements do exist +in order for a BSP layer to be considered compliant with the Yocto +Project. For that list of requirements, see the +":ref:`bsp-guide/bsp:released bsp requirements`" section. + +Below is the typical directory structure for a BSP layer. While this +basic form represents the standard, realize that the actual layout for +individual BSPs could differ. :: + + meta-bsp_root_name/ + meta-bsp_root_name/bsp_license_file + meta-bsp_root_name/README + meta-bsp_root_name/README.sources + meta-bsp_root_name/binary/bootable_images + meta-bsp_root_name/conf/layer.conf + meta-bsp_root_name/conf/machine/*.conf + meta-bsp_root_name/recipes-bsp/* + meta-bsp_root_name/recipes-core/* + meta-bsp_root_name/recipes-graphics/* + meta-bsp_root_name/recipes-kernel/linux/linux-yocto_kernel_rev.bbappend + +Below is an example of the Raspberry Pi BSP layer that is available from +the :yocto_git:`Source Respositories <>`: :: + + meta-raspberrypi/COPYING.MIT + meta-raspberrypi/README.md + meta-raspberrypi/classes + meta-raspberrypi/classes/sdcard_image-rpi.bbclass + meta-raspberrypi/conf/ + meta-raspberrypi/conf/layer.conf + meta-raspberrypi/conf/machine/ + meta-raspberrypi/conf/machine/raspberrypi-cm.conf + meta-raspberrypi/conf/machine/raspberrypi-cm3.conf + meta-raspberrypi/conf/machine/raspberrypi.conf + meta-raspberrypi/conf/machine/raspberrypi0-wifi.conf + meta-raspberrypi/conf/machine/raspberrypi0.conf + meta-raspberrypi/conf/machine/raspberrypi2.conf + meta-raspberrypi/conf/machine/raspberrypi3-64.conf + meta-raspberrypi/conf/machine/raspberrypi3.conf + meta-raspberrypi/conf/machine/include + meta-raspberrypi/conf/machine/include/rpi-base.inc + meta-raspberrypi/conf/machine/include/rpi-default-providers.inc + meta-raspberrypi/conf/machine/include/rpi-default-settings.inc + meta-raspberrypi/conf/machine/include/rpi-default-versions.inc + meta-raspberrypi/conf/machine/include/tune-arm1176jzf-s.inc + meta-raspberrypi/docs + meta-raspberrypi/docs/Makefile + meta-raspberrypi/docs/conf.py + meta-raspberrypi/docs/contributing.md + meta-raspberrypi/docs/extra-apps.md + meta-raspberrypi/docs/extra-build-config.md + meta-raspberrypi/docs/index.rst + meta-raspberrypi/docs/layer-contents.md + meta-raspberrypi/docs/readme.md + meta-raspberrypi/files + meta-raspberrypi/files/custom-licenses + meta-raspberrypi/files/custom-licenses/Broadcom + meta-raspberrypi/recipes-bsp + meta-raspberrypi/recipes-bsp/bootfiles + meta-raspberrypi/recipes-bsp/bootfiles/bcm2835-bootfiles.bb + meta-raspberrypi/recipes-bsp/bootfiles/rpi-config_git.bb + meta-raspberrypi/recipes-bsp/common + meta-raspberrypi/recipes-bsp/common/firmware.inc + meta-raspberrypi/recipes-bsp/formfactor + meta-raspberrypi/recipes-bsp/formfactor/formfactor + meta-raspberrypi/recipes-bsp/formfactor/formfactor/raspberrypi + meta-raspberrypi/recipes-bsp/formfactor/formfactor/raspberrypi/machconfig + meta-raspberrypi/recipes-bsp/formfactor/formfactor_0.0.bbappend + meta-raspberrypi/recipes-bsp/rpi-u-boot-src + meta-raspberrypi/recipes-bsp/rpi-u-boot-src/files + meta-raspberrypi/recipes-bsp/rpi-u-boot-src/files/boot.cmd.in + meta-raspberrypi/recipes-bsp/rpi-u-boot-src/rpi-u-boot-scr.bb + meta-raspberrypi/recipes-bsp/u-boot + meta-raspberrypi/recipes-bsp/u-boot/u-boot + meta-raspberrypi/recipes-bsp/u-boot/u-boot/*.patch + meta-raspberrypi/recipes-bsp/u-boot/u-boot_%.bbappend + meta-raspberrypi/recipes-connectivity + 
meta-raspberrypi/recipes-connectivity/bluez5 + meta-raspberrypi/recipes-connectivity/bluez5/bluez5 + meta-raspberrypi/recipes-connectivity/bluez5/bluez5/*.patch + meta-raspberrypi/recipes-connectivity/bluez5/bluez5/BCM43430A1.hcd + meta-raspberrypi/recipes-connectivity/bluez5/bluez5brcm43438.service + meta-raspberrypi/recipes-connectivity/bluez5/bluez5_%.bbappend + meta-raspberrypi/recipes-core + meta-raspberrypi/recipes-core/images + meta-raspberrypi/recipes-core/images/rpi-basic-image.bb + meta-raspberrypi/recipes-core/images/rpi-hwup-image.bb + meta-raspberrypi/recipes-core/images/rpi-test-image.bb + meta-raspberrypi/recipes-core/packagegroups + meta-raspberrypi/recipes-core/packagegroups/packagegroup-rpi-test.bb + meta-raspberrypi/recipes-core/psplash + meta-raspberrypi/recipes-core/psplash/files + meta-raspberrypi/recipes-core/psplash/files/psplash-raspberrypi-img.h + meta-raspberrypi/recipes-core/psplash/psplash_git.bbappend + meta-raspberrypi/recipes-core/udev + meta-raspberrypi/recipes-core/udev/udev-rules-rpi + meta-raspberrypi/recipes-core/udev/udev-rules-rpi/99-com.rules + meta-raspberrypi/recipes-core/udev/udev-rules-rpi.bb + meta-raspberrypi/recipes-devtools + meta-raspberrypi/recipes-devtools/bcm2835 + meta-raspberrypi/recipes-devtools/bcm2835/bcm2835_1.52.bb + meta-raspberrypi/recipes-devtools/pi-blaster + meta-raspberrypi/recipes-devtools/pi-blaster/files + meta-raspberrypi/recipes-devtools/pi-blaster/files/*.patch + meta-raspberrypi/recipes-devtools/pi-blaster/pi-blaster_git.bb + meta-raspberrypi/recipes-devtools/python + meta-raspberrypi/recipes-devtools/python/python-rtimu + meta-raspberrypi/recipes-devtools/python/python-rtimu/*.patch + meta-raspberrypi/recipes-devtools/python/python-rtimu_git.bb + meta-raspberrypi/recipes-devtools/python/python-sense-hat_2.2.0.bb + meta-raspberrypi/recipes-devtools/python/rpi-gpio + meta-raspberrypi/recipes-devtools/python/rpi-gpio/*.patch + meta-raspberrypi/recipes-devtools/python/rpi-gpio_0.6.3.bb + meta-raspberrypi/recipes-devtools/python/rpio + meta-raspberrypi/recipes-devtools/python/rpio/*.patch + meta-raspberrypi/recipes-devtools/python/rpio_0.10.0.bb + meta-raspberrypi/recipes-devtools/wiringPi + meta-raspberrypi/recipes-devtools/wiringPi/files + meta-raspberrypi/recipes-devtools/wiringPi/files/*.patch + meta-raspberrypi/recipes-devtools/wiringPi/wiringpi_git.bb + meta-raspberrypi/recipes-graphics + meta-raspberrypi/recipes-graphics/eglinfo + meta-raspberrypi/recipes-graphics/eglinfo/eglinfo-fb_%.bbappend + meta-raspberrypi/recipes-graphics/eglinfo/eglinfo-x11_%.bbappend + meta-raspberrypi/recipes-graphics/mesa + meta-raspberrypi/recipes-graphics/mesa/mesa-gl_%.bbappend + meta-raspberrypi/recipes-graphics/mesa/mesa_%.bbappend + meta-raspberrypi/recipes-graphics/userland + meta-raspberrypi/recipes-graphics/userland/userland + meta-raspberrypi/recipes-graphics/userland/userland/*.patch + meta-raspberrypi/recipes-graphics/userland/userland_git.bb + meta-raspberrypi/recipes-graphics/vc-graphics + meta-raspberrypi/recipes-graphics/vc-graphics/files + meta-raspberrypi/recipes-graphics/vc-graphics/files/egl.pc + meta-raspberrypi/recipes-graphics/vc-graphics/files/vchiq.sh + meta-raspberrypi/recipes-graphics/vc-graphics/vc-graphics-hardfp.bb + meta-raspberrypi/recipes-graphics/vc-graphics/vc-graphics.bb + meta-raspberrypi/recipes-graphics/vc-graphics/vc-graphics.inc + meta-raspberrypi/recipes-graphics/wayland + meta-raspberrypi/recipes-graphics/wayland/weston_%.bbappend + meta-raspberrypi/recipes-graphics/xorg-xserver + 
meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi/xorg.conf + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi/xorg.conf.d + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi/xorg.conf.d/10-evdev.conf + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi/xorg.conf.d/98-pitft.conf + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config/rpi/xorg.conf.d/99-calibration.conf + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xf86-config_0.1.bbappend + meta-raspberrypi/recipes-graphics/xorg-xserver/xserver-xorg_%.bbappend + meta-raspberrypi/recipes-kernel + meta-raspberrypi/recipes-kernel/linux-firmware + meta-raspberrypi/recipes-kernel/linux-firmware/files + meta-raspberrypi/recipes-kernel/linux-firmware/files/brcmfmac43430-sdio.bin + meta-raspberrypi/recipes-kernel/linux-firmware/files/brcfmac43430-sdio.txt + meta-raspberrypi/recipes-kernel/linux-firmware/linux-firmware_%.bbappend + meta-raspberrypi/recipes-kernel/linux + meta-raspberrypi/recipes-kernel/linux/linux-raspberrypi-dev.bb + meta-raspberrypi/recipes-kernel/linux/linux-raspberrypi.inc + meta-raspberrypi/recipes-kernel/linux/linux-raspberrypi_4.14.bb + meta-raspberrypi/recipes-kernel/linux/linux-raspberrypi_4.9.bb + meta-raspberrypi/recipes-multimedia + meta-raspberrypi/recipes-multimedia/gstreamer + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-omx + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-omx/*.patch + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-omx_%.bbappend + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-plugins-bad_%.bbappend + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-omx-1.12 + meta-raspberrypi/recipes-multimedia/gstreamer/gstreamer1.0-omx-1.12/*.patch + meta-raspberrypi/recipes-multimedia/omxplayer + meta-raspberrypi/recipes-multimedia/omxplayer/omxplayer + meta-raspberrypi/recipes-multimedia/omxplayer/omxplayer/*.patch + meta-raspberrypi/recipes-multimedia/omxplayer/omxplayer_git.bb + meta-raspberrypi/recipes-multimedia/x264 + meta-raspberrypi/recipes-multimedia/x264/x264_git.bbappend + meta-raspberrypi/wic meta-raspberrypi/wic/sdimage-raspberrypi.wks + +The following sections describe each part of the proposed BSP format. + +.. _bsp-filelayout-license: + +License Files +------------- + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/bsp_license_file + +These optional files satisfy licensing requirements for the BSP. The +type or types of files here can vary depending on the licensing +requirements. For example, in the Raspberry Pi BSP, all licensing +requirements are handled with the ``COPYING.MIT`` file. + +Licensing files can be MIT, BSD, GPLv*, and so forth. These files are +recommended for the BSP but are optional and totally up to the BSP +developer. For information on how to maintain license compliance, see +the ":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`" +section in the Yocto Project Development Tasks Manual. + +.. _bsp-filelayout-readme: + +README File +----------- + +You can find this file in the BSP Layer at: :: + + meta-bsp_root_name/README + +This file provides information on how to boot the live images that are +optionally included in the ``binary/`` directory. 
The ``README`` file +also provides information needed for building the image. + +At a minimum, the ``README`` file must contain a list of dependencies, +such as the names of any other layers on which the BSP depends and the +name of the BSP maintainer with his or her contact information. + +.. _bsp-filelayout-readme-sources: + +README.sources File +------------------- + +You can find this file in the BSP Layer at: :: + + meta-bsp_root_name/README.sources + +This file provides information on where to locate the BSP source files +used to build the images (if any) that reside in +``meta-bsp_root_name/binary``. Images in the ``binary`` would be images +released with the BSP. The information in the ``README.sources`` file +also helps you find the :term:`Metadata` +used to generate the images that ship with the BSP. + +.. note:: + + If the BSP's ``binary`` directory is missing or the directory has no images, an + existing ``README.sources`` file is meaningless and usually does not exist. + +.. _bsp-filelayout-binary: + +Pre-built User Binaries +----------------------- + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/binary/bootable_images + +This optional area contains useful pre-built kernels and user-space +filesystem images released with the BSP that are appropriate to the +target system. This directory typically contains graphical (e.g. Sato) +and minimal live images when the BSP tarball has been created and made +available in the :yocto_home:`Yocto Project <>` website. You can +use these kernels and images to get a system running and quickly get +started on development tasks. + +The exact types of binaries present are highly hardware-dependent. The +:ref:`README ` file should be present in the +BSP Layer and it explains how to use the images with the target +hardware. Additionally, the +:ref:`README.sources ` file should be +present to locate the sources used to build the images and provide +information on the Metadata. + +.. _bsp-filelayout-layer: + +Layer Configuration File +------------------------ + +You can find this file in the BSP Layer at: :: + + meta-bsp_root_name/conf/layer.conf + +The ``conf/layer.conf`` file identifies the file structure as a layer, +identifies the contents of the layer, and contains information about how +the build system should use it. Generally, a standard boilerplate file +such as the following works. In the following example, you would replace +bsp with the actual name of the BSP (i.e. bsp_root_name from the example +template). :: + + # We have a conf and classes directory, add to BBPATH + BBPATH .= ":${LAYERDIR}" + + # We have a recipes directory, add to BBFILES + BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ + ${LAYERDIR}/recipes-*/*/*.bbappend" + + BBFILE_COLLECTIONS += "bsp" + BBFILE_PATTERN_bsp = "^${LAYERDIR}/" + BBFILE_PRIORITY_bsp = "6" + LAYERDEPENDS_bsp = "intel" + +To illustrate the string substitutions, here are the corresponding +statements from the Raspberry Pi ``conf/layer.conf`` file: :: + + # We have a conf and classes directory, append to BBPATH + BBPATH .= ":${LAYERDIR}" + + # We have a recipes directory containing .bb and .bbappend files, add to BBFILES + BBFILES += "${LAYERDIR}/recipes*/*/*.bb \ + ${LAYERDIR}/recipes*/*/*.bbappend" + + BBFILE_COLLECTIONS += "raspberrypi" + BBFILE_PATTERN_raspberrypi := "^${LAYERDIR}/" + BBFILE_PRIORITY_raspberrypi = "9" + + # Additional license directories. + LICENSE_PATH += "${LAYERDIR}/files/custom-licenses" + . + . + . 
+ +This file simply makes :term:`BitBake` aware of the recipes and configuration +directories. The file must exist so that the OpenEmbedded build system can +recognize the BSP. + +.. _bsp-filelayout-machine: + +Hardware Configuration Options +------------------------------ + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/conf/machine/*.conf + +The machine files bind together all the information contained elsewhere +in the BSP into a format that the build system can understand. Each BSP +Layer requires at least one machine file. If the BSP supports multiple +machines, multiple machine configuration files can exist. These +filenames correspond to the values to which users have set the +:term:`MACHINE` variable. + +These files define things such as the kernel package to use +(:term:`PREFERRED_PROVIDER` of +:ref:`virtual/kernel `), +the hardware drivers to include in different types of images, any +special software components that are needed, any bootloader information, +and also any special image format requirements. + +This configuration file could also include a hardware "tuning" file that +is commonly used to define the package architecture and specify +optimization flags, which are carefully chosen to give best performance +on a given processor. + +Tuning files are found in the ``meta/conf/machine/include`` directory +within the :term:`Source Directory`. +For example, many ``tune-*`` files (e.g. ``tune-arm1136jf-s.inc``, +``tune-1586-nlp.inc``, and so forth) reside in the +``poky/meta/conf/machine/include`` directory. + +To use an include file, you simply include them in the machine +configuration file. For example, the Raspberry Pi BSP +``raspberrypi3.conf`` contains the following statement: :: + + include conf/machine/include/rpi-base.inc + +.. _bsp-filelayout-misc-recipes: + +Miscellaneous BSP-Specific Recipe Files +--------------------------------------- + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/recipes-bsp/* + +This optional directory contains miscellaneous recipe files for the BSP. +Most notably would be the formfactor files. For example, in the +Raspberry Pi BSP, there is the ``formfactor_0.0.bbappend`` file, which +is an append file used to augment the recipe that starts the build. +Furthermore, there are machine-specific settings used during the build +that are defined by the ``machconfig`` file further down in the +directory. Here is the ``machconfig`` file for the Raspberry Pi BSP: :: + + HAVE_TOUCHSCREEN=0 + HAVE_KEYBOARD=1 + + DISPLAY_CAN_ROTATE=0 + DISPLAY_ORIENTATION=0 + DISPLAY_DPI=133 + +.. note:: + + If a BSP does not have a formfactor entry, defaults are established + according to the formfactor configuration file that is installed by + the main formfactor recipe + ``meta/recipes-bsp/formfactor/formfactor_0.0.bb``, which is found in + the :term:`Source Directory`. + +.. _bsp-filelayout-recipes-graphics: + +Display Support Files +--------------------- + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/recipes-graphics/* + +This optional directory contains recipes for the BSP if it has special +requirements for graphics support. All files that are needed for the BSP +to support a display are kept here. + +.. 
_bsp-filelayout-kernel: + +Linux Kernel Configuration +-------------------------- + +You can find these files in the BSP Layer at: :: + + meta-bsp_root_name/recipes-kernel/linux/linux*.bbappend + meta-bsp_root_name/recipes-kernel/linux/*.bb + +Append files (``*.bbappend``) modify the main kernel recipe being used +to build the image. The ``*.bb`` files would be a developer-supplied +kernel recipe. This area of the BSP hierarchy can contain both these +types of files although, in practice, it is likely that you would have +one or the other. + +For your BSP, you typically want to use an existing Yocto Project kernel +recipe found in the :term:`Source Directory` +at +``meta/recipes-kernel/linux``. You can append machine-specific changes +to the kernel recipe by using a similarly named append file, which is +located in the BSP Layer for your target device (e.g. the +``meta-bsp_root_name/recipes-kernel/linux`` directory). + +Suppose you are using the ``linux-yocto_4.4.bb`` recipe to build the +kernel. In other words, you have selected the kernel in your +bsp_root_name\ ``.conf`` file by adding +:term:`PREFERRED_PROVIDER` and :term:`PREFERRED_VERSION` +statements as follows: :: + + PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" + PREFERRED_VERSION_linux-yocto ?= "4.4%" + +.. note:: + + When the preferred provider is assumed by default, the ``PREFERRED_PROVIDER`` + statement does not appear in the ``bsp_root_name`` .conf file. + +You would use the ``linux-yocto_4.4.bbappend`` file to append specific +BSP settings to the kernel, thus configuring the kernel for your +particular BSP. + +You can find more information on what your append file should contain in +the ":ref:`kernel-dev/kernel-dev-common:creating the append file`" section +in the Yocto Project Linux Kernel Development Manual. + +An alternate scenario is when you create your own kernel recipe for the +BSP. A good example of this is the Raspberry Pi BSP. If you examine the +``recipes-kernel/linux`` directory you see the following: :: + + linux-raspberrypi-dev.bb + linux-raspberrypi.inc + linux-raspberrypi_4.14.bb + linux-raspberrypi_4.9.bb + +The directory contains three kernel recipes and a common include file. + +Developing a Board Support Package (BSP) +======================================== + +This section describes the high-level procedure you can follow to create +a BSP. Although not required for BSP creation, the ``meta-intel`` +repository, which contains many BSPs supported by the Yocto Project, is +part of the example. + +For an example that shows how to create a new layer using the tools, see +the ":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`" +section. + +The following illustration and list summarize the BSP creation general +workflow. + +.. image:: figures/bsp-dev-flow.png + :align: center + +#. *Set up Your Host Development System to Support Development Using the + Yocto Project*: See the ":ref:`dev-manual/dev-manual-start:preparing the build host`" + section in the Yocto Project Development Tasks Manual for options on how to + get a system ready to use the Yocto Project. + +#. *Establish the meta-intel Repository on Your System:* Having + local copies of these supported BSP layers on your system gives you + access to layers you might be able to leverage when creating your + BSP. For information on how to get these files, see the + ":ref:`bsp-guide/bsp:preparing your build host to work with bsp layers`" + section. + +#. 
*Create Your Own BSP Layer Using the bitbake-layers Script:* + Layers are ideal for isolating and storing work for a given piece of + hardware. A layer is really just a location or area in which you + place the recipes and configurations for your BSP. In fact, a BSP is, + in itself, a special type of layer. The simplest way to create a new + BSP layer that is compliant with the Yocto Project is to use the + ``bitbake-layers`` script. For information about that script, see the + ":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`" + section. + + Another example that illustrates a layer is an application. Suppose + you are creating an application that has library or other + dependencies in order for it to compile and run. The layer, in this + case, would be where all the recipes that define those dependencies + are kept. The key point for a layer is that it is an isolated area + that contains all the relevant information for the project that the + OpenEmbedded build system knows about. For more information on + layers, see the ":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`" + section in the Yocto Project Overview and Concepts Manual. You can also + reference the ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" + section in the Yocto Project Development Tasks Manual. For more + information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`" + section. + + .. note:: + + - Five hardware reference BSPs exist that are part of the Yocto + Project release and are located in the ``poky/meta-yocto-bsp`` + BSP layer: + + - Texas Instruments Beaglebone (``beaglebone-yocto``) + + - Ubiquiti Networks EdgeRouter Lite (``edgerouter``) + + - Two general IA platforms (``genericx86`` and ``genericx86-64``) + + - Three core Intel BSPs exist as part of the Yocto Project + release in the ``meta-intel`` layer: + + - ``intel-core2-32``, which is a BSP optimized for the Core2 + family of CPUs as well as all CPUs prior to the Silvermont + core. + + - ``intel-corei7-64``, which is a BSP optimized for Nehalem + and later Core and Xeon CPUs as well as Silvermont and later + Atom CPUs, such as the Baytrail SoCs. + + - ``intel-quark``, which is a BSP optimized for the Intel + Galileo gen1 & gen2 development boards. + + When you set up a layer for a new BSP, you should follow a standard + layout. This layout is described in the ":ref:`bsp-guide/bsp:example filesystem layout`" + section. In the standard layout, notice + the suggested structure for recipes and configuration information. + You can see the standard layout for a BSP by examining any supported + BSP found in the ``meta-intel`` layer inside the Source Directory. + +#. *Make Configuration Changes to Your New BSP Layer:* The standard BSP + layer structure organizes the files you need to edit in ``conf`` and + several ``recipes-*`` directories within the BSP layer. Configuration + changes identify where your new layer is on the local system and + identifies the kernel you are going to use. When you run the + ``bitbake-layers`` script, you are able to interactively configure + many things for the BSP (e.g. keyboard, touchscreen, and so forth). + +#. *Make Recipe Changes to Your New BSP Layer:* Recipe changes include + altering recipes (``*.bb`` files), removing recipes you do not use, + and adding new recipes or append files (``.bbappend``) that support + your hardware. + +#. 
*Prepare for the Build:* Once you have made all the changes to your + BSP layer, there remains a few things you need to do for the + OpenEmbedded build system in order for it to create your image. You + need to get the build environment ready by sourcing an environment + setup script (i.e. ``oe-init-build-env``) and you need to be sure two + key configuration files are configured appropriately: the + ``conf/local.conf`` and the ``conf/bblayers.conf`` file. You must + make the OpenEmbedded build system aware of your new layer. See the + ":ref:`dev-manual/dev-manual-common-tasks:enabling your layer`" + section in the Yocto Project Development Tasks Manual for information + on how to let the build system know about your new layer. + +#. *Build the Image:* The OpenEmbedded build system uses the BitBake + tool to build images based on the type of image you want to create. + You can find more information about BitBake in the + :doc:`BitBake User Manual `. + + The build process supports several types of images to satisfy + different needs. See the + ":ref:`ref-manual/ref-images:Images`" chapter in the Yocto + Project Reference Manual for information on supported images. + +Requirements and Recommendations for Released BSPs +================================================== + +Certain requirements exist for a released BSP to be considered compliant +with the Yocto Project. Additionally, recommendations also exist. This +section describes the requirements and recommendations for released +BSPs. + +Released BSP Requirements +------------------------- + +Before looking at BSP requirements, you should consider the following: + +- The requirements here assume the BSP layer is a well-formed, "legal" + layer that can be added to the Yocto Project. For guidelines on + creating a layer that meets these base requirements, see the + ":ref:`bsp-guide/bsp:bsp layers`" section in this manual and the + ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" + section in the Yocto Project Development Tasks Manual. + +- The requirements in this section apply regardless of how you package + a BSP. You should consult the packaging and distribution guidelines + for your specific release process. For an example of packaging and + distribution requirements, see the "`Third Party BSP Release + Process `__" + wiki page. + +- The requirements for the BSP as it is made available to a developer + are completely independent of the released form of the BSP. For + example, the BSP Metadata can be contained within a Git repository + and could have a directory structure completely different from what + appears in the officially released BSP layer. + +- It is not required that specific packages or package modifications + exist in the BSP layer, beyond the requirements for general + compliance with the Yocto Project. For example, no requirement exists + dictating that a specific kernel or kernel version be used in a given + BSP. + +Following are the requirements for a released BSP that conform to the +Yocto Project: + +- *Layer Name:* The BSP must have a layer name that follows the Yocto + Project standards. For information on BSP layer names, see the + ":ref:`bsp-guide/bsp:bsp layers`" section. 
+ +- *File System Layout:* When possible, use the same directory names in + your BSP layer as listed in the ``recipes.txt`` file, which is found + in ``poky/meta`` directory of the :term:`Source Directory` + or in the OpenEmbedded-Core Layer (``openembedded-core``) at + http://git.openembedded.org/openembedded-core/tree/meta. + + You should place recipes (``*.bb`` files) and recipe modifications + (``*.bbappend`` files) into ``recipes-*`` subdirectories by + functional area as outlined in ``recipes.txt``. If you cannot find a + category in ``recipes.txt`` to fit a particular recipe, you can make + up your own ``recipes-*`` subdirectory. + + Within any particular ``recipes-*`` category, the layout should match + what is found in the OpenEmbedded-Core Git repository + (``openembedded-core``) or the Source Directory (``poky``). In other + words, make sure you place related files in appropriately-related + ``recipes-*`` subdirectories specific to the recipe's function, or + within a subdirectory containing a set of closely-related recipes. + The recipes themselves should follow the general guidelines for + recipes used in the Yocto Project found in the "`OpenEmbedded Style + Guide `__". + +- *License File:* You must include a license file in the + ``meta-bsp_root_name`` directory. This license covers the BSP + Metadata as a whole. You must specify which license to use since no + default license exists when one is not specified. See the + :yocto_git:`COPYING.MIT ` + file for the Raspberry Pi BSP in the ``meta-raspberrypi`` BSP layer + as an example. + +- *README File:* You must include a ``README`` file in the + ``meta-bsp_root_name`` directory. See the + :yocto_git:`README.md ` + file for the Raspberry Pi BSP in the ``meta-raspberrypi`` BSP layer + as an example. + + At a minimum, the ``README`` file should contain the following: + + - A brief description of the target hardware. + + - A list of all the dependencies of the BSP. These dependencies are + typically a list of required layers needed to build the BSP. + However, the dependencies should also contain information + regarding any other dependencies the BSP might have. + + - Any required special licensing information. For example, this + information includes information on special variables needed to + satisfy a EULA, or instructions on information needed to build or + distribute binaries built from the BSP Metadata. + + - The name and contact information for the BSP layer maintainer. + This is the person to whom patches and questions should be sent. + For information on how to find the right person, see the + ":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" + section in the Yocto Project Development Tasks Manual. + + - Instructions on how to build the BSP using the BSP layer. + + - Instructions on how to boot the BSP build from the BSP layer. + + - Instructions on how to boot the binary images contained in the + ``binary`` directory, if present. + + - Information on any known bugs or issues that users should know + about when either building or booting the BSP binaries. + +- *README.sources File:* If your BSP contains binary images in the + ``binary`` directory, you must include a ``README.sources`` file in + the ``meta-bsp_root_name`` directory. This file specifies exactly + where you can find the sources used to generate the binary images. + +- *Layer Configuration File:* You must include a ``conf/layer.conf`` + file in the ``meta-bsp_root_name`` directory. 
This file identifies + the ``meta-bsp_root_name`` BSP layer as a layer to the build + system. + +- *Machine Configuration File:* You must include one or more + ``conf/machine/bsp_root_name.conf`` files in the + ``meta-bsp_root_name`` directory. These configuration files define + machine targets that can be built using the BSP layer. Multiple + machine configuration files define variations of machine + configurations that the BSP supports. If a BSP supports multiple + machine variations, you need to adequately describe each variation in + the BSP ``README`` file. Do not use multiple machine configuration + files to describe disparate hardware. If you do have very different + targets, you should create separate BSP layers for each target. + + .. note:: + + It is completely possible for a developer to structure the working + repository as a conglomeration of unrelated BSP files, and to possibly + generate BSPs targeted for release from that directory using scripts or + some other mechanism (e.g. ``meta-yocto-bsp`` layer). Such considerations + are outside the scope of this document. + +Released BSP Recommendations +---------------------------- + +Following are recommendations for released BSPs that conform to the +Yocto Project: + +- *Bootable Images:* Released BSPs can contain one or more bootable + images. Including bootable images allows users to easily try out the + BSP using their own hardware. + + In some cases, it might not be convenient to include a bootable + image. If so, you might want to make two versions of the BSP + available: one that contains binary images, and one that does not. + The version that does not contain bootable images avoids unnecessary + download times for users not interested in the images. + + If you need to distribute a BSP and include bootable images or build + kernel and filesystems meant to allow users to boot the BSP for + evaluation purposes, you should put the images and artifacts within a + ``binary/`` subdirectory located in the ``meta-bsp_root_name`` + directory. + + .. note:: + + If you do include a bootable image as part of the BSP and the + image was built by software covered by the GPL or other open + source licenses, it is your responsibility to understand and meet + all licensing requirements, which could include distribution of + source files. + +- *Use a Yocto Linux Kernel:* Kernel recipes in the BSP should be based + on a Yocto Linux kernel. Basing your recipes on these kernels reduces + the costs for maintaining the BSP and increases its scalability. See + the ``Yocto Linux Kernel`` category in the + :yocto_git:`Source Repositories <>` for these kernels. + +Customizing a Recipe for a BSP +============================== + +If you plan on customizing a recipe for a particular BSP, you need to do +the following: + +- Create a ``*.bbappend`` file for the modified recipe. For information on using + append files, see the ":ref:`dev-manual/dev-manual-common-tasks:using + .bbappend files in your layer`" section in the Yocto Project Development + Tasks Manual. + +- Ensure your directory structure in the BSP layer that supports your + machine is such that the OpenEmbedded build system can find it. See + the example later in this section for more information. + +- Put the append file in a directory whose name matches the machine's + name and is located in an appropriate sub-directory inside the BSP + layer (i.e. ``recipes-bsp``, ``recipes-graphics``, ``recipes-core``, + and so forth). 
+
+- Place the BSP-specific files in the proper directory inside the BSP
+  layer. How expansive the layer is affects where you must place these
+  files. For example, if your layer supports several different machine
+  types, you need to be sure your layer's directory structure includes
+  hierarchy that separates the files according to machine. If your
+  layer does not support multiple machines, the layer would not have
+  that additional hierarchy and the files would not reside in a
+  machine-specific directory.
+
+Following is a specific example to help you better understand the
+process. This example customizes a recipe by adding a BSP-specific
+configuration file named ``interfaces`` to the ``init-ifupdown_1.0.bb``
+recipe for machine "xyz" where the BSP layer also supports several
+other machines:
+
+#. Edit the ``init-ifupdown_1.0.bbappend`` file so that it contains the
+   following: ::
+
+      FILESEXTRAPATHS_prepend := "${THISDIR}/files:"
+
+   The append file needs to be in the ``meta-xyz/recipes-core/init-ifupdown``
+   directory.
+
+#. Create and place the new ``interfaces`` configuration file in the
+   BSP's layer here: ::
+
+      meta-xyz/recipes-core/init-ifupdown/files/xyz-machine-one/interfaces
+
+   .. note::
+
+      If the meta-xyz layer did not support multiple machines, you would place
+      the interfaces configuration file in the layer here: ::
+
+         meta-xyz/recipes-core/init-ifupdown/files/interfaces
+
+   The :term:`FILESEXTRAPATHS` variable in the append file extends the search
+   path the build system uses to find files during the build. Consequently, for
+   this example you need to have the ``files`` directory in the same location as
+   your append file.
+
+BSP Licensing Considerations
+============================
+
+In some cases, a BSP contains separately-licensed Intellectual Property
+(IP) for a component or components. For these cases, you are required to
+accept the terms of a commercial or other type of license that requires
+some kind of explicit End User License Agreement (EULA). Once you accept
+the license, the OpenEmbedded build system can then build and include
+the corresponding component in the final BSP image. If the BSP is
+available as a pre-built image, you can download the image after
+agreeing to the license or EULA.
+
+You could find that some separately-licensed components that are
+essential for normal operation of the system might not have an
+unencumbered (or free) substitute. Without these essential components,
+the system would be non-functional. Then again, you might find that
+other licensed components that are simply 'good-to-have' or purely
+elective do have an unencumbered, free replacement component that you
+can use rather than agreeing to the separately-licensed component. Even
+for components essential to the system, you might find an unencumbered
+component that is not identical but will work as a less-capable version
+of the licensed version in the BSP recipe.
+
+For cases where you can substitute a free component and still maintain
+the system's functionality, the "DOWNLOADS" selection from the
+"SOFTWARE" tab on the :yocto_home:`Yocto Project Website <>` makes
+available de-featured BSPs that are completely free of any IP
+encumbrances. For these cases, you can use the substitution directly and
+without any further licensing requirements. If present, these fully
+de-featured BSPs are given names that clearly distinguish them from
+their encumbered counterparts.
If available, these +substitutions are your simplest and most preferred options. Obviously, +use of these substitutions assumes the resulting functionality meets +system requirements. + +.. note:: + + If however, a non-encumbered version is unavailable or it provides + unsuitable functionality or quality, you can use an encumbered + version. + +A couple different methods exist within the OpenEmbedded build system to +satisfy the licensing requirements for an encumbered BSP. The following +list describes them in order of preference: + +#. *Use the LICENSE_FLAGS Variable to Define the Recipes that Have Commercial or + Other Types of Specially-Licensed Packages:* For each of those recipes, you can + specify a matching license string in a ``local.conf`` variable named + :term:`LICENSE_FLAGS_WHITELIST`. + Specifying the matching license string signifies that you agree to + the license. Thus, the build system can build the corresponding + recipe and include the component in the image. See the + ":ref:`dev-manual/dev-manual-common-tasks:enabling commercially licensed recipes`" + section in the Yocto Project Development Tasks Manual for details on + how to use these variables. + + If you build as you normally would, without specifying any recipes in + the ``LICENSE_FLAGS_WHITELIST``, the build stops and provides you + with the list of recipes that you have tried to include in the image + that need entries in the ``LICENSE_FLAGS_WHITELIST``. Once you enter + the appropriate license flags into the whitelist, restart the build + to continue where it left off. During the build, the prompt will not + appear again since you have satisfied the requirement. + + Once the appropriate license flags are on the white list in the + ``LICENSE_FLAGS_WHITELIST`` variable, you can build the encumbered + image with no change at all to the normal build process. + +#. *Get a Pre-Built Version of the BSP:* You can get this type of BSP by + selecting the "DOWNLOADS" item from the "SOFTWARE" tab on the + :yocto_home:`Yocto Project website <>`. You can download BSP tarballs + that contain proprietary components after agreeing to the licensing + requirements of each of the individually encumbered packages as part + of the download process. Obtaining the BSP this way allows you to + access an encumbered image immediately after agreeing to the + click-through license agreements presented by the website. If you + want to build the image yourself using the recipes contained within + the BSP tarball, you will still need to create an appropriate + ``LICENSE_FLAGS_WHITELIST`` to match the encumbered recipes in the + BSP. + +.. note:: + + Pre-compiled images are bundled with a time-limited kernel that runs + for a predetermined amount of time (10 days) before it forces the + system to reboot. This limitation is meant to discourage direct + redistribution of the image. You must eventually rebuild the image if + you want to remove this restriction. + +Creating a new BSP Layer Using the ``bitbake-layers`` Script +============================================================ + +The ``bitbake-layers create-layer`` script automates creating a BSP +layer. What makes a layer a "BSP layer" is the presence of at least one +machine configuration file. Additionally, a BSP layer usually has a +kernel recipe or an append file that leverages off an existing kernel +recipe. The primary requirement, however, is the machine configuration. 
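+
+As a quick orientation before the detailed steps, the overall flow
+might look like the following minimal sketch, where ``meta-mymachine``
+and ``mymachine`` are hypothetical placeholders for your own layer and
+machine names:
+::
+
+   $ source oe-init-build-env
+   $ bitbake-layers create-layer meta-mymachine
+   $ bitbake-layers add-layer meta-mymachine
+   $ mkdir -p meta-mymachine/conf/machine
+
+You would then create ``meta-mymachine/conf/machine/mymachine.conf``,
+which is the file that turns the general layer into a BSP layer.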
+
+Use these steps to create a BSP layer:
+
+- *Create a General Layer:* Use the ``bitbake-layers`` script with the
+  ``create-layer`` subcommand to create a new general layer. For
+  instructions on how to create a general layer using the
+  ``bitbake-layers`` script, see the
+  ":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
+  section in the Yocto Project Development Tasks Manual.
+
+- *Create a Layer Configuration File:* Every layer needs a layer
+  configuration file. This configuration file establishes locations for
+  the layer's recipes, priorities for the layer, and so forth. You can
+  find examples of ``layer.conf`` files in the Yocto Project
+  :yocto_git:`Source Repositories <>`. To get examples of what you need
+  in your configuration file, locate a layer (e.g. "meta-ti") and
+  examine the
+  :yocto_git:`layer.conf `
+  file.
+
+- *Create a Machine Configuration File:* Create a
+  ``conf/machine/bsp_root_name.conf`` file. See
+  :yocto_git:`meta-yocto-bsp/conf/machine `
+  for sample ``bsp_root_name.conf`` files. Other samples such as
+  :yocto_git:`meta-ti `
+  and
+  :yocto_git:`meta-freescale `
+  exist from other vendors that have more specific machine and tuning
+  examples.
+
+- *Create a Kernel Recipe:* Create a kernel recipe in
+  ``recipes-kernel/linux`` by either using a kernel append file or a
+  new custom kernel recipe file (e.g. ``yocto-linux_4.12.bb``). The BSP
+  layers mentioned in the previous step also contain different kernel
+  examples. See the ":ref:`kernel-dev/kernel-dev-common:modifying an existing recipe`"
+  section in the Yocto Project Linux Kernel Development Manual for
+  information on how to create a custom kernel.
+
+The remainder of this section provides a description of the Yocto
+Project reference BSP for the BeagleBone, which resides in the
+:yocto_git:`meta-yocto-bsp `
+layer.
+
+BSP Layer Configuration Example
+-------------------------------
+
+The layer's ``conf`` directory contains the ``layer.conf`` configuration
+file. In this example, the ``conf/layer.conf`` is the following: ::
+
+   # We have a conf and classes directory, add to BBPATH
+   BBPATH .= ":${LAYERDIR}"
+
+   # We have recipes-* directories, add to BBFILES
+   BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
+               ${LAYERDIR}/recipes-*/*/*.bbappend"
+
+   BBFILE_COLLECTIONS += "yoctobsp"
+   BBFILE_PATTERN_yoctobsp = "^${LAYERDIR}/"
+   BBFILE_PRIORITY_yoctobsp = "5"
+   LAYERVERSION_yoctobsp = "4"
+   LAYERSERIES_COMPAT_yoctobsp = "dunfell"
+
+The variables used in this file configure the layer. A good way to learn
+about layer configuration files is to examine the files of various BSP
+layers from the :yocto_git:`Source Repositories <>`.
+
+For a detailed description of this particular layer configuration file,
+see ":ref:`step 3 `"
+in the discussion that describes how to create layers in the Yocto
+Project Development Tasks Manual.
+
+BSP Machine Configuration Example
+---------------------------------
+
+As mentioned earlier in this section, the existence of a machine
+configuration file is what makes a layer a BSP layer as compared to a
+general or kernel layer.
+
+One or more machine configuration files exist in the
+``bsp_layer/conf/machine/`` directory of the layer: ::
+
+   bsp_layer/conf/machine/machine1.conf
+   bsp_layer/conf/machine/machine2.conf
+   bsp_layer/conf/machine/machine3.conf
+   ... more ...
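+
+To build for one of the machines a BSP layer provides, you select it in
+``conf/local.conf``. A minimal sketch, using the hypothetical
+``machine1`` name from the listing above:
+::
+
+   MACHINE ?= "machine1"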
+ +For example, the machine configuration file for the `BeagleBone and +BeagleBone Black development boards `__ is +located in the layer ``poky/meta-yocto-bsp/conf/machine`` and is named +``beaglebone-yocto.conf``: :: + + #@TYPE: Machine + #@NAME: Beaglebone-yocto machine + #@DESCRIPTION: Reference machine configuration for http://beagleboard.org/bone and http://beagleboard.org/black boards + + PREFERRED_PROVIDER_virtual/xserver ?= "xserver-xorg" + XSERVER ?= "xserver-xorg \ + xf86-video-modesetting \ + " + + MACHINE_EXTRA_RRECOMMENDS = "kernel-modules kernel-devicetree" + + EXTRA_IMAGEDEPENDS += "u-boot" + + DEFAULTTUNE ?= "cortexa8hf-neon" + include conf/machine/include/tune-cortexa8.inc + + IMAGE_FSTYPES += "tar.bz2 jffs2 wic wic.bmap" + EXTRA_IMAGECMD_jffs2 = "-lnp " + WKS_FILE ?= "beaglebone-yocto.wks" + IMAGE_INSTALL_append = " kernel-devicetree kernel-image-zimage" + do_image_wic[depends] += "mtools-native:do_populate_sysroot dosfstools-native:do_populate_sysroot" + + SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyO0" + SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" + + PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" + PREFERRED_VERSION_linux-yocto ?= "5.0%" + + KERNEL_IMAGETYPE = "zImage" + KERNEL_DEVICETREE = "am335x-bone.dtb am335x-boneblack.dtb am335x-bonegreen.dtb" + KERNEL_EXTRA_ARGS += "LOADADDR=${UBOOT_ENTRYPOINT}" + + SPL_BINARY = "MLO" + UBOOT_SUFFIX = "img" + UBOOT_MACHINE = "am335x_evm_defconfig" + UBOOT_ENTRYPOINT = "0x80008000" + UBOOT_LOADADDRESS = "0x80008000" + + MACHINE_FEATURES = "usbgadget usbhost vfat alsa" + + IMAGE_BOOT_FILES ?= "u-boot.${UBOOT_SUFFIX} MLO zImage am335x-bone.dtb am335x-boneblack.dtb am335x-bonegreen.dtb" + +The variables used to configure the machine define machine-specific properties; for +example, machine-dependent packages, machine tunings, the type of kernel +to build, and U-Boot configurations. + +The following list provides some explanation for the statements found in +the example reference machine configuration file for the BeagleBone +development boards. Realize that much more can be defined as part of a +machine's configuration file. In general, you can learn about related +variables that this example does not have by locating the variables in +the ":ref:`ref-manual/ref-variables:variables glossary`" in the Yocto +Project Reference Manual. + +- :term:`PREFERRED_PROVIDER_virtual/xserver `: + The recipe that provides "virtual/xserver" when more than one + provider is found. In this case, the recipe that provides + "virtual/xserver" is "xserver-xorg", which exists in + ``poky/meta/recipes-graphics/xorg-xserver``. + +- :term:`XSERVER`: The packages that + should be installed to provide an X server and drivers for the + machine. In this example, the "xserver-xorg" and + "xf86-video-modesetting" are installed. + +- :term:`MACHINE_EXTRA_RRECOMMENDS`: + A list of machine-dependent packages not essential for booting the + image. Thus, the build does not fail if the packages do not exist. + However, the packages are required for a fully-featured image. + + .. tip:: + + Many ``MACHINE\*`` variables exist that help you configure a particular piece + of hardware. + +- :term:`EXTRA_IMAGEDEPENDS`: + Recipes to build that do not provide packages for installing into the + root filesystem but building the image depends on the recipes. + Sometimes a recipe is required to build the final image but is not + needed in the root filesystem. In this case, the U-Boot recipe must + be built for the image. 
+
+- :term:`DEFAULTTUNE`: Machines
+  use tunings to optimize machine, CPU, and application performance.
+  These features, which are collectively known as "tuning features",
+  exist in the :term:`OpenEmbedded-Core (OE-Core)` layer (e.g.
+  ``poky/meta/conf/machine/include``). In this example, the default
+  tune is "cortexa8hf-neon".
+
+  .. note::
+
+     The include statement that pulls in the
+     conf/machine/include/tune-cortexa8.inc file provides many tuning
+     possibilities.
+
+- :term:`IMAGE_FSTYPES`: The
+  formats the OpenEmbedded build system uses during the build when
+  creating the root filesystem. In this example, four types of images
+  are supported.
+
+- :term:`EXTRA_IMAGECMD`:
+  Specifies additional options for image creation commands. In this
+  example, the "-lnp " option is used when creating the
+  `JFFS2 `__ image.
+
+- :term:`WKS_FILE`: The location of
+  the :ref:`Wic kickstart ` file used
+  by the OpenEmbedded build system to create a partitioned image
+  (image.wic).
+
+- :term:`IMAGE_INSTALL`:
+  Specifies packages to install into an image through the
+  :ref:`image ` class. Recipes
+  use the ``IMAGE_INSTALL`` variable.
+
+- ``do_image_wic[depends]``: A task that is constructed during the
+  build. In this example, the task depends on specific tools in order
+  to create the sysroot when building a Wic image.
+
+- :term:`SERIAL_CONSOLES`:
+  Defines a serial console (TTY) to enable using getty. In this case,
+  the baud rate is "115200" and the device name is "ttyO0".
+
+- :term:`PREFERRED_PROVIDER_virtual/kernel `:
+  Specifies the recipe that provides "virtual/kernel" when more than
+  one provider is found. In this case, the recipe that provides
+  "virtual/kernel" is "linux-yocto", which exists in the layer's
+  ``recipes-kernel/linux`` directory.
+
+- :term:`PREFERRED_VERSION_linux-yocto `:
+  Defines the version of the recipe used to build the kernel, which is
+  "5.0" in this case.
+
+- :term:`KERNEL_IMAGETYPE`:
+  The type of kernel to build for the device. In this case, the
+  OpenEmbedded build system creates a "zImage" image type.
+
+- :term:`KERNEL_DEVICETREE`:
+  The names of the generated Linux kernel device trees (i.e. the
+  ``*.dtb`` files). All the device trees for the various BeagleBone
+  devices are included.
+
+- :term:`KERNEL_EXTRA_ARGS`:
+  Additional ``make`` command-line arguments the OpenEmbedded build
+  system passes on when compiling the kernel. In this example,
+  ``LOADADDR=${UBOOT_ENTRYPOINT}`` is passed as a command-line argument.
+
+- :term:`SPL_BINARY`: Defines the
+  Secondary Program Loader (SPL) binary type. In this case, the SPL
+  binary is set to "MLO", which stands for Multimedia card LOader.
+
+  The BeagleBone development board requires an SPL to boot and that SPL
+  file type must be MLO. Consequently, the machine configuration needs
+  to define ``SPL_BINARY`` as ``MLO``.
+
+  .. note::
+
+     For more information on how the SPL variables are used, see the u-boot.inc
+     include file.
+
+- :term:`UBOOT_* `: Defines
+  various U-Boot configurations needed to build a U-Boot image. In this
+  example, a U-Boot image is required to boot the BeagleBone device.
+  See the following variables for more information:
+
+  - :term:`UBOOT_SUFFIX`:
+    Points to the generated U-Boot extension.
+
+  - :term:`UBOOT_MACHINE`:
+    Specifies the value passed on the make command line when building
+    a U-Boot image.
+
+  - :term:`UBOOT_ENTRYPOINT`:
+    Specifies the entry point for the U-Boot image.
+
+  - :term:`UBOOT_LOADADDRESS`:
+    Specifies the load address for the U-Boot image.
+ +- :term:`MACHINE_FEATURES`: + Specifies the list of hardware features the BeagleBone device is + capable of supporting. In this case, the device supports "usbgadget + usbhost vfat alsa". + +- :term:`IMAGE_BOOT_FILES`: + Files installed into the device's boot partition when preparing the + image using the Wic tool with the ``bootimg-partition`` or + ``bootimg-efi`` source plugin. + +BSP Kernel Recipe Example +------------------------- + +The kernel recipe used to build the kernel image for the BeagleBone +device was established in the machine configuration: :: + + PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" + PREFERRED_VERSION_linux-yocto ?= "5.0%" + +The ``meta-yocto-bsp/recipes-kernel/linux`` directory in the layer contains +metadata used to build the kernel. In this case, a kernel append file +(i.e. ``linux-yocto_5.0.bbappend``) is used to override an established +kernel recipe (i.e. ``linux-yocto_5.0.bb``), which is located in +https://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/meta/recipes-kernel/linux. + +Following is the contents of the append file: :: + + KBRANCH_genericx86 = "v5.0/standard/base" + KBRANCH_genericx86-64 = "v5.0/standard/base" + KBRANCH_edgerouter = "v5.0/standard/edgerouter" + KBRANCH_beaglebone-yocto = "v5.0/standard/beaglebone" + + KMACHINE_genericx86 ?= "common-pc" + KMACHINE_genericx86-64 ?= "common-pc-64" + KMACHINE_beaglebone-yocto ?= "beaglebone" + + SRCREV_machine_genericx86 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d" + SRCREV_machine_genericx86-64 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d" + SRCREV_machine_edgerouter ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d" + SRCREV_machine_beaglebone-yocto ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d" + + COMPATIBLE_MACHINE_genericx86 = "genericx86" + COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" + COMPATIBLE_MACHINE_edgerouter = "edgerouter" + COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto" + + LINUX_VERSION_genericx86 = "5.0.3" + LINUX_VERSION_genericx86-64 = "5.0.3" + LINUX_VERSION_edgerouter = "5.0.3" + LINUX_VERSION_beaglebone-yocto = "5.0.3" + +This particular append file works for all the machines that are +part of the ``meta-yocto-bsp`` layer. The relevant statements are +appended with the "beaglebone-yocto" string. The OpenEmbedded build +system uses these statements to override similar statements in the +kernel recipe: + +- :term:`KBRANCH`: Identifies the + kernel branch that is validated, patched, and configured during the + build. + +- :term:`KMACHINE`: Identifies the + machine name as known by the kernel, which is sometimes a different + name than what is known by the OpenEmbedded build system. + +- :term:`SRCREV`: Identifies the + revision of the source code used to build the image. + +- :term:`COMPATIBLE_MACHINE`: + A regular expression that resolves to one or more target machines + with which the recipe is compatible. + +- :term:`LINUX_VERSION`: The + Linux version from kernel.org used by the OpenEmbedded build system + to build the kernel image. diff --git a/poky/documentation/bsp-guide/history.rst b/poky/documentation/bsp-guide/history.rst new file mode 100644 index 000000000..b52006adf --- /dev/null +++ b/poky/documentation/bsp-guide/history.rst @@ -0,0 +1,73 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. 
list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 0.9 + - November 2010 + - The initial document released with the Yocto Project 0.9 Release + * - 1.0 + - April 2011 + - Released with the Yocto Project 1.0 Release. + * - 1.1 + - October 2011 + - Released with the Yocto Project 1.1 Release. + * - 1.2 + - April 2012 + - Released with the Yocto Project 1.2 Release. + * - 1.3 + - October 2012 + - Released with the Yocto Project 1.3 Release. + * - 1.4 + - April 2013 + - Released with the Yocto Project 1.4 Release. + * - 1.5 + - October 2013 + - Released with the Yocto Project 1.5 Release. + * - 1.6 + - April 2014 + - Released with the Yocto Project 1.6 Release. + * - 1.7 + - October 2014 + - Released with the Yocto Project 1.7 Release. + * - 1.8 + - April 2015 + - Released with the Yocto Project 1.8 Release. + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. diff --git a/poky/documentation/conf.py b/poky/documentation/conf.py new file mode 100644 index 000000000..34d1bc97a --- /dev/null +++ b/poky/documentation/conf.py @@ -0,0 +1,121 @@ +# Configuration file for the Sphinx documentation builder. +# +# SPDX-License-Identifier: CC-BY-2.0-UK +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import datetime + +current_version = "dev" + +# String used in sidebar +version = 'Version: ' + current_version +if current_version == 'dev': + version = 'Version: Current Development' +# Version seen in documentation_options.js and hence in js switchers code +release = current_version + + +# -- Project information ----------------------------------------------------- +project = 'The Yocto Project' +copyright = '2010-%s, The Linux Foundation' % datetime.datetime.now().year +author = 'The Linux Foundation' + +# -- General configuration --------------------------------------------------- + +# to load local extension from the folder 'sphinx' +sys.path.insert(0, os.path.abspath('sphinx')) + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autosectionlabel', + 'sphinx.ext.extlinks', + 'sphinx.ext.intersphinx', + 'yocto-vars' +] +autosectionlabel_prefix_document = True + +# Add any paths that contain templates here, relative to this directory. 
+templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'boilerplate.rst', + 'adt-manual/*.rst'] + +# master document name. The default changed from contents to index. so better +# set it ourselves. +master_doc = 'index' + +# create substitution for project configuration variables +rst_prolog = """ +.. |project_name| replace:: %s +.. |copyright| replace:: %s +.. |author| replace:: %s +""" % (project, copyright, author) + +# external links and substitutions +extlinks = { + 'yocto_home': ('https://yoctoproject.org%s', None), + 'yocto_wiki': ('https://wiki.yoctoproject.org%s', None), + 'yocto_dl': ('https://downloads.yoctoproject.org%s', None), + 'yocto_lists': ('https://lists.yoctoproject.org%s', None), + 'yocto_bugs': ('https://bugzilla.yoctoproject.org%s', None), + 'yocto_ab': ('https://autobuilder.yoctoproject.org%s', None), + 'yocto_docs': ('https://docs.yoctoproject.org%s', None), + 'yocto_git': ('https://git.yoctoproject.org%s', None), + 'oe_home': ('https://www.openembedded.org%s', None), + 'oe_lists': ('https://lists.openembedded.org%s', None), +} + +# Intersphinx config to use cross reference with Bitbake user manual +intersphinx_mapping = { + 'bitbake': ('https://docs.yoctoproject.org/bitbake/', None) +} + +# -- Options for HTML output ------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'sphinx_rtd_theme' +html_theme_options = { + 'sticky_navigation': False, +} + +html_logo = 'sphinx-static/YoctoProject_Logo_RGB.jpg' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['sphinx-static'] + +html_context = { + 'current_version': current_version, +} + +# Add customm CSS and JS files +html_css_files = ['theme_overrides.css'] +html_js_files = ['switchers.js'] + +# Hide 'Created using Sphinx' text +html_show_sphinx = False + +# Add 'Last updated' on each page +html_last_updated_fmt = '%b %d, %Y' + +# Remove the trailing 'dot' in section numbers +html_secnumber_suffix = " " diff --git a/poky/documentation/dev-manual/dev-manual-common-tasks.rst b/poky/documentation/dev-manual/dev-manual-common-tasks.rst new file mode 100644 index 000000000..5eb7c5164 --- /dev/null +++ b/poky/documentation/dev-manual/dev-manual-common-tasks.rst @@ -0,0 +1,11802 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Common Tasks +************ + +This chapter describes fundamental procedures such as creating layers, +adding new software packages, extending or customizing images, porting +work to new hardware (adding a new machine), and so forth. You will find +that the procedures documented here occur often in the development cycle +using the Yocto Project. + +Understanding and Creating Layers +================================= + +The OpenEmbedded build system supports organizing +:term:`Metadata` into multiple layers. +Layers allow you to isolate different types of customizations from each +other. 
For introductory information on the Yocto Project Layer Model, +see the +":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`" +section in the Yocto Project Overview and Concepts Manual. + +Creating Your Own Layer +----------------------- + +It is very easy to create your own layers to use with the OpenEmbedded +build system. The Yocto Project ships with tools that speed up creating +layers. This section describes the steps you perform by hand to create +layers so that you can better understand them. For information about the +layer-creation tools, see the +":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`" +section in the Yocto Project Board Support Package (BSP) Developer's +Guide and the ":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`" +section further down in this manual. + +Follow these general steps to create your layer without using tools: + +1. *Check Existing Layers:* Before creating a new layer, you should be + sure someone has not already created a layer containing the Metadata + you need. You can see the `OpenEmbedded Metadata + Index `__ for a + list of layers from the OpenEmbedded community that can be used in + the Yocto Project. You could find a layer that is identical or close + to what you need. + +2. *Create a Directory:* Create the directory for your layer. When you + create the layer, be sure to create the directory in an area not + associated with the Yocto Project :term:`Source Directory` + (e.g. the cloned ``poky`` repository). + + While not strictly required, prepend the name of the directory with + the string "meta-". For example: + :: + + meta-mylayer + meta-GUI_xyz + meta-mymachine + + With rare exceptions, a layer's name follows this form: + :: + + meta-root_name + + Following this layer naming convention can save + you trouble later when tools, components, or variables "assume" your + layer name begins with "meta-". A notable example is in configuration + files as shown in the following step where layer names without the + "meta-" string are appended to several variables used in the + configuration. + +3. *Create a Layer Configuration File:* Inside your new layer folder, + you need to create a ``conf/layer.conf`` file. It is easiest to take + an existing layer configuration file and copy that to your layer's + ``conf`` directory and then modify the file as needed. + + The ``meta-yocto-bsp/conf/layer.conf`` file in the Yocto Project + :yocto_git:`Source Repositories ` + demonstrates the required syntax. For your layer, you need to replace + "yoctobsp" with a unique identifier for your layer (e.g. "machinexyz" + for a layer named "meta-machinexyz"): + :: + + # We have a conf and classes directory, add to BBPATH + BBPATH .= ":${LAYERDIR}" + + # We have recipes-\* directories, add to BBFILES + BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ + ${LAYERDIR}/recipes-*/*/*.bbappend" + + BBFILE_COLLECTIONS += "yoctobsp" + BBFILE_PATTERN_yoctobsp = "^${LAYERDIR}/" + BBFILE_PRIORITY_yoctobsp = "5" + LAYERVERSION_yoctobsp = "4" + LAYERSERIES_COMPAT_yoctobsp = "dunfell" + + Following is an explanation of the layer configuration file: + + - :term:`BBPATH`: Adds the layer's + root directory to BitBake's search path. Through the use of the + ``BBPATH`` variable, BitBake locates class files (``.bbclass``), + configuration files, and files that are included with ``include`` + and ``require`` statements. 
For these cases, BitBake uses the + first file that matches the name found in ``BBPATH``. This is + similar to the way the ``PATH`` variable is used for binaries. It + is recommended, therefore, that you use unique class and + configuration filenames in your custom layer. + + - :term:`BBFILES`: Defines the + location for all recipes in the layer. + + - :term:`BBFILE_COLLECTIONS`: + Establishes the current layer through a unique identifier that is + used throughout the OpenEmbedded build system to refer to the + layer. In this example, the identifier "yoctobsp" is the + representation for the container layer named "meta-yocto-bsp". + + - :term:`BBFILE_PATTERN`: + Expands immediately during parsing to provide the directory of the + layer. + + - :term:`BBFILE_PRIORITY`: + Establishes a priority to use for recipes in the layer when the + OpenEmbedded build finds recipes of the same name in different + layers. + + - :term:`LAYERVERSION`: + Establishes a version number for the layer. You can use this + version number to specify this exact version of the layer as a + dependency when using the + :term:`LAYERDEPENDS` + variable. + + - :term:`LAYERDEPENDS`: + Lists all layers on which this layer depends (if any). + + - :term:`LAYERSERIES_COMPAT`: + Lists the :yocto_wiki:`Yocto Project ` + releases for which the current version is compatible. This + variable is a good way to indicate if your particular layer is + current. + +4. *Add Content:* Depending on the type of layer, add the content. If + the layer adds support for a machine, add the machine configuration + in a ``conf/machine/`` file within the layer. If the layer adds + distro policy, add the distro configuration in a ``conf/distro/`` + file within the layer. If the layer introduces new recipes, put the + recipes you need in ``recipes-*`` subdirectories within the layer. + + .. note:: + + For an explanation of layer hierarchy that is compliant with the + Yocto Project, see the " + Example Filesystem Layout + " section in the Yocto Project Board Support Package (BSP) + Developer's Guide. + +5. *Optionally Test for Compatibility:* If you want permission to use + the Yocto Project Compatibility logo with your layer or application + that uses your layer, perform the steps to apply for compatibility. + See the "`Making Sure Your Layer is Compatible With Yocto + Project <#making-sure-your-layer-is-compatible-with-yocto-project>`__" + section for more information. + +.. _best-practices-to-follow-when-creating-layers: + +Following Best Practices When Creating Layers +--------------------------------------------- + +To create layers that are easier to maintain and that will not impact +builds for other machines, you should consider the information in the +following list: + +- *Avoid "Overlaying" Entire Recipes from Other Layers in Your + Configuration:* In other words, do not copy an entire recipe into + your layer and then modify it. Rather, use an append file + (``.bbappend``) to override only those parts of the original recipe + you need to modify. + +- *Avoid Duplicating Include Files:* Use append files (``.bbappend``) + for each recipe that uses an include file. Or, if you are introducing + a new recipe that requires the included file, use the path relative + to the original layer directory to refer to the file. For example, + use ``require recipes-core/``\ package\ ``/``\ file\ ``.inc`` instead + of ``require``\ file\ ``.inc``. 
If you're finding you have to overlay + the include file, it could indicate a deficiency in the include file + in the layer to which it originally belongs. If this is the case, you + should try to address that deficiency instead of overlaying the + include file. For example, you could address this by getting the + maintainer of the include file to add a variable or variables to make + it easy to override the parts needing to be overridden. + +- *Structure Your Layers:* Proper use of overrides within append files + and placement of machine-specific files within your layer can ensure + that a build is not using the wrong Metadata and negatively impacting + a build for a different machine. Following are some examples: + + - *Modify Variables to Support a Different Machine:* Suppose you + have a layer named ``meta-one`` that adds support for building + machine "one". To do so, you use an append file named + ``base-files.bbappend`` and create a dependency on "foo" by + altering the :term:`DEPENDS` + variable: + :: + + DEPENDS = "foo" + + The dependency is created during any + build that includes the layer ``meta-one``. However, you might not + want this dependency for all machines. For example, suppose you + are building for machine "two" but your ``bblayers.conf`` file has + the ``meta-one`` layer included. During the build, the + ``base-files`` for machine "two" will also have the dependency on + ``foo``. + + To make sure your changes apply only when building machine "one", + use a machine override with the ``DEPENDS`` statement: DEPENDS_one + = "foo" You should follow the same strategy when using ``_append`` + and ``_prepend`` operations: + :: + + DEPENDS_append_one = " foo" + DEPENDS_prepend_one = "foo " + + As an actual example, here's a + snippet from the generic kernel include file ``linux-yocto.inc``, + wherein the kernel compile and link options are adjusted in the + case of a subset of the supported architectures: + :: + + DEPENDS_append_aarch64 = " libgcc" + KERNEL_CC_append_aarch64 = " ${TOOLCHAIN_OPTIONS}" + KERNEL_LD_append_aarch64 = " ${TOOLCHAIN_OPTIONS}" + + DEPENDS_append_nios2 = " libgcc" + KERNEL_CC_append_nios2 = " ${TOOLCHAIN_OPTIONS}" + KERNEL_LD_append_nios2 = " ${TOOLCHAIN_OPTIONS}" + + DEPENDS_append_arc = " libgcc" + KERNEL_CC_append_arc = " ${TOOLCHAIN_OPTIONS}" + KERNEL_LD_append_arc = " ${TOOLCHAIN_OPTIONS}" + + KERNEL_FEATURES_append_qemuall=" features/debug/printk.scc" + + .. note:: + + Avoiding "+=" and "=+" and using machine-specific + \_append + and + \_prepend + operations is recommended as well. + + - *Place Machine-Specific Files in Machine-Specific Locations:* When + you have a base recipe, such as ``base-files.bb``, that contains a + :term:`SRC_URI` statement to a + file, you can use an append file to cause the build to use your + own version of the file. For example, an append file in your layer + at ``meta-one/recipes-core/base-files/base-files.bbappend`` could + extend :term:`FILESPATH` + using + :term:`FILESEXTRAPATHS` + as follows: FILESEXTRAPATHS_prepend := "${THISDIR}/${BPN}:" The + build for machine "one" will pick up your machine-specific file as + long as you have the file in + ``meta-one/recipes-core/base-files/base-files/``. However, if you + are building for a different machine and the ``bblayers.conf`` + file includes the ``meta-one`` layer and the location of your + machine-specific file is the first location where that file is + found according to ``FILESPATH``, builds for all machines will + also use that machine-specific file. 
+ + You can make sure that a machine-specific file is used for a + particular machine by putting the file in a subdirectory specific + to the machine. For example, rather than placing the file in + ``meta-one/recipes-core/base-files/base-files/`` as shown above, + put it in ``meta-one/recipes-core/base-files/base-files/one/``. + Not only does this make sure the file is used only when building + for machine "one", but the build process locates the file more + quickly. + + In summary, you need to place all files referenced from + ``SRC_URI`` in a machine-specific subdirectory within the layer in + order to restrict those files to machine-specific builds. + +- *Perform Steps to Apply for Yocto Project Compatibility:* If you want + permission to use the Yocto Project Compatibility logo with your + layer or application that uses your layer, perform the steps to apply + for compatibility. See the "`Making Sure Your Layer is Compatible + With Yocto + Project <#making-sure-your-layer-is-compatible-with-yocto-project>`__" + section for more information. + +- *Follow the Layer Naming Convention:* Store custom layers in a Git + repository that use the ``meta-layer_name`` format. + +- *Group Your Layers Locally:* Clone your repository alongside other + cloned ``meta`` directories from the :term:`Source Directory`. + +Making Sure Your Layer is Compatible With Yocto Project +------------------------------------------------------- + +When you create a layer used with the Yocto Project, it is advantageous +to make sure that the layer interacts well with existing Yocto Project +layers (i.e. the layer is compatible with the Yocto Project). Ensuring +compatibility makes the layer easy to be consumed by others in the Yocto +Project community and could allow you permission to use the Yocto +Project Compatible Logo. + +.. note:: + + Only Yocto Project member organizations are permitted to use the + Yocto Project Compatible Logo. The logo is not available for general + use. For information on how to become a Yocto Project member + organization, see the + Yocto Project Website + . + +The Yocto Project Compatibility Program consists of a layer application +process that requests permission to use the Yocto Project Compatibility +Logo for your layer and application. The process consists of two parts: + +1. Successfully passing a script (``yocto-check-layer``) that when run + against your layer, tests it against constraints based on experiences + of how layers have worked in the real world and where pitfalls have + been found. Getting a "PASS" result from the script is required for + successful compatibility registration. + +2. Completion of an application acceptance form, which you can find at + https://www.yoctoproject.org/webform/yocto-project-compatible-registration. + +To be granted permission to use the logo, you need to satisfy the +following: + +- Be able to check the box indicating that you got a "PASS" when + running the script against your layer. + +- Answer "Yes" to the questions on the form or have an acceptable + explanation for any questions answered "No". + +- Be a Yocto Project Member Organization. + +The remainder of this section presents information on the registration +form and on the ``yocto-check-layer`` script. + +Yocto Project Compatible Program Application +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Use the form to apply for your layer's approval. Upon successful +application, you can use the Yocto Project Compatibility Logo with your +layer and the application that uses your layer. 
+ +To access the form, use this link: +https://www.yoctoproject.org/webform/yocto-project-compatible-registration. +Follow the instructions on the form to complete your application. + +The application consists of the following sections: + +- *Contact Information:* Provide your contact information as the fields + require. Along with your information, provide the released versions + of the Yocto Project for which your layer is compatible. + +- *Acceptance Criteria:* Provide "Yes" or "No" answers for each of the + items in the checklist. Space exists at the bottom of the form for + any explanations for items for which you answered "No". + +- *Recommendations:* Provide answers for the questions regarding Linux + kernel use and build success. + +``yocto-check-layer`` Script +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``yocto-check-layer`` script provides you a way to assess how +compatible your layer is with the Yocto Project. You should run this +script prior to using the form to apply for compatibility as described +in the previous section. You need to achieve a "PASS" result in order to +have your application form successfully processed. + +The script divides tests into three areas: COMMON, BSP, and DISTRO. For +example, given a distribution layer (DISTRO), the layer must pass both +the COMMON and DISTRO related tests. Furthermore, if your layer is a BSP +layer, the layer must pass the COMMON and BSP set of tests. + +To execute the script, enter the following commands from your build +directory: +:: + + $ source oe-init-build-env + $ yocto-check-layer your_layer_directory + +Be sure to provide the actual directory for your +layer as part of the command. + +Entering the command causes the script to determine the type of layer +and then to execute a set of specific tests against the layer. The +following list overviews the test: + +- ``common.test_readme``: Tests if a ``README`` file exists in the + layer and the file is not empty. + +- ``common.test_parse``: Tests to make sure that BitBake can parse the + files without error (i.e. ``bitbake -p``). + +- ``common.test_show_environment``: Tests that the global or per-recipe + environment is in order without errors (i.e. ``bitbake -e``). + +- ``common.test_world``: Verifies that ``bitbake world`` works. + +- ``common.test_signatures``: Tests to be sure that BSP and DISTRO + layers do not come with recipes that change signatures. + +- ``common.test_layerseries_compat``: Verifies layer compatibility is + set properly. + +- ``bsp.test_bsp_defines_machines``: Tests if a BSP layer has machine + configurations. + +- ``bsp.test_bsp_no_set_machine``: Tests to ensure a BSP layer does not + set the machine when the layer is added. + +- ``bsp.test_machine_world``: Verifies that ``bitbake world`` works + regardless of which machine is selected. + +- ``bsp.test_machine_signatures``: Verifies that building for a + particular machine affects only the signature of tasks specific to + that machine. + +- ``distro.test_distro_defines_distros``: Tests if a DISTRO layer has + distro configurations. + +- ``distro.test_distro_no_set_distros``: Tests to ensure a DISTRO layer + does not set the distribution when the layer is added. + +Enabling Your Layer +------------------- + +Before the OpenEmbedded build system can use your new layer, you need to +enable it. To enable your layer, simply add your layer's path to the +``BBLAYERS`` variable in your ``conf/bblayers.conf`` file, which is +found in the :term:`Build Directory`. 
+The following example shows how to enable a layer named +``meta-mylayer``: +:: + + # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf + # changes incompatibly + POKY_BBLAYERS_CONF_VERSION = "2" + BBPATH = "${TOPDIR}" + BBFILES ?= "" + BBLAYERS ?= " \ + /home/user/poky/meta \ + /home/user/poky/meta-poky \ + /home/user/poky/meta-yocto-bsp \ + /home/user/poky/meta-mylayer \ + " + +BitBake parses each ``conf/layer.conf`` file from the top down as +specified in the ``BBLAYERS`` variable within the ``conf/bblayers.conf`` +file. During the processing of each ``conf/layer.conf`` file, BitBake +adds the recipes, classes and configurations contained within the +particular layer to the source directory. + +.. _using-bbappend-files: + +Using .bbappend Files in Your Layer +----------------------------------- + +A recipe that appends Metadata to another recipe is called a BitBake +append file. A BitBake append file uses the ``.bbappend`` file type +suffix, while the corresponding recipe to which Metadata is being +appended uses the ``.bb`` file type suffix. + +You can use a ``.bbappend`` file in your layer to make additions or +changes to the content of another layer's recipe without having to copy +the other layer's recipe into your layer. Your ``.bbappend`` file +resides in your layer, while the main ``.bb`` recipe file to which you +are appending Metadata resides in a different layer. + +Being able to append information to an existing recipe not only avoids +duplication, but also automatically applies recipe changes from a +different layer into your layer. If you were copying recipes, you would +have to manually merge changes as they occur. + +When you create an append file, you must use the same root name as the +corresponding recipe file. For example, the append file +``someapp_DISTRO.bbappend`` must apply to ``someapp_DISTRO.bb``. This +means the original recipe and append file names are version +number-specific. If the corresponding recipe is renamed to update to a +newer version, you must also rename and possibly update the +corresponding ``.bbappend`` as well. During the build process, BitBake +displays an error on starting if it detects a ``.bbappend`` file that +does not have a corresponding recipe with a matching name. See the +:term:`BB_DANGLINGAPPENDS_WARNONLY` +variable for information on how to handle this error. + +As an example, consider the main formfactor recipe and a corresponding +formfactor append file both from the :term:`Source Directory`. +Here is the main +formfactor recipe, which is named ``formfactor_0.0.bb`` and located in +the "meta" layer at ``meta/recipes-bsp/formfactor``: +:: + + SUMMARY = "Device formfactor information" + SECTION = "base" + LICENSE = "MIT" + LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420" + PR = "r45" + + SRC_URI = "file://config file://machconfig" + S = "${WORKDIR}" + + PACKAGE_ARCH = "${MACHINE_ARCH}" + INHIBIT_DEFAULT_DEPS = "1" + + do_install() { + # Install file only if it has contents + install -d ${D}${sysconfdir}/formfactor/ + install -m 0644 ${S}/config ${D}${sysconfdir}/formfactor/ + if [ -s "${S}/machconfig" ]; then + install -m 0644 ${S}/machconfig ${D}${sysconfdir}/formfactor/ + fi + } + +In the main recipe, note the :term:`SRC_URI` +variable, which tells the OpenEmbedded build system where to find files +during the build. + +Following is the append file, which is named ``formfactor_0.0.bbappend`` +and is from the Raspberry Pi BSP Layer named ``meta-raspberrypi``. 
The +file is in the layer at ``recipes-bsp/formfactor``: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + +By default, the build system uses the +:term:`FILESPATH` variable to +locate files. This append file extends the locations by setting the +:term:`FILESEXTRAPATHS` +variable. Setting this variable in the ``.bbappend`` file is the most +reliable and recommended method for adding directories to the search +path used by the build system to find files. + +The statement in this example extends the directories to include +``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}``, +which resolves to a directory named ``formfactor`` in the same directory +in which the append file resides (i.e. +``meta-raspberrypi/recipes-bsp/formfactor``. This implies that you must +have the supporting directory structure set up that will contain any +files or patches you will be including from the layer. + +Using the immediate expansion assignment operator ``:=`` is important +because of the reference to ``THISDIR``. The trailing colon character is +important as it ensures that items in the list remain colon-separated. + +.. note:: + + BitBake automatically defines the ``THISDIR`` variable. You should + never set this variable yourself. Using "_prepend" as part of the + ``FILESEXTRAPATHS`` ensures your path will be searched prior to other + paths in the final list. + + Also, not all append files add extra files. Many append files simply + exist to add build options (e.g. ``systemd``). For these cases, your + append file would not even use the ``FILESEXTRAPATHS`` statement. + +Prioritizing Your Layer +----------------------- + +Each layer is assigned a priority value. Priority values control which +layer takes precedence if there are recipe files with the same name in +multiple layers. For these cases, the recipe file from the layer with a +higher priority number takes precedence. Priority values also affect the +order in which multiple ``.bbappend`` files for the same recipe are +applied. You can either specify the priority manually, or allow the +build system to calculate it based on the layer's dependencies. + +To specify the layer's priority manually, use the +:term:`BBFILE_PRIORITY` +variable and append the layer's root name: +:: + + BBFILE_PRIORITY_mylayer = "1" + +.. note:: + + It is possible for a recipe with a lower version number + :term:`PV` in a layer that has a higher + priority to take precedence. + + Also, the layer priority does not currently affect the precedence + order of ``.conf`` or ``.bbclass`` files. Future versions of BitBake + might address this. + +Managing Layers +--------------- + +You can use the BitBake layer management tool ``bitbake-layers`` to +provide a view into the structure of recipes across a multi-layer +project. Being able to generate output that reports on configured layers +with their paths and priorities and on ``.bbappend`` files and their +applicable recipes can help to reveal potential problems. + +For help on the BitBake layer management tool, use the following +command: +:: + + $ bitbake-layers --help NOTE: Starting bitbake server... usage: + NOTE: Starting bitbake server... + usage: bitbake-layers [-d] [-q] [-F] [--color COLOR] [-h] ... 
+ + BitBake layers utility + + optional arguments: + -d, --debug Enable debug output + -q, --quiet Print only errors + -F, --force Force add without recipe parse verification + --color COLOR Colorize output (where COLOR is auto, always, never) + -h, --help show this help message and exit + + subcommands: + + layerindex-fetch Fetches a layer from a layer index along with its + dependent layers, and adds them to conf/bblayers.conf. + layerindex-show-depends + Find layer dependencies from layer index. + add-layer Add one or more layers to bblayers.conf. + remove-layer Remove one or more layers from bblayers.conf. + flatten flatten layer configuration into a separate output + directory. + show-layers show current configured layers. + show-overlayed list overlayed recipes (where the same recipe exists + in another layer) + show-recipes list available recipes, showing the layer they are + provided by + show-appends list bbappend files and recipe files they apply to + show-cross-depends Show dependencies between recipes that cross layer + boundaries. + create-layer Create a basic layer + + Use bitbake-layers --help to get help on a specific command + +The following list describes the available commands: + +- ``help:`` Displays general help or help on a specified command. + +- ``show-layers:`` Shows the current configured layers. + +- ``show-overlayed:`` Lists overlayed recipes. A recipe is overlayed + when a recipe with the same name exists in another layer that has a + higher layer priority. + +- ``show-recipes:`` Lists available recipes and the layers that + provide them. + +- ``show-appends:`` Lists ``.bbappend`` files and the recipe files to + which they apply. + +- ``show-cross-depends:`` Lists dependency relationships between + recipes that cross layer boundaries. + +- ``add-layer:`` Adds a layer to ``bblayers.conf``. + +- ``remove-layer:`` Removes a layer from ``bblayers.conf`` + +- ``flatten:`` Flattens the layer configuration into a separate + output directory. Flattening your layer configuration builds a + "flattened" directory that contains the contents of all layers, with + any overlayed recipes removed and any ``.bbappend`` files appended to + the corresponding recipes. You might have to perform some manual + cleanup of the flattened layer as follows: + + - Non-recipe files (such as patches) are overwritten. The flatten + command shows a warning for these files. + + - Anything beyond the normal layer setup has been added to the + ``layer.conf`` file. Only the lowest priority layer's + ``layer.conf`` is used. + + - Overridden and appended items from ``.bbappend`` files need to be + cleaned up. The contents of each ``.bbappend`` end up in the + flattened recipe. However, if there are appended or changed + variable values, you need to tidy these up yourself. Consider the + following example. Here, the ``bitbake-layers`` command adds the + line ``#### bbappended ...`` so that you know where the following + lines originate: + :: + + ... + DESCRIPTION = "A useful utility" + ... + EXTRA_OECONF = "--enable-something" + ... + + #### bbappended from meta-anotherlayer #### + + DESCRIPTION = "Customized utility" + EXTRA_OECONF += "--enable-somethingelse" + + + Ideally, you would tidy up these utilities as follows: + :: + + ... + DESCRIPTION = "Customized utility" + ... + EXTRA_OECONF = "--enable-something --enable-somethingelse" + ... + +- ``layerindex-fetch``: Fetches a layer from a layer index, along + with its dependent layers, and adds the layers to the + ``conf/bblayers.conf`` file. 
+ +- ``layerindex-show-depends``: Finds layer dependencies from the + layer index. + +- ``create-layer``: Creates a basic layer. + +Creating a General Layer Using the ``bitbake-layers`` Script +------------------------------------------------------------ + +The ``bitbake-layers`` script with the ``create-layer`` subcommand +simplifies creating a new general layer. + +.. note:: + + - For information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`" + section in the Yocto + Project Board Specific (BSP) Developer's Guide. + + - In order to use a layer with the OpenEmbedded build system, you + need to add the layer to your ``bblayers.conf`` configuration + file. See the ":ref:`dev-manual/dev-manual-common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`" + section for more information. + +The default mode of the script's operation with this subcommand is to +create a layer with the following: + +- A layer priority of 6. + +- A ``conf`` subdirectory that contains a ``layer.conf`` file. + +- A ``recipes-example`` subdirectory that contains a further + subdirectory named ``example``, which contains an ``example.bb`` + recipe file. + +- A ``COPYING.MIT`` file, which is the license statement for the layer. The + script assumes you want to use the MIT license, which is typical for + most layers, for the contents of the layer itself. + +- A ``README`` file, which is a file describing the contents of your + new layer. + +In its simplest form, you can use the following command form to create a +layer. The command creates a layer whose name corresponds to +your_layer_name in the current directory:
+::
+
+   $ bitbake-layers create-layer your_layer_name
+
+As an example, the following command creates a layer +named ``meta-scottrif`` in your home directory: +:: + + $ cd /usr/home + $ bitbake-layers create-layer meta-scottrif + NOTE: Starting bitbake server... + Add your new layer with 'bitbake-layers add-layer meta-scottrif' + +If you want to set the priority of the layer to other than the default +value of "6", you can either use the ``--priority`` option or you +can edit the +:term:`BBFILE_PRIORITY` value +in the ``conf/layer.conf`` file after the script creates it. Furthermore, if +you want to give the example recipe file some name other than the +default, you can use the ``--example-recipe-name`` option. + +The easiest way to see how the ``bitbake-layers create-layer`` command +works is to experiment with the script. You can also read the usage +information by entering the following: +:: + + $ bitbake-layers create-layer --help + NOTE: Starting bitbake server... + usage: bitbake-layers create-layer [-h] [--priority PRIORITY] + [--example-recipe-name EXAMPLERECIPE] + layerdir + + Create a basic layer + + positional arguments: + layerdir Layer directory to create + + optional arguments: + -h, --help show this help message and exit + --priority PRIORITY, -p PRIORITY + Layer directory to create + --example-recipe-name EXAMPLERECIPE, -e EXAMPLERECIPE + Filename of the example recipe + +Adding a Layer Using the ``bitbake-layers`` Script +-------------------------------------------------- + +Once you create your general layer, you must add it to your +``bblayers.conf`` file. Adding the layer to this configuration file +makes the OpenEmbedded build system aware of your layer so that it can +search it for metadata.
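+
+For illustration only, note that the ``bitbake-layers add-layer``
+command shown in the next example simply records your layer's path in
+the :term:`BBLAYERS` variable of the ``conf/bblayers.conf`` file.
+Assuming the hypothetical ``meta-scottrif`` layer used later in this
+section, the resulting entry might look like the following (your paths
+will differ):
+::
+
+   BBLAYERS ?= " \
+     /home/scottrif/poky/meta \
+     /home/scottrif/poky/meta-poky \
+     /home/scottrif/poky/meta-yocto-bsp \
+     /home/scottrif/poky/build/meta-scottrif \
+     "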
+ +Add your layer by using the ``bitbake-layers add-layer`` command: +:: + + $ bitbake-layers add-layer your_layer_name + +Here is an example that adds a +layer named ``meta-scottrif`` to the configuration file. Following the +command that adds the layer is another ``bitbake-layers`` command that +shows the layers that are in your ``bblayers.conf`` file: +:: + + $ bitbake-layers add-layer meta-scottrif + NOTE: Starting bitbake server... + Parsing recipes: 100% |##########################################################| Time: 0:00:49 + Parsing of 1441 .bb files complete (0 cached, 1441 parsed). 2055 targets, 56 skipped, 0 masked, 0 errors. + $ bitbake-layers show-layers + NOTE: Starting bitbake server... + layer path priority + ========================================================================== + meta /home/scottrif/poky/meta 5 + meta-poky /home/scottrif/poky/meta-poky 5 + meta-yocto-bsp /home/scottrif/poky/meta-yocto-bsp 5 + workspace /home/scottrif/poky/build/workspace 99 + meta-scottrif /home/scottrif/poky/build/meta-scottrif 6 + + +Adding the layer to this file +enables the build system to locate the layer during the build. + +.. note:: + + During a build, the OpenEmbedded build system looks in the layers + from the top of the list down to the bottom in that order. + +.. _usingpoky-extend-customimage: + +Customizing Images +================== + +You can customize images to satisfy particular requirements. This +section describes several methods and provides guidelines for each. + +.. _usingpoky-extend-customimage-localconf: + +Customizing Images Using ``local.conf`` +--------------------------------------- + +Probably the easiest way to customize an image is to add a package by +way of the ``local.conf`` configuration file. Because it is limited to +local use, this method generally only allows you to add packages and is +not as flexible as creating your own customized image. When you add +packages using local variables this way, you need to realize that these +variable changes are in effect for every build and consequently affect +all images, which might not be what you require. + +To add a package to your image using the local configuration file, use +the ``IMAGE_INSTALL`` variable with the ``_append`` operator: +:: + + IMAGE_INSTALL_append = " strace" + +Use of the syntax is important - +specifically, the space between the quote and the package name, which is +``strace`` in this example. This space is required since the ``_append`` +operator does not add the space. + +Furthermore, you must use ``_append`` instead of the ``+=`` operator if +you want to avoid ordering issues, because ``_append`` unconditionally +appends to the variable and thus avoids ordering problems +due to the variable being set in image recipes and ``.bbclass`` files +with operators like ``?=``. Using ``_append`` ensures the operation +takes effect. + +As shown in its simplest use, ``IMAGE_INSTALL_append`` affects all +images. It is possible to extend the syntax so that the variable applies +to a specific image only. Here is an example:
+::
+
+   IMAGE_INSTALL_append_pn-core-image-minimal = " strace"
+
+This example adds ``strace`` to the ``core-image-minimal`` image only. + +You can add packages using a similar approach through the +``CORE_IMAGE_EXTRA_INSTALL`` variable. If you use this variable, only +``core-image-*`` images are affected. + ..
_usingpoky-extend-customimage-imagefeatures: + +Customizing Images Using Custom ``IMAGE_FEATURES`` and ``EXTRA_IMAGE_FEATURES`` +------------------------------------------------------------------------------- + +Another method for customizing your image is to enable or disable +high-level image features by using the +:term:`IMAGE_FEATURES` and +:term:`EXTRA_IMAGE_FEATURES` +variables. Although the functions for both variables are nearly +equivalent, best practices dictate using ``IMAGE_FEATURES`` from within +a recipe and using ``EXTRA_IMAGE_FEATURES`` from within your +``local.conf`` file, which is found in the +:term:`Build Directory`. + +To understand how these features work, the best reference is +``meta/classes/core-image.bbclass``. This class lists out the available +``IMAGE_FEATURES`` of which most map to package groups while some, such +as ``debug-tweaks`` and ``read-only-rootfs``, resolve as general +configuration settings. + +In summary, the file looks at the contents of the ``IMAGE_FEATURES`` +variable and then maps or configures the feature accordingly. Based on +this information, the build system automatically adds the appropriate +packages or configurations to the +:term:`IMAGE_INSTALL` variable. +Effectively, you are enabling extra features by extending the class or +creating a custom class for use with specialized image ``.bb`` files. + +Use the ``EXTRA_IMAGE_FEATURES`` variable from within your local +configuration file. Using a separate area from which to enable features +with this variable helps you avoid overwriting the features in the image +recipe that are enabled with ``IMAGE_FEATURES``. The value of +``EXTRA_IMAGE_FEATURES`` is added to ``IMAGE_FEATURES`` within +``meta/conf/bitbake.conf``. + +To illustrate how you can use these variables to modify your image, +consider an example that selects the SSH server. The Yocto Project ships +with two SSH servers you can use with your images: Dropbear and OpenSSH. +Dropbear is a minimal SSH server appropriate for resource-constrained +environments, while OpenSSH is a well-known standard SSH server +implementation. By default, the ``core-image-sato`` image is configured +to use Dropbear. The ``core-image-full-cmdline`` and ``core-image-lsb`` +images both include OpenSSH. The ``core-image-minimal`` image does not +contain an SSH server. + +You can customize your image and change these defaults. Edit the +``IMAGE_FEATURES`` variable in your recipe or use the +``EXTRA_IMAGE_FEATURES`` in your ``local.conf`` file so that it +configures the image you are working with to include +``ssh-server-dropbear`` or ``ssh-server-openssh``. + +.. note:: + + See the " + Images + " section in the Yocto Project Reference Manual for a complete list + of image features that ship with the Yocto Project. + +.. _usingpoky-extend-customimage-custombb: + +Customizing Images Using Custom .bb Files +----------------------------------------- + +You can also customize an image by creating a custom recipe that defines +additional software as part of the image. The following example shows +the form for the two lines you need: +:: + + IMAGE_INSTALL = "packagegroup-core-x11-base package1 package2" + inherit core-image + +Defining the software using a custom recipe gives you total control over +the contents of the image. It is important to use the correct names of +packages in the ``IMAGE_INSTALL`` variable. You must use the +OpenEmbedded notation and not the Debian notation for the names (e.g. +``glibc-dev`` instead of ``libc6-dev``). 
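+
+For illustration, a complete, hypothetical image recipe built from
+these two lines (for example, a file named ``my-custom-image.bb`` in
+your layer) might look like the following; the package names shown are
+placeholders only:
+::
+
+   SUMMARY = "A custom image containing a few extra packages"
+   LICENSE = "MIT"
+
+   IMAGE_INSTALL = "packagegroup-core-boot strace"
+
+   inherit core-image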
+ +The other method for creating a custom image is to base it on an +existing image. For example, if you want to create an image based on +``core-image-sato`` but add the additional package ``strace`` to the +image, copy the ``meta/recipes-sato/images/core-image-sato.bb`` to a new +``.bb`` and add the following line to the end of the copy: +:: + + IMAGE_INSTALL += "strace" + +.. _usingpoky-extend-customimage-customtasks: + +Customizing Images Using Custom Package Groups +---------------------------------------------- + +For complex custom images, the best approach for customizing an image is +to create a custom package group recipe that is used to build the image +or images. A good example of a package group recipe is +``meta/recipes-core/packagegroups/packagegroup-base.bb``. + +If you examine that recipe, you see that the ``PACKAGES`` variable lists +the package group packages to produce. The ``inherit packagegroup`` +statement sets appropriate default values and automatically adds +``-dev``, ``-dbg``, and ``-ptest`` complementary packages for each +package specified in the ``PACKAGES`` statement. + +.. note:: + + The + inherit packagegroup + line should be located near the top of the recipe, certainly before + the + PACKAGES + statement. + +For each package you specify in ``PACKAGES``, you can use ``RDEPENDS`` +and ``RRECOMMENDS`` entries to provide a list of packages the parent +task package should contain. You can see examples of these further down +in the ``packagegroup-base.bb`` recipe. + +Here is a short, fabricated example showing the same basic pieces for a +hypothetical packagegroup defined in ``packagegroup-custom.bb``, where +the variable ``PN`` is the standard way to abbreviate the reference to +the full packagegroup name ``packagegroup-custom``: +:: + + DESCRIPTION = "My Custom Package Groups" + + inherit packagegroup + + PACKAGES = "\ + ${PN}-apps \ + ${PN}-tools \ + " + + RDEPENDS_${PN}-apps = "\ + dropbear \ + portmap \ + psplash" + + RDEPENDS_${PN}-tools = "\ + oprofile \ + oprofileui-server \ + lttng-tools" + + RRECOMMENDS_${PN}-tools = "\ + kernel-module-oprofile" + +In the previous example, two package group packages are created with +their dependencies and their recommended package dependencies listed: +``packagegroup-custom-apps``, and ``packagegroup-custom-tools``. To +build an image using these package group packages, you need to add +``packagegroup-custom-apps`` and/or ``packagegroup-custom-tools`` to +``IMAGE_INSTALL``. For other forms of image dependencies see the other +areas of this section. + +.. _usingpoky-extend-customimage-image-name: + +Customizing an Image Hostname +----------------------------- + +By default, the configured hostname (i.e. ``/etc/hostname``) in an image +is the same as the machine name. For example, if +:term:`MACHINE` equals "qemux86", the +configured hostname written to ``/etc/hostname`` is "qemux86". + +You can customize this name by altering the value of the "hostname" +variable in the ``base-files`` recipe using either an append file or a +configuration file. Use the following in an append file: +:: + + hostname = "myhostname" + +Use the following in a configuration file: +:: + + hostname_pn-base-files = "myhostname" + +Changing the default value of the variable "hostname" can be useful in +certain situations. For example, suppose you need to do extensive +testing on an image and you would like to easily identify the image +under test from existing images with typical default hostnames. 
In this +situation, you could change the default hostname to "testme", which +results in all the images using the name "testme". Once testing is +complete and you do not need to rebuild the image for test any longer, +you can easily reset the default hostname. + +Another point of interest is that if you unset the variable, the image +will have no default hostname in the filesystem. Here is an example that +unsets the variable in a configuration file: +:: + + hostname_pn-base-files = "" + +Having no default hostname in the filesystem is suitable for +environments that use dynamic hostnames such as virtual machines. + +.. _new-recipe-writing-a-new-recipe: + +Writing a New Recipe +==================== + +Recipes (``.bb`` files) are fundamental components in the Yocto Project +environment. Each software component built by the OpenEmbedded build +system requires a recipe to define the component. This section describes +how to create, write, and test a new recipe. + +.. note:: + + For information on variables that are useful for recipes and for + information about recipe naming issues, see the " + Required + " section of the Yocto Project Reference Manual. + +.. _new-recipe-overview: + +Overview +-------- + +The following figure shows the basic process for creating a new recipe. +The remainder of the section provides details for the steps. + +.. image:: figures/recipe-workflow.png + :align: center + +.. _new-recipe-locate-or-automatically-create-a-base-recipe: + +Locate or Automatically Create a Base Recipe +-------------------------------------------- + +You can always write a recipe from scratch. However, three choices exist +that can help you quickly get a start on a new recipe: + +- ``devtool add``: A command that assists in creating a recipe and an + environment conducive to development. + +- ``recipetool create``: A command provided by the Yocto Project that + automates creation of a base recipe based on the source files. + +- *Existing Recipes:* Location and modification of an existing recipe + that is similar in function to the recipe you need. + +.. note:: + + For information on recipe syntax, see the " + Recipe Syntax + " section. + +.. _new-recipe-creating-the-base-recipe-using-devtool: + +Creating the Base Recipe Using ``devtool add`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``devtool add`` command uses the same logic for auto-creating the +recipe as ``recipetool create``, which is listed below. Additionally, +however, ``devtool add`` sets up an environment that makes it easy for +you to patch the source and to make changes to the recipe as is often +necessary when adding a recipe to build a new piece of software to be +included in a build. + +You can find a complete description of the ``devtool add`` command in +the ":ref:`sdk-a-closer-look-at-devtool-add`" section +in the Yocto Project Application Development and the Extensible Software +Development Kit (eSDK) manual. + +.. _new-recipe-creating-the-base-recipe-using-recipetool: + +Creating the Base Recipe Using ``recipetool create`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``recipetool create`` automates creation of a base recipe given a set of +source code files. As long as you can extract or point to the source +files, the tool will construct a recipe and automatically configure all +pre-build information into the recipe. For example, suppose you have an +application that builds using Autotools. 
Creating the base recipe using +``recipetool`` results in a recipe that has the pre-build dependencies, +license requirements, and checksums configured. + +To run the tool, you just need to be in your +:term:`Build Directory` and have sourced the +build environment setup script (i.e. +`:ref:`structure-core-script`). +To get help on the tool, use the following command: +:: + + $ recipetool -h + NOTE: Starting bitbake server... + usage: recipetool [-d] [-q] [--color COLOR] [-h] ... + + OpenEmbedded recipe tool + + options: + -d, --debug Enable debug output + -q, --quiet Print only errors + --color COLOR Colorize output (where COLOR is auto, always, never) + -h, --help show this help message and exit + + subcommands: + create Create a new recipe + newappend Create a bbappend for the specified target in the specified + layer + setvar Set a variable within a recipe + appendfile Create/update a bbappend to replace a target file + appendsrcfiles Create/update a bbappend to add or replace source files + appendsrcfile Create/update a bbappend to add or replace a source file + Use recipetool --help to get help on a specific command + +Running ``recipetool create -o`` OUTFILE creates the base recipe and +locates it properly in the layer that contains your source files. +Following are some syntax examples: + +Use this syntax to generate a recipe based on source. Once generated, +the recipe resides in the existing source code layer: +:: + + recipetool create -o OUTFILE source + +Use this syntax to generate a recipe using code that +you extract from source. The extracted code is placed in its own layer +defined by EXTERNALSRC. +:: + + recipetool create -o OUTFILE -x EXTERNALSRC source + +Use this syntax to generate a recipe based on source. The options +direct ``recipetool`` to generate debugging information. Once generated, +the recipe resides in the existing source code layer: +:: + + recipetool create -d -o OUTFILE source + +.. _new-recipe-locating-and-using-a-similar-recipe: + +Locating and Using a Similar Recipe +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Before writing a recipe from scratch, it is often useful to discover +whether someone else has already written one that meets (or comes close +to meeting) your needs. The Yocto Project and OpenEmbedded communities +maintain many recipes that might be candidates for what you are doing. +You can find a good central index of these recipes in the `OpenEmbedded +Layer Index `__. + +Working from an existing recipe or a skeleton recipe is the best way to +get started. Here are some points on both methods: + +- *Locate and modify a recipe that is close to what you want to do:* + This method works when you are familiar with the current recipe + space. The method does not work so well for those new to the Yocto + Project or writing recipes. + + Some risks associated with this method are using a recipe that has + areas totally unrelated to what you are trying to accomplish with + your recipe, not recognizing areas of the recipe that you might have + to add from scratch, and so forth. All these risks stem from + unfamiliarity with the existing recipe space. + +- *Use and modify the following skeleton recipe:* If for some reason + you do not want to use ``recipetool`` and you cannot find an existing + recipe that is close to meeting your needs, you can use the following + structure to provide the fundamental areas of a new recipe. + :: + + DESCRIPTION = "" + HOMEPAGE = "" + LICENSE = "" + SECTION = "" + DEPENDS = "" + LIC_FILES_CHKSUM = "" + + SRC_URI = "" + +.. 
_new-recipe-storing-and-naming-the-recipe: + +Storing and Naming the Recipe +----------------------------- + +Once you have your base recipe, you should put it in your own layer and +name it appropriately. Locating it correctly ensures that the +OpenEmbedded build system can find it when you use BitBake to process +the recipe. + +- *Storing Your Recipe:* The OpenEmbedded build system locates your + recipe through the layer's ``conf/layer.conf`` file and the + :term:`BBFILES` variable. This + variable sets up a path from which the build system can locate + recipes. Here is the typical use: + :: + + BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ + ${LAYERDIR}/recipes-*/*/*.bbappend" + + Consequently, you need to be sure you locate your new recipe inside + your layer such that it can be found. + + You can find more information on how layers are structured in the + "`Understanding and Creating + Layers <#understanding-and-creating-layers>`__" section. + +- *Naming Your Recipe:* When you name your recipe, you need to follow + this naming convention: ``basename_version.bb``. Use lower-cased + characters and do not include the reserved suffixes ``-native``, + ``-cross``, ``-initial``, or ``-dev`` casually (i.e. do not use them + as part of your recipe name unless the string applies). Here are some + examples: + :: + + cups_1.7.0.bb + gawk_4.0.2.bb + irssi_0.8.16-rc1.bb + +.. _new-recipe-running-a-build-on-the-recipe: + +Running a Build on the Recipe +----------------------------- + +Creating a new recipe is usually an iterative process that requires +using BitBake to process the recipe multiple times in order to +progressively discover and add information to the recipe file. + +Assuming you have sourced the build environment setup script (i.e. +:ref:`structure-core-script`) and you are in +the :term:`Build Directory`, use +BitBake to process your recipe. All you need to provide is the +``basename`` of the recipe as described in the previous section: +:: + + $ bitbake basename + +During the build, the OpenEmbedded build system creates a temporary work +directory for each recipe +(``${``\ :term:`WORKDIR`\ ``}``) +where it keeps extracted source files, log files, intermediate +compilation and packaging files, and so forth. + +The path to the per-recipe temporary work directory depends on the +context in which it is being built. The quickest way to find this path +is to have BitBake return it by running the following: +:: + + $ bitbake -e basename | grep ^WORKDIR= + +As an example, assume a Source Directory +top-level folder named ``poky``, a default Build Directory at +``poky/build``, and a ``qemux86-poky-linux`` machine target system. +Furthermore, suppose your recipe is named ``foo_1.3.0.bb``. In this +case, the work directory the build system uses to build the package +would be as follows:
+::
+
+   poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0
+
+Inside this directory you can find sub-directories such as ``image``, +``packages-split``, and ``temp``. After the build, you can examine these +to determine how well the build went. + +.. note:: + + You can find log files for each task in the recipe's ``temp`` + directory (e.g. ``poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0/temp``). + Log files are named ``log.taskname`` (e.g. ``log.do_configure``, + ``log.do_fetch``, and ``log.do_compile``). + +You can find more information about the build process in the +":doc:`../overview-manual/overview-manual-development-environment`" +chapter of the Yocto Project Overview and Concepts Manual. + ..
_new-recipe-fetching-code: + +Fetching Code +------------- + +The first thing your recipe must do is specify how to fetch the source +files. Fetching is controlled mainly through the +:term:`SRC_URI` variable. Your recipe +must have a ``SRC_URI`` variable that points to where the source is +located. For a graphical representation of source locations, see the +":ref:`sources-dev-environment`" section in +the Yocto Project Overview and Concepts Manual. + +The :ref:`ref-tasks-fetch` task uses +the prefix of each entry in the ``SRC_URI`` variable value to determine +which :ref:`fetcher ` to use to get your +source files. It is the ``SRC_URI`` variable that triggers the fetcher. +The :ref:`ref-tasks-patch` task uses +the variable after source is fetched to apply patches. The OpenEmbedded +build system uses +:term:`FILESOVERRIDES` for +scanning directory locations for local files in ``SRC_URI``. + +The ``SRC_URI`` variable in your recipe must define each unique location +for your source files. It is good practice to not hard-code version +numbers in a URL used in ``SRC_URI``. Rather than hard-code these +values, use ``${``\ :term:`PV`\ ``}``, +which causes the fetch process to use the version specified in the +recipe filename. Specifying the version in this manner means that +upgrading the recipe to a future version is as simple as renaming the +recipe to match the new version. + +Here is a simple example from the +``meta/recipes-devtools/strace/strace_5.5.bb`` recipe where the source +comes from a single tarball. Notice the use of the +:term:`PV` variable: +:: + + SRC_URI = "https://strace.io/files/${PV}/strace-${PV}.tar.xz \\ + +Files mentioned in ``SRC_URI`` whose names end in a typical archive +extension (e.g. ``.tar``, ``.tar.gz``, ``.tar.bz2``, ``.zip``, and so +forth), are automatically extracted during the +:ref:`ref-tasks-unpack` task. For +another example that specifies these types of files, see the +"`Autotooled Package <#new-recipe-autotooled-package>`__" section. + +Another way of specifying source is from an SCM. For Git repositories, +you must specify :term:`SRCREV` and +you should specify :term:`PV` to include +the revision with :term:`SRCPV`. Here +is an example from the recipe +``meta/recipes-kernel/blktrace/blktrace_git.bb``: +:: + + SRCREV = "d6918c8832793b4205ed3bfede78c2f915c23385" + + PR = "r6" + PV = "1.0.5+git${SRCPV}" + + SRC_URI = "git://git.kernel.dk/blktrace.git \ + file://ldflags.patch" + +If your ``SRC_URI`` statement includes URLs pointing to individual files +fetched from a remote server other than a version control system, +BitBake attempts to verify the files against checksums defined in your +recipe to ensure they have not been tampered with or otherwise modified +since the recipe was written. Two checksums are used: +``SRC_URI[md5sum]`` and ``SRC_URI[sha256sum]``. + +If your ``SRC_URI`` variable points to more than a single URL (excluding +SCM URLs), you need to provide the ``md5`` and ``sha256`` checksums for +each URL. For these cases, you provide a name for each URL as part of +the ``SRC_URI`` and then reference that name in the subsequent checksum +statements. 
Here is an example combining lines from the files +``git.inc`` and ``git_2.24.1.bb``: +:: + + SRC_URI = "${KERNELORG_MIRROR}/software/scm/git/git-${PV}.tar.gz;name=tarball \ + ${KERNELORG_MIRROR}/software/scm/git/git-manpages-${PV}.tar.gz;name=manpages" + + SRC_URI[tarball.md5sum] = "166bde96adbbc11c8843d4f8f4f9811b" + SRC_URI[tarball.sha256sum] = "ad5334956301c86841eb1e5b1bb20884a6bad89a10a6762c958220c7cf64da02" + SRC_URI[manpages.md5sum] = "31c2272a8979022497ba3d4202df145d" + SRC_URI[manpages.sha256sum] = "9a7ae3a093bea39770eb96ca3e5b40bff7af0b9f6123f089d7821d0e5b8e1230" + +Proper values for ``md5`` and ``sha256`` checksums might be available +with other signatures on the download page for the upstream source (e.g. +``md5``, ``sha1``, ``sha256``, ``GPG``, and so forth). Because the +OpenEmbedded build system only deals with ``sha256sum`` and ``md5sum``, +you should verify all the signatures you find by hand. + +If no ``SRC_URI`` checksums are specified when you attempt to build the +recipe, or you provide an incorrect checksum, the build will produce an +error for each missing or incorrect checksum. As part of the error +message, the build system provides the checksum string corresponding to +the fetched file. Once you have the correct checksums, you can copy and +paste them into your recipe and then run the build again to continue. + +.. note:: + + As mentioned, if the upstream source provides signatures for + verifying the downloaded source code, you should verify those + manually before setting the checksum values in the recipe and + continuing with the build. + +This final example is a bit more complicated and is from the +``meta/recipes-sato/rxvt-unicode/rxvt-unicode_9.20.bb`` recipe. The +example's ``SRC_URI`` statement identifies multiple files as the source +files for the recipe: a tarball, a patch file, a desktop file, and an +icon. +:: + + SRC_URI = "http://dist.schmorp.de/rxvt-unicode/Attic/rxvt-unicode-${PV}.tar.bz2 \ + file://xwc.patch \ + file://rxvt.desktop \ + file://rxvt.png" + +When you specify local files using the ``file://`` URI protocol, the +build system fetches files from the local machine. The path is relative +to the :term:`FILESPATH` variable +and searches specific directories in a certain order: +``${``\ :term:`BP`\ ``}``, +``${``\ :term:`BPN`\ ``}``, and +``files``. The directories are assumed to be subdirectories of the +directory in which the recipe or append file resides. For another +example that specifies these types of files, see the "`Single .c File +Package (Hello +World!) <#new-recipe-single-c-file-package-hello-world>`__" section. + +The previous example also specifies a patch file. Patch files are files +whose names usually end in ``.patch`` or ``.diff`` but can end with +compressed suffixes such as ``diff.gz`` and ``patch.bz2``, for example. +The build system automatically applies patches as described in the +"`Patching Code <#new-recipe-patching-code>`__" section. + +.. _new-recipe-unpacking-code: + +Unpacking Code +-------------- + +During the build, the +:ref:`ref-tasks-unpack` task unpacks +the source with ``${``\ :term:`S`\ ``}`` +pointing to where it is unpacked. + +If you are fetching your source files from an upstream source archived +tarball and the tarball's internal structure matches the common +convention of a top-level subdirectory named +``${``\ :term:`BPN`\ ``}-${``\ :term:`PV`\ ``}``, +then you do not need to set ``S``. 
However, if ``SRC_URI`` specifies to +fetch source from an archive that does not use this convention, or from +an SCM like Git or Subversion, your recipe needs to define ``S``. + +If processing your recipe using BitBake successfully unpacks the source +files, you need to be sure that the directory pointed to by ``${S}`` +matches the structure of the source. + +.. _new-recipe-patching-code: + +Patching Code +------------- + +Sometimes it is necessary to patch code after it has been fetched. Any +files mentioned in ``SRC_URI`` whose names end in ``.patch`` or +``.diff`` or compressed versions of these suffixes (e.g. ``diff.gz`` are +treated as patches. The +:ref:`ref-tasks-patch` task +automatically applies these patches. + +The build system should be able to apply patches with the "-p1" option +(i.e. one directory level in the path will be stripped off). If your +patch needs to have more directory levels stripped off, specify the +number of levels using the "striplevel" option in the ``SRC_URI`` entry +for the patch. Alternatively, if your patch needs to be applied in a +specific subdirectory that is not specified in the patch file, use the +"patchdir" option in the entry. + +As with all local files referenced in +:term:`SRC_URI` using ``file://``, +you should place patch files in a directory next to the recipe either +named the same as the base name of the recipe +(:term:`BP` and +:term:`BPN`) or "files". + +.. _new-recipe-licensing: + +Licensing +--------- + +Your recipe needs to have both the +:term:`LICENSE` and +:term:`LIC_FILES_CHKSUM` +variables: + +- ``LICENSE``: This variable specifies the license for the software. + If you do not know the license under which the software you are + building is distributed, you should go to the source code and look + for that information. Typical files containing this information + include ``COPYING``, ``LICENSE``, and ``README`` files. You could + also find the information near the top of a source file. For example, + given a piece of software licensed under the GNU General Public + License version 2, you would set ``LICENSE`` as follows: + :: + + LICENSE = "GPLv2" + + The licenses you specify within ``LICENSE`` can have any name as long + as you do not use spaces, since spaces are used as separators between + license names. For standard licenses, use the names of the files in + ``meta/files/common-licenses/`` or the ``SPDXLICENSEMAP`` flag names + defined in ``meta/conf/licenses.conf``. + +- ``LIC_FILES_CHKSUM``: The OpenEmbedded build system uses this + variable to make sure the license text has not changed. If it has, + the build produces an error and it affords you the chance to figure + it out and correct the problem. + + You need to specify all applicable licensing files for the software. + At the end of the configuration step, the build process will compare + the checksums of the files to be sure the text has not changed. Any + differences result in an error with the message containing the + current checksum. For more explanation and examples of how to set the + ``LIC_FILES_CHKSUM`` variable, see the "`Tracking License + Changes <#>`__" section. + + To determine the correct checksum string, you can list the + appropriate files in the ``LIC_FILES_CHKSUM`` variable with incorrect + md5 strings, attempt to build the software, and then note the + resulting error messages that will report the correct md5 strings. + See the "`Fetching Code <#new-recipe-fetching-code>`__" section for + additional information. 
+ + Here is an example that assumes the software has a ``COPYING`` file: + :: + + LIC_FILES_CHKSUM = "file://COPYING;md5=xxx" + + When you try to build the + software, the build system will produce an error and give you the + correct string that you can substitute into the recipe file for a + subsequent build. + +.. _new-dependencies: + +Dependencies +------------ + +Most software packages have a short list of other packages that they +require, which are called dependencies. These dependencies fall into two +main categories: build-time dependencies, which are required when the +software is built; and runtime dependencies, which are required to be +installed on the target in order for the software to run. + +Within a recipe, you specify build-time dependencies using the +:term:`DEPENDS` variable. Although +nuances exist, items specified in ``DEPENDS`` should be names of other +recipes. It is important that you specify all build-time dependencies +explicitly. If you do not, due to the parallel nature of BitBake's +execution, you can end up with a race condition where the dependency is +present for one task of a recipe (e.g. +:ref:`ref-tasks-configure`) and +then gone when the next task runs (e.g. +:ref:`ref-tasks-compile`). + +Another consideration is that configure scripts might automatically +check for optional dependencies and enable corresponding functionality +if those dependencies are found. This behavior means that to ensure +deterministic results and thus avoid more race conditions, you need to +either explicitly specify these dependencies as well, or tell the +configure script explicitly to disable the functionality. If you wish to +make a recipe that is more generally useful (e.g. publish the recipe in +a layer for others to use), instead of hard-disabling the functionality, +you can use the +:term:`PACKAGECONFIG` variable +to allow functionality and the corresponding dependencies to be enabled +and disabled easily by other users of the recipe. + +Similar to build-time dependencies, you specify runtime dependencies +through a variable - +:term:`RDEPENDS`, which is +package-specific. All variables that are package-specific need to have +the name of the package added to the end as an override. Since the main +package for a recipe has the same name as the recipe, and the recipe's +name can be found through the +``${``\ :term:`PN`\ ``}`` variable, then +you specify the dependencies for the main package by setting +``RDEPENDS_${PN}``. If the package were named ``${PN}-tools``, then you +would set ``RDEPENDS_${PN}-tools``, and so forth. + +Some runtime dependencies will be set automatically at packaging time. +These dependencies include any shared library dependencies (i.e. if a +package "example" contains "libexample" and another package "mypackage" +contains a binary that links to "libexample" then the OpenEmbedded build +system will automatically add a runtime dependency to "mypackage" on +"example"). See the +":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" +section in the Yocto Project Overview and Concepts Manual for further +details. + +.. _new-recipe-configuring-the-recipe: + +Configuring the Recipe +---------------------- + +Most software provides some means of setting build-time configuration +options before compilation. Typically, setting these options is +accomplished by running a configure script with options, or by modifying +a build configuration file. + +.. 
note:: + + As of Yocto Project Release 1.7, some of the core recipes that + package binary configuration scripts now disable the scripts due to + the scripts previously requiring error-prone path substitution. The + OpenEmbedded build system uses + pkg-config + now, which is much more robust. You can find a list of the + \*-config + scripts that are disabled list in the " + Binary Configuration Scripts Disabled + " section in the Yocto Project Reference Manual. + +A major part of build-time configuration is about checking for +build-time dependencies and possibly enabling optional functionality as +a result. You need to specify any build-time dependencies for the +software you are building in your recipe's +:term:`DEPENDS` value, in terms of +other recipes that satisfy those dependencies. You can often find +build-time or runtime dependencies described in the software's +documentation. + +The following list provides configuration items of note based on how +your software is built: + +- *Autotools:* If your source files have a ``configure.ac`` file, then + your software is built using Autotools. If this is the case, you just + need to worry about modifying the configuration. + + When using Autotools, your recipe needs to inherit the + :ref:`autotools ` class + and your recipe does not have to contain a + :ref:`ref-tasks-configure` task. + However, you might still want to make some adjustments. For example, + you can set + :term:`EXTRA_OECONF` or + :term:`PACKAGECONFIG_CONFARGS` + to pass any needed configure options that are specific to the recipe. + +- *CMake:* If your source files have a ``CMakeLists.txt`` file, then + your software is built using CMake. If this is the case, you just + need to worry about modifying the configuration. + + When you use CMake, your recipe needs to inherit the + :ref:`cmake ` class and your + recipe does not have to contain a + :ref:`ref-tasks-configure` task. + You can make some adjustments by setting + :term:`EXTRA_OECMAKE` to + pass any needed configure options that are specific to the recipe. + + .. note:: + + If you need to install one or more custom CMake toolchain files + that are supplied by the application you are building, install the + files to + ${D}${datadir}/cmake/ + Modules during + do_install + . + +- *Other:* If your source files do not have a ``configure.ac`` or + ``CMakeLists.txt`` file, then your software is built using some + method other than Autotools or CMake. If this is the case, you + normally need to provide a + :ref:`ref-tasks-configure` task + in your recipe unless, of course, there is nothing to configure. + + Even if your software is not being built by Autotools or CMake, you + still might not need to deal with any configuration issues. You need + to determine if configuration is even a required step. You might need + to modify a Makefile or some configuration file used for the build to + specify necessary build options. Or, perhaps you might need to run a + provided, custom configure script with the appropriate options. + + For the case involving a custom configure script, you would run + ``./configure --help`` and look for the options you need to set. + +Once configuration succeeds, it is always good practice to look at the +``log.do_configure`` file to ensure that the appropriate options have +been enabled and no additional build-time dependencies need to be added +to ``DEPENDS``. 
For example, if the configure script reports that it +found something not mentioned in ``DEPENDS``, or that it did not find +something that it needed for some desired optional functionality, then +you would need to add those to ``DEPENDS``. Looking at the log might +also reveal items being checked for, enabled, or both that you do not +want, or items not being found that are in ``DEPENDS``, in which case +you would need to look at passing extra options to the configure script +as needed. For reference information on configure options specific to +the software you are building, you can consult the output of the +``./configure --help`` command within ``${S}`` or consult the software's +upstream documentation. + +.. _new-recipe-using-headers-to-interface-with-devices: + +Using Headers to Interface with Devices +--------------------------------------- + +If your recipe builds an application that needs to communicate with some +device or needs an API into a custom kernel, you will need to provide +appropriate header files. Under no circumstances should you ever modify +the existing +``meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc`` file. +These headers are used to build ``libc`` and must not be compromised +with custom or machine-specific header information. If you customize +``libc`` through modified headers all other applications that use +``libc`` thus become affected. + +.. note:: + + Never copy and customize the + libc + header file (i.e. + meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc + ). + +The correct way to interface to a device or custom kernel is to use a +separate package that provides the additional headers for the driver or +other unique interfaces. When doing so, your application also becomes +responsible for creating a dependency on that specific provider. + +Consider the following: + +- Never modify ``linux-libc-headers.inc``. Consider that file to be + part of the ``libc`` system, and not something you use to access the + kernel directly. You should access ``libc`` through specific ``libc`` + calls. + +- Applications that must talk directly to devices should either provide + necessary headers themselves, or establish a dependency on a special + headers package that is specific to that driver. + +For example, suppose you want to modify an existing header that adds I/O +control or network support. If the modifications are used by a small +number programs, providing a unique version of a header is easy and has +little impact. When doing so, bear in mind the guidelines in the +previous list. + +.. note:: + + If for some reason your changes need to modify the behavior of the + libc + , and subsequently all other applications on the system, use a + .bbappend + to modify the + linux-kernel-headers.inc + file. However, take care to not make the changes machine specific. + +Consider a case where your kernel is older and you need an older +``libc`` ABI. The headers installed by your recipe should still be a +standard mainline kernel, not your own custom one. + +When you use custom kernel headers you need to get them from +:term:`STAGING_KERNEL_DIR`, +which is the directory with kernel headers that are required to build +out-of-tree modules. Your recipe will also need the following: +:: + + do_configure[depends] += "virtual/kernel:do_shared_workdir" + +.. _new-recipe-compilation: + +Compilation +----------- + +During a build, the ``do_compile`` task happens after source is fetched, +unpacked, and configured. 
If the recipe passes through ``do_compile`` +successfully, nothing needs to be done. + +However, if the compile step fails, you need to diagnose the failure. +Here are some common issues that cause failures. + +.. note:: + + For cases where improper paths are detected for configuration files + or for when libraries/headers cannot be found, be sure you are using + the more robust + pkg-config + . See the note in section " + Configuring the Recipe + " for additional information. + +- *Parallel build failures:* These failures manifest themselves as + intermittent errors, or errors reporting that a file or directory + that should be created by some other part of the build process could + not be found. This type of failure can occur even if, upon + inspection, the file or directory does exist after the build has + failed, because that part of the build process happened in the wrong + order. + + To fix the problem, you need to either satisfy the missing dependency + in the Makefile or whatever script produced the Makefile, or (as a + workaround) set :term:`PARALLEL_MAKE` to an empty string: + :: + + PARALLEL_MAKE = "" + + For information on parallel Makefile issues, see the "`Debugging + Parallel Make Races <#debugging-parallel-make-races>`__" section. + +- *Improper host path usage:* This failure applies to recipes building + for the target or ``nativesdk`` only. The failure occurs when the + compilation process uses improper headers, libraries, or other files + from the host system when cross-compiling for the target. + + To fix the problem, examine the ``log.do_compile`` file to identify + the host paths being used (e.g. ``/usr/include``, ``/usr/lib``, and + so forth) and then either add configure options, apply a patch, or do + both. + +- *Failure to find required libraries/headers:* If a build-time + dependency is missing because it has not been declared in + :term:`DEPENDS`, or because the + dependency exists but the path used by the build process to find the + file is incorrect and the configure step did not detect it, the + compilation process could fail. For either of these failures, the + compilation process notes that files could not be found. In these + cases, you need to go back and add additional options to the + configure script as well as possibly add additional build-time + dependencies to ``DEPENDS``. + + Occasionally, it is necessary to apply a patch to the source to + ensure the correct paths are used. If you need to specify paths to + find files staged into the sysroot from other recipes, use the + variables that the OpenEmbedded build system provides (e.g. + ``STAGING_BINDIR``, ``STAGING_INCDIR``, ``STAGING_DATADIR``, and so + forth). + +.. _new-recipe-installing: + +Installing +---------- + +During ``do_install``, the task copies the built files along with their +hierarchy to locations that would mirror their locations on the target +device. The installation process copies files from the +``${``\ :term:`S`\ ``}``, +``${``\ :term:`B`\ ``}``, and +``${``\ :term:`WORKDIR`\ ``}`` +directories to the ``${``\ :term:`D`\ ``}`` +directory to create the structure as it should appear on the target +system. + +How your software is built affects what you must do to be sure your +software is installed correctly. 
The following list describes what you +must do for installation depending on the type of build system used by +the software being built: + +- *Autotools and CMake:* If the software your recipe is building uses + Autotools or CMake, the OpenEmbedded build system understands how to + install the software. Consequently, you do not have to have a + ``do_install`` task as part of your recipe. You just need to make + sure the install portion of the build completes with no issues. + However, if you wish to install additional files not already being + installed by ``make install``, you should do this using a + ``do_install_append`` function using the install command as described + in the "Manual" bulleted item later in this list. + +- Other (using ``make install``): You need to define a ``do_install`` + function in your recipe. The function should call + ``oe_runmake install`` and will likely need to pass in the + destination directory as well. How you pass that path is dependent on + how the ``Makefile`` being run is written (e.g. ``DESTDIR=${D}``, + ``PREFIX=${D}``, ``INSTALLROOT=${D}``, and so forth). + + For an example recipe using ``make install``, see the + "`Makefile-Based Package <#new-recipe-makefile-based-package>`__" + section. + +- *Manual:* You need to define a ``do_install`` function in your + recipe. The function must first use ``install -d`` to create the + directories under + ``${``\ :term:`D`\ ``}``. Once the + directories exist, your function can use ``install`` to manually + install the built software into the directories. + + You can find more information on ``install`` at + http://www.gnu.org/software/coreutils/manual/html_node/install-invocation.html. + +For the scenarios that do not use Autotools or CMake, you need to track +the installation and diagnose and fix any issues until everything +installs correctly. You need to look in the default location of +``${D}``, which is ``${WORKDIR}/image``, to be sure your files have been +installed correctly. + +.. note:: + + - During the installation process, you might need to modify some of + the installed files to suit the target layout. For example, you + might need to replace hard-coded paths in an initscript with + values of variables provided by the build system, such as + replacing ``/usr/bin/`` with ``${bindir}``. If you do perform such + modifications during ``do_install``, be sure to modify the + destination file after copying rather than before copying. + Modifying after copying ensures that the build system can + re-execute ``do_install`` if needed. + + - ``oe_runmake install``, which can be run directly or can be run + indirectly by the + :ref:`autotools ` and + :ref:`cmake ` classes, + runs ``make install`` in parallel. Sometimes, a Makefile can have + missing dependencies between targets that can result in race + conditions. If you experience intermittent failures during + ``do_install``, you might be able to work around them by disabling + parallel Makefile installs by adding the following to the recipe: + PARALLEL_MAKEINST = "" See + :term:`PARALLEL_MAKEINST` + for additional information. + + - If you need to install one or more custom CMake toolchain files + that are supplied by the application you are building, install the + files to ``${D}${datadir}/cmake/`` Modules during + :ref:`ref-tasks-install`. + +.. 
_new-recipe-enabling-system-services: + +Enabling System Services +------------------------ + +If you want to install a service, which is a process that usually starts +on boot and runs in the background, then you must include some +additional definitions in your recipe. + +If you are adding services and the service initialization script or the +service file itself is not installed, you must provide for that +installation in your recipe using a ``do_install_append`` function. If +your recipe already has a ``do_install`` function, update the function +near its end rather than adding an additional ``do_install_append`` +function. + +When you create the installation for your services, you need to +accomplish what is normally done by ``make install``. In other words, +make sure your installation arranges the output similar to how it is +arranged on the target system. + +The OpenEmbedded build system provides support for starting services two +different ways: + +- *SysVinit:* SysVinit is a system and service manager that manages the + init system used to control the very basic functions of your system. + The init program is the first program started by the Linux kernel + when the system boots. Init then controls the startup, running and + shutdown of all other programs. + + To enable a service using SysVinit, your recipe needs to inherit the + :ref:`update-rc.d ` + class. The class helps facilitate safely installing the package on + the target. + + You will need to set the + :term:`INITSCRIPT_PACKAGES`, + :term:`INITSCRIPT_NAME`, + and + :term:`INITSCRIPT_PARAMS` + variables within your recipe. + +- *systemd:* System Management Daemon (systemd) was designed to replace + SysVinit and to provide enhanced management of services. For more + information on systemd, see the systemd homepage at + http://freedesktop.org/wiki/Software/systemd/. + + To enable a service using systemd, your recipe needs to inherit the + :ref:`systemd ` class. See + the ``systemd.bbclass`` file located in your :term:`Source Directory` + section for + more information. + +.. _new-recipe-packaging: + +Packaging +--------- + +Successful packaging is a combination of automated processes performed +by the OpenEmbedded build system and some specific steps you need to +take. The following list describes the process: + +- *Splitting Files*: The ``do_package`` task splits the files produced + by the recipe into logical components. Even software that produces a + single binary might still have debug symbols, documentation, and + other logical components that should be split out. The ``do_package`` + task ensures that files are split up and packaged correctly. + +- *Running QA Checks*: The + :ref:`insane ` class adds a + step to the package generation process so that output quality + assurance checks are generated by the OpenEmbedded build system. This + step performs a range of checks to be sure the build's output is free + of common problems that show up during runtime. For information on + these checks, see the + :ref:`insane ` class and + the ":ref:`ref-manual/ref-qa-checks:qa error and warning messages`" + chapter in the Yocto Project Reference Manual. + +- *Hand-Checking Your Packages*: After you build your software, you + need to be sure your packages are correct. Examine the + ``${``\ :term:`WORKDIR`\ ``}/packages-split`` + directory and make sure files are where you expect them to be. If you + discover problems, you can set + :term:`PACKAGES`, + :term:`FILES`, + ``do_install(_append)``, and so forth as needed. 
+ +- *Splitting an Application into Multiple Packages*: If you need to + split an application into several packages, see the "`Splitting an + Application into Multiple + Packages <#splitting-an-application-into-multiple-packages>`__" + section for an example. + +- *Installing a Post-Installation Script*: For an example showing how + to install a post-installation script, see the "`Post-Installation + Scripts <#new-recipe-post-installation-scripts>`__" section. + +- *Marking Package Architecture*: Depending on what your recipe is + building and how it is configured, it might be important to mark the + packages produced as being specific to a particular machine, or to + mark them as not being specific to a particular machine or + architecture at all. + + By default, packages apply to any machine with the same architecture + as the target machine. When a recipe produces packages that are + machine-specific (e.g. the + :term:`MACHINE` value is passed + into the configure script or a patch is applied only for a particular + machine), you should mark them as such by adding the following to the + recipe: + :: + + PACKAGE_ARCH = "${MACHINE_ARCH}" + + On the other hand, if the recipe produces packages that do not + contain anything specific to the target machine or architecture at + all (e.g. recipes that simply package script files or configuration + files), you should use the + :ref:`allarch ` class to + do this for you by adding this to your recipe: + :: + + inherit allarch + + Ensuring that the package architecture is correct is not critical + while you are doing the first few builds of your recipe. However, it + is important in order to ensure that your recipe rebuilds (or does + not rebuild) appropriately in response to changes in configuration, + and to ensure that you get the appropriate packages installed on the + target machine, particularly if you run separate builds for more than + one target machine. + +.. _new-sharing-files-between-recipes: + +Sharing Files Between Recipes +----------------------------- + +Recipes often need to use files provided by other recipes on the build +host. For example, an application linking to a common library needs +access to the library itself and its associated headers. The way this +access is accomplished is by populating a sysroot with files. Each +recipe has two sysroots in its work directory, one for target files +(``recipe-sysroot``) and one for files that are native to the build host +(``recipe-sysroot-native``). + +.. note:: + + You could find the term "staging" used within the Yocto project + regarding files populating sysroots (e.g. the + STAGING_DIR + variable). + +Recipes should never populate the sysroot directly (i.e. write files +into sysroot). Instead, files should be installed into standard +locations during the +:ref:`ref-tasks-install` task within +the ``${``\ :term:`D`\ ``}`` directory. The +reason for this limitation is that almost all files that populate the +sysroot are cataloged in manifests in order to ensure the files can be +removed later when a recipe is either modified or removed. Thus, the +sysroot is able to remain free from stale files. + +A subset of the files installed by the +:ref:`ref-tasks-install` task are +used by the +:ref:`ref-tasks-populate_sysroot` +task as defined by the the +:term:`SYSROOT_DIRS` variable to +automatically populate the sysroot. It is possible to modify the list of +directories that populate the sysroot. 
The following example shows how +you could add the ``/opt`` directory to the list of directories within a +recipe: +:: + + SYSROOT_DIRS += "/opt" + +For a more complete description of the +:ref:`ref-tasks-populate_sysroot` +task and its associated functions, see the +:ref:`staging ` class. + +.. _metadata-virtual-providers: + +Using Virtual Providers +----------------------- + +Prior to a build, if you know that several different recipes provide the +same functionality, you can use a virtual provider (i.e. ``virtual/*``) +as a placeholder for the actual provider. The actual provider is +determined at build-time. + +A common scenario where a virtual provider is used would be for the +kernel recipe. Suppose you have three kernel recipes whose +:term:`PN` values map to ``kernel-big``, +``kernel-mid``, and ``kernel-small``. Furthermore, each of these recipes +in some way uses a :term:`PROVIDES` +statement that essentially identifies itself as being able to provide +``virtual/kernel``. Here is one way through the +:ref:`kernel ` class: +:: + + PROVIDES += "${@ "virtual/kernel" if (d.getVar("KERNEL_PACKAGE_NAME") == "kernel") else "" }" + +Any recipe that inherits the ``kernel`` class is +going to utilize a ``PROVIDES`` statement that identifies that recipe as +being able to provide the ``virtual/kernel`` item. + +Now comes the time to actually build an image and you need a kernel +recipe, but which one? You can configure your build to call out the +kernel recipe you want by using the +:term:`PREFERRED_PROVIDER` +variable. As an example, consider the +`x86-base.inc `_ +include file, which is a machine (i.e. +:term:`MACHINE`) configuration file. +This include file is the reason all x86-based machines use the +``linux-yocto`` kernel. Here are the relevant lines from the include +file: +:: + + PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto" + PREFERRED_VERSION_linux-yocto ??= "4.15%" + +When you use a virtual provider, you do not have to "hard code" a recipe +name as a build dependency. You can use the +:term:`DEPENDS` variable to state the +build is dependent on ``virtual/kernel`` for example: DEPENDS = +"virtual/kernel" During the build, the OpenEmbedded build system picks +the correct recipe needed for the ``virtual/kernel`` dependency based on +the ``PREFERRED_PROVIDER`` variable. If you want to use the small kernel +mentioned at the beginning of this section, configure your build as +follows: PREFERRED_PROVIDER_virtual/kernel ??= "kernel-small" + +.. note:: + + Any recipe that + PROVIDES + a + virtual/\* + item that is ultimately not selected through + PREFERRED_PROVIDER + does not get built. Preventing these recipes from building is usually + the desired behavior since this mechanism's purpose is to select + between mutually exclusive alternative providers. + +The following lists specific examples of virtual providers: + +- ``virtual/kernel``: Provides the name of the kernel recipe to use + when building a kernel image. + +- ``virtual/bootloader``: Provides the name of the bootloader to use + when building an image. + +- ``virtual/libgbm``: Provides ``gbm.pc``. + +- ``virtual/egl``: Provides ``egl.pc`` and possibly ``wayland-egl.pc``. + +- ``virtual/libgl``: Provides ``gl.pc`` (i.e. libGL). + +- ``virtual/libgles1``: Provides ``glesv1_cm.pc`` (i.e. libGLESv1_CM). + +- ``virtual/libgles2``: Provides ``glesv2.pc`` (i.e. libGLESv2). + +.. note:: + + Virtual providers only apply to build time dependencies specified with + :term:`PROVIDES` and :term:`DEPENDS`. 
They do not apply to runtime + dependencies specified with :term:`RPROVIDES` and :term:`RDEPENDS`. + +Properly Versioning Pre-Release Recipes +--------------------------------------- + +Sometimes the name of a recipe can lead to versioning problems when the +recipe is upgraded to a final release. For example, consider the +``irssi_0.8.16-rc1.bb`` recipe file in the list of example recipes in +the "`Storing and Naming the +Recipe <#new-recipe-storing-and-naming-the-recipe>`__" section. This +recipe is at a release candidate stage (i.e. "rc1"). When the recipe is +released, the recipe filename becomes ``irssi_0.8.16.bb``. The version +change from ``0.8.16-rc1`` to ``0.8.16`` is seen as a decrease by the +build system and package managers, so the resulting packages will not +correctly trigger an upgrade. + +In order to ensure the versions compare properly, the recommended +convention is to set :term:`PV` within the +recipe to "previous_version+current_version". You can use an additional +variable so that you can use the current version elsewhere. Here is an +example: +:: + + REALPV = "0.8.16-rc1" + PV = "0.8.15+${REALPV}" + +.. _new-recipe-post-installation-scripts: + +Post-Installation Scripts +------------------------- + +Post-installation scripts run immediately after installing a package on +the target or during image creation when a package is included in an +image. To add a post-installation script to a package, add a +``pkg_postinst_``\ PACKAGENAME\ ``()`` function to the recipe file +(``.bb``) and replace PACKAGENAME with the name of the package you want +to attach to the ``postinst`` script. To apply the post-installation +script to the main package for the recipe, which is usually what is +required, specify +``${``\ :term:`PN`\ ``}`` in place of +PACKAGENAME. + +A post-installation function has the following structure: +pkg_postinst_PACKAGENAME() { # Commands to carry out } + +The script defined in the post-installation function is called when the +root filesystem is created. If the script succeeds, the package is +marked as installed. + +.. note:: + + Any RPM post-installation script that runs on the target should + return a 0 exit code. RPM does not allow non-zero exit codes for + these scripts, and the RPM package manager will cause the package to + fail installation on the target. + +Sometimes it is necessary for the execution of a post-installation +script to be delayed until the first boot. For example, the script might +need to be executed on the device itself. To delay script execution +until boot time, you must explicitly mark post installs to defer to the +target. You can use ``pkg_postinst_ontarget()`` or call +``postinst_intercept delay_to_first_boot`` from ``pkg_postinst()``. Any +failure of a ``pkg_postinst()`` script (including exit 1) triggers an +error during the +:ref:`ref-tasks-rootfs` task. + +If you have recipes that use ``pkg_postinst`` function and they require +the use of non-standard native tools that have dependencies during +rootfs construction, you need to use the +:term:`PACKAGE_WRITE_DEPS` +variable in your recipe to list these tools. If you do not use this +variable, the tools might be missing and execution of the +post-installation script is deferred until first boot. Deferring the +script to first boot is undesirable and for read-only rootfs impossible. + +.. note:: + + Equivalent support for pre-install, pre-uninstall, and post-uninstall + scripts exist by way of + pkg_preinst + , + pkg_prerm + , and + pkg_postrm + , respectively. 
These scripts work in exactly the same way as does + pkg_postinst + with the exception that they run at different times. Also, because of + when they run, they cannot be run at image creation + time the way + pkg_postinst + can. + +.. _new-recipe-testing: + +Testing +------- + +The final step for completing your recipe is to be sure that the +software you built runs correctly. To accomplish runtime testing, add +the build's output packages to your image and test them on the target. + +For information on how to customize your image by adding specific +packages, see the "`Customizing +Images <#usingpoky-extend-customimage>`__" section. + +.. _new-recipe-testing-examples: + +Examples +-------- + +To help summarize how to write a recipe, this section provides some +examples covering various scenarios: + +- Recipes that use local files + +- Using an Autotooled package + +- Using a Makefile-based package + +- Splitting an application into multiple packages + +- Adding binaries to an image + +.. _new-recipe-single-c-file-package-hello-world: + +Single .c File Package (Hello World!) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Building an application from a single file that is stored locally (e.g. +under ``files``) requires a recipe that has the file listed in the +``SRC_URI`` variable. Additionally, you need to manually write the +``do_compile`` and ``do_install`` tasks. The ``S`` variable defines the +directory containing the source code, which is set to +:term:`WORKDIR` in this case - the +directory BitBake uses for the build. +:: + + SUMMARY = "Simple helloworld application" + SECTION = "examples" + LICENSE = "MIT" + LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302" + + SRC_URI = "file://helloworld.c" + + S = "${WORKDIR}" + + do_compile() { + ${CC} helloworld.c -o helloworld + } + + do_install() { + install -d ${D}${bindir} + install -m 0755 helloworld ${D}${bindir} + } + +By default, the ``helloworld``, ``helloworld-dbg``, and +``helloworld-dev`` packages are built. For information on how to +customize the packaging process, see the "`Splitting an Application into +Multiple Packages <#splitting-an-application-into-multiple-packages>`__" +section. + +.. _new-recipe-autotooled-package: + +Autotooled Package +~~~~~~~~~~~~~~~~~~ + +Applications that use Autotools such as ``autoconf`` and ``automake`` +require a recipe that has a source archive listed in ``SRC_URI`` and +that also inherits the +:ref:`autotools ` class, +which contains the definitions of all the steps needed to build an +Autotool-based application. The result of the build is automatically +packaged. And, if the application uses NLS for localization, packages +with locale information are generated (one package per language). +Following is one example (``hello_2.3.bb``): +:: + + SUMMARY = "GNU Helloworld application" + SECTION = "examples" + LICENSE = "GPLv2+" + LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe" + + SRC_URI = "${GNU_MIRROR}/hello/hello-${PV}.tar.gz" + + inherit autotools gettext + +The variable ``LIC_FILES_CHKSUM`` is used to track source license +changes as described in the "`Tracking License +Changes <#usingpoky-configuring-LIC_FILES_CHKSUM>`__" section in the +Yocto Project Overview and Concepts Manual. You can quickly create +Autotool-based recipes in a manner similar to the previous example. + +.. 
_new-recipe-makefile-based-package: + +Makefile-Based Package +~~~~~~~~~~~~~~~~~~~~~~ + +Applications that use GNU ``make`` also require a recipe that has the +source archive listed in ``SRC_URI``. You do not need to add a +``do_compile`` step since by default BitBake starts the ``make`` command +to compile the application. If you need additional ``make`` options, you +should store them in the +:term:`EXTRA_OEMAKE` or +:term:`PACKAGECONFIG_CONFARGS` +variables. BitBake passes these options into the GNU ``make`` +invocation. Note that a ``do_install`` task is still required. +Otherwise, BitBake runs an empty ``do_install`` task by default. + +Some applications might require extra parameters to be passed to the +compiler. For example, the application might need an additional header +path. You can accomplish this by adding to the ``CFLAGS`` variable. The +following example shows this: +:: + + CFLAGS_prepend = "-I ${S}/include " + +In the following example, ``mtd-utils`` is a makefile-based package: +:: + + SUMMARY = "Tools for managing memory technology devices" + SECTION = "base" + DEPENDS = "zlib lzo e2fsprogs util-linux" + HOMEPAGE = "http://www.linux-mtd.infradead.org/" + LICENSE = "GPLv2+" + LIC_FILES_CHKSUM = "file://COPYING;md5=0636e73ff0215e8d672dc4c32c317bb3 \ + file://include/common.h;beginline=1;endline=17;md5=ba05b07912a44ea2bf81ce409380049c" + # Use the latest version at 26 Oct, 2013 + SRCREV = "9f107132a6a073cce37434ca9cda6917dd8d866b" + SRC_URI = "git://git.infradead.org/mtd-utils.git \ + file://add-exclusion-to-mkfs-jffs2-git-2.patch \ + " + PV = "1.5.1+git${SRCPV}" + S = "${WORKDIR}/git" + EXTRA_OEMAKE = "'CC=${CC}' 'RANLIB=${RANLIB}' 'AR=${AR}' 'CFLAGS=${CFLAGS} -I${S}/include -DWITHOUT_XATTR' 'BUILDDIR=${S}'" + do_install () { + oe_runmake install DESTDIR=${D} SBINDIR=${sbindir} MANDIR=${mandir} INCLUDEDIR=${includedir} + } + PACKAGES =+ "mtd-utils-jffs2 mtd-utils-ubifs mtd-utils-misc" + FILES_mtd-utils-jffs2 = "${sbindir}/mkfs.jffs2 ${sbindir}/jffs2dump ${sbindir}/jffs2reader ${sbindir}/sumtool" + FILES_mtd-utils-ubifs = "${sbindir}/mkfs.ubifs ${sbindir}/ubi*" + FILES_mtd-utils-misc = "${sbindir}/nftl* ${sbindir}/ftl* ${sbindir}/rfd* ${sbindir}/doc* ${sbindir}/serve_image ${sbindir}/recv_image" + PARALLEL_MAKE = "" + BBCLASSEXTEND = "native" + +Splitting an Application into Multiple Packages +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use the variables ``PACKAGES`` and ``FILES`` to split an +application into multiple packages. + +Following is an example that uses the ``libxpm`` recipe. By default, +this recipe generates a single package that contains the library along +with a few binaries. You can modify the recipe to split the binaries +into separate packages: +:: + + require xorg-lib-common.inc + SUMMARY = "Xpm: X Pixmap extension library" + LICENSE = "BSD" + LIC_FILES_CHKSUM = "file://COPYING;md5=51f4270b012ecd4ab1a164f5f4ed6cf7" + DEPENDS += "libxext libsm libxt" + PE = "1" + XORG_PN = "libXpm" + PACKAGES =+ "sxpm cxpm" + FILES_cxpm = "${bindir}/cxpm" + FILES_sxpm = "${bindir}/sxpm" + +In the previous example, we want to ship the ``sxpm`` and ``cxpm`` +binaries in separate packages. Since ``bindir`` would be packaged into +the main ``PN`` package by default, we prepend the ``PACKAGES`` variable +so additional package names are added to the start of list. This results +in the extra ``FILES_*`` variables then containing information that +define which files and directories go into which packages. 
Files +included by earlier packages are skipped by later packages. Thus, the +main ``PN`` package does not include the files listed above. + +Packaging Externally Produced Binaries +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes you need to add pre-compiled binaries to an image. For +example, suppose that binaries for proprietary code exist, which are +created by a particular division of a company. Your part of the company +needs to use those binaries as part of an image that you are building +using the OpenEmbedded build system. Since you only have the binaries +and not the source code, you cannot use a typical recipe that expects to +fetch the source specified in +:term:`SRC_URI` and then compile it. + +One method is to package the binaries and then install them as part of +the image. Generally, it is not a good idea to package binaries since, +among other things, it can hinder the ability to reproduce builds and +could lead to ABI compatibility problems in the future. However, +sometimes you have no choice. + +The easiest solution is to create a recipe that uses the +:ref:`bin_package ` class +and to be sure that you are using default locations for build artifacts. +In most cases, the ``bin_package`` class handles "skipping" the +configure and compile steps as well as sets things up to grab packages +from the appropriate area. In particular, this class sets ``noexec`` on +both the :ref:`ref-tasks-configure` +and :ref:`ref-tasks-compile` tasks, +sets ``FILES_${PN}`` to "/" so that it picks up all files, and sets up a +:ref:`ref-tasks-install` task, which +effectively copies all files from ``${S}`` to ``${D}``. The +``bin_package`` class works well when the files extracted into ``${S}`` +are already laid out in the way they should be laid out on the target. +For more information on these variables, see the +:term:`FILES`, +:term:`PN`, +:term:`S`, and +:term:`D` variables in the Yocto Project +Reference Manual's variable glossary. + +.. note:: + + - Using :term:`DEPENDS` is a good + idea even for components distributed in binary form, and is often + necessary for shared libraries. For a shared library, listing the + library dependencies in ``DEPENDS`` makes sure that the libraries + are available in the staging sysroot when other recipes link + against the library, which might be necessary for successful + linking. + + - Using ``DEPENDS`` also allows runtime dependencies between + packages to be added automatically. See the + ":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" + section in the Yocto Project Overview and Concepts Manual for more + information. + +If you cannot use the ``bin_package`` class, you need to be sure you are +doing the following: + +- Create a recipe where the + :ref:`ref-tasks-configure` and + :ref:`ref-tasks-compile` tasks do + nothing: It is usually sufficient to just not define these tasks in + the recipe, because the default implementations do nothing unless a + Makefile is found in + ``${``\ :term:`S`\ ``}``.
+ + If ``${S}`` might contain a Makefile, or if you inherit some class + that replaces ``do_configure`` and ``do_compile`` with custom + versions, then you can use the + ``[``\ :ref:`noexec `\ ``]`` + flag to turn the tasks into no-ops, as follows: + :: + + do_configure[noexec] = "1" + do_compile[noexec] = "1" + + Unlike + :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:deleting a task`, + using the flag preserves the dependency chain from the + :ref:`ref-tasks-fetch`, + :ref:`ref-tasks-unpack`, and + :ref:`ref-tasks-patch` tasks to the + :ref:`ref-tasks-install` task. + +- Make sure your ``do_install`` task installs the binaries + appropriately. + +- Ensure that you set up :term:`FILES` + (usually + ``FILES_${``\ :term:`PN`\ ``}``) to + point to the files you have installed, which of course depends on + where you have installed them and whether those files are in + different locations than the defaults. + +Following Recipe Style Guidelines +--------------------------------- + +When writing recipes, it is good to conform to existing style +guidelines. The `OpenEmbedded +Styleguide `__ wiki page +provides rough guidelines for preferred recipe style. + +It is common for existing recipes to deviate a bit from this style. +However, aiming for at least a consistent style is a good idea. Some +practices, such as omitting spaces around ``=`` operators in assignments +or ordering recipe components in an erratic way, are widely seen as poor +style. + +Recipe Syntax +------------- + +Understanding recipe file syntax is important for writing recipes. The +following list overviews the basic items that make up a BitBake recipe +file. For more complete BitBake syntax descriptions, see the +":doc:`bitbake-user-manual/bitbake-user-manual-metadata`" +chapter of the BitBake User Manual. + +- *Variable Assignments and Manipulations:* Variable assignments allow + a value to be assigned to a variable. The assignment can be static + text or might include the contents of other variables. In addition to + the assignment, appending and prepending operations are also + supported. + + The following example shows some of the ways you can use variables in + recipes: + :: + + S = "${WORKDIR}/postfix-${PV}" + CFLAGS += "-DNO_ASM" + SRC_URI_append = " file://fixup.patch" + +- *Functions:* Functions provide a series of actions to be performed. + You usually use functions to override the default implementation of a + task function or to complement a default function (i.e. append or + prepend to an existing function). Standard functions use ``sh`` shell + syntax, although access to OpenEmbedded variables and internal + methods are also available. + + The following is an example function from the ``sed`` recipe: + :: + + do_install () { + autotools_do_install + install -d ${D}${base_bindir} + mv ${D}${bindir}/sed ${D}${base_bindir}/sed + rmdir ${D}${bindir}/ + } + + It is + also possible to implement new functions that are called between + existing tasks as long as the new functions are not replacing or + complementing the default functions. You can implement functions in + Python instead of shell. Both of these options are not seen in the + majority of recipes. + +- *Keywords:* BitBake recipes use only a few keywords. You use keywords + to include common functions (``inherit``), load parts of a recipe + from other files (``include`` and ``require``) and export variables + to the environment (``export``). 
+ + The following example shows the use of some of these keywords: + :: + + export POSTCONF = "${STAGING_BINDIR}/postconf" + inherit autoconf + require otherfile.inc + +- *Comments (#):* Any lines that begin with the hash character (``#``) + are treated as comment lines and are ignored: + :: + + # This is a comment + +This next list summarizes the most important and most commonly used +parts of the recipe syntax. For more information on these parts of the +syntax, you can reference the +:doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata` chapter +in the BitBake User Manual. + +- *Line Continuation (\):* Use the backward slash (``\``) character to + split a statement over multiple lines. Place the slash character at + the end of the line that is to be continued on the next line: + :: + + VAR = "A really long \ + line" + + .. note:: + + You cannot have any characters including spaces or tabs after the + slash character. + +- *Using Variables (${VARNAME}):* Use the ``${VARNAME}`` syntax to + access the contents of a variable: + :: + + SRC_URI = "${SOURCEFORGE_MIRROR}/libpng/zlib-${PV}.tar.gz" + + .. note:: + + It is important to understand that the value of a variable + expressed in this form does not get substituted automatically. The + expansion of these expressions happens on-demand later (e.g. + usually when a function that makes reference to the variable + executes). This behavior ensures that the values are most + appropriate for the context in which they are finally used. On the + rare occasion that you do need the variable expression to be + expanded immediately, you can use the + := + operator instead of + = + when you make the assignment, but this is not generally needed. + +- *Quote All Assignments ("value"):* Use double quotes around values in + all variable assignments (e.g. ``"value"``). Following is an example: + :: + + VAR1 = "${OTHERVAR}" + VAR2 = "The version is ${PV}" + +- *Conditional Assignment (?=):* Conditional assignment is used to + assign a value to a variable, but only when the variable is currently + unset. Use the question mark followed by the equal sign (``?=``) to + make a "soft" assignment used for conditional assignment. Typically, + "soft" assignments are used in the ``local.conf`` file for variables + that are allowed to come through from the external environment. + + Here is an example where ``VAR1`` is set to "New value" if it is + currently empty. However, if ``VAR1`` has already been set, it + remains unchanged: VAR1 ?= "New value" In this next example, ``VAR1`` + is left with the value "Original value": + :: + + VAR1 = "Original value" + VAR1 ?= "New value" + +- *Appending (+=):* Use the plus character followed by the equals sign + (``+=``) to append values to existing variables. + + .. note:: + + This operator adds a space between the existing content of the + variable and the new content. + + Here is an example: + :: + + SRC_URI += "file://fix-makefile.patch" + +- *Prepending (=+):* Use the equals sign followed by the plus character + (``=+``) to prepend values to existing variables. + + .. note:: + + This operator adds a space between the new content and the + existing content of the variable. + + Here is an example: + :: + + VAR =+ "Starts" + +- *Appending (_append):* Use the ``_append`` operator to append values + to existing variables. This operator does not add any additional + space. Also, the operator is applied after all the ``+=``, and ``=+`` + operators have been applied and after all ``=`` assignments have + occurred. 
+ + The following example shows the space being explicitly added to the + start to ensure the appended value is not merged with the existing + value: + :: + + SRC_URI_append = " file://fix-makefile.patch" + + You can also use + the ``_append`` operator with overrides, which results in the actions + only being performed for the specified target or machine: + :: + + SRC_URI_append_sh4 = " file://fix-makefile.patch" + +- *Prepending (_prepend):* Use the ``_prepend`` operator to prepend + values to existing variables. This operator does not add any + additional space. Also, the operator is applied after all the ``+=`` + and ``=+`` operators have been applied and after all ``=`` + assignments have occurred. + + The following example shows the space being explicitly added to the + end to ensure the prepended value is not merged with the existing + value: + :: + + CFLAGS_prepend = "-I${S}/myincludes " + + You can also use the + ``_prepend`` operator with overrides, which results in the actions + only being performed for the specified target or machine: + :: + + CFLAGS_prepend_sh4 = "-I${S}/myincludes " + +- *Overrides:* You can use overrides to set a value conditionally, + typically based on how the recipe is being built. For example, to set + the :term:`KBRANCH` variable's + value to "standard/base" for any target + :term:`MACHINE`, except for + qemuarm where it should be set to "standard/arm-versatile-926ejs", + you would do the following: + :: + + KBRANCH = "standard/base" + KBRANCH_qemuarm = "standard/arm-versatile-926ejs" + + Overrides are also used to separate + alternate values of a variable in other situations. For example, when + setting variables such as + :term:`FILES` and + :term:`RDEPENDS` that are + specific to individual packages produced by a recipe, you should + always use an override that specifies the name of the package. + +- *Indentation:* Use spaces for indentation rather than tabs. For + shell functions, both currently work. However, it is a policy + decision of the Yocto Project to use tabs in shell functions. Realize + that some layers have a policy to use spaces for all indentation. + +- *Using Python for Complex Operations:* For more advanced processing, + it is possible to use Python code during variable assignments (e.g. + search and replace on a variable). + + You indicate Python code using the ``${@python_code}`` syntax for the + variable assignment: + :: + + SRC_URI = "ftp://ftp.info-zip.org/pub/infozip/src/zip${@d.getVar('PV',1).replace('.', '')}.tgz" + +- *Shell Function Syntax:* Write shell functions as if you were writing + a shell script when you describe a list of actions to take. You + should ensure that your script works with a generic ``sh`` and that + it does not require any ``bash`` or other shell-specific + functionality. The same considerations apply to various system + utilities (e.g. ``sed``, ``grep``, ``awk``, and so forth) that you + might wish to use. If in doubt, you should check with multiple + implementations - including those from BusyBox. + +.. _platdev-newmachine: + +Adding a New Machine +==================== + +Adding a new machine to the Yocto Project is a straightforward process. +This section describes how to add machines that are similar to those +that the Yocto Project already supports. + +.. note:: + + Although well within the capabilities of the Yocto Project, adding a + totally new architecture might require changes to + gcc/glibc + and to the site information, which is beyond the scope of this + manual.
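 + +As a preview of the sections that follow, here is a minimal sketch of what a machine configuration file can look like. The machine name ``mymachine`` is hypothetical, and the values simply reuse the example values given for these variables in the following sections, so substitute settings that match your hardware: + :: + + # conf/machine/mymachine.conf (hypothetical example) + TARGET_ARCH = "arm" + PREFERRED_PROVIDER_virtual/kernel = "linux-yocto" + MACHINE_FEATURES = "apm screen wifi" + SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" + KERNEL_IMAGETYPE = "zImage" + IMAGE_FSTYPES = "tar.gz jffs2"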
+ +For a complete example that shows how to add a new machine, see the +":ref:`bsp-guide/bsp:creating a new bsp layer using the \`\`bitbake-layers\`\` script`" +section in the Yocto Project Board Support Package (BSP) Developer's +Guide. + +.. _platdev-newmachine-conffile: + +Adding the Machine Configuration File +------------------------------------- + +To add a new machine, you need to add a new machine configuration file +to the layer's ``conf/machine`` directory. This configuration file +provides details about the device you are adding. + +The OpenEmbedded build system uses the root name of the machine +configuration file to reference the new machine. For example, given a +machine configuration file named ``crownbay.conf``, the build system +recognizes the machine as "crownbay". + +The most important variables you must set in your machine configuration +file or include from a lower-level configuration file are as follows: + +- ``TARGET_ARCH`` (e.g. "arm") + +- ``PREFERRED_PROVIDER_virtual/kernel`` + +- ``MACHINE_FEATURES`` (e.g. "apm screen wifi") + +You might also need these variables: + +- ``SERIAL_CONSOLES`` (e.g. "115200;ttyS0 115200;ttyS1") + +- ``KERNEL_IMAGETYPE`` (e.g. "zImage") + +- ``IMAGE_FSTYPES`` (e.g. "tar.gz jffs2") + +You can find full details on these variables in the reference section. +You can leverage existing machine ``.conf`` files from +``meta-yocto-bsp/conf/machine/``. + +.. _platdev-newmachine-kernel: + +Adding a Kernel for the Machine +------------------------------- + +The OpenEmbedded build system needs to be able to build a kernel for the +machine. You need to either create a new kernel recipe for this machine, +or extend an existing kernel recipe. You can find several kernel recipe +examples in the Source Directory at ``meta/recipes-kernel/linux`` that +you can use as references. + +If you are creating a new kernel recipe, normal recipe-writing rules +apply for setting up a ``SRC_URI``. Thus, you need to specify any +necessary patches and set ``S`` to point at the source code. You need to +create a ``do_configure`` task that configures the unpacked kernel with +a ``defconfig`` file. You can do this by using a ``make defconfig`` +command or, more commonly, by copying in a suitable ``defconfig`` file +and then running ``make oldconfig``. By making use of ``inherit kernel`` +and potentially some of the ``linux-*.inc`` files, most other +functionality is centralized and the defaults of the class normally work +well. + +If you are extending an existing kernel recipe, it is usually a matter +of adding a suitable ``defconfig`` file. The file needs to be added into +a location similar to ``defconfig`` files used for other machines in a +given kernel recipe. A possible way to do this is by listing the file in +the ``SRC_URI`` and adding the machine to the expression in +``COMPATIBLE_MACHINE``: +:: + + COMPATIBLE_MACHINE = '(qemux86|qemumips)' + +For more information on ``defconfig`` files, see the +":ref:`kernel-dev/kernel-dev-common:changing the configuration`" +section in the Yocto Project Linux Kernel Development Manual. + +.. _platdev-newmachine-formfactor: + +Adding a Formfactor Configuration File +-------------------------------------- + +A formfactor configuration file provides information about the target +hardware for which the image is being built and information that the +build system cannot obtain from other sources such as the kernel. 
Some +examples of information contained in a formfactor configuration file +include framebuffer orientation, whether or not the system has a +keyboard, the positioning of the keyboard in relation to the screen, and +the screen resolution. + +The build system uses reasonable defaults in most cases. However, if +customization is necessary, you need to create a ``machconfig`` file in +the ``meta/recipes-bsp/formfactor/files`` directory. This directory +contains directories for specific machines such as ``qemuarm`` and +``qemux86``. For information about the settings available and the +defaults, see the ``meta/recipes-bsp/formfactor/files/config`` file +found in the same area. + +Following is an example for the "qemuarm" machine: +:: + + HAVE_TOUCHSCREEN=1 + HAVE_KEYBOARD=1 + DISPLAY_CAN_ROTATE=0 + DISPLAY_ORIENTATION=0 + #DISPLAY_WIDTH_PIXELS=640 + #DISPLAY_HEIGHT_PIXELS=480 + #DISPLAY_BPP=16 + DISPLAY_DPI=150 + DISPLAY_SUBPIXEL_ORDER=vrgb + +.. _gs-upgrading-recipes: + +Upgrading Recipes +================= + +Over time, upstream developers publish new versions for software built +by layer recipes. It is recommended to keep recipes up-to-date with +upstream version releases. + +While several methods exist that allow you to upgrade a recipe, you might +consider checking on the upgrade status of a recipe first. You can do so +using the ``devtool check-upgrade-status`` command. See the +":ref:`devtool-checking-on-the-upgrade-status-of-a-recipe`" +section in the Yocto Project Reference Manual for more information. + +The remainder of this section describes three ways you can upgrade a +recipe. You can use the Automated Upgrade Helper (AUH) to set up +automatic version upgrades. Alternatively, you can use +``devtool upgrade`` to set up semi-automatic version upgrades. Finally, +you can manually upgrade a recipe by editing the recipe itself. + +.. _gs-using-the-auto-upgrade-helper: + +Using the Auto Upgrade Helper (AUH) +----------------------------------- + +The AUH utility works in conjunction with the OpenEmbedded build system +in order to automatically generate upgrades for recipes based on new +versions being published upstream. Use AUH when you want to create a +service that performs the upgrades automatically and optionally sends +you an email with the results. + +AUH allows you to update several recipes with a single run. You can also +optionally perform build and integration tests using images, save the +results to your hard drive, and optionally send result emails to recipe +maintainers. Finally, AUH creates Git commits with appropriate +commit messages in the layer's tree for the changes made to recipes. + +.. note:: + + Conditions do exist when you should not use AUH to upgrade recipes + and you should instead use either + devtool upgrade + or upgrade your recipes manually: + + - When AUH cannot complete the upgrade sequence. This situation + usually results because custom patches carried by the recipe + cannot be automatically rebased to the new version. In this case, + ``devtool upgrade`` allows you to manually resolve conflicts. + + - When for any reason you want fuller control over the upgrade + process. For example, when you want special arrangements for + testing. + +The following steps describe how to set up the AUH utility: + +1. *Be Sure the Development Host is Set Up:* You need to be sure that + your development host is set up to use the Yocto Project.
For + information on how to set up your host, see the "`Preparing the Build + Host <#dev-preparing-the-build-host>`__" section. + +2. *Make Sure Git is Configured:* The AUH utility requires Git to be + configured because AUH uses Git to save upgrades. Thus, you must have + your Git user name and email configured. The following command shows your + configurations: + :: + + $ git config --list + + If you do not have the user and + email configured, you can use the following commands to do so: + :: + + $ git config --global user.name some_name + $ git config --global user.email username@domain.com + +3. *Clone the AUH Repository:* To use AUH, you must clone the repository + onto your development host. The following command uses Git to create + a local copy of the repository on your system: + :: + + $ git clone git://git.yoctoproject.org/auto-upgrade-helper + Cloning into 'auto-upgrade-helper'... + remote: Counting objects: 768, done. + remote: Compressing objects: 100% (300/300), done. + remote: Total 768 (delta 499), reused 703 (delta 434) + Receiving objects: 100% (768/768), 191.47 KiB | 98.00 KiB/s, done. + Resolving deltas: 100% (499/499), done. + Checking connectivity... done. + + AUH is not part of the :term:`OpenEmbedded-Core (OE-Core)` or + :term:`Poky` repositories. + +4. *Create a Dedicated Build Directory:* Run the + :ref:`structure-core-script` + script to create a fresh build directory that you use exclusively for + running the AUH utility: + :: + + $ cd ~/poky + $ source oe-init-build-env your_AUH_build_directory + + Re-using an existing build directory and its + configurations is not recommended as existing settings could cause + AUH to fail or behave undesirably. + +5. *Make Configurations in Your Local Configuration File:* Several + settings need to exist in the ``local.conf`` file in the build + directory you just created for AUH. Make the following + configurations: + + - If you want to enable :ref:`Build + History `, + which is optional, you need the following lines in the + ``conf/local.conf`` file: + :: + + INHERIT =+ "buildhistory" + BUILDHISTORY_COMMIT = "1" + + With this configuration and a successful + upgrade, a build history "diff" file appears in the + ``upgrade-helper/work/recipe/buildhistory-diff.txt`` file found in + your build directory. + + - If you want to enable testing through the + :ref:`testimage ` + class, which is optional, you need to have the following set in + your ``conf/local.conf`` file: + :: + + INHERIT += "testimage" + + .. note:: + + If your distro does not enable ptest by default, as Poky + does, you need the following in your + local.conf + file: + :: + + DISTRO_FEATURES_append = " ptest" + + +6. *Optionally Start a vncserver:* If you are running on a server + without an X11 session, you need to start a vncserver: + :: + + $ vncserver :1 + $ export DISPLAY=:1 + +7. *Create and Edit an AUH Configuration File:* You need to have the + ``upgrade-helper/upgrade-helper.conf`` configuration file in your + build directory. You can find a sample configuration file in the `AUH + source + repository `__. + + Read through the sample file and make configurations as needed. For + example, if you enabled build history in your ``local.conf`` as + described earlier, you must enable it in ``upgrade-helper.conf``.
+ + Also, if you are using the default ``maintainers.inc`` file supplied + with Poky and located in ``meta-yocto`` and you do not set a + "maintainers_whitelist" or "global_maintainer_override" in the + ``upgrade-helper.conf`` configuration, and you specify "-e all" on + the AUH command-line, the utility automatically sends out emails to + all the default maintainers. Please avoid this. + +This next set of examples describes how to use the AUH: + +- *Upgrading a Specific Recipe:* To upgrade a specific recipe, use the + following form: $ upgrade-helper.py recipe_name For example, this + command upgrades the ``xmodmap`` recipe: + :: + + $ upgrade-helper.py xmodmap + +- *Upgrading a Specific Recipe to a Particular Version:* To upgrade a + specific recipe to a particular version, use the following form: $ + upgrade-helper.py recipe_name -t version For example, this command + upgrades the ``xmodmap`` recipe to version 1.2.3: + :: + + $ upgrade-helper.py xmodmap -t 1.2.3 + +- *Upgrading all Recipes to the Latest Versions and Suppressing Email + Notifications:* To upgrade all recipes to their most recent versions + and suppress the email notifications, use the following command: + :: + + $ upgrade-helper.py all + +- *Upgrading all Recipes to the Latest Versions and Send Email + Notifications:* To upgrade all recipes to their most recent versions + and send email messages to maintainers for each attempted recipe as + well as a status email, use the following command: + :: + + $ upgrade-helper.py -e all + +Once you have run the AUH utility, you can find the results in the AUH +build directory: +:: + + ${BUILDDIR}/upgrade-helper/timestamp + +The AUH utility +also creates recipe update commits from successful upgrade attempts in +the layer tree. + +You can easily set up to run the AUH utility on a regular basis by using +a cron job. See the +`weeklyjob.sh `_ +file distributed with the utility for an example. + +.. _gs-using-devtool-upgrade: + +Using ``devtool upgrade`` +------------------------- + +As mentioned earlier, an alternative method for upgrading recipes to +newer versions is to use +:doc:`devtool upgrade <../ref-manual/ref-devtool-reference>`. +You can read about ``devtool upgrade`` in general in the +":ref:`sdk-devtool-use-devtool-upgrade-to-create-a-version-of-the-recipe-that-supports-a-newer-version-of-the-software`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) Manual. + +To see all the command-line options available with ``devtool upgrade``, +use the following help command: +:: + + $ devtool upgrade -h + +If you want to find out what version a recipe is currently at upstream +without any attempt to upgrade your local version of the recipe, you can +use the following command: +:: + + $ devtool latest-version recipe_name + +As mentioned in the previous section describing AUH, ``devtool upgrade`` +works in a less-automated manner than AUH. Specifically, +``devtool upgrade`` only works on a single recipe that you name on the +command line, cannot perform build and integration testing using images, +and does not automatically generate commits for changes in the source +tree. Despite all these "limitations", ``devtool upgrade`` updates the +recipe file to the new upstream version and attempts to rebase custom +patches contained by the recipe as needed. + +.. note:: + + AUH uses much of + devtool upgrade + behind the scenes making AUH somewhat of a "wrapper" application for + devtool upgrade + . 
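 + +As a minimal sketch of that flow, you might first query the latest upstream version of a recipe and then start the upgrade. The ``nano`` recipe name here simply anticipates the worked example that follows: + :: + + $ devtool latest-version nano + $ devtool upgrade nano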
+ +A typical scenario involves having used Git to clone an upstream +repository that you use during build operations. Because you are (or +have) built the recipe in the past, the layer is likely added to your +configuration already. If for some reason, the layer is not added, you +could add it easily using the +":ref:`bitbake-layers `" +script. For example, suppose you use the ``nano.bb`` recipe from the +``meta-oe`` layer in the ``meta-openembedded`` repository. For this +example, assume that the layer has been cloned into following area: +:: + + /home/scottrif/meta-openembedded + +The following command from your +:term:`Build Directory` adds the layer to +your build configuration (i.e. ``${BUILDDIR}/conf/bblayers.conf``): +:: + + $ bitbake-layers add-layer /home/scottrif/meta-openembedded/meta-oe + NOTE: Starting bitbake server... + Parsing recipes: 100% |##########################################| Time: 0:00:55 + Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors. + Removing 12 recipes from the x86_64 sysroot: 100% |##############| Time: 0:00:00 + Removing 1 recipes from the x86_64_i586 sysroot: 100% |##########| Time: 0:00:00 + Removing 5 recipes from the i586 sysroot: 100% |#################| Time: 0:00:00 + Removing 5 recipes from the qemux86 sysroot: 100% |##############| Time: 0:00:00 + +For this example, assume that the ``nano.bb`` recipe that +is upstream has a 2.9.3 version number. However, the version in the +local repository is 2.7.4. The following command from your build +directory automatically upgrades the recipe for you: + +.. note:: + + Using the + -V + option is not necessary. Omitting the version number causes + devtool upgrade + to upgrade the recipe to the most recent version. + +:: + + $ devtool upgrade nano -V 2.9.3 + NOTE: Starting bitbake server... + NOTE: Creating workspace layer in /home/scottrif/poky/build/workspace + Parsing recipes: 100% |##########################################| Time: 0:00:46 + Parsing of 1431 .bb files complete (0 cached, 1431 parsed). 2040 targets, 56 skipped, 0 masked, 0 errors. + NOTE: Extracting current version source... + NOTE: Resolving any missing task queue dependencies + . + . + . + NOTE: Executing SetScene Tasks + NOTE: Executing RunQueue Tasks + NOTE: Tasks Summary: Attempted 74 tasks of which 72 didn't need to be rerun and all succeeded. + Adding changed files: 100% |#####################################| Time: 0:00:00 + NOTE: Upgraded source extracted to /home/scottrif/poky/build/workspace/sources/nano + NOTE: New recipe is /home/scottrif/poky/build/workspace/recipes/nano/nano_2.9.3.bb + +Continuing with this example, you can use ``devtool build`` to build the +newly upgraded recipe: +:: + + $ devtool build nano + NOTE: Starting bitbake server... + Loading cache: 100% |################################################################################################| Time: 0:00:01 + Loaded 2040 entries from dependency cache. + Parsing recipes: 100% |##############################################################################################| Time: 0:00:00 + Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors. + NOTE: Resolving any missing task queue dependencies + . + . + . 
+ NOTE: Executing SetScene Tasks + NOTE: Executing RunQueue Tasks + NOTE: nano: compiling from external source tree /home/scottrif/poky/build/workspace/sources/nano + NOTE: Tasks Summary: Attempted 520 tasks of which 304 didn't need to be rerun and all succeeded. + +Within the ``devtool upgrade`` workflow, opportunity +exists to deploy and test your rebuilt software. For this example, +however, running ``devtool finish`` cleans up the workspace once the +source in your workspace is clean. This usually means using Git to stage +and submit commits for the changes generated by the upgrade process. + +Once the tree is clean, you can clean things up in this example with the +following command from the ``${BUILDDIR}/workspace/sources/nano`` +directory: +:: + + $ devtool finish nano meta-oe + NOTE: Starting bitbake server... + Loading cache: 100% |################################################################################################| Time: 0:00:00 + Loaded 2040 entries from dependency cache. + Parsing recipes: 100% |##############################################################################################| Time: 0:00:01 + Parsing of 1432 .bb files complete (1431 cached, 1 parsed). 2041 targets, 56 skipped, 0 masked, 0 errors. + NOTE: Adding new patch 0001-nano.bb-Stuff-I-changed-when-upgrading-nano.bb.patch + NOTE: Updating recipe nano_2.9.3.bb + NOTE: Removing file /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano/nano_2.7.4.bb + NOTE: Moving recipe file to /home/scottrif/meta-openembedded/meta-oe/recipes-support/nano + NOTE: Leaving source tree /home/scottrif/poky/build/workspace/sources/nano as-is; if you no longer need it then please delete it manually + + +Using the ``devtool finish`` command cleans up the workspace and creates a patch +file based on your commits. The tool puts all patch files back into the +source directory in a sub-directory named ``nano`` in this case. + +.. _dev-manually-upgrading-a-recipe: + +Manually Upgrading a Recipe +--------------------------- + +If for some reason you choose not to upgrade recipes using the `Auto +Upgrade Helper (AUH) <#gs-using-the-auto-upgrade-helper>`__ or by using +```devtool upgrade`` <#gs-using-devtool-upgrade>`__, you can manually +edit the recipe files to upgrade the versions. + +.. note:: + + Manually updating multiple recipes scales poorly and involves many + steps. The recommendation to upgrade recipe versions is through AUH + or + devtool upgrade + , both of which automate some steps and provide guidance for others + needed for the manual process. + +To manually upgrade recipe versions, follow these general steps: + +1. *Change the Version:* Rename the recipe such that the version (i.e. + the :term:`PV` part of the recipe name) + changes appropriately. If the version is not part of the recipe name, + change the value as it is set for ``PV`` within the recipe itself. + +2. Update ``SRCREV`` if Needed: If the source code your recipe builds + is fetched from Git or some other version control system, update + :term:`SRCREV` to point to the + commit hash that matches the new version. + +3. *Build the Software:* Try to build the recipe using BitBake. Typical + build failures include the following: + + - License statements were updated for the new version. For this + case, you need to review any changes to the license and update the + values of :term:`LICENSE` and + :term:`LIC_FILES_CHKSUM` + as needed. + + .. note:: + + License changes are often inconsequential. 
For example, the + license text's copyright year might have changed. + + - Custom patches carried by the older version of the recipe might + fail to apply to the new version. For these cases, you need to + review the failures. Patches might not be necessary for the new + version of the software if the upgraded version has fixed those + issues. If a patch is necessary and failing, you need to rebase it + into the new version. + +4. *Optionally Attempt to Build for Several Architectures:* Once you + successfully build the new software for a given architecture, you + could test the build for other architectures by changing the + :term:`MACHINE` variable and + rebuilding the software. This optional step is especially important + if the recipe is to be released publicly. + +5. *Check the Upstream Change Log or Release Notes:* Checking both these + reveals if new features exist that could break + backwards-compatibility. If so, you need to take steps to mitigate or + eliminate that situation. + +6. *Optionally Create a Bootable Image and Test:* If you want, you can + test the new software by booting it onto actual hardware. + +7. *Create a Commit with the Change in the Layer Repository:* After all + builds work and any testing is successful, you can create commits for + any changes in the layer holding your upgraded recipe. + +.. _finding-the-temporary-source-code: + +Finding Temporary Source Code +============================= + +You might find it helpful during development to modify the temporary +source code used by recipes to build packages. For example, suppose you +are developing a patch and you need to experiment a bit to figure out +your solution. After you have initially built the package, you can +iteratively tweak the source code, which is located in the +:term:`Build Directory`, and then you can +force a re-compile and quickly test your altered code. Once you settle +on a solution, you can then preserve your changes in the form of +patches. + +During a build, the unpacked temporary source code used by recipes to +build packages is available in the Build Directory as defined by the +:term:`S` variable. Below is the default +value for the ``S`` variable as defined in the +``meta/conf/bitbake.conf`` configuration file in the +:term:`Source Directory`: +:: + + S = "${WORKDIR}/${BP}" + +You should be aware that many recipes override the +``S`` variable. For example, recipes that fetch their source from Git +usually set ``S`` to ``${WORKDIR}/git``. + +.. note:: + + The + BP + represents the base recipe name, which consists of the name and + version: + :: + + BP = "${BPN}-${PV}" + + +The path to the work directory for the recipe +(:term:`WORKDIR`) is defined as +follows: +${TMPDIR}/work/${MULTIMACH_TARGET_SYS}/${PN}/${EXTENDPE}${PV}-${PR} The +actual directory depends on several things: + +- :term:`TMPDIR`: The top-level build + output directory. + +- :term:`MULTIMACH_TARGET_SYS`: + The target system identifier. + +- :term:`PN`: The recipe name. + +- :term:`EXTENDPE`: The epoch - (if + :term:`PE` is not specified, which is + usually the case for most recipes, then ``EXTENDPE`` is blank). + +- :term:`PV`: The recipe version. + +- :term:`PR`: The recipe revision. + +As an example, assume a Source Directory top-level folder named +``poky``, a default Build Directory at ``poky/build``, and a +``qemux86-poky-linux`` machine target system. Furthermore, suppose your +recipe is named ``foo_1.3.0.bb``. 
In this case, the work directory the +build system uses to build the package would be as follows: +:: + + poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0 + +.. _using-a-quilt-workflow: + +Using Quilt in Your Workflow +============================ + +`Quilt `__ is a powerful tool +that allows you to capture source code changes without having a clean +source tree. This section outlines the typical workflow you can use to +modify source code, test changes, and then preserve the changes in the +form of a patch, all using Quilt. + +.. note:: + + With regard to preserving changes to source files, if you clean a + recipe or have + rm_work + enabled, the + devtool + workflow + as described in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual is a safer + development flow than the flow that uses Quilt. + +Follow these general steps: + +1. *Find the Source Code:* Temporary source code used by the + OpenEmbedded build system is kept in the + :term:`Build Directory`. See the + "`Finding Temporary Source + Code <#finding-the-temporary-source-code>`__" section to learn how to + locate the directory that has the temporary source code for a + particular package. + +2. *Change Your Working Directory:* You need to be in the directory that + has the temporary source code. That directory is defined by the + :term:`S` variable. + +3. *Create a New Patch:* Before modifying source code, you need to + create a new patch. To create a new patch file, use ``quilt new`` as + shown below: + :: + + $ quilt new my_changes.patch + +4. *Notify Quilt and Add Files:* After creating the patch, you need to + notify Quilt about the files you plan to edit. You notify Quilt by + adding the files to the patch you just created: + :: + + $ quilt add file1.c file2.c file3.c + +5. *Edit the Files:* Make your changes in the source code to the files + you added to the patch. + +6. *Test Your Changes:* Once you have modified the source code, the + easiest way to test your changes is by calling the ``do_compile`` + task as shown in the following example: + :: + + $ bitbake -c compile -f package + + The ``-f`` or ``--force`` option forces the specified task to + execute. If you find problems with your code, you can just keep + editing and re-testing iteratively until things work as expected. + + .. note:: + + All the modifications you make to the temporary source code + disappear once you run the + do_clean + or + do_cleanall + tasks using BitBake (i.e. + bitbake -c clean + package + and + bitbake -c cleanall + package + ). Modifications will also disappear if you use the + rm_work + feature as described in the " + Conserving Disk Space During Builds + " section. + +7. *Generate the Patch:* Once your changes work as expected, you need to + use Quilt to generate the final patch that contains all your + modifications. + :: + + $ quilt refresh + + At this point, the + ``my_changes.patch`` file has all your edits made to the ``file1.c``, + ``file2.c``, and ``file3.c`` files. + + You can find the resulting patch file in the ``patches/`` + subdirectory of the source (``S``) directory. + +8. *Copy the Patch File:* For simplicity, copy the patch file into a + directory named ``files``, which you can create in the same directory + that holds the recipe (``.bb``) file or the append (``.bbappend``) + file. Placing the patch here guarantees that the OpenEmbedded build + system will find the patch. Next, add the patch into the ``SRC_URI`` + of the recipe.
Here is an example: + :: + + SRC_URI += "file://my_changes.patch" + +.. _platdev-appdev-devshell: + +Using a Development Shell +========================= + +When debugging certain commands or even when just editing packages, +``devshell`` can be a useful tool. When you invoke ``devshell``, all +tasks up to and including +:ref:`ref-tasks-patch` are run for the +specified target. Then, a new terminal is opened and you are placed in +``${``\ :term:`S`\ ``}``, the source +directory. In the new terminal, all the OpenEmbedded build-related +environment variables are still defined so you can use commands such as +``configure`` and ``make``. The commands execute just as if the +OpenEmbedded build system were executing them. Consequently, working +this way can be helpful when debugging a build or preparing software to +be used with the OpenEmbedded build system. + +Following is an example that uses ``devshell`` on a target named +``matchbox-desktop``: +:: + + $ bitbake matchbox-desktop -c devshell + +This command spawns a terminal with a shell prompt within the +OpenEmbedded build environment. The +:term:`OE_TERMINAL` variable +controls what type of shell is opened. + +For spawned terminals, the following occurs: + +- The ``PATH`` variable includes the cross-toolchain. + +- The ``pkgconfig`` variables find the correct ``.pc`` files. + +- The ``configure`` command finds the Yocto Project site files as well + as any other necessary files. + +Within this environment, you can run configure or compile commands as if +they were being run by the OpenEmbedded build system itself. As noted +earlier, the working directory also automatically changes to the Source +Directory (:term:`S`). + +To manually run a specific task using ``devshell``, run the +corresponding ``run.*`` script in the +``${``\ :term:`WORKDIR`\ ``}/temp`` +directory (e.g., ``run.do_configure.``\ pid). If a task's script does +not exist, which would be the case if the task was skipped by way of the +sstate cache, you can create the task by first running it outside of the +``devshell``: +:: + + $ bitbake -c task + +.. note:: + + - Execution of a task's ``run.*`` script and BitBake's execution of + a task are identical. In other words, running the script re-runs + the task just as it would be run using the ``bitbake -c`` command. + + - Any ``run.*`` file that does not have a ``.pid`` extension is a + symbolic link (symlink) to the most recent version of that file. + +Remember, that the ``devshell`` is a mechanism that allows you to get +into the BitBake task execution environment. And as such, all commands +must be called just as BitBake would call them. That means you need to +provide the appropriate options for cross-compilation and so forth as +applicable. + +When you are finished using ``devshell``, exit the shell or close the +terminal window. + +.. note:: + + - It is worth remembering that when using ``devshell`` you need to + use the full compiler name such as ``arm-poky-linux-gnueabi-gcc`` + instead of just using ``gcc``. The same applies to other + applications such as ``binutils``, ``libtool`` and so forth. + BitBake sets up environment variables such as ``CC`` to assist + applications, such as ``make`` to find the correct tools. + + - It is also worth noting that ``devshell`` still works over X11 + forwarding and similar situations. + +.. 
_platdev-appdev-devpyshell: + +Using a Development Python Shell +================================ + +Similar to working within a development shell as described in the +previous section, you can also spawn and work within an interactive +Python development shell. When debugging certain commands or even when +just editing packages, ``devpyshell`` can be a useful tool. When you +invoke ``devpyshell``, all tasks up to and including +:ref:`ref-tasks-patch` are run for the +specified target. Then a new terminal is opened. Additionally, key +Python objects and code are available in the same way they are to +BitBake tasks, in particular, the data store 'd'. So, commands such as +the following are useful when exploring the data store and running +functions: +:: + + pydevshell> d.getVar("STAGING_DIR") + '/media/build1/poky/build/tmp/sysroots' + pydevshell> d.getVar("STAGING_DIR", False) + '${TMPDIR}/sysroots' + pydevshell> d.setVar("FOO", "bar") + pydevshell> d.getVar("FOO") + 'bar' + pydevshell> d.delVar("FOO") + pydevshell> d.getVar("FOO") + pydevshell> bb.build.exec_func("do_unpack", d) + pydevshell> + +The commands execute just as if the OpenEmbedded build +system were executing them. Consequently, working this way can be +helpful when debugging a build or preparing software to be used with the +OpenEmbedded build system. + +Following is an example that uses ``devpyshell`` on a target named +``matchbox-desktop``: +:: + + $ bitbake matchbox-desktop -c devpyshell + +This command spawns a terminal and places you in an interactive Python +interpreter within the OpenEmbedded build environment. The +:term:`OE_TERMINAL` variable +controls what type of shell is opened. + +When you are finished using ``devpyshell``, you can exit the shell +either by using Ctrl+d or closing the terminal window. + +.. _dev-building: + +Building +======== + +This section describes various build procedures, such as the steps +needed for a simple build, a target that uses multiple configurations, +building an image for more than one machine, and so forth. + +.. _dev-building-a-simple-image: + +Building a Simple Image +----------------------- + +In the development environment, you need to build an image whenever you +change hardware support, add or change system libraries, or add or +change services that have dependencies. Several methods exist that allow +you to build an image within the Yocto Project. This section presents +the basic steps you need to build a simple image using BitBake from a +build host running Linux. + +.. note:: + + - For information on how to build an image using + :term:`Toaster`, see the + :doc:`../toaster-manual/toaster-manual`. + + - For information on how to use ``devtool`` to build images, see the + ":ref:`sdk-manual/sdk-extensible:using \`\`devtool\`\` in your sdk workflow`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + - For a quick example on how to build an image using the + OpenEmbedded build system, see the + :doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document. + +The build process creates an entire Linux distribution from source and +places it in your :term:`Build Directory` under +``tmp/deploy/images``. For detailed information on the build process +using BitBake, see the ":ref:`images-dev-environment`" section in the +Yocto Project Overview and Concepts Manual. + +The following figure and list overview the build process: + +.. image:: figures/bitbake-build-flow.png + :align: center + +1.
*Set up Your Host Development System to Support Development Using the + Yocto Project*: See the "`Setting Up to Use the Yocto + Project <#dev-manual-start>`__" section for options on how to get a + build host ready to use the Yocto Project. + +2. *Initialize the Build Environment:* Initialize the build environment + by sourcing the build environment script (i.e. + :ref:`structure-core-script`): + :: + + $ source oe-init-build-env [build_dir] + + When you use the initialization script, the OpenEmbedded build system + uses ``build`` as the default Build Directory in your current work + directory. You can use a build_dir argument with the script to + specify a different build directory. + + .. note:: + + A common practice is to use a different Build Directory for + different targets. For example, + ~/build/x86 + for a + qemux86 + target, and + ~/build/arm + for a + qemuarm + target. + +3. Make Sure Your ``local.conf`` File is Correct: Ensure the + ``conf/local.conf`` configuration file, which is found in the Build + Directory, is set up how you want it. This file defines many aspects + of the build environment including the target machine architecture + through the ``MACHINE`` variable, the packaging format used during + the build + (:term:`PACKAGE_CLASSES`), + and a centralized tarball download directory through the + :term:`DL_DIR` variable. + +4. *Build the Image:* Build the image using the ``bitbake`` command: + :: + + $ bitbake target + + .. note:: + + For information on BitBake, see the + BitBake User Manual + . + + The target is the name of the recipe you want to build. Common + targets are the images in ``meta/recipes-core/images``, + ``meta/recipes-sato/images``, and so forth all found in the + :term:`Source Directory`. Or, the target + can be the name of a recipe for a specific piece of software such as + BusyBox. For more details about the images the OpenEmbedded build + system supports, see the + ":ref:`ref-manual/ref-images:Images`" chapter in the Yocto + Project Reference Manual. + + As an example, the following command builds the + ``core-image-minimal`` image: + :: + + $ bitbake core-image-minimal + + Once an + image has been built, it often needs to be installed. The images and + kernels built by the OpenEmbedded build system are placed in the + Build Directory in ``tmp/deploy/images``. For information on how to + run pre-built images such as ``qemux86`` and ``qemuarm``, see the + :doc:`../sdk-manual/sdk-manual` manual. For + information about how to install these images, see the documentation + for your particular board or machine. + +.. _dev-building-images-for-multiple-targets-using-multiple-configurations: + +Building Images for Multiple Targets Using Multiple Configurations +------------------------------------------------------------------ + +You can use a single ``bitbake`` command to build multiple images or +packages for different targets where each image or package requires a +different configuration (multiple configuration builds). The builds, in +this scenario, are sometimes referred to as "multiconfigs", and this +section uses that term throughout. + +This section describes how to set up for multiple configuration builds +and how to account for cross-build dependencies between the +multiconfigs. + +.. 
_dev-setting-up-and-running-a-multiple-configuration-build: + +Setting Up and Running a Multiple Configuration Build +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To accomplish a multiple configuration build, you must define each +target's configuration separately using a parallel configuration file in +the :term:`Build Directory`, and you +must follow a required file hierarchy. Additionally, you must enable the +multiple configuration builds in your ``local.conf`` file. + +Follow these steps to set up and execute multiple configuration builds: + +- *Create Separate Configuration Files*: You need to create a single + configuration file for each build target (each multiconfig). + Minimally, each configuration file must define the machine and the + temporary directory BitBake uses for the build. Suggested practice + dictates that you do not overlap the temporary directories used + during the builds. However, it is possible that you can share the + temporary directory + (:term:`TMPDIR`). For example, + consider a scenario with two different multiconfigs for the same + :term:`MACHINE`: "qemux86" built + for two distributions such as "poky" and "poky-lsb". In this case, + you might want to use the same ``TMPDIR``. + + Here is an example showing the minimal statements needed in a + configuration file for a "qemux86" target whose temporary build + directory is ``tmpmultix86``: + :: + + MACHINE = "qemux86" + TMPDIR = "${TOPDIR}/tmpmultix86" + + The location for these multiconfig configuration files is specific. + They must reside in the current build directory in a sub-directory of + ``conf`` named ``multiconfig``. Following is an example that defines + two configuration files for the "x86" and "arm" multiconfigs: + + .. image:: figures/multiconfig_files.png + :align: center + + The reason for this required file hierarchy is because the ``BBPATH`` + variable is not constructed until the layers are parsed. + Consequently, using the configuration file as a pre-configuration + file is not possible unless it is located in the current working + directory. + +- *Add the BitBake Multi-configuration Variable to the Local + Configuration File*: Use the + :term:`BBMULTICONFIG` + variable in your ``conf/local.conf`` configuration file to specify + each multiconfig. Continuing with the example from the previous + figure, the ``BBMULTICONFIG`` variable needs to enable two + multiconfigs: "x86" and "arm" by specifying each configuration file: + :: + + BBMULTICONFIG = "x86 arm" + + .. note:: + + A "default" configuration already exists by definition. This + configuration is named: "" (i.e. empty string) and is defined by + the variables coming from your + local.conf + file. Consequently, the previous example actually adds two + additional configurations to your build: "arm" and "x86" along + with "". + +- *Launch BitBake*: Use the following BitBake command form to launch + the multiple configuration build: + :: + + $ bitbake [mc:multiconfigname:]target [[[mc:multiconfigname:]target] ... ] + + For the example in this section, the following command applies: + :: + + $ bitbake mc:x86:core-image-minimal mc:arm:core-image-sato mc::core-image-base + + The previous BitBake command builds a ``core-image-minimal`` image + that is configured through the ``x86.conf`` configuration file, a + ``core-image-sato`` image that is configured through the ``arm.conf`` + configuration file and a ``core-image-base`` that is configured + through your ``local.conf`` configuration file. + +.. 
note:: + + Support for multiple configuration builds in the Yocto Project DISTRO + (DISTRO_NAME) Release does not include Shared State (sstate) + optimizations. Consequently, if a build uses the same object twice + in, for example, two different + TMPDIR + directories, the build either loads from an existing sstate cache for + that build at the start or builds the object fresh. + +.. _dev-enabling-multiple-configuration-build-dependencies: + +Enabling Multiple Configuration Build Dependencies +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes dependencies can exist between targets (multiconfigs) in a +multiple configuration build. For example, suppose that in order to +build a ``core-image-sato`` image for an "x86" multiconfig, the root +filesystem of an "arm" multiconfig must exist. This dependency is +essentially that the +:ref:`ref-tasks-image` task in the +``core-image-sato`` recipe depends on the completion of the +:ref:`ref-tasks-rootfs` task of the +``core-image-minimal`` recipe. + +To enable dependencies in a multiple configuration build, you must +declare the dependencies in the recipe using the following statement +form: +:: + + task_or_package[mcdepends] = "mc:from_multiconfig:to_multiconfig:recipe_name:task_on_which_to_depend" + +To better show how to use this statement, consider the example scenario +from the first paragraph of this section. The following statement needs +to be added to the recipe that builds the ``core-image-sato`` image: +:: + + do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_rootfs" + +In this example, the from_multiconfig is "x86". The to_multiconfig is "arm". The +task on which the ``do_image`` task in the recipe depends is the +``do_rootfs`` task from the ``core-image-minimal`` recipe associated +with the "arm" multiconfig. + +Once you set up this dependency, you can build the "x86" multiconfig +using a BitBake command as follows: +:: + + $ bitbake mc:x86:core-image-sato + +This command executes all the tasks needed to create the +``core-image-sato`` image for the "x86" multiconfig. Because of the +dependency, BitBake also executes through the ``do_rootfs`` task for the +"arm" multiconfig build. + +Having a recipe depend on the root filesystem of another build might not +seem that useful. Consider this change to the statement in the +``core-image-sato`` recipe: +:: + + do_image[mcdepends] = "mc:x86:arm:core-image-minimal:do_image" + +In this case, BitBake must +create the ``core-image-minimal`` image for the "arm" build since the +"x86" build depends on it. + +Because "x86" and "arm" are enabled for multiple configuration builds +and have separate configuration files, BitBake places the artifacts for +each build in the respective temporary build directories (i.e. +:term:`TMPDIR`). + +.. _building-an-initramfs-image: + +Building an Initial RAM Filesystem (initramfs) Image +---------------------------------------------------- + +An initial RAM filesystem (initramfs) image provides a temporary root +filesystem used for early system initialization (e.g. loading of modules +needed to locate and mount the "real" root filesystem). + +.. note:: + + The initramfs image is the successor of initial RAM disk (initrd). It + is a "copy in and out" (cpio) archive of the initial filesystem that + gets loaded into memory during the Linux startup process. Because + Linux uses the contents of the archive during initialization, the + initramfs image needs to contain all of the device drivers and tools + needed to mount the final root filesystem. 
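+
+As a quick preview of the configuration described in step 2 of the
+following list, a minimal sketch might add this to your ``local.conf``
+file:
+::
+
+   INITRAMFS_IMAGE_BUNDLE = "1"
+
+and this to the kernel recipe (or a ``.bbappend`` to it), where the
+image name is only an example taken from the reference recipe in step 1:
+::
+
+   INITRAMFS_IMAGE = "core-image-minimal-initramfs"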
+ +Follow these steps to create an initramfs image: + +1. *Create the initramfs Image Recipe:* You can reference the + ``core-image-minimal-initramfs.bb`` recipe found in the + ``meta/recipes-core`` directory of the :term:`Source Directory` + as an example + from which to work. + +2. *Decide if You Need to Bundle the initramfs Image Into the Kernel + Image:* If you want the initramfs image that is built to be bundled + in with the kernel image, set the + :term:`INITRAMFS_IMAGE_BUNDLE` + variable to "1" in your ``local.conf`` configuration file and set the + :term:`INITRAMFS_IMAGE` + variable in the recipe that builds the kernel image. + + .. note:: + + It is recommended that you do bundle the initramfs image with the + kernel image to avoid circular dependencies between the kernel + recipe and the initramfs recipe should the initramfs image include + kernel modules. + + Setting the ``INITRAMFS_IMAGE_BUNDLE`` flag causes the initramfs + image to be unpacked into the ``${B}/usr/`` directory. The unpacked + initramfs image is then passed to the kernel's ``Makefile`` using the + :term:`CONFIG_INITRAMFS_SOURCE` + variable, allowing the initramfs image to be built into the kernel + normally. + + .. note:: + + If you choose to not bundle the initramfs image with the kernel + image, you are essentially using an + Initial RAM Disk (initrd) + . Creating an initrd is handled primarily through the + INITRD_IMAGE + , + INITRD_LIVE + , and + INITRD_IMAGE_LIVE + variables. For more information, see the + image-live.bbclass + file. + +3. *Optionally Add Items to the initramfs Image Through the initramfs + Image Recipe:* If you add items to the initramfs image by way of its + recipe, you should use + :term:`PACKAGE_INSTALL` + rather than + :term:`IMAGE_INSTALL`. + ``PACKAGE_INSTALL`` gives more direct control of what is added to the + image as compared to the defaults you might not necessarily want that + are set by the :ref:`image ` + or :ref:`core-image ` + classes. + +4. *Build the Kernel Image and the initramfs Image:* Build your kernel + image using BitBake. Because the initramfs image recipe is a + dependency of the kernel image, the initramfs image is built as well + and bundled with the kernel image if you used the + :term:`INITRAMFS_IMAGE_BUNDLE` + variable described earlier. + +Building a Tiny System +---------------------- + +Very small distributions have some significant advantages such as +requiring less on-die or in-package memory (cheaper), better performance +through efficient cache usage, lower power requirements due to less +memory, faster boot times, and reduced development overhead. Some +real-world examples where a very small distribution gives you distinct +advantages are digital cameras, medical devices, and small headless +systems. + +This section presents information that shows you how you can trim your +distribution to even smaller sizes than the ``poky-tiny`` distribution, +which is around 5 Mbytes, that can be built out-of-the-box using the +Yocto Project. + +.. _tiny-system-overview: + +Tiny System Overview +~~~~~~~~~~~~~~~~~~~~ + +The following list presents the overall steps you need to consider and +perform to create distributions with smaller root filesystems, achieve +faster boot times, maintain your critical functionality, and avoid +initial RAM disks: + +- `Determine your goals and guiding + principles. <#goals-and-guiding-principles>`__ + +- `Understand what contributes to your image + size. 
<#understand-what-gives-your-image-size>`__ + +- `Reduce the size of the root + filesystem. <#trim-the-root-filesystem>`__ + +- `Reduce the size of the kernel. <#trim-the-kernel>`__ + +- `Eliminate packaging + requirements. <#remove-package-management-requirements>`__ + +- `Look for other ways to minimize + size. <#look-for-other-ways-to-minimize-size>`__ + +- `Iterate on the process. <#iterate-on-the-process>`__ + +Goals and Guiding Principles +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Before you can reach your destination, you need to know where you are +going. Here is an example list that you can use as a guide when creating +very small distributions: + +- Determine how much space you need (e.g. a kernel that is 1 Mbyte or + less and a root filesystem that is 3 Mbytes or less). + +- Find the areas that are currently taking 90% of the space and + concentrate on reducing those areas. + +- Do not create any difficult "hacks" to achieve your goals. + +- Leverage the device-specific options. + +- Work in a separate layer so that you keep changes isolated. For + information on how to create layers, see the "`Understanding and + Creating Layers <#understanding-and-creating-layers>`__" section. + +.. _understand-what-gives-your-image-size: + +Understand What Contributes to Your Image Size +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It is easiest to have something to start with when creating your own +distribution. You can use the Yocto Project out-of-the-box to create the +``poky-tiny`` distribution. Ultimately, you will want to make changes in +your own distribution that are likely modeled after ``poky-tiny``. + +.. note:: + + To use + poky-tiny + in your build, set the + DISTRO + variable in your + local.conf + file to "poky-tiny" as described in the " + Creating Your Own Distribution + " section. + +Understanding some memory concepts will help you reduce the system size. +Memory consists of static, dynamic, and temporary memory. Static memory +is the TEXT (code), DATA (initialized data in the code), and BSS +(uninitialized data) sections. Dynamic memory represents memory that is +allocated at runtime: stacks, hash tables, and so forth. Temporary +memory is recovered after the boot process. This memory consists of +memory used for decompressing the kernel and for the ``__init__`` +functions. + +To help you see where you currently are with kernel and root filesystem +sizes, you can use two tools found in the :term:`Source Directory` +in the +``scripts/tiny/`` directory: + +- ``ksize.py``: Reports component sizes for the kernel build objects. + +- ``dirsize.py``: Reports component sizes for the root filesystem. + +This next tool and command help you organize configuration fragments and +view file dependencies in a human-readable form: + +- ``merge_config.sh``: Helps you manage configuration files and + fragments within the kernel. With this tool, you can merge individual + configuration fragments together. The tool allows you to make + overrides and warns you of any missing configuration options. The + tool is ideal for allowing you to iterate on configurations, create + minimal configurations, and create configuration files for different + machines without having to duplicate your process. + + The ``merge_config.sh`` script is part of the Linux Yocto kernel Git + repositories (i.e. ``linux-yocto-3.14``, ``linux-yocto-3.10``, + ``linux-yocto-3.8``, and so forth) in the ``scripts/kconfig`` + directory. 
+ + For more information on configuration fragments, see the + ":ref:`creating-config-fragments`" + section in the Yocto Project Linux Kernel Development Manual. + +- ``bitbake -u taskexp -g bitbake_target``: Using the BitBake command + with these options brings up a Dependency Explorer from which you can + view file dependencies. Understanding these dependencies allows you + to make informed decisions when cutting out various pieces of the + kernel and root filesystem. + +Trim the Root Filesystem +~~~~~~~~~~~~~~~~~~~~~~~~ + +The root filesystem is made up of packages for booting, libraries, and +applications. To change things, you can configure how the packaging +happens, which changes the way you build them. You can also modify the +filesystem itself or select a different filesystem. + +First, find out what is hogging your root filesystem by running the +``dirsize.py`` script from your root directory: +:: + + $ cd root-directory-of-image + $ dirsize.py 100000 > dirsize-100k.log + $ cat dirsize-100k.log + +You can apply a filter to the script to ignore files +under a certain size. The previous example filters out any files below +100 Kbytes. The sizes reported by the tool are uncompressed, and thus +will be smaller by a relatively constant factor in a compressed root +filesystem. When you examine your log file, you can focus on areas of +the root filesystem that take up large amounts of memory. + +You need to be sure that what you eliminate does not cripple the +functionality you need. One way to see how packages relate to each other +is by using the Dependency Explorer UI with the BitBake command: +:: + + $ cd image-directory + $ bitbake -u taskexp -g image + +Use the interface to +select potential packages you wish to eliminate and see their dependency +relationships. + +When deciding how to reduce the size, get rid of packages that result in +minimal impact on the feature set. For example, you might not need a VGA +display. Or, you might be able to get by with ``devtmpfs`` and ``mdev`` +instead of ``udev``. + +Use your ``local.conf`` file to make changes. For example, to eliminate +``udev`` and ``glib``, set the following in the local configuration +file: +:: + + VIRTUAL-RUNTIME_dev_manager = "" + +Finally, you should consider exactly the type of root filesystem you +need to meet your needs while also reducing its size. For example, +consider ``cramfs``, ``squashfs``, ``ubifs``, ``ext2``, or an +``initramfs`` using ``initramfs``. Be aware that ``ext3`` requires a 1 +Mbyte journal. If you are okay with running read-only, you do not need +this journal. + +.. note:: + + After each round of elimination, you need to rebuild your system and + then use the tools to see the effects of your reductions. + +Trim the Kernel +~~~~~~~~~~~~~~~ + +The kernel is built by including policies for hardware-independent +aspects. What subsystems do you enable? For what architecture are you +building? Which drivers do you build by default? + +.. note:: + + You can modify the kernel source if you want to help with boot time. + +Run the ``ksize.py`` script from the top-level Linux build directory to +get an idea of what is making up the kernel: +:: + + $ cd top-level-linux-build-directory + $ ksize.py > ksize.log + $ cat ksize.log + +When you examine the log, you will see how much space is taken up with +the built-in ``.o`` files for drivers, networking, core kernel files, +filesystem, sound, and so forth. 
The sizes reported by the tool are +uncompressed, and thus will be smaller by a relatively constant factor +in a compressed kernel image. Look to reduce the areas that are large +and taking up around the "90% rule." + +To examine, or drill down, into any particular area, use the ``-d`` +option with the script: +:: + + $ ksize.py -d > ksize.log + +Using this option +breaks out the individual file information for each area of the kernel +(e.g. drivers, networking, and so forth). + +Use your log file to see what you can eliminate from the kernel based on +features you can let go. For example, if you are not going to need +sound, you do not need any drivers that support sound. + +After figuring out what to eliminate, you need to reconfigure the kernel +to reflect those changes during the next build. You could run +``menuconfig`` and make all your changes at once. However, that makes it +difficult to see the effects of your individual eliminations and also +makes it difficult to replicate the changes for perhaps another target +device. A better method is to start with no configurations using +``allnoconfig``, create configuration fragments for individual changes, +and then manage the fragments into a single configuration file using +``merge_config.sh``. The tool makes it easy for you to iterate using the +configuration change and build cycle. + +Each time you make configuration changes, you need to rebuild the kernel +and check to see what impact your changes had on the overall size. + +Remove Package Management Requirements +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Packaging requirements add size to the image. One way to reduce the size +of the image is to remove all the packaging requirements from the image. +This reduction includes both removing the package manager and its unique +dependencies as well as removing the package management data itself. + +To eliminate all the packaging requirements for an image, be sure that +"package-management" is not part of your +:term:`IMAGE_FEATURES` +statement for the image. When you remove this feature, you are removing +the package manager as well as its dependencies from the root +filesystem. + +Look for Other Ways to Minimize Size +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Depending on your particular circumstances, other areas that you can +trim likely exist. The key to finding these areas is through tools and +methods described here combined with experimentation and iteration. Here +are a couple of areas to experiment with: + +- ``glibc``: In general, follow this process: + + 1. Remove ``glibc`` features from + :term:`DISTRO_FEATURES` + that you think you do not need. + + 2. Build your distribution. + + 3. If the build fails due to missing symbols in a package, determine + if you can reconfigure the package to not need those features. For + example, change the configuration to not support wide character + support as is done for ``ncurses``. Or, if support for those + characters is needed, determine what ``glibc`` features provide + the support and restore the configuration. + + 4. Rebuild and repeat the process. + +- ``busybox``: For BusyBox, use a process similar as described for + ``glibc``. A difference is you will need to boot the resulting system + to see if you are able to do everything you expect from the running + system. You need to be sure to integrate configuration fragments into + Busybox because BusyBox handles its own core features and then allows + you to add configuration fragments on top. 
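+
+As a concrete sketch of the ``glibc`` item above (the feature names here
+are only examples and might not apply to your image), you could drop
+features from your distro configuration or ``local.conf`` and then
+rebuild to see what, if anything, breaks:
+::
+
+   DISTRO_FEATURES_remove = "ipv6 nls"
+
+Treat such removals purely as a starting point for the iterative
+build-and-measure cycle described in this section.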
+ +Iterate on the Process +~~~~~~~~~~~~~~~~~~~~~~ + +If you have not reached your goals on system size, you need to iterate +on the process. The process is the same. Use the tools and see just what +is taking up 90% of the root filesystem and the kernel. Decide what you +can eliminate without limiting your device beyond what you need. + +Depending on your system, a good place to look might be Busybox, which +provides a stripped down version of Unix tools in a single, executable +file. You might be able to drop virtual terminal services or perhaps +ipv6. + +Building Images for More than One Machine +----------------------------------------- + +A common scenario developers face is creating images for several +different machines that use the same software environment. In this +situation, it is tempting to set the tunings and optimization flags for +each build specifically for the targeted hardware (i.e. "maxing out" the +tunings). Doing so can considerably add to build times and package feed +maintenance collectively for the machines. For example, selecting tunes +that are extremely specific to a CPU core used in a system might enable +some micro optimizations in GCC for that particular system but would +otherwise not gain you much of a performance difference across the other +systems as compared to using a more general tuning across all the builds +(e.g. setting :term:`DEFAULTTUNE` +specifically for each machine's build). Rather than "max out" each +build's tunings, you can take steps that cause the OpenEmbedded build +system to reuse software across the various machines where it makes +sense. + +If build speed and package feed maintenance are considerations, you +should consider the points in this section that can help you optimize +your tunings to best consider build times and package feed maintenance. + +- *Share the Build Directory:* If at all possible, share the + :term:`TMPDIR` across builds. The + Yocto Project supports switching between different + :term:`MACHINE` values in the same + ``TMPDIR``. This practice is well supported and regularly used by + developers when building for multiple machines. When you use the same + ``TMPDIR`` for multiple machine builds, the OpenEmbedded build system + can reuse the existing native and often cross-recipes for multiple + machines. Thus, build time decreases. + + .. note:: + + If + DISTRO + settings change or fundamental configuration settings such as the + filesystem layout, you need to work with a clean + TMPDIR + . Sharing + TMPDIR + under these circumstances might work but since it is not + guaranteed, you should use a clean + TMPDIR + . + +- *Enable the Appropriate Package Architecture:* By default, the + OpenEmbedded build system enables three levels of package + architectures: "all", "tune" or "package", and "machine". Any given + recipe usually selects one of these package architectures (types) for + its output. Depending for what a given recipe creates packages, + making sure you enable the appropriate package architecture can + directly impact the build time. + + A recipe that just generates scripts can enable "all" architecture + because there are no binaries to build. To specifically enable "all" + architecture, be sure your recipe inherits the + :ref:`allarch ` class. + This class is useful for "all" architectures because it configures + many variables so packages can be used across multiple architectures. 
+ + If your recipe needs to generate packages that are machine-specific + or when one of the build or runtime dependencies is already + machine-architecture dependent, which makes your recipe also + machine-architecture dependent, make sure your recipe enables the + "machine" package architecture through the + :term:`MACHINE_ARCH` + variable: + :: + + PACKAGE_ARCH = "${MACHINE_ARCH}" + + When you do not + specifically enable a package architecture through the + :term:`PACKAGE_ARCH`, The + OpenEmbedded build system defaults to the + :term:`TUNE_PKGARCH` setting: + :: + + PACKAGE_ARCH = "${TUNE_PKGARCH}" + +- *Choose a Generic Tuning File if Possible:* Some tunes are more + generic and can run on multiple targets (e.g. an ``armv5`` set of + packages could run on ``armv6`` and ``armv7`` processors in most + cases). Similarly, ``i486`` binaries could work on ``i586`` and + higher processors. You should realize, however, that advances on + newer processor versions would not be used. + + If you select the same tune for several different machines, the + OpenEmbedded build system reuses software previously built, thus + speeding up the overall build time. Realize that even though a new + sysroot for each machine is generated, the software is not recompiled + and only one package feed exists. + +- *Manage Granular Level Packaging:* Sometimes cases exist where + injecting another level of package architecture beyond the three + higher levels noted earlier can be useful. For example, consider how + NXP (formerly Freescale) allows for the easy reuse of binary packages + in their layer + :yocto_git:`meta-freescale `. + In this example, the + :yocto_git:`fsl-dynamic-packagearch ` + class shares GPU packages for i.MX53 boards because all boards share + the AMD GPU. The i.MX6-based boards can do the same because all + boards share the Vivante GPU. This class inspects the BitBake + datastore to identify if the package provides or depends on one of + the sub-architecture values. If so, the class sets the + :term:`PACKAGE_ARCH` value + based on the ``MACHINE_SUBARCH`` value. If the package does not + provide or depend on one of the sub-architecture values but it + matches a value in the machine-specific filter, it sets + :term:`MACHINE_ARCH`. This + behavior reduces the number of packages built and saves build time by + reusing binaries. + +- *Use Tools to Debug Issues:* Sometimes you can run into situations + where software is being rebuilt when you think it should not be. For + example, the OpenEmbedded build system might not be using shared + state between machines when you think it should be. These types of + situations are usually due to references to machine-specific + variables such as :term:`MACHINE`, + :term:`SERIAL_CONSOLES`, + :term:`XSERVER`, + :term:`MACHINE_FEATURES`, + and so forth in code that is supposed to only be tune-specific or + when the recipe depends + (:term:`DEPENDS`, + :term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, and so forth) + on some other recipe that already has + :term:`PACKAGE_ARCH` defined + as "${MACHINE_ARCH}". + + .. note:: + + Patches to fix any issues identified are most welcome as these + issues occasionally do occur. + + For such cases, you can use some tools to help you sort out the + situation: + + - *sstate-diff-machines.sh:* You can find this tool in the + ``scripts`` directory of the Source Repositories. See the comments + in the script for information on how to use the tool. 
+ + - *BitBake's "-S printdiff" Option:* Using this option causes + BitBake to try to establish the closest signature match it can + (e.g. in the shared state cache) and then run ``bitbake-diffsigs`` + over the matches to determine the stamps and delta where these two + stamp trees diverge. + +Building Software from an External Source +----------------------------------------- + +By default, the OpenEmbedded build system uses the +:term:`Build Directory` when building source +code. The build process involves fetching the source files, unpacking +them, and then patching them if necessary before the build takes place. + +Situations exist where you might want to build software from source +files that are external to and thus outside of the OpenEmbedded build +system. For example, suppose you have a project that includes a new BSP +with a heavily customized kernel. And, you want to minimize exposing the +build system to the development team so that they can focus on their +project and maintain everyone's workflow as much as possible. In this +case, you want a kernel source directory on the development machine +where the development occurs. You want the recipe's +:term:`SRC_URI` variable to point to +the external directory and use it as is, not copy it. + +To build from software that comes from an external source, all you need +to do is inherit the +:ref:`externalsrc ` class +and then set the +:term:`EXTERNALSRC` variable to +point to your external source code. Here are the statements to put in +your ``local.conf`` file: +:: + + INHERIT += "externalsrc" + EXTERNALSRC_pn-myrecipe = "path-to-your-source-tree" + +This next example shows how to accomplish the same thing by setting +``EXTERNALSRC`` in the recipe itself or in the recipe's append file: +:: + + EXTERNALSRC = "path" + EXTERNALSRC_BUILD = "path" + +.. note:: + + In order for these settings to take effect, you must globally or + locally inherit the + externalsrc + class. + +By default, ``externalsrc.bbclass`` builds the source code in a +directory separate from the external source directory as specified by +:term:`EXTERNALSRC`. If you need +to have the source built in the same directory in which it resides, or +some other nominated directory, you can set +:term:`EXTERNALSRC_BUILD` +to point to that directory: +:: + + EXTERNALSRC_BUILD_pn-myrecipe = "path-to-your-source-tree" + +Replicating a Build Offline +--------------------------- + +It can be useful to take a "snapshot" of upstream sources used in a +build and then use that "snapshot" later to replicate the build offline. +To do so, you need to first prepare and populate your downloads +directory your "snapshot" of files. Once your downloads directory is +ready, you can use it at any time and from any machine to replicate your +build. + +Follow these steps to populate your Downloads directory: + +1. *Create a Clean Downloads Directory:* Start with an empty downloads + directory (:term:`DL_DIR`). You + start with an empty downloads directory by either removing the files + in the existing directory or by setting ``DL_DIR`` to point to either + an empty location or one that does not yet exist. + +2. *Generate Tarballs of the Source Git Repositories:* Edit your + ``local.conf`` configuration file as follows: + :: + + DL_DIR = "/home/your-download-dir/" + BB_GENERATE_MIRROR_TARBALLS = "1" + + During + the fetch process in the next step, BitBake gathers the source files + and creates tarballs in the directory pointed to by ``DL_DIR``. 
See + the + :term:`BB_GENERATE_MIRROR_TARBALLS` + variable for more information. + +3. *Populate Your Downloads Directory Without Building:* Use BitBake to + fetch your sources but inhibit the build: + :: + + $ bitbake target --runonly=fetch + + The downloads directory (i.e. ``${DL_DIR}``) now has + a "snapshot" of the source files in the form of tarballs, which can + be used for the build. + +4. *Optionally Remove Any Git or other SCM Subdirectories From the + Downloads Directory:* If you want, you can clean up your downloads + directory by removing any Git or other Source Control Management + (SCM) subdirectories such as ``${DL_DIR}/git2/*``. The tarballs + already contain these subdirectories. + +Once your downloads directory has everything it needs regarding source +files, you can create your "own-mirror" and build your target. +Understand that you can use the files to build the target offline from +any machine and at any time. + +Follow these steps to build your target using the files in the downloads +directory: + +1. *Using Local Files Only:* Inside your ``local.conf`` file, add the + :term:`SOURCE_MIRROR_URL` + variable, inherit the + :ref:`own-mirrors ` + class, and add the + :term:`bitbake:BB_NO_NETWORK` + variable to your ``local.conf``. + :: + + SOURCE_MIRROR_URL ?= "file:///home/your-download-dir/" + INHERIT += "own-mirrors" + BB_NO_NETWORK = "1" + + The ``SOURCE_MIRROR_URL`` variable and ``own-mirrors`` + class set up the system to use the downloads directory as your "own + mirror". Using the ``BB_NO_NETWORK`` variable makes sure that + BitBake's fetching process in step 3 stays local, which means files + from your "own-mirror" are used. + +2. *Start With a Clean Build:* You can start with a clean build by + removing the + ``${``\ :term:`TMPDIR`\ ``}`` + directory or using a new :term:`Build Directory`. + +3. *Build Your Target:* Use BitBake to build your target: + :: + + $ bitbake target + + The build completes using the known local "snapshot" of source + files from your mirror. The resulting tarballs for your "snapshot" of + source files are in the downloads directory. + + .. note:: + + The offline build does not work if recipes attempt to find the + latest version of software by setting + :term:`SRCREV` to + ``${``\ :term:`AUTOREV`\ ``}``: + SRCREV = "${AUTOREV}" When a recipe sets ``SRCREV`` to + ``${AUTOREV}``, the build system accesses the network in an + attempt to determine the latest version of software from the SCM. + Typically, recipes that use ``AUTOREV`` are custom or modified + recipes. Recipes that reside in public repositories usually do not + use ``AUTOREV``. + + If you do have recipes that use ``AUTOREV``, you can take steps to + still use the recipes in an offline build. Do the following: + + 1. Use a configuration generated by enabling `build + history <#maintaining-build-output-quality>`__. + + 2. Use the ``buildhistory-collect-srcrevs`` command to collect the + stored ``SRCREV`` values from the build's history. For more + information on collecting these values, see the "`Build History + Package Information <#build-history-package-information>`__" + section. + + 3. Once you have the correct source revisions, you can modify + those recipes to set ``SRCREV`` to specific versions of the + software. + +Speeding Up a Build +=================== + +Build time can be an issue. By default, the build system uses simple +controls to try and maximize build efficiency.
In general, the default +settings for all the following variables result in the most efficient +build times when dealing with single socket systems (i.e. a single CPU). +If you have multiple CPUs, you might try increasing the default values +to gain more speed. See the descriptions in the glossary for each +variable for more information: + +- :term:`BB_NUMBER_THREADS`: + The maximum number of threads BitBake simultaneously executes. + +- :term:`bitbake:BB_NUMBER_PARSE_THREADS`: + The number of threads BitBake uses during parsing. + +- :term:`PARALLEL_MAKE`: Extra + options passed to the ``make`` command during the + :ref:`ref-tasks-compile` task in + order to specify parallel compilation on the local build host. + +- :term:`PARALLEL_MAKEINST`: + Extra options passed to the ``make`` command during the + :ref:`ref-tasks-install` task in + order to specify parallel installation on the local build host. + +As mentioned, these variables all scale to the number of processor cores +available on the build system. For single socket systems, this +auto-scaling ensures that the build system fundamentally takes advantage +of potential parallel operations during the build based on the build +machine's capabilities. + +Following are additional factors that can affect build speed: + +- File system type: The file system type that the build is being + performed on can also influence performance. Using ``ext4`` is + recommended as compared to ``ext2`` and ``ext3`` due to ``ext4`` + improved features such as extents. + +- Disabling the updating of access time using ``noatime``: The + ``noatime`` mount option prevents the build system from updating file + and directory access times. + +- Setting a longer commit: Using the "commit=" mount option increases + the interval in seconds between disk cache writes. Changing this + interval from the five second default to something longer increases + the risk of data loss but decreases the need to write to the disk, + thus increasing the build performance. + +- Choosing the packaging backend: Of the available packaging backends, + IPK is the fastest. Additionally, selecting a singular packaging + backend also helps. + +- Using ``tmpfs`` for :term:`TMPDIR` + as a temporary file system: While this can help speed up the build, + the benefits are limited due to the compiler using ``-pipe``. The + build system goes to some lengths to avoid ``sync()`` calls into the + file system on the principle that if there was a significant failure, + the :term:`Build Directory` + contents could easily be rebuilt. + +- Inheriting the + :ref:`rm_work ` class: + Inheriting this class has shown to speed up builds due to + significantly lower amounts of data stored in the data cache as well + as on disk. Inheriting this class also makes cleanup of + :term:`TMPDIR` faster, at the + expense of being easily able to dive into the source code. File + system maintainers have recommended that the fastest way to clean up + large numbers of files is to reformat partitions rather than delete + files due to the linear nature of partitions. This, of course, + assumes you structure the disk partitions and file systems in a way + that this is practical. + +Aside from the previous list, you should keep some trade offs in mind +that can help you speed up the build: + +- Remove items from + :term:`DISTRO_FEATURES` + that you might not need. 
+ +- Exclude debug symbols and other debug information: If you do not need + these symbols and other debug information, disabling the ``*-dbg`` + package generation can speed up the build. You can disable this + generation by setting the + :term:`INHIBIT_PACKAGE_DEBUG_SPLIT` + variable to "1". + +- Disable static library generation for recipes derived from + ``autoconf`` or ``libtool``: Following is an example showing how to + disable static libraries and still provide an override to handle + exceptions: + :: + + STATICLIBCONF = "--disable-static" + STATICLIBCONF_sqlite3-native = "" + EXTRA_OECONF += "${STATICLIBCONF}" + + .. note:: + + - Some recipes need static libraries in order to work correctly + (e.g. ``pseudo-native`` needs ``sqlite3-native``). Overrides, + as in the previous example, account for these kinds of + exceptions. + + - Some packages have packaging code that assumes the presence of + the static libraries. If so, you might need to exclude them as + well. + +.. _platdev-working-with-libraries: + +Working With Libraries +====================== + +Libraries are an integral part of your system. This section describes +some common practices you might find helpful when working with libraries +to build your system: + +- `How to include static library + files <#including-static-library-files>`__ + +- `How to use the Multilib feature to combine multiple versions of + library files into a single + image <#combining-multiple-versions-library-files-into-one-image>`__ + +- `How to install multiple versions of the same library in parallel on + the same + system <#installing-multiple-versions-of-the-same-library>`__ + +Including Static Library Files +------------------------------ + +If you are building a library and the library offers static linking, you +can control which static library files (``*.a`` files) get included in +the built library. + +The :term:`PACKAGES` and +:term:`FILES_* ` variables in the +``meta/conf/bitbake.conf`` configuration file define how files installed +by the ``do_install`` task are packaged. By default, the ``PACKAGES`` +variable includes ``${PN}-staticdev``, which represents all static +library files. + +.. note:: + + Some previously released versions of the Yocto Project defined the + static library files through + ${PN}-dev + . 
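+
+If you simply want the static archives that a recipe produces to be
+installed into an image, one possible approach (shown here only as a
+sketch for your ``local.conf`` or image recipe) is to pull in all of the
+``*-staticdev`` packages through ``IMAGE_FEATURES``:
+::
+
+   IMAGE_FEATURES += "staticdev-pkgs"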
+ +Following is part of the BitBake configuration file, where you can see +how the static library files are defined: +:: + + PACKAGE_BEFORE_PN ?= "" + PACKAGES = "${PN}-dbg ${PN}-staticdev ${PN}-dev ${PN}-doc ${PN}-locale ${PACKAGE_BEFORE_PN} ${PN}" + PACKAGES_DYNAMIC = "^${PN}-locale-.*" + FILES = "" + + FILES_${PN} = "${bindir}/* ${sbindir}/* ${libexecdir}/* ${libdir}/lib*${SOLIBS} \ + ${sysconfdir} ${sharedstatedir} ${localstatedir} \ + ${base_bindir}/* ${base_sbindir}/* \ + ${base_libdir}/*${SOLIBS} \ + ${base_prefix}/lib/udev/rules.d ${prefix}/lib/udev/rules.d \ + ${datadir}/${BPN} ${libdir}/${BPN}/* \ + ${datadir}/pixmaps ${datadir}/applications \ + ${datadir}/idl ${datadir}/omf ${datadir}/sounds \ + ${libdir}/bonobo/servers" + + FILES_${PN}-bin = "${bindir}/* ${sbindir}/*" + + FILES_${PN}-doc = "${docdir} ${mandir} ${infodir} ${datadir}/gtk-doc \ + ${datadir}/gnome/help" + SECTION_${PN}-doc = "doc" + + FILES_SOLIBSDEV ?= "${base_libdir}/lib*${SOLIBSDEV} ${libdir}/lib*${SOLIBSDEV}" + FILES_${PN}-dev = "${includedir} ${FILES_SOLIBSDEV} ${libdir}/*.la \ + ${libdir}/*.o ${libdir}/pkgconfig ${datadir}/pkgconfig \ + ${datadir}/aclocal ${base_libdir}/*.o \ + ${libdir}/${BPN}/*.la ${base_libdir}/*.la" + SECTION_${PN}-dev = "devel" + ALLOW_EMPTY_${PN}-dev = "1" + RDEPENDS_${PN}-dev = "${PN} (= ${EXTENDPKGV})" + + FILES_${PN}-staticdev = "${libdir}/*.a ${base_libdir}/*.a ${libdir}/${BPN}/*.a" + SECTION_${PN}-staticdev = "devel" + RDEPENDS_${PN}-staticdev = "${PN}-dev (= ${EXTENDPKGV})" + +.. _combining-multiple-versions-library-files-into-one-image: + +Combining Multiple Versions of Library Files into One Image +----------------------------------------------------------- + +The build system offers the ability to build libraries with different +target optimizations or architecture formats and combine these together +into one system image. You can link different binaries in the image +against the different libraries as needed for specific use cases. This +feature is called "Multilib." + +An example would be where you have most of a system compiled in 32-bit +mode using 32-bit libraries, but you have something large, like a +database engine, that needs to be a 64-bit application and uses 64-bit +libraries. Multilib allows you to get the best of both 32-bit and 64-bit +libraries. + +While the Multilib feature is most commonly used for 32 and 64-bit +differences, the approach the build system uses facilitates different +target optimizations. You could compile some binaries to use one set of +libraries and other binaries to use a different set of libraries. The +libraries could differ in architecture, compiler options, or other +optimizations. + +Several examples exist in the ``meta-skeleton`` layer found in the +:term:`Source Directory`: + +- ``conf/multilib-example.conf`` configuration file + +- ``conf/multilib-example2.conf`` configuration file + +- ``recipes-multilib/images/core-image-multilib-example.bb`` recipe + +Preparing to Use Multilib +~~~~~~~~~~~~~~~~~~~~~~~~~ + +User-specific requirements drive the Multilib feature. Consequently, +there is no one "out-of-the-box" configuration that likely exists to +meet your needs. + +In order to enable Multilib, you first need to ensure your recipe is +extended to support multiple libraries. Many standard recipes are +already extended and support multiple libraries. You can check in the +``meta/conf/multilib.conf`` configuration file in the +:term:`Source Directory` to see how this is +done using the +:term:`BBCLASSEXTEND` variable. 
+Eventually, all recipes will be covered and this list will not be +needed. + +For the most part, the Multilib class extension works automatically to +extend the package name from ``${PN}`` to ``${MLPREFIX}${PN}``, where +``MLPREFIX`` is the particular multilib (e.g. "lib32-" or "lib64-"). +Standard variables such as +:term:`DEPENDS`, +:term:`RDEPENDS`, +:term:`RPROVIDES`, +:term:`RRECOMMENDS`, +:term:`PACKAGES`, and +:term:`PACKAGES_DYNAMIC` are +automatically extended by the system. If you are extending any manual +code in the recipe, you can use the ``${MLPREFIX}`` variable to ensure +those names are extended correctly. This automatic extension code +resides in ``multilib.bbclass``. + +Using Multilib +~~~~~~~~~~~~~~ + +After you have set up the recipes, you need to define the actual +combination of multiple libraries you want to build. You accomplish this +through your ``local.conf`` configuration file in the +:term:`Build Directory`. An example +configuration would be as follows: +:: + + MACHINE = "qemux86-64" + require conf/multilib.conf + MULTILIBS = "multilib:lib32" + DEFAULTTUNE_virtclass-multilib-lib32 = "x86" + IMAGE_INSTALL_append = "lib32-glib-2.0" + +This example enables an additional library named +``lib32`` alongside the normal target packages. When combining these +"lib32" alternatives, the example uses "x86" for tuning. For information +on this particular tuning, see +``meta/conf/machine/include/ia32/arch-ia32.inc``. + +The example then includes ``lib32-glib-2.0`` in all the images, which +illustrates one method of including a multiple library dependency. You +can use a normal image build to include this dependency, for example: +:: + + $ bitbake core-image-sato + +You can also build Multilib packages +specifically with a command like this: +:: + + $ bitbake lib32-glib-2.0 + +Additional Implementation Details +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Generic implementation details as well as details that are specific to +package management systems exist. Following are implementation details +that exist regardless of the package management system: + +- The typical convention used for the class extension code as used by + Multilib assumes that all package names specified in + :term:`PACKAGES` that contain + ``${PN}`` have ``${PN}`` at the start of the name. When that + convention is not followed and ``${PN}`` appears at the middle or the + end of a name, problems occur. + +- The :term:`TARGET_VENDOR` + value under Multilib will be extended to "-vendormlmultilib" (e.g. + "-pokymllib32" for a "lib32" Multilib with Poky). The reason for this + slightly unwieldy contraction is that any "-" characters in the + vendor string presently break Autoconf's ``config.sub``, and other + separators are problematic for different reasons. + +For the RPM Package Management System, the following implementation +details exist: + +- A unique architecture is defined for the Multilib packages, along + with creating a unique deploy folder under ``tmp/deploy/rpm`` in the + :term:`Build Directory`. For + example, consider ``lib32`` in a ``qemux86-64`` image. The possible + architectures in the system are "all", "qemux86_64", + "lib32_qemux86_64", and "lib32_x86". + +- The ``${MLPREFIX}`` variable is stripped from ``${PN}`` during RPM + packaging. The naming for a normal RPM package and a Multilib RPM + package in a ``qemux86-64`` system resolves to something similar to + ``bash-4.1-r2.x86_64.rpm`` and ``bash-4.1.r2.lib32_x86.rpm``, + respectively. 
+ +- When installing a Multilib image, the RPM backend first installs the + base image and then installs the Multilib libraries. + +- The build system relies on RPM to resolve the identical files in the + two (or more) Multilib packages. + +For the IPK Package Management System, the following implementation +details exist: + +- The ``${MLPREFIX}`` is not stripped from ``${PN}`` during IPK + packaging. The naming for a normal RPM package and a Multilib IPK + package in a ``qemux86-64`` system resolves to something like + ``bash_4.1-r2.x86_64.ipk`` and ``lib32-bash_4.1-rw_x86.ipk``, + respectively. + +- The IPK deploy folder is not modified with ``${MLPREFIX}`` because + packages with and without the Multilib feature can exist in the same + folder due to the ``${PN}`` differences. + +- IPK defines a sanity check for Multilib installation using certain + rules for file comparison, overridden, etc. + +Installing Multiple Versions of the Same Library +------------------------------------------------ + +Situations can exist where you need to install and use multiple versions +of the same library on the same system at the same time. These +situations almost always exist when a library API changes and you have +multiple pieces of software that depend on the separate versions of the +library. To accommodate these situations, you can install multiple +versions of the same library in parallel on the same system. + +The process is straightforward as long as the libraries use proper +versioning. With properly versioned libraries, all you need to do to +individually specify the libraries is create separate, appropriately +named recipes where the :term:`PN` part of +the name includes a portion that differentiates each library version +(e.g.the major part of the version number). Thus, instead of having a +single recipe that loads one version of a library (e.g. ``clutter``), +you provide multiple recipes that result in different versions of the +libraries you want. As an example, the following two recipes would allow +the two separate versions of the ``clutter`` library to co-exist on the +same system: +:: + + clutter-1.6_1.6.20.bb + clutter-1.8_1.8.4.bb + +Additionally, if +you have other recipes that depend on a given library, you need to use +the :term:`DEPENDS` variable to +create the dependency. Continuing with the same example, if you want to +have a recipe depend on the 1.8 version of the ``clutter`` library, use +the following in your recipe: +:: + + DEPENDS = "clutter-1.8" + +Using x32 psABI +=============== + +x32 processor-specific Application Binary Interface (`x32 +psABI `__) is a native +32-bit processor-specific ABI for Intel 64 (x86-64) architectures. An +ABI defines the calling conventions between functions in a processing +environment. The interface determines what registers are used and what +the sizes are for various C data types. + +Some processing environments prefer using 32-bit applications even when +running on Intel 64-bit platforms. Consider the i386 psABI, which is a +very old 32-bit ABI for Intel 64-bit platforms. The i386 psABI does not +provide efficient use and access of the Intel 64-bit processor +resources, leaving the system underutilized. Now consider the x86_64 +psABI. This ABI is newer and uses 64-bits for data sizes and program +pointers. The extra bits increase the footprint size of the programs, +libraries, and also increases the memory and file system size +requirements. 
Executing under the x32 psABI enables user programs to +utilize CPU and system resources more efficiently while keeping the +memory footprint of the applications low. Extra bits are used for +registers but not for addressing mechanisms. + +The Yocto Project supports the final specifications of x32 psABI as +follows: + +- You can create packages and images in x32 psABI format on x86_64 + architecture targets. + +- You can successfully build recipes with the x32 toolchain. + +- You can create and boot ``core-image-minimal`` and + ``core-image-sato`` images. + +- RPM Package Manager (RPM) support exists for x32 binaries. + +- Support for large images exists. + +To use the x32 psABI, you need to edit your ``conf/local.conf`` +configuration file as follows: +:: + + MACHINE = "qemux86-64" + DEFAULTTUNE = "x86-64-x32" + baselib = "${@d.getVar('BASE_LIB_tune-' + (d.getVar('DEFAULTTUNE') \ + or 'INVALID')) or 'lib'}" + +Once you have set +up your configuration file, use BitBake to build an image that supports +the x32 psABI. Here is an example: +:: + + $ bitbake core-image-sato + +Enabling GObject Introspection Support +====================================== + +`GObject +introspection `__ +is the standard mechanism for accessing GObject-based software from +runtime environments. GObject is a feature of the GLib library that +provides an object framework for the GNOME desktop and related software. +GObject Introspection adds information to GObject that allows objects +created within it to be represented across different programming +languages. If you want to construct GStreamer pipelines using Python, or +control UPnP infrastructure using Javascript and GUPnP, GObject +introspection is the only way to do it. + +This section describes the Yocto Project support for generating and +packaging GObject introspection data. GObject introspection data is a +description of the API provided by libraries built on top of GLib +framework, and, in particular, that framework's GObject mechanism. +GObject Introspection Repository (GIR) files go to ``-dev`` packages, +``typelib`` files go to main packages as they are packaged together with +libraries that are introspected. + +The data is generated when building such a library, by linking the +library with a small executable binary that asks the library to describe +itself, and then executing the binary and processing its output. + +Generating this data in a cross-compilation environment is difficult +because the library is produced for the target architecture, but its +code needs to be executed on the build host. This problem is solved with +the OpenEmbedded build system by running the code through QEMU, which +allows precisely that. Unfortunately, QEMU does not always work +perfectly as mentioned in the "`Known Issues <#known-issues>`__" +section. + +Enabling the Generation of Introspection Data +--------------------------------------------- + +Enabling the generation of introspection data (GIR files) in your +library package involves the following: + +1. Inherit the + :ref:`gobject-introspection ` + class. + +2. Make sure introspection is not disabled anywhere in the recipe or + from anything the recipe includes. Also, make sure that + "gobject-introspection-data" is not in + :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED` + and that "qemu-usermode" is not in + :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`. + If either of these conditions exist, nothing will happen. + +3. Try to build the recipe. 
If you encounter build errors that look like
+   something is unable to find ``.so`` libraries, check where these
+   libraries are located in the source tree and add the following to the
+   recipe:
+   ::
+
+      GIR_EXTRA_LIBS_PATH = "${B}/something/.libs"
+
+   .. note::
+
+      See recipes in the
+      oe-core
+      repository that use that
+      GIR_EXTRA_LIBS_PATH
+      variable as an example.
+
+4. Look for any other errors, which probably mean that introspection
+   support in a package is not entirely standard, and thus breaks down
+   in a cross-compilation environment. For such cases, custom-made fixes
+   are needed. A good place to ask and receive help in these cases is
+   the :ref:`Yocto Project mailing
+   lists `.
+
+.. note::
+
+   Using a library that no longer builds against the latest Yocto
+   Project release and prints introspection-related errors is a good
+   candidate for the previous procedure.
+
+Disabling the Generation of Introspection Data
+----------------------------------------------
+
+You might find that you do not want to generate introspection data. Or,
+perhaps QEMU does not work on your build host and target architecture
+combination. If so, you can use either of the following methods to
+disable GIR file generation:
+
+- Add the following to your distro configuration:
+  ::
+
+     DISTRO_FEATURES_BACKFILL_CONSIDERED = "gobject-introspection-data"
+
+  Adding this statement disables generating introspection data using
+  QEMU but will still enable building introspection tools and libraries
+  (i.e. building them does not require the use of QEMU).
+
+- Add the following to your machine configuration:
+  ::
+
+     MACHINE_FEATURES_BACKFILL_CONSIDERED = "qemu-usermode"
+
+  Adding this statement disables the use of QEMU when building packages for your
+  machine. Currently, this feature is used only by introspection
+  recipes and has the same effect as the previously described option.
+
+  .. note::
+
+     Future releases of the Yocto Project might have other features
+     affected by this option.
+
+If you disable introspection data, you can still obtain it through other
+means such as copying the data from a suitable sysroot, or by generating
+it on the target hardware. The OpenEmbedded build system does not
+currently provide specific support for these techniques.
+
+Testing that Introspection Works in an Image
+--------------------------------------------
+
+Use the following procedure to test if generating introspection data is
+working in an image:
+
+1. Make sure that "gobject-introspection-data" is not in
+   :term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`
+   and that "qemu-usermode" is not in
+   :term:`MACHINE_FEATURES_BACKFILL_CONSIDERED`.
+
+2. Build ``core-image-sato``.
+
+3. Launch a Terminal and then start Python in the terminal.
+
+4. Enter the following in the terminal:
+   ::
+
+      >>> from gi.repository import GLib
+      >>> GLib.get_host_name()
+
+5. For something a little more advanced, see:
+   http://python-gtk-3-tutorial.readthedocs.org/en/latest/introduction.html
+
+Known Issues
+------------
+
+The following known issues exist for GObject Introspection Support:
+
+- ``qemu-ppc64`` immediately crashes. Consequently, you cannot build
+  introspection data on that architecture.
+
+- x32 is not supported by QEMU. Consequently, introspection data is
+  disabled.
+
+- musl causes transient GLib binaries to crash on assertion failures.
+  Consequently, generating introspection data is disabled.
+ +- Because QEMU is not able to run the binaries correctly, introspection + is disabled for some specific packages under specific architectures + (e.g. ``gcr``, ``libsecret``, and ``webkit``). + +- QEMU usermode might not work properly when running 64-bit binaries + under 32-bit host machines. In particular, "qemumips64" is known to + not work under i686. + +.. _dev-optionally-using-an-external-toolchain: + +Optionally Using an External Toolchain +====================================== + +You might want to use an external toolchain as part of your development. +If this is the case, the fundamental steps you need to accomplish are as +follows: + +- Understand where the installed toolchain resides. For cases where you + need to build the external toolchain, you would need to take separate + steps to build and install the toolchain. + +- Make sure you add the layer that contains the toolchain to your + ``bblayers.conf`` file through the + :term:`BBLAYERS` variable. + +- Set the ``EXTERNAL_TOOLCHAIN`` variable in your ``local.conf`` file + to the location in which you installed the toolchain. + +A good example of an external toolchain used with the Yocto Project is +Mentor Graphics Sourcery G++ Toolchain. You can see information on how +to use that particular layer in the ``README`` file at +http://github.com/MentorEmbedded/meta-sourcery/. You can find +further information by reading about the +:term:`TCMODE` variable in the Yocto +Project Reference Manual's variable glossary. + +Creating Partitioned Images Using Wic +===================================== + +Creating an image for a particular hardware target using the +OpenEmbedded build system does not necessarily mean you can boot that +image as is on your device. Physical devices accept and boot images in +various ways depending on the specifics of the device. Usually, +information about the hardware can tell you what image format the device +requires. Should your device require multiple partitions on an SD card, +flash, or an HDD, you can use the OpenEmbedded Image Creator, Wic, to +create the properly partitioned image. + +The ``wic`` command generates partitioned images from existing +OpenEmbedded build artifacts. Image generation is driven by partitioning +commands contained in an Openembedded kickstart file (``.wks``) +specified either directly on the command line or as one of a selection +of canned kickstart files as shown with the ``wic list images`` command +in the "`Using an Existing Kickstart +File <#using-a-provided-kickstart-file>`__" section. When you apply the +command to a given set of build artifacts, the result is an image or set +of images that can be directly written onto media and used on a +particular system. + +.. note:: + + For a kickstart file reference, see the " + OpenEmbedded Kickstart ( + .wks + ) Reference + " Chapter in the Yocto Project Reference Manual. + +The ``wic`` command and the infrastructure it is based on is by +definition incomplete. The purpose of the command is to allow the +generation of customized images, and as such, was designed to be +completely extensible through a plugin interface. See the "`Using the +Wic PlugIn Interface <#wic-using-the-wic-plugin-interface>`__" section +for information on these plugins. + +This section provides some background information on Wic, describes what +you need to have in place to run the tool, provides instruction on how +to use the Wic utility, provides information on using the Wic plugins +interface, and provides several examples that show how to use Wic. 
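+
+As a preview of the workflow covered in the rest of this section, here
+is a minimal sketch of a kickstart file together with the command that
+would turn existing build artifacts into a partitioned image. The file
+name, partition layout, and kernel command line are illustrative
+assumptions only and are not one of the canned files shipped with the
+build system:
+::
+
+   # example.wks - hypothetical two-partition layout
+   part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
+   part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024
+
+   bootloader --ptable msdos --timeout=5 --append="rootwait console=ttyS0,115200"
+
+Assuming ``core-image-minimal`` has already been built, an image could
+then be generated from these directives with:
+::
+
+   $ wic create example.wks -e core-image-minimal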
+ +.. _wic-background: + +Background +---------- + +This section provides some background on the Wic utility. While none of +this information is required to use Wic, you might find it interesting. + +- The name "Wic" is derived from OpenEmbedded Image Creator (oeic). The + "oe" diphthong in "oeic" was promoted to the letter "w", because + "oeic" is both difficult to remember and to pronounce. + +- Wic is loosely based on the Meego Image Creator (``mic``) framework. + The Wic implementation has been heavily modified to make direct use + of OpenEmbedded build artifacts instead of package installation and + configuration, which are already incorporated within the OpenEmbedded + artifacts. + +- Wic is a completely independent standalone utility that initially + provides easier-to-use and more flexible replacements for an existing + functionality in OE-Core's + :ref:`image-live ` + class. The difference between Wic and those examples is that with Wic + the functionality of those scripts is implemented by a + general-purpose partitioning language, which is based on Redhat + kickstart syntax. + +.. _wic-requirements: + +Requirements +------------ + +In order to use the Wic utility with the OpenEmbedded Build system, your +system needs to meet the following requirements: + +- The Linux distribution on your development host must support the + Yocto Project. See the ":ref:`detailed-supported-distros`" + section in the Yocto Project Reference Manual for the list of + distributions that support the Yocto Project. + +- The standard system utilities, such as ``cp``, must be installed on + your development host system. + +- You must have sourced the build environment setup script (i.e. + :ref:`structure-core-script`) found in the + :term:`Build Directory`. + +- You need to have the build artifacts already available, which + typically means that you must have already created an image using the + Openembedded build system (e.g. ``core-image-minimal``). While it + might seem redundant to generate an image in order to create an image + using Wic, the current version of Wic requires the artifacts in the + form generated by the OpenEmbedded build system. + +- You must build several native tools, which are built to run on the + build system: $ bitbake parted-native dosfstools-native mtools-native + +- Include "wic" as part of the + :term:`IMAGE_FSTYPES` + variable. + +- Include the name of the :ref:`wic kickstart file ` + as part of the :term:`WKS_FILE` variable + +.. _wic-getting-help: + +Getting Help +------------ + +You can get general help for the ``wic`` command by entering the ``wic`` +command by itself or by entering the command with a help argument as +follows: +:: + + $ wic -h + $ wic --help + $ wic help + +Currently, Wic supports seven commands: ``cp``, ``create``, ``help``, +``list``, ``ls``, ``rm``, and ``write``. You can get help for all these +commands except "help" by using the following form: +:: + + $ wic help command + +For example, the following command returns help for the ``write`` +command: +:: + + $ wic help write + +Wic supports help for three topics: ``overview``, ``plugins``, and +``kickstart``. You can get help for any topic using the following form: +:: + + $ wic help topic + +For example, the following returns overview help for Wic: +:: + + $ wic help overview + +One additional level of help exists for Wic. You can get help on +individual images through the ``list`` command. 
You can use the ``list`` +command to return the available Wic images as follows: +:: + + $ wic list images + genericx86 Create an EFI disk image for genericx86* + beaglebone-yocto Create SD card image for Beaglebone + edgerouter Create SD card image for Edgerouter + qemux86-directdisk Create a qemu machine 'pcbios' direct disk image + directdisk-gpt Create a 'pcbios' direct disk image + mkefidisk Create an EFI disk image + directdisk Create a 'pcbios' direct disk image + systemd-bootdisk Create an EFI disk image with systemd-boot + mkhybridiso Create a hybrid ISO image + sdimage-bootpart Create SD card image with a boot partition + directdisk-multi-rootfs Create multi rootfs image using rootfs plugin + directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config + +Once you know the list of available +Wic images, you can use ``help`` with the command to get help on a +particular image. For example, the following command returns help on the +"beaglebone-yocto" image: +:: + + $ wic list beaglebone-yocto help + + Creates a partitioned SD card image for Beaglebone. + Boot files are located in the first vfat partition. + +Operational Modes +----------------- + +You can use Wic in two different modes, depending on how much control +you need for specifying the Openembedded build artifacts that are used +for creating the image: Raw and Cooked: + +- *Raw Mode:* You explicitly specify build artifacts through Wic + command-line arguments. + +- *Cooked Mode:* The current + :term:`MACHINE` setting and image + name are used to automatically locate and provide the build + artifacts. You just supply a kickstart file and the name of the image + from which to use artifacts. + +Regardless of the mode you use, you need to have the build artifacts +ready and available. + +Raw Mode +~~~~~~~~ + +Running Wic in raw mode allows you to specify all the partitions through +the ``wic`` command line. The primary use for raw mode is if you have +built your kernel outside of the Yocto Project +:term:`Build Directory`. In other words, you +can point to arbitrary kernel, root filesystem locations, and so forth. +Contrast this behavior with cooked mode where Wic looks in the Build +Directory (e.g. ``tmp/deploy/images/``\ machine). + +The general form of the ``wic`` command in raw mode is: +:: + + $ wic create wks_file options ... + + Where: + + wks_file: + An OpenEmbedded kickstart file. You can provide + your own custom file or use a file from a set of + existing files as described by further options. + + optional arguments: + -h, --help show this help message and exit + -o OUTDIR, --outdir OUTDIR + name of directory to create image in + -e IMAGE_NAME, --image-name IMAGE_NAME + name of the image to use the artifacts from e.g. core- + image-sato + -r ROOTFS_DIR, --rootfs-dir ROOTFS_DIR + path to the /rootfs dir to use as the .wks rootfs + source + -b BOOTIMG_DIR, --bootimg-dir BOOTIMG_DIR + path to the dir containing the boot artifacts (e.g. + /EFI or /syslinux dirs) to use as the .wks bootimg + source + -k KERNEL_DIR, --kernel-dir KERNEL_DIR + path to the dir containing the kernel to use in the + .wks bootimg + -n NATIVE_SYSROOT, --native-sysroot NATIVE_SYSROOT + path to the native sysroot containing the tools to use + to build the image + -s, --skip-build-check + skip the build check + -f, --build-rootfs build rootfs + -c {gzip,bzip2,xz}, --compress-with {gzip,bzip2,xz} + compress image with specified compressor + -m, --bmap generate .bmap + --no-fstab-update Do not change fstab file. 
+ -v VARS_DIR, --vars VARS_DIR + directory with .env files that store bitbake + variables + -D, --debug output debug information + +.. note:: + + You do not need root privileges to run Wic. In fact, you should not + run as root when using the utility. + +Cooked Mode +~~~~~~~~~~~ + +Running Wic in cooked mode leverages off artifacts in the Build +Directory. In other words, you do not have to specify kernel or root +filesystem locations as part of the command. All you need to provide is +a kickstart file and the name of the image from which to use artifacts +by using the "-e" option. Wic looks in the Build Directory (e.g. +``tmp/deploy/images/``\ machine) for artifacts. + +The general form of the ``wic`` command using Cooked Mode is as follows: +:: + + $ wic create wks_file -e IMAGE_NAME + + Where: + + wks_file: + An OpenEmbedded kickstart file. You can provide + your own custom file or use a file from a set of + existing files provided with the Yocto Project + release. + + required argument: + -e IMAGE_NAME, --image-name IMAGE_NAME + name of the image to use the artifacts from e.g. core- + image-sato + +.. _using-a-provided-kickstart-file: + +Using an Existing Kickstart File +-------------------------------- + +If you do not want to create your own kickstart file, you can use an +existing file provided by the Wic installation. As shipped, kickstart +files can be found in the :ref:`overview-manual/overview-manual-development-environment:yocto project source repositories` in the +following two locations: +:: + + poky/meta-yocto-bsp/wic + poky/scripts/lib/wic/canned-wks + +Use the following command to list the available kickstart files: +:: + + $ wic list images + genericx86 Create an EFI disk image for genericx86* + beaglebone-yocto Create SD card image for Beaglebone + edgerouter Create SD card image for Edgerouter + qemux86-directdisk Create a qemu machine 'pcbios' direct disk image + directdisk-gpt Create a 'pcbios' direct disk image + mkefidisk Create an EFI disk image + directdisk Create a 'pcbios' direct disk image + systemd-bootdisk Create an EFI disk image with systemd-boot + mkhybridiso Create a hybrid ISO image + sdimage-bootpart Create SD card image with a boot partition + directdisk-multi-rootfs Create multi rootfs image using rootfs plugin + directdisk-bootloader-config Create a 'pcbios' direct disk image with custom bootloader config + +When you use an existing file, you +do not have to use the ``.wks`` extension. Here is an example in Raw +Mode that uses the ``directdisk`` file: +:: + + $ wic create directdisk -r rootfs_dir -b bootimg_dir \ + -k kernel_dir -n native_sysroot + +Here are the actual partition language commands used in the +``genericx86.wks`` file to generate an image: +:: + + # short-description: Create an EFI disk image for genericx86* + # long-description: Creates a partitioned EFI disk image for genericx86* machines + part /boot --source bootimg-efi --sourceparams="loader=grub-efi" --ondisk sda --label msdos --active --align 1024 + part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024 --use-uuid + part swap --ondisk sda --size 44 --label swap1 --fstype=swap + + bootloader --ptable gpt --timeout=5 --append="rootfstype=ext4 console=ttyS0,115200 console=tty0" + +.. _wic-using-the-wic-plugin-interface: + +Using the Wic Plugin Interface +------------------------------ + +You can extend and specialize Wic functionality by using Wic plugins. +This section explains the Wic plugin interface. + +.. 
note:: + + Wic plugins consist of "source" and "imager" plugins. Imager plugins + are beyond the scope of this section. + +Source plugins provide a mechanism to customize partition content during +the Wic image generation process. You can use source plugins to map +values that you specify using ``--source`` commands in kickstart files +(i.e. ``*.wks``) to a plugin implementation used to populate a given +partition. + +.. note:: + + If you use plugins that have build-time dependencies (e.g. native + tools, bootloaders, and so forth) when building a Wic image, you need + to specify those dependencies using the + WKS_FILE_DEPENDS + variable. + +Source plugins are subclasses defined in plugin files. As shipped, the +Yocto Project provides several plugin files. You can see the source +plugin files that ship with the Yocto Project +:yocto_git:`here `. +Each of these plugin files contains source plugins that are designed to +populate a specific Wic image partition. + +Source plugins are subclasses of the ``SourcePlugin`` class, which is +defined in the ``poky/scripts/lib/wic/pluginbase.py`` file. For example, +the ``BootimgEFIPlugin`` source plugin found in the ``bootimg-efi.py`` +file is a subclass of the ``SourcePlugin`` class, which is found in the +``pluginbase.py`` file. + +You can also implement source plugins in a layer outside of the Source +Repositories (external layer). To do so, be sure that your plugin files +are located in a directory whose path is +``scripts/lib/wic/plugins/source/`` within your external layer. When the +plugin files are located there, the source plugins they contain are made +available to Wic. + +When the Wic implementation needs to invoke a partition-specific +implementation, it looks for the plugin with the same name as the +``--source`` parameter used in the kickstart file given to that +partition. For example, if the partition is set up using the following +command in a kickstart file: +:: + + part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024 + +The methods defined as class +members of the matching source plugin (i.e. ``bootimg-pcbios``) in the +``bootimg-pcbios.py`` plugin file are used. + +To be more concrete, here is the corresponding plugin definition from +the ``bootimg-pcbios.py`` file for the previous command along with an +example method called by the Wic implementation when it needs to prepare +a partition using an implementation-specific function: +:: + + . + . + . + class BootimgPcbiosPlugin(SourcePlugin): + """ + Create MBR boot partition and install syslinux on it. + """ + + name = 'bootimg-pcbios' + . + . + . + @classmethod + def do_prepare_partition(cls, part, source_params, creator, cr_workdir, + oe_builddir, bootimg_dir, kernel_dir, + rootfs_dir, native_sysroot): + """ + Called to do the actual content population for a partition i.e. it + 'prepares' the partition to be incorporated into the image. + In this case, prepare content for legacy bios boot partition. + """ + . + . + . + +If a +subclass (plugin) itself does not implement a particular function, Wic +locates and uses the default version in the superclass. It is for this +reason that all source plugins are derived from the ``SourcePlugin`` +class. + +The ``SourcePlugin`` class defined in the ``pluginbase.py`` file defines +a set of methods that source plugins can implement or override. Any +plugins (subclass of ``SourcePlugin``) that do not implement a +particular method inherit the implementation of the method from the +``SourcePlugin`` class. 
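+
+To make that structure concrete, here is a minimal sketch of a custom
+source plugin as it might appear in an external layer. The layer path,
+the plugin name ``mydata``, and the way the prepared image is handed
+back through ``part.source_file`` are illustrative assumptions patterned
+after the shipped plugins rather than a definitive implementation:
+::
+
+   # meta-mylayer/scripts/lib/wic/plugins/source/mydata.py (hypothetical)
+   import os
+
+   from wic.pluginbase import SourcePlugin
+
+   class MyDataPlugin(SourcePlugin):
+       """
+       Populate a partition from pre-built content.
+       """
+
+       # Matched against "--source mydata" in the kickstart (.wks) file
+       name = 'mydata'
+
+       @classmethod
+       def do_prepare_partition(cls, part, source_params, creator, cr_workdir,
+                                oe_builddir, bootimg_dir, kernel_dir,
+                                rootfs_dir, native_sysroot):
+           """
+           Build the partition image under cr_workdir and hand it to Wic.
+           """
+           img_path = os.path.join(cr_workdir, "mydata.%d.img" % part.lineno)
+           # ... populate img_path here, for example by running mkfs and
+           # mcopy from the native sysroot ...
+           part.source_file = img_path
+
+A partition would then select this plugin in a kickstart file with a
+line such as ``part /data --source mydata --ondisk sda --fstype=ext4
+--align 1024``.
+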
For more information, see the ``SourcePlugin`` +class in the ``pluginbase.py`` file for details: + +The following list describes the methods implemented in the +``SourcePlugin`` class: + +- ``do_prepare_partition()``: Called to populate a partition with + actual content. In other words, the method prepares the final + partition image that is incorporated into the disk image. + +- ``do_configure_partition()``: Called before + ``do_prepare_partition()`` to create custom configuration files for a + partition (e.g. syslinux or grub configuration files). + +- ``do_install_disk()``: Called after all partitions have been + prepared and assembled into a disk image. This method provides a hook + to allow finalization of a disk image (e.g. writing an MBR). + +- ``do_stage_partition()``: Special content-staging hook called + before ``do_prepare_partition()``. This method is normally empty. + + Typically, a partition just uses the passed-in parameters (e.g. the + unmodified value of ``bootimg_dir``). However, in some cases, things + might need to be more tailored. As an example, certain files might + additionally need to be taken from ``bootimg_dir + /boot``. This hook + allows those files to be staged in a customized fashion. + + .. note:: + + get_bitbake_var() + allows you to access non-standard variables that you might want to + use for this behavior. + +You can extend the source plugin mechanism. To add more hooks, create +more source plugin methods within ``SourcePlugin`` and the corresponding +derived subclasses. The code that calls the plugin methods uses the +``plugin.get_source_plugin_methods()`` function to find the method or +methods needed by the call. Retrieval of those methods is accomplished +by filling up a dict with keys that contain the method names of +interest. On success, these will be filled in with the actual methods. +See the Wic implementation for examples and details. + +.. _wic-usage-examples: + +Wic Examples +------------ + +This section provides several examples that show how to use the Wic +utility. All the examples assume the list of requirements in the +"`Requirements <#wic-requirements>`__" section have been met. The +examples assume the previously generated image is +``core-image-minimal``. + +.. _generate-an-image-using-a-provided-kickstart-file: + +Generate an Image using an Existing Kickstart File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This example runs in Cooked Mode and uses the ``mkefidisk`` kickstart +file: +:: + + $ wic create mkefidisk -e core-image-minimal + INFO: Building wic-tools... + . + . + . + INFO: The new image(s) can be found here: + ./mkefidisk-201804191017-sda.direct + + The following build artifacts were used to create the image(s): + ROOTFS_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs + BOOTIMG_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share + KERNEL_DIR: /home/stephano/build/master/build/tmp-glibc/deploy/images/qemux86 + NATIVE_SYSROOT: /home/stephano/build/master/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native + + INFO: The image(s) were created using OE kickstart file: + /home/stephano/build/master/openembedded-core/scripts/lib/wic/canned-wks/mkefidisk.wks + +The previous example shows the easiest way to create an image by running +in cooked mode and supplying a kickstart file and the "-e" option to +point to the existing build artifacts. 
Your ``local.conf`` file needs to +have the :term:`MACHINE` variable set +to the machine you are using, which is "qemux86" in this example. + +Once the image builds, the output provides image location, artifact use, +and kickstart file information. + +.. note:: + + You should always verify the details provided in the output to make + sure that the image was indeed created exactly as expected. + +Continuing with the example, you can now write the image from the Build +Directory onto a USB stick, or whatever media for which you built your +image, and boot from the media. You can write the image by using +``bmaptool`` or ``dd``: +:: + + $ oe-run-native bmaptool copy mkefidisk-201804191017-sda.direct /dev/sdX + +or :: + + $ sudo dd if=mkefidisk-201804191017-sda.direct of=/dev/sdX + +.. note:: + + For more information on how to use the + bmaptool + to flash a device with an image, see the " + Flashing Images Using + bmaptool + " section. + +Using a Modified Kickstart File +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Because partitioned image creation is driven by the kickstart file, it +is easy to affect image creation by changing the parameters in the file. +This next example demonstrates that through modification of the +``directdisk-gpt`` kickstart file. + +As mentioned earlier, you can use the command ``wic list images`` to +show the list of existing kickstart files. The directory in which the +``directdisk-gpt.wks`` file resides is +``scripts/lib/image/canned-wks/``, which is located in the +:term:`Source Directory` (e.g. ``poky``). +Because available files reside in this directory, you can create and add +your own custom files to the directory. Subsequent use of the +``wic list images`` command would then include your kickstart files. + +In this example, the existing ``directdisk-gpt`` file already does most +of what is needed. However, for the hardware in this example, the image +will need to boot from ``sdb`` instead of ``sda``, which is what the +``directdisk-gpt`` kickstart file uses. + +The example begins by making a copy of the ``directdisk-gpt.wks`` file +in the ``scripts/lib/image/canned-wks`` directory and then by changing +the lines that specify the target disk from which to boot. +:: + + $ cp /home/stephano/poky/scripts/lib/wic/canned-wks/directdisk-gpt.wks \ + /home/stephano/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks + +Next, the example modifies the ``directdisksdb-gpt.wks`` file and +changes all instances of "``--ondisk sda``" to "``--ondisk sdb``". The +example changes the following two lines and leaves the remaining lines +untouched: +:: + + part /boot --source bootimg-pcbios --ondisk sdb --label boot --active --align 1024 + part / --source rootfs --ondisk sdb --fstype=ext4 --label platform --align 1024 --use-uuid + +Once the lines are changed, the +example generates the ``directdisksdb-gpt`` image. The command points +the process at the ``core-image-minimal`` artifacts for the Next Unit of +Computing (nuc) :term:`MACHINE` the +``local.conf``. +:: + + $ wic create directdisksdb-gpt -e core-image-minimal + INFO: Building wic-tools... + . + . + . + Initialising tasks: 100% |#######################################| Time: 0:00:01 + NOTE: Executing SetScene Tasks + NOTE: Executing RunQueue Tasks + NOTE: Tasks Summary: Attempted 1161 tasks of which 1157 didn't need to be rerun and all succeeded. + INFO: Creating image(s)... 
+ + INFO: The new image(s) can be found here: + ./directdisksdb-gpt-201710090938-sdb.direct + + The following build artifacts were used to create the image(s): + ROOTFS_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs + BOOTIMG_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share + KERNEL_DIR: /home/stephano/build/master/build/tmp-glibc/deploy/images/qemux86 + NATIVE_SYSROOT: /home/stephano/build/master/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native + + INFO: The image(s) were created using OE kickstart file: + /home/stephano/poky/scripts/lib/wic/canned-wks/directdisksdb-gpt.wks + +Continuing with the example, you can now directly ``dd`` the image to a +USB stick, or whatever media for which you built your image, and boot +the resulting media: +:: + + $ sudo dd if=directdisksdb-gpt-201710090938-sdb.direct of=/dev/sdb + 140966+0 records in + 140966+0 records out + 72174592 bytes (72 MB, 69 MiB) copied, 78.0282 s, 925 kB/s + $ sudo eject /dev/sdb + +Using a Modified Kickstart File and Running in Raw Mode +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This next example manually specifies each build artifact (runs in Raw +Mode) and uses a modified kickstart file. The example also uses the +``-o`` option to cause Wic to create the output somewhere other than the +default output directory, which is the current directory: +:: + + $ wic create /home/stephano/my_yocto/test.wks -o /home/stephano/testwic \ + --rootfs-dir /home/stephano/build/master/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/rootfs \ + --bootimg-dir /home/stephano/build/master/build/tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share \ + --kernel-dir /home/stephano/build/master/build/tmp/deploy/images/qemux86 \ + --native-sysroot /home/stephano/build/master/build/tmp/work/i586-poky-linux/wic-tools/1.0-r0/recipe-sysroot-native + + INFO: Creating image(s)... + + INFO: The new image(s) can be found here: + /home/stephano/testwic/test-201710091445-sdb.direct + + The following build artifacts were used to create the image(s): + ROOTFS_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/rootfs + BOOTIMG_DIR: /home/stephano/build/master/build/tmp-glibc/work/qemux86-oe-linux/core-image-minimal/1.0-r0/recipe-sysroot/usr/share + KERNEL_DIR: /home/stephano/build/master/build/tmp-glibc/deploy/images/qemux86 + NATIVE_SYSROOT: /home/stephano/build/master/build/tmp-glibc/work/i586-oe-linux/wic-tools/1.0-r0/recipe-sysroot-native + + INFO: The image(s) were created using OE kickstart file: + /home/stephano/my_yocto/test.wks + +For this example, +:term:`MACHINE` did not have to be +specified in the ``local.conf`` file since the artifact is manually +specified. + +Using Wic to Manipulate an Image +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Wic image manipulation allows you to shorten turnaround time during +image development. For example, you can use Wic to delete the kernel +partition of a Wic image and then insert a newly built kernel. This +saves you time from having to rebuild the entire image each time you +modify the kernel. + +.. note:: + + In order to use Wic to manipulate a Wic image as in this example, + your development machine must have the + mtools + package installed. + +The following example examines the contents of the Wic image, deletes +the existing kernel, and then inserts a new kernel: + +1. 
*List the Partitions:* Use the ``wic ls`` command to list all the + partitions in the Wic image: + :: + + $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic + Num Start End Size Fstype + 1 1048576 25041919 23993344 fat16 + 2 25165824 72157183 46991360 ext4 + + The previous output shows two partitions in the + ``core-image-minimal-qemux86.wic`` image. + +2. *Examine a Particular Partition:* Use the ``wic ls`` command again + but in a different form to examine a particular partition. + + .. note:: + + You can get command usage on any Wic command using the following + form: + :: + + $ wic help command + + + For example, the following command shows you the various ways to + use the + wic ls + command: + :: + + $ wic help ls + + + The following command shows what is in Partition one: + :: + + $ wic ls tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1 + Volume in drive : is boot + Volume Serial Number is E894-1809 + Directory for ::/ + + libcom32 c32 186500 2017-10-09 16:06 + libutil c32 24148 2017-10-09 16:06 + syslinux cfg 220 2017-10-09 16:06 + vesamenu c32 27104 2017-10-09 16:06 + vmlinuz 6904608 2017-10-09 16:06 + 5 files 7 142 580 bytes + 16 582 656 bytes free + + The previous output shows five files, with the + ``vmlinuz`` being the kernel. + + .. note:: + + If you see the following error, you need to update or create a + ~/.mtoolsrc + file and be sure to have the line "mtools_skip_check=1" in the + file. Then, run the Wic command again: + :: + + ERROR: _exec_cmd: /usr/bin/mdir -i /tmp/wic-parttfokuwra ::/ returned '1' instead of 0 + output: Total number of sectors (47824) not a multiple of sectors per track (32)! + Add mtools_skip_check=1 to your .mtoolsrc file to skip this test + + +3. *Remove the Old Kernel:* Use the ``wic rm`` command to remove the + ``vmlinuz`` file (kernel): + :: + + $ wic rm tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz + +4. *Add In the New Kernel:* Use the ``wic cp`` command to add the + updated kernel to the Wic image. Depending on how you built your + kernel, it could be in different places. If you used ``devtool`` and + an SDK to build your kernel, it resides in the ``tmp/work`` directory + of the extensible SDK. If you used ``make`` to build the kernel, the + kernel will be in the ``workspace/sources`` area. + + The following example assumes ``devtool`` was used to build the + kernel: + :: + + cp ~/poky_sdk/tmp/work/qemux86-poky-linux/linux-yocto/4.12.12+git999-r0/linux-yocto-4.12.12+git999/arch/x86/boot/bzImage \ + ~/poky/build/tmp/deploy/images/qemux86/core-image-minimal-qemux86.wic:1/vmlinuz + + Once the new kernel is added back into the image, you can use the + ``dd`` command or ```bmaptool`` <#flashing-images-using-bmaptool>`__ + to flash your wic image onto an SD card or USB stick and test your + target. + + .. note:: + + Using + bmaptool + is generally 10 to 20 times faster than using + dd + . + +Flashing Images Using ``bmaptool`` +================================== + +A fast and easy way to flash an image to a bootable device is to use +Bmaptool, which is integrated into the OpenEmbedded build system. +Bmaptool is a generic tool that creates a file's block map (bmap) and +then uses that map to copy the file. As compared to traditional tools +such as dd or cp, Bmaptool can copy (or flash) large files like raw +system image files much faster. + +.. 
note:: + + - If you are using Ubuntu or Debian distributions, you can install + the ``bmap-tools`` package using the following command and then + use the tool without specifying ``PATH`` even from the root + account: $ sudo apt-get install bmap-tools + + - If you are unable to install the ``bmap-tools`` package, you will + need to build Bmaptool before using it. Use the following command: + $ bitbake bmap-tools-native + +Following, is an example that shows how to flash a Wic image. Realize +that while this example uses a Wic image, you can use Bmaptool to flash +any type of image. Use these steps to flash an image using Bmaptool: + +1. *Update your local.conf File:* You need to have the following set + in your ``local.conf`` file before building your image: + :: + + IMAGE_FSTYPES += "wic wic.bmap" + +2. *Get Your Image:* Either have your image ready (pre-built with the + :term:`IMAGE_FSTYPES` + setting previously mentioned) or take the step to build the image: + :: + + $ bitbake image + +3. *Flash the Device:* Flash the device with the image by using Bmaptool + depending on your particular setup. The following commands assume the + image resides in the Build Directory's ``deploy/images/`` area: + + - If you have write access to the media, use this command form: + :: + + $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX + + - If you do not have write access to the media, set your permissions + first and then use the same command form: + :: + + $ sudo chmod 666 /dev/sdX + $ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX + +For help on the ``bmaptool`` command, use the following command: +:: + + $ bmaptool --help + +Making Images More Secure +========================= + +Security is of increasing concern for embedded devices. Consider the +issues and problems discussed in just this sampling of work found across +the Internet: + +- *"*\ `Security Risks of Embedded + Systems `__\ *"* + by Bruce Schneier + +- *"*\ `Internet Census + 2012 `__\ *"* by Carna + Botnet + +- *"*\ `Security Issues for Embedded + Devices `__\ *"* + by Jake Edge + +When securing your image is of concern, there are steps, tools, and +variables that you can consider to help you reach the security goals you +need for your particular device. Not all situations are identical when +it comes to making an image secure. Consequently, this section provides +some guidance and suggestions for consideration when you want to make +your image more secure. + +.. note:: + + Because the security requirements and risks are different for every + type of device, this section cannot provide a complete reference on + securing your custom OS. It is strongly recommended that you also + consult other sources of information on embedded Linux system + hardening and on security. + +General Considerations +---------------------- + +General considerations exist that help you create more secure images. +You should consider the following suggestions to help make your device +more secure: + +- Scan additional code you are adding to the system (e.g. application + code) by using static analysis tools. Look for buffer overflows and + other potential security problems. + +- Pay particular attention to the security for any web-based + administration interface. + + Web interfaces typically need to perform administrative functions and + tend to need to run with elevated privileges. 
Thus, the consequences + resulting from the interface's security becoming compromised can be + serious. Look for common web vulnerabilities such as + cross-site-scripting (XSS), unvalidated inputs, and so forth. + + As with system passwords, the default credentials for accessing a + web-based interface should not be the same across all devices. This + is particularly true if the interface is enabled by default as it can + be assumed that many end-users will not change the credentials. + +- Ensure you can update the software on the device to mitigate + vulnerabilities discovered in the future. This consideration + especially applies when your device is network-enabled. + +- Ensure you remove or disable debugging functionality before producing + the final image. For information on how to do this, see the + "`Considerations Specific to the OpenEmbedded Build + System <#considerations-specific-to-the-openembedded-build-system>`__" + section. + +- Ensure you have no network services listening that are not needed. + +- Remove any software from the image that is not needed. + +- Enable hardware support for secure boot functionality when your + device supports this functionality. + +Security Flags +-------------- + +The Yocto Project has security flags that you can enable that help make +your build output more secure. The security flags are in the +``meta/conf/distro/include/security_flags.inc`` file in your +:term:`Source Directory` (e.g. ``poky``). + +.. note:: + + Depending on the recipe, certain security flags are enabled and + disabled by default. + +Use the following line in your ``local.conf`` file or in your custom +distribution configuration file to enable the security compiler and +linker flags for your build: +:: + + require conf/distro/include/security_flags.inc + +Considerations Specific to the OpenEmbedded Build System +-------------------------------------------------------- + +You can take some steps that are specific to the OpenEmbedded build +system to make your images more secure: + +- Ensure "debug-tweaks" is not one of your selected + :term:`IMAGE_FEATURES`. + When creating a new project, the default is to provide you with an + initial ``local.conf`` file that enables this feature using the + :term:`EXTRA_IMAGE_FEATURES` + variable with the line: + :: + + EXTRA_IMAGE_FEATURES = "debug-tweaks" + + To disable that feature, simply comment out that line in your + ``local.conf`` file, or make sure ``IMAGE_FEATURES`` does not contain + "debug-tweaks" before producing your final image. Among other things, + leaving this in place sets the root password as blank, which makes + logging in for debugging or inspection easy during development but + also means anyone can easily log in during production. + +- It is possible to set a root password for the image and also to set + passwords for any extra users you might add (e.g. administrative or + service type users). When you set up passwords for multiple images or + users, you should not duplicate passwords. + + To set up passwords, use the + :ref:`extrausers ` + class, which is the preferred method. For an example on how to set up + both root and user passwords, see the + ":ref:`extrausers.bbclass `" + section. + + .. note:: + + When adding extra user accounts or setting a root password, be + cautious about setting the same password on every device. If you + do this, and the password you have set is exposed, then every + device is now potentially compromised. 
If you need this access but + want to ensure security, consider setting a different, random + password for each device. Typically, you do this as a separate + step after you deploy the image onto the device. + +- Consider enabling a Mandatory Access Control (MAC) framework such as + SMACK or SELinux and tuning it appropriately for your device's usage. + You can find more information in the + `meta-selinux `__ + layer. + +Tools for Hardening Your Image +------------------------------ + +The Yocto Project provides tools for making your image more secure. You +can find these tools in the ``meta-security`` layer of the +:yocto_git:`Yocto Project Source Repositories <>`. + +Creating Your Own Distribution +============================== + +When you build an image using the Yocto Project and do not alter any +distribution :term:`Metadata`, you are +creating a Poky distribution. If you wish to gain more control over +package alternative selections, compile-time options, and other +low-level configurations, you can create your own distribution. + +To create your own distribution, the basic steps consist of creating +your own distribution layer, creating your own distribution +configuration file, and then adding any needed code and Metadata to the +layer. The following steps provide some more detail: + +- *Create a layer for your new distro:* Create your distribution layer + so that you can keep your Metadata and code for the distribution + separate. It is strongly recommended that you create and use your own + layer for configuration and code. Using your own layer as compared to + just placing configurations in a ``local.conf`` configuration file + makes it easier to reproduce the same build configuration when using + multiple build machines. See the + ":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`" + section for information on how to quickly set up a layer. + +- *Create the distribution configuration file:* The distribution + configuration file needs to be created in the ``conf/distro`` + directory of your layer. You need to name it using your distribution + name (e.g. ``mydistro.conf``). + + .. note:: + + The + DISTRO + variable in your + local.conf + file determines the name of your distribution. + + You can split out parts of your configuration file into include files + and then "require" them from within your distribution configuration + file. Be sure to place the include files in the + ``conf/distro/include`` directory of your layer. A common example + usage of include files would be to separate out the selection of + desired version and revisions for individual recipes. + + Your configuration file needs to set the following required + variables: + + - :term:`DISTRO_NAME` + + - :term:`DISTRO_VERSION` + + These following variables are optional and you typically set them + from the distribution configuration file: + + - :term:`DISTRO_FEATURES` + + - :term:`DISTRO_EXTRA_RDEPENDS` + + - :term:`DISTRO_EXTRA_RRECOMMENDS` + + - :term:`TCLIBC` + + .. tip:: + + If you want to base your distribution configuration file on the + very basic configuration from OE-Core, you can use + conf/distro/defaultsetup.conf + as a reference and just include variables that differ as compared + to + defaultsetup.conf + . Alternatively, you can create a distribution configuration file + from scratch using the + defaultsetup.conf + file or configuration files from other distributions such as Poky + or Angstrom as references. 
+ +- *Provide miscellaneous variables:* Be sure to define any other + variables for which you want to create a default or enforce as part + of the distribution configuration. You can include nearly any + variable from the ``local.conf`` file. The variables you use are not + limited to the list in the previous bulleted item. + +- *Point to Your distribution configuration file:* In your + ``local.conf`` file in the :term:`Build Directory`, + set your + :term:`DISTRO` variable to point to + your distribution's configuration file. For example, if your + distribution's configuration file is named ``mydistro.conf``, then + you point to it as follows: + :: + + DISTRO = "mydistro" + +- *Add more to the layer if necessary:* Use your layer to hold other + information needed for the distribution: + + - Add recipes for installing distro-specific configuration files + that are not already installed by another recipe. If you have + distro-specific configuration files that are included by an + existing recipe, you should add an append file (``.bbappend``) for + those. For general information and recommendations on how to add + recipes to your layer, see the "`Creating Your Own + Layer <#creating-your-own-layer>`__" and "`Following Best + Practices When Creating + Layers <#best-practices-to-follow-when-creating-layers>`__" + sections. + + - Add any image recipes that are specific to your distribution. + + - Add a ``psplash`` append file for a branded splash screen. For + information on append files, see the "`Using .bbappend Files in + Your Layer <#using-bbappend-files>`__" section. + + - Add any other append files to make custom changes that are + specific to individual recipes. + +Creating a Custom Template Configuration Directory +================================================== + +If you are producing your own customized version of the build system for +use by other users, you might want to customize the message shown by the +setup script or you might want to change the template configuration +files (i.e. ``local.conf`` and ``bblayers.conf``) that are created in a +new build directory. + +The OpenEmbedded build system uses the environment variable +``TEMPLATECONF`` to locate the directory from which it gathers +configuration information that ultimately ends up in the +:term:`Build Directory` ``conf`` directory. +By default, ``TEMPLATECONF`` is set as follows in the ``poky`` +repository: +:: + + TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf} + +This is the +directory used by the build system to find templates from which to build +some key configuration files. If you look at this directory, you will +see the ``bblayers.conf.sample``, ``local.conf.sample``, and +``conf-notes.txt`` files. The build system uses these files to form the +respective ``bblayers.conf`` file, ``local.conf`` file, and display the +list of BitBake targets when running the setup script. + +To override these default configuration files with configurations you +want used within every new Build Directory, simply set the +``TEMPLATECONF`` variable to your directory. The ``TEMPLATECONF`` +variable is set in the ``.templateconf`` file, which is in the top-level +:term:`Source Directory` folder +(e.g. ``poky``). Edit the ``.templateconf`` so that it can locate your +directory. + +Best practices dictate that you should keep your template configuration +directory in your custom distribution layer. 
For example, suppose you +have a layer named ``meta-mylayer`` located in your home directory and +you want your template configuration directory named ``myconf``. +Changing the ``.templateconf`` as follows causes the OpenEmbedded build +system to look in your directory and base its configuration files on the +``*.sample`` configuration files it finds. The final configuration files +(i.e. ``local.conf`` and ``bblayers.conf`` ultimately still end up in +your Build Directory, but they are based on your ``*.sample`` files. +:: + + TEMPLATECONF=${TEMPLATECONF:-meta-mylayer/myconf} + +Aside from the ``*.sample`` configuration files, the ``conf-notes.txt`` +also resides in the default ``meta-poky/conf`` directory. The script +that sets up the build environment (i.e. +:ref:`structure-core-script`) uses this file to +display BitBake targets as part of the script output. Customizing this +``conf-notes.txt`` file is a good way to make sure your list of custom +targets appears as part of the script's output. + +Here is the default list of targets displayed as a result of running +either of the setup scripts: +:: + + You can now run 'bitbake ' + + Common targets are: + core-image-minimal + core-image-sato + meta-toolchain + meta-ide-support + +Changing the listed common targets is as easy as editing your version of +``conf-notes.txt`` in your custom template configuration directory and +making sure you have ``TEMPLATECONF`` set to your directory. + +.. _dev-saving-memory-during-a-build: + +Conserving Disk Space During Builds +=================================== + +To help conserve disk space during builds, you can add the following +statement to your project's ``local.conf`` configuration file found in +the :term:`Build Directory`: +:: + + INHERIT += "rm_work" + +Adding this statement deletes the work directory used for +building a recipe once the recipe is built. For more information on +"rm_work", see the +:ref:`rm_work ` class in the +Yocto Project Reference Manual. + +Working with Packages +===================== + +This section describes a few tasks that involve packages: + +- `Excluding packages from an + image <#excluding-packages-from-an-image>`__ + +- `Incrementing a binary package + version <#incrementing-a-binary-package-version>`__ + +- `Handling optional module + packaging <#handling-optional-module-packaging>`__ + +- `Using runtime package + management <#using-runtime-package-management>`__ + +- `Generating and using signed + packages <#generating-and-using-signed-packages>`__ + +- `Setting up and running package test + (ptest) <#testing-packages-with-ptest>`__ + +- `Creating node package manager (NPM) + packages <#creating-node-package-manager-npm-packages>`__ + +- `Adding custom metadata to + packages <#adding-custom-metadata-to-packages>`__ + +Excluding Packages from an Image +-------------------------------- + +You might find it necessary to prevent specific packages from being +installed into an image. If so, you can use several variables to direct +the build system to essentially ignore installing recommended packages +or to not install a package at all. + +The following list introduces variables you can use to prevent packages +from being installed into your image. Each of these variables only works +with IPK and RPM package types. Support for Debian packages does not +exist. Also, you can use these variables from your ``local.conf`` file +or attach them to a specific image recipe by using a recipe name +override. 
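+
+For instance, here is a short sketch showing both styles, using two of
+the variables described below; the package and image names are only
+placeholders:
+::
+
+   # In local.conf: applies to every image built with this configuration
+   PACKAGE_EXCLUDE = "perl"
+
+   # In local.conf: limited to one image recipe via a recipe name override
+   BAD_RECOMMENDATIONS_pn-core-image-minimal = "udev-hwdb"
+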
For more detail on the variables, see the descriptions in the +Yocto Project Reference Manual's glossary chapter. + +- :term:`BAD_RECOMMENDATIONS`: + Use this variable to specify "recommended-only" packages that you do + not want installed. + +- :term:`NO_RECOMMENDATIONS`: + Use this variable to prevent all "recommended-only" packages from + being installed. + +- :term:`PACKAGE_EXCLUDE`: + Use this variable to prevent specific packages from being installed + regardless of whether they are "recommended-only" or not. You need to + realize that the build process could fail with an error when you + prevent the installation of a package whose presence is required by + an installed package. + +.. _incrementing-a-binary-package-version: + +Incrementing a Package Version +------------------------------ + +This section provides some background on how binary package versioning +is accomplished and presents some of the services, variables, and +terminology involved. + +In order to understand binary package versioning, you need to consider +the following: + +- Binary Package: The binary package that is eventually built and + installed into an image. + +- Binary Package Version: The binary package version is composed of two + components - a version and a revision. + + .. note:: + + Technically, a third component, the "epoch" (i.e. + PE + ) is involved but this discussion for the most part ignores + PE + . + + The version and revision are taken from the + :term:`PV` and + :term:`PR` variables, respectively. + +- ``PV``: The recipe version. ``PV`` represents the version of the + software being packaged. Do not confuse ``PV`` with the binary + package version. + +- ``PR``: The recipe revision. + +- :term:`SRCPV`: The OpenEmbedded + build system uses this string to help define the value of ``PV`` when + the source code revision needs to be included in it. + +- :yocto_wiki:`PR Service `: A + network-based service that helps automate keeping package feeds + compatible with existing package manager applications such as RPM, + APT, and OPKG. + +Whenever the binary package content changes, the binary package version +must change. Changing the binary package version is accomplished by +changing or "bumping" the ``PR`` and/or ``PV`` values. Increasing these +values occurs one of two ways: + +- Automatically using a Package Revision Service (PR Service). + +- Manually incrementing the ``PR`` and/or ``PV`` variables. + +Given a primary challenge of any build system and its users is how to +maintain a package feed that is compatible with existing package manager +applications such as RPM, APT, and OPKG, using an automated system is +much preferred over a manual system. In either system, the main +requirement is that binary package version numbering increases in a +linear fashion and that a number of version components exist that +support that linear progression. For information on how to ensure +package revisioning remains linear, see the "`Automatically Incrementing +a Binary Package Revision +Number <#automatically-incrementing-a-binary-package-revision-number>`__" +section. + +The following three sections provide related information on the PR +Service, the manual method for "bumping" ``PR`` and/or ``PV``, and on +how to ensure binary package revisioning remains linear. + +Working With a PR Service +~~~~~~~~~~~~~~~~~~~~~~~~~ + +As mentioned, attempting to maintain revision numbers in the +:term:`Metadata` is error prone, inaccurate, +and causes problems for people submitting recipes. 
Conversely, the PR +Service automatically generates increasing numbers, particularly the +revision field, which removes the human element. + +.. note:: + + For additional information on using a PR Service, you can see the + PR Service + wiki page. + +The Yocto Project uses variables in order of decreasing priority to +facilitate revision numbering (i.e. +:term:`PE`, +:term:`PV`, and +:term:`PR` for epoch, version, and +revision, respectively). The values are highly dependent on the policies +and procedures of a given distribution and package feed. + +Because the OpenEmbedded build system uses +":ref:`signatures `", which are +unique to a given build, the build system knows when to rebuild +packages. All the inputs into a given task are represented by a +signature, which can trigger a rebuild when different. Thus, the build +system itself does not rely on the ``PR``, ``PV``, and ``PE`` numbers to +trigger a rebuild. The signatures, however, can be used to generate +these values. + +The PR Service works with both ``OEBasic`` and ``OEBasicHash`` +generators. The value of ``PR`` bumps when the checksum changes and the +different generator mechanisms change signatures under different +circumstances. + +As implemented, the build system includes values from the PR Service +into the ``PR`` field as an addition using the form "``.x``" so ``r0`` +becomes ``r0.1``, ``r0.2`` and so forth. This scheme allows existing +``PR`` values to be used for whatever reasons, which include manual +``PR`` bumps, should it be necessary. + +By default, the PR Service is not enabled or running. Thus, the packages +generated are just "self consistent". The build system adds and removes +packages and there are no guarantees about upgrade paths but images will +be consistent and correct with the latest changes. + +The simplest form for a PR Service is for it to exist for a single host +development system that builds the package feed (building system). For +this scenario, you can enable a local PR Service by setting +:term:`PRSERV_HOST` in your +``local.conf`` file in the :term:`Build Directory`: +:: + + PRSERV_HOST = "localhost:0" + +Once the service is started, packages will automatically +get increasing ``PR`` values and BitBake takes care of starting and +stopping the server. + +If you have a more complex setup where multiple host development systems +work against a common, shared package feed, you have a single PR Service +running and it is connected to each building system. For this scenario, +you need to start the PR Service using the ``bitbake-prserv`` command: +:: + + bitbake-prserv --host ip --port port --start + +In addition to +hand-starting the service, you need to update the ``local.conf`` file of +each building system as described earlier so each system points to the +server and port. + +It is also recommended you use build history, which adds some sanity +checks to binary package versions, in conjunction with the server that +is running the PR Service. To enable build history, add the following to +each building system's ``local.conf`` file: +:: + + # It is recommended to activate "buildhistory" for testing the PR service + INHERIT += "buildhistory" + BUILDHISTORY_COMMIT = "1" + +For information on build +history, see the "`Maintaining Build Output +Quality <#maintaining-build-output-quality>`__" section. + +.. note:: + + The OpenEmbedded build system does not maintain ``PR`` information as + part of the shared state (sstate) packages. 
If you maintain an sstate + feed, its expected that either all your building systems that + contribute to the sstate feed use a shared PR Service, or you do not + run a PR Service on any of your building systems. Having some systems + use a PR Service while others do not leads to obvious problems. + + For more information on shared state, see the + ":ref:`overview-manual/overview-manual-concepts:shared state cache`" + section in the Yocto Project Overview and Concepts Manual. + +Manually Bumping PR +~~~~~~~~~~~~~~~~~~~ + +The alternative to setting up a PR Service is to manually "bump" the +:term:`PR` variable. + +If a committed change results in changing the package output, then the +value of the PR variable needs to be increased (or "bumped") as part of +that commit. For new recipes you should add the ``PR`` variable and set +its initial value equal to "r0", which is the default. Even though the +default value is "r0", the practice of adding it to a new recipe makes +it harder to forget to bump the variable when you make changes to the +recipe in future. + +If you are sharing a common ``.inc`` file with multiple recipes, you can +also use the ``INC_PR`` variable to ensure that the recipes sharing the +``.inc`` file are rebuilt when the ``.inc`` file itself is changed. The +``.inc`` file must set ``INC_PR`` (initially to "r0"), and all recipes +referring to it should set ``PR`` to "${INC_PR}.0" initially, +incrementing the last number when the recipe is changed. If the ``.inc`` +file is changed then its ``INC_PR`` should be incremented. + +When upgrading the version of a binary package, assuming the ``PV`` +changes, the ``PR`` variable should be reset to "r0" (or "${INC_PR}.0" +if you are using ``INC_PR``). + +Usually, version increases occur only to binary packages. However, if +for some reason ``PV`` changes but does not increase, you can increase +the ``PE`` variable (Package Epoch). The ``PE`` variable defaults to +"0". + +Binary package version numbering strives to follow the `Debian Version +Field Policy +Guidelines `__. +These guidelines define how versions are compared and what "increasing" +a version means. + +.. _automatically-incrementing-a-binary-package-revision-number: + +Automatically Incrementing a Package Version Number +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When fetching a repository, BitBake uses the +:term:`SRCREV` variable to determine +the specific source code revision from which to build. You set the +``SRCREV`` variable to +:term:`AUTOREV` to cause the +OpenEmbedded build system to automatically use the latest revision of +the software: +:: + + SRCREV = "${AUTOREV}" + +Furthermore, you need to reference ``SRCPV`` in ``PV`` in order to +automatically update the version whenever the revision of the source +code changes. Here is an example: +:: + + PV = "1.0+git${SRCPV}" + +The OpenEmbedded build system substitutes ``SRCPV`` with the following: +:: + + AUTOINC+source_code_revision + +The build system replaces the ``AUTOINC`` +with a number. The number used depends on the state of the PR Service: + +- If PR Service is enabled, the build system increments the number, + which is similar to the behavior of + :term:`PR`. This behavior results in + linearly increasing package versions, which is desirable. Here is an + example: + :: + + hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk + hello-world-git_0.0+git1+dd2f5c3565-r0.0_armv7a-neon.ipk + +- If PR Service is not enabled, the build system replaces the + ``AUTOINC`` placeholder with zero (i.e. 
"0"). This results in + changing the package version since the source revision is included. + However, package versions are not increased linearly. Here is an + example: + :: + + hello-world-git_0.0+git0+b6558dd387-r0.0_armv7a-neon.ipk + hello-world-git_0.0+git0+dd2f5c3565-r0.0_armv7a-neon.ipk + +In summary, the OpenEmbedded build system does not track the history of +binary package versions for this purpose. ``AUTOINC``, in this case, is +comparable to ``PR``. If PR server is not enabled, ``AUTOINC`` in the +package version is simply replaced by "0". If PR server is enabled, the +build system keeps track of the package versions and bumps the number +when the package revision changes. + +Handling Optional Module Packaging +---------------------------------- + +Many pieces of software split functionality into optional modules (or +plugins) and the plugins that are built might depend on configuration +options. To avoid having to duplicate the logic that determines what +modules are available in your recipe or to avoid having to package each +module by hand, the OpenEmbedded build system provides functionality to +handle module packaging dynamically. + +To handle optional module packaging, you need to do two things: + +- Ensure the module packaging is actually done. + +- Ensure that any dependencies on optional modules from other recipes + are satisfied by your recipe. + +Making Sure the Packaging is Done +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To ensure the module packaging actually gets done, you use the +``do_split_packages`` function within the ``populate_packages`` Python +function in your recipe. The ``do_split_packages`` function searches for +a pattern of files or directories under a specified path and creates a +package for each one it finds by appending to the +:term:`PACKAGES` variable and +setting the appropriate values for ``FILES_packagename``, +``RDEPENDS_packagename``, ``DESCRIPTION_packagename``, and so forth. +Here is an example from the ``lighttpd`` recipe: +:: + + python populate_packages_prepend () { + lighttpd_libdir = d.expand('${libdir}') + do_split_packages(d, lighttpd_libdir, '^mod_(.*).so$', + 'lighttpd-module-%s', 'Lighttpd module for %s', + extra_depends='') + } + +The previous example specifies a number of things in the call to +``do_split_packages``. + +- A directory within the files installed by your recipe through + ``do_install`` in which to search. + +- A regular expression used to match module files in that directory. In + the example, note the parentheses () that mark the part of the + expression from which the module name should be derived. + +- A pattern to use for the package names. + +- A description for each package. + +- An empty string for ``extra_depends``, which disables the default + dependency on the main ``lighttpd`` package. Thus, if a file in + ``${libdir}`` called ``mod_alias.so`` is found, a package called + ``lighttpd-module-alias`` is created for it and the + :term:`DESCRIPTION` is set to + "Lighttpd module for alias". + +Often, packaging modules is as simple as the previous example. However, +more advanced options exist that you can use within +``do_split_packages`` to modify its behavior. And, if you need to, you +can add more logic by specifying a hook function that is called for each +package. It is also perfectly acceptable to call ``do_split_packages`` +multiple times if you have more than one set of modules to package. 
+ +For more examples that show how to use ``do_split_packages``, see the +``connman.inc`` file in the ``meta/recipes-connectivity/connman/`` +directory of the ``poky`` :ref:`source repository `. You can +also find examples in ``meta/classes/kernel.bbclass``. + +Following is a reference that shows ``do_split_packages`` mandatory and +optional arguments: +:: + + Mandatory arguments + + root + The path in which to search + file_regex + Regular expression to match searched files. + Use parentheses () to mark the part of this + expression that should be used to derive the + module name (to be substituted where %s is + used in other function arguments as noted below) + output_pattern + Pattern to use for the package names. Must + include %s. + description + Description to set for each package. Must + include %s. + + Optional arguments + + postinst + Postinstall script to use for all packages + (as a string) + recursive + True to perform a recursive search - default + False + hook + A hook function to be called for every match. + The function will be called with the following + arguments (in the order listed): + + f + Full path to the file/directory match + pkg + The package name + file_regex + As above + output_pattern + As above + modulename + The module name derived using file_regex + extra_depends + Extra runtime dependencies (RDEPENDS) to be + set for all packages. The default value of None + causes a dependency on the main package + (${PN}) - if you do not want this, pass empty + string '' for this parameter. + aux_files_pattern + Extra item(s) to be added to FILES for each + package. Can be a single string item or a list + of strings for multiple items. Must include %s. + postrm + postrm script to use for all packages (as a + string) + allow_dirs + True to allow directories to be matched - + default False + prepend + If True, prepend created packages to PACKAGES + instead of the default False which appends them + match_path + match file_regex on the whole relative path to + the root rather than just the file name + aux_files_pattern_verbatim + Extra item(s) to be added to FILES for each + package, using the actual derived module name + rather than converting it to something legal + for a package name. Can be a single string item + or a list of strings for multiple items. Must + include %s. + allow_links + True to allow symlinks to be matched - default + False + summary + Summary to set for each package. Must include %s; + defaults to description if not set. + + + +Satisfying Dependencies +~~~~~~~~~~~~~~~~~~~~~~~ + +The second part for handling optional module packaging is to ensure that +any dependencies on optional modules from other recipes are satisfied by +your recipe. You can be sure these dependencies are satisfied by using +the :term:`PACKAGES_DYNAMIC` +variable. Here is an example that continues with the ``lighttpd`` recipe +shown earlier: +:: + + PACKAGES_DYNAMIC = "lighttpd-module-.*" + +The name +specified in the regular expression can of course be anything. In this +example, it is ``lighttpd-module-`` and is specified as the prefix to +ensure that any :term:`RDEPENDS` and +:term:`RRECOMMENDS` on a package +name starting with the prefix are satisfied during build time. If you +are using ``do_split_packages`` as described in the previous section, +the value you put in ``PACKAGES_DYNAMIC`` should correspond to the name +pattern specified in the call to ``do_split_packages``. 
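+
+For instance, continuing the ``lighttpd`` example, another recipe can
+declare a runtime dependency on one of the dynamically created module
+packages (the module chosen here is just for illustration):
+::
+
+   RDEPENDS_${PN} += "lighttpd-module-alias"
+
+Because ``lighttpd-module-alias`` matches the ``lighttpd-module-.*``
+pattern set in ``PACKAGES_DYNAMIC``, the dependency is accepted even
+though the package is only created dynamically by ``do_split_packages``
+at build time.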
+ +Using Runtime Package Management +-------------------------------- + +During a build, BitBake always transforms a recipe into one or more +packages. For example, BitBake takes the ``bash`` recipe and produces a +number of packages (e.g. ``bash``, ``bash-bashbug``, +``bash-completion``, ``bash-completion-dbg``, ``bash-completion-dev``, +``bash-completion-extra``, ``bash-dbg``, and so forth). Not all +generated packages are included in an image. + +In several situations, you might need to update, add, remove, or query +the packages on a target device at runtime (i.e. without having to +generate a new image). Examples of such situations include: + +- You want to provide in-the-field updates to deployed devices (e.g. + security updates). + +- You want to have a fast turn-around development cycle for one or more + applications that run on your device. + +- You want to temporarily install the "debug" packages of various + applications on your device so that debugging can be greatly improved + by allowing access to symbols and source debugging. + +- You want to deploy a more minimal package selection of your device + but allow in-the-field updates to add a larger selection for + customization. + +In all these situations, you have something similar to a more +traditional Linux distribution in that in-field devices are able to +receive pre-compiled packages from a server for installation or update. +Being able to install these packages on a running, in-field device is +what is termed "runtime package management". + +In order to use runtime package management, you need a host or server +machine that serves up the pre-compiled packages plus the required +metadata. You also need package manipulation tools on the target. The +build machine is a likely candidate to act as the server. However, that +machine does not necessarily have to be the package server. The build +machine could push its artifacts to another machine that acts as the +server (e.g. Internet-facing). In fact, doing so is advantageous for a +production environment as getting the packages away from the development +system's build directory prevents accidental overwrites. + +A simple build that targets just one device produces more than one +package database. In other words, the packages produced by a build are +separated out into a couple of different package groupings based on +criteria such as the target's CPU architecture, the target board, or the +C library used on the target. For example, a build targeting the +``qemux86`` device produces the following three package databases: +``noarch``, ``i586``, and ``qemux86``. If you wanted your ``qemux86`` +device to be aware of all the packages that were available to it, you +would need to point it to each of these databases individually. In a +similar way, a traditional Linux distribution usually is configured to +be aware of a number of software repositories from which it retrieves +packages. + +Using runtime package management is completely optional and not required +for a successful build or deployment in any way. But if you want to make +use of runtime package management, you need to do a couple things above +and beyond the basics. The remainder of this section describes what you +need to do. + +.. _runtime-package-management-build: + +Build Considerations +~~~~~~~~~~~~~~~~~~~~ + +This section describes build considerations of which you need to be +aware in order to provide support for runtime package management. 
+ +When BitBake generates packages, it needs to know what format or formats +to use. In your configuration, you use the +:term:`PACKAGE_CLASSES` +variable to specify the format: + +1. Open the ``local.conf`` file inside your + :term:`Build Directory` (e.g. + ``~/poky/build/conf/local.conf``). + +2. Select the desired package format as follows: + :: + + PACKAGE_CLASSES ?= "package_packageformat" + + where packageformat can be "ipk", "rpm", + "deb", or "tar" which are the supported package formats. + + .. note:: + + Because the Yocto Project supports four different package formats, + you can set the variable with more than one argument. However, the + OpenEmbedded build system only uses the first argument when + creating an image or Software Development Kit (SDK). + +If you would like your image to start off with a basic package database +containing the packages in your current build as well as to have the +relevant tools available on the target for runtime package management, +you can include "package-management" in the +:term:`IMAGE_FEATURES` +variable. Including "package-management" in this configuration variable +ensures that when the image is assembled for your target, the image +includes the currently-known package databases as well as the +target-specific tools required for runtime package management to be +performed on the target. However, this is not strictly necessary. You +could start your image off without any databases but only include the +required on-target package tool(s). As an example, you could include +"opkg" in your +:term:`IMAGE_INSTALL` variable +if you are using the IPK package format. You can then initialize your +target's package database(s) later once your image is up and running. + +Whenever you perform any sort of build step that can potentially +generate a package or modify existing package, it is always a good idea +to re-generate the package index after the build by using the following +command: +:: + + $ bitbake package-index + +It might be tempting to build the +package and the package index at the same time with a command such as +the following: +:: + + $ bitbake some-package package-index + +Do not do this as +BitBake does not schedule the package index for after the completion of +the package you are building. Consequently, you cannot be sure of the +package index including information for the package you just built. +Thus, be sure to run the package update step separately after building +any packages. + +You can use the +:term:`PACKAGE_FEED_ARCHS`, +:term:`PACKAGE_FEED_BASE_PATHS`, +and +:term:`PACKAGE_FEED_URIS` +variables to pre-configure target images to use a package feed. If you +do not define these variables, then manual steps as described in the +subsequent sections are necessary to configure the target. You should +set these variables before building the image in order to produce a +correctly configured image. + +When your build is complete, your packages reside in the +``${TMPDIR}/deploy/packageformat`` directory. For example, if +``${``\ :term:`TMPDIR`\ ``}`` is +``tmp`` and your selected package type is RPM, then your RPM packages +are available in ``tmp/deploy/rpm``. + +.. _runtime-package-management-server: + +Host or Server Machine Setup +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Although other protocols are possible, a server using HTTP typically +serves packages. If you want to use HTTP, then set up and configure a +web server such as Apache 2, lighttpd, or SimpleHTTPServer on the +machine serving the packages. 
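+
+.. note::
+
+   SimpleHTTPServer is part of Python 2. If the machine serving the
+   packages only provides Python 3, the equivalent built-in module is
+   ``http.server``, which you can start in the same way (the port used
+   here is just an example):
+   ::
+
+      $ python3 -m http.server 8000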
+ +To keep things simple, this section describes how to set up a +SimpleHTTPServer web server to share package feeds from the developer's +machine. Although this server might not be the best for a production +environment, the setup is simple and straight forward. Should you want +to use a different server more suited for production (e.g. Apache 2, +Lighttpd, or Nginx), take the appropriate steps to do so. + +From within the build directory where you have built an image based on +your packaging choice (i.e. the +:term:`PACKAGE_CLASSES` +setting), simply start the server. The following example assumes a build +directory of ``~/poky/build/tmp/deploy/rpm`` and a ``PACKAGE_CLASSES`` +setting of "package_rpm": +:: + + $ cd ~/poky/build/tmp/deploy/rpm + $ python -m SimpleHTTPServer + +.. _runtime-package-management-target: + +Target Setup +~~~~~~~~~~~~ + +Setting up the target differs depending on the package management +system. This section provides information for RPM, IPK, and DEB. + +.. _runtime-package-management-target-rpm: + +Using RPM +^^^^^^^^^ + +The `Dandified Packaging +Tool `__ (DNF) performs +runtime package management of RPM packages. In order to use DNF for +runtime package management, you must perform an initial setup on the +target machine for cases where the ``PACKAGE_FEED_*`` variables were not +set as part of the image that is running on the target. This means if +you built your image and did not not use these variables as part of the +build and your image is now running on the target, you need to perform +the steps in this section if you want to use runtime package management. + +.. note:: + + For information on the PACKAGE_FEED_* variables, see + PACKAGE_FEED_ARCHS + , + PACKAGE_FEED_BASE_PATHS + , and + PACKAGE_FEED_URIS + in the Yocto Project Reference Manual variables glossary. + +On the target, you must inform DNF that package databases are available. +You do this by creating a file named +``/etc/yum.repos.d/oe-packages.repo`` and defining the ``oe-packages``. + +As an example, assume the target is able to use the following package +databases: ``all``, ``i586``, and ``qemux86`` from a server named +``my.server``. The specifics for setting up the web server are up to +you. The critical requirement is that the URIs in the target repository +configuration point to the correct remote location for the feeds. + +.. note:: + + For development purposes, you can point the web server to the build + system's + deploy + directory. However, for production use, it is better to copy the + package directories to a location outside of the build area and use + that location. Doing so avoids situations where the build system + overwrites or changes the + deploy + directory. + +When telling DNF where to look for the package databases, you must +declare individual locations per architecture or a single location used +for all architectures. You cannot do both: + +- *Create an Explicit List of Architectures:* Define individual base + URLs to identify where each package database is located: + :: + + [oe-packages] + baseurl=http://my.server/rpm/i586 http://my.server/rpm/qemux86 http://my.server/rpm/all + + This example + informs DNF about individual package databases for all three + architectures. 
+
+- *Create a Single (Full) Package Index:* Define a single base URL that
+  identifies where a full package database is located:
+  ::
+
+     [oe-packages]
+     baseurl=http://my.server/rpm
+
+  This example informs DNF about a single
+  package database that contains all the package index information for
+  all supported architectures.
+
+Once you have informed DNF where to find the package databases, you need
+to fetch them:
+::
+
+   # dnf makecache
+
+DNF is now able to find, install, and
+upgrade packages from the specified repository or repositories.
+
+.. note::
+
+   See the
+   DNF documentation
+   for additional information.
+
+.. _runtime-package-management-target-ipk:
+
+Using IPK
+^^^^^^^^^
+
+The ``opkg`` application performs runtime package management of IPK
+packages. You must perform an initial setup for ``opkg`` on the target
+machine if the
+:term:`PACKAGE_FEED_ARCHS`,
+:term:`PACKAGE_FEED_BASE_PATHS`,
+and
+:term:`PACKAGE_FEED_URIS`
+variables have not been set or the target image was built before the
+variables were set.
+
+The ``opkg`` application uses configuration files to find available
+package databases. Thus, you need to create a configuration file inside
+the ``/etc/opkg/`` directory, which informs ``opkg`` of any repository
+you want to use.
+
+As an example, suppose you are serving packages from an ``ipk/``
+directory containing the ``i586``, ``all``, and ``qemux86`` databases
+through an HTTP server named ``my.server``. On the target, create a
+configuration file (e.g. ``my_repo.conf``) inside the ``/etc/opkg/``
+directory containing the following:
+::
+
+   src/gz all http://my.server/ipk/all
+   src/gz i586 http://my.server/ipk/i586
+   src/gz qemux86 http://my.server/ipk/qemux86
+
+Next, instruct ``opkg`` to fetch the repository information:
+::
+
+   # opkg update
+
+The ``opkg`` application is now able to find, install, and upgrade
+packages from the specified repository.
+
+.. _runtime-package-management-target-deb:
+
+Using DEB
+^^^^^^^^^
+
+The ``apt`` application performs runtime package management of DEB
+packages. This application uses a source list file to find available
+package databases. You must perform an initial setup for ``apt`` on the
+target machine if the
+:term:`PACKAGE_FEED_ARCHS`,
+:term:`PACKAGE_FEED_BASE_PATHS`,
+and
+:term:`PACKAGE_FEED_URIS`
+variables have not been set or the target image was built before the
+variables were set.
+
+To inform ``apt`` of the repository you want to use, you might create a
+list file (e.g. ``my_repo.list``) inside the
+``/etc/apt/sources.list.d/`` directory. As an example, suppose you are
+serving packages from a ``deb/`` directory containing the ``i586``,
+``all``, and ``qemux86`` databases through an HTTP server named
+``my.server``. The list file should contain:
+::
+
+   deb http://my.server/deb/all ./
+   deb http://my.server/deb/i586 ./
+   deb http://my.server/deb/qemux86 ./
+
+Next, instruct the ``apt`` application
+to fetch the repository information:
+::
+
+   # apt-get update
+
+After this step,
+``apt`` is able to find, install, and upgrade packages from the
+specified repository.
+
+Generating and Using Signed Packages
+------------------------------------
+
+In order to add security to RPM packages used during a build, you can
+take steps to securely sign them. Once a signature is verified, the
+OpenEmbedded build system can use the package in the build. If signature
+verification fails for a signed package, the build system aborts the
+build.
+ +This section describes how to sign RPM packages during a build and how +to use signed package feeds (repositories) when doing a build. + +Signing RPM Packages +~~~~~~~~~~~~~~~~~~~~ + +To enable signing RPM packages, you must set up the following +configurations in either your ``local.config`` or ``distro.config`` +file: +:: + + # Inherit sign_rpm.bbclass to enable signing functionality + INHERIT += " sign_rpm" + # Define the GPG key that will be used for signing. + RPM_GPG_NAME = "key_name" + # Provide passphrase for the key + RPM_GPG_PASSPHRASE = "passphrase" + +.. note:: + + Be sure to supply appropriate values for both + key_name + and + passphrase + +Aside from the ``RPM_GPG_NAME`` and ``RPM_GPG_PASSPHRASE`` variables in +the previous example, two optional variables related to signing exist: + +- *GPG_BIN:* Specifies a ``gpg`` binary/wrapper that is executed + when the package is signed. + +- *GPG_PATH:* Specifies the ``gpg`` home directory used when the + package is signed. + +Processing Package Feeds +~~~~~~~~~~~~~~~~~~~~~~~~ + +In addition to being able to sign RPM packages, you can also enable +signed package feeds for IPK and RPM packages. + +The steps you need to take to enable signed package feed use are similar +to the steps used to sign RPM packages. You must define the following in +your ``local.config`` or ``distro.config`` file: +:: + + INHERIT += "sign_package_feed" + PACKAGE_FEED_GPG_NAME = "key_name" + PACKAGE_FEED_GPG_PASSPHRASE_FILE = "path_to_file_containing_passphrase" + +For signed package feeds, the passphrase must exist in a separate file, +which is pointed to by the ``PACKAGE_FEED_GPG_PASSPHRASE_FILE`` +variable. Regarding security, keeping a plain text passphrase out of the +configuration is more secure. + +Aside from the ``PACKAGE_FEED_GPG_NAME`` and +``PACKAGE_FEED_GPG_PASSPHRASE_FILE`` variables, three optional variables +related to signed package feeds exist: + +- *GPG_BIN* Specifies a ``gpg`` binary/wrapper that is executed + when the package is signed. + +- *GPG_PATH:* Specifies the ``gpg`` home directory used when the + package is signed. + +- *PACKAGE_FEED_GPG_SIGNATURE_TYPE:* Specifies the type of ``gpg`` + signature. This variable applies only to RPM and IPK package feeds. + Allowable values for the ``PACKAGE_FEED_GPG_SIGNATURE_TYPE`` are + "ASC", which is the default and specifies ascii armored, and "BIN", + which specifies binary. + +Testing Packages With ptest +--------------------------- + +A Package Test (ptest) runs tests against packages built by the +OpenEmbedded build system on the target machine. A ptest contains at +least two items: the actual test, and a shell script (``run-ptest``) +that starts the test. The shell script that starts the test must not +contain the actual test - the script only starts the test. On the other +hand, the test can be anything from a simple shell script that runs a +binary and checks the output to an elaborate system of test binaries and +data files. + +The test generates output in the format used by Automake: +:: + + result: testname + +where the result can be ``PASS``, ``FAIL``, or ``SKIP``, and +the testname can be any identifying string. + +For a list of Yocto Project recipes that are already enabled with ptest, +see the :yocto_wiki:`Ptest ` wiki page. + +.. note:: + + A recipe is "ptest-enabled" if it inherits the + ptest + class. 
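+
+As a concrete illustration of the output format described above, a
+``run-ptest`` script for a hypothetical package might emit lines such as
+the following (the test names are made up):
+::
+
+   PASS: tests/basic-io
+   FAIL: tests/locale-handling
+   SKIP: tests/network-required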
+ +Adding ptest to Your Build +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To add package testing to your build, add the +:term:`DISTRO_FEATURES` and +:term:`EXTRA_IMAGE_FEATURES` +variables to your ``local.conf`` file, which is found in the +:term:`Build Directory`: +:: + + DISTRO_FEATURES_append = " ptest" + EXTRA_IMAGE_FEATURES += "ptest-pkgs" + +Once your build is complete, the ptest files are installed into the +``/usr/lib/package/ptest`` directory within the image, where ``package`` +is the name of the package. + +Running ptest +~~~~~~~~~~~~~ + +The ``ptest-runner`` package installs a shell script that loops through +all installed ptest test suites and runs them in sequence. Consequently, +you might want to add this package to your image. + +Getting Your Package Ready +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In order to enable a recipe to run installed ptests on target hardware, +you need to prepare the recipes that build the packages you want to +test. Here is what you have to do for each recipe: + +- *Be sure the recipe inherits + the*\ :ref:`ptest `\ *class:* + Include the following line in each recipe: + :: + + inherit ptest + +- *Create run-ptest:* This script starts your test. Locate the + script where you will refer to it using + :term:`SRC_URI`. Here is an + example that starts a test for ``dbus``: + :: + + #!/bin/sh + cd test + make -k runtest-TESTS + +- *Ensure dependencies are met:* If the test adds build or runtime + dependencies that normally do not exist for the package (such as + requiring "make" to run the test suite), use the + :term:`DEPENDS` and + :term:`RDEPENDS` variables in + your recipe in order for the package to meet the dependencies. Here + is an example where the package has a runtime dependency on "make": + :: + + RDEPENDS_${PN}-ptest += "make" + +- *Add a function to build the test suite:* Not many packages support + cross-compilation of their test suites. Consequently, you usually + need to add a cross-compilation function to the package. + + Many packages based on Automake compile and run the test suite by + using a single command such as ``make check``. However, the host + ``make check`` builds and runs on the same computer, while + cross-compiling requires that the package is built on the host but + executed for the target architecture (though often, as in the case + for ptest, the execution occurs on the host). The built version of + Automake that ships with the Yocto Project includes a patch that + separates building and execution. Consequently, packages that use the + unaltered, patched version of ``make check`` automatically + cross-compiles. + + Regardless, you still must add a ``do_compile_ptest`` function to + build the test suite. Add a function similar to the following to your + recipe: + :: + + do_compile_ptest() { + oe_runmake buildtest-TESTS + } + +- *Ensure special configurations are set:* If the package requires + special configurations prior to compiling the test code, you must + insert a ``do_configure_ptest`` function into the recipe. + +- *Install the test suite:* The ``ptest`` class automatically copies + the file ``run-ptest`` to the target and then runs make + ``install-ptest`` to run the tests. If this is not enough, you need + to create a ``do_install_ptest`` function and make sure it gets + called after the "make install-ptest" completes. + +Creating Node Package Manager (NPM) Packages +-------------------------------------------- + +`NPM `__ is a package +manager for the JavaScript programming language. 
The Yocto Project +supports the NPM :ref:`fetcher `. You can +use this fetcher in combination with +:doc:```devtool`` <../ref-manual/ref-devtool-reference>` to create +recipes that produce NPM packages. + +Two workflows exist that allow you to create NPM packages using +``devtool``: the NPM registry modules method and the NPM project code +method. + +.. note:: + + While it is possible to create NPM recipes manually, using + devtool + is far simpler. + +Additionally, some requirements and caveats exist. + +.. _npm-package-creation-requirements: + +Requirements and Caveats +~~~~~~~~~~~~~~~~~~~~~~~~ + +You need to be aware of the following before using ``devtool`` to create +NPM packages: + +- Of the two methods that you can use ``devtool`` to create NPM + packages, the registry approach is slightly simpler. However, you + might consider the project approach because you do not have to + publish your module in the NPM registry + (`npm-registry `_), which + is NPM's public registry. + +- Be familiar with + :doc:```devtool`` <../ref-manual/ref-devtool-reference>`. + +- The NPM host tools need the native ``nodejs-npm`` package, which is + part of the OpenEmbedded environment. You need to get the package by + cloning the https://github.com/openembedded/meta-openembedded + repository out of GitHub. Be sure to add the path to your local copy + to your ``bblayers.conf`` file. + +- ``devtool`` cannot detect native libraries in module dependencies. + Consequently, you must manually add packages to your recipe. + +- While deploying NPM packages, ``devtool`` cannot determine which + dependent packages are missing on the target (e.g. the node runtime + ``nodejs``). Consequently, you need to find out what files are + missing and be sure they are on the target. + +- Although you might not need NPM to run your node package, it is + useful to have NPM on your target. The NPM package name is + ``nodejs-npm``. + +.. _npm-using-the-registry-modules-method: + +Using the Registry Modules Method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section presents an example that uses the ``cute-files`` module, +which is a file browser web application. + +.. note:: + + You must know the + cute-files + module version. + +The first thing you need to do is use ``devtool`` and the NPM fetcher to +create the recipe: +:: + + $ devtool add "npm://registry.npmjs.org;package=cute-files;version=1.0.2" + +The +``devtool add`` command runs ``recipetool create`` and uses the same +fetch URI to download each dependency and capture license details where +possible. The result is a generated recipe. + +The recipe file is fairly simple and contains every license that +``recipetool`` finds and includes the licenses in the recipe's +:term:`LIC_FILES_CHKSUM` +variables. You need to examine the variables and look for those with +"unknown" in the :term:`LICENSE` +field. You need to track down the license information for "unknown" +modules and manually add the information to the recipe. + +``recipetool`` creates a "shrinkwrap" file for your recipe. Shrinkwrap +files capture the version of all dependent modules. Many packages do not +provide shrinkwrap files. ``recipetool`` create a shrinkwrap file as it +runs. + +.. note:: + + A package is created for each sub-module. This policy is the only + practical way to have the licenses for all of the dependencies + represented in the license manifest of the image. 
+ +The ``devtool edit-recipe`` command lets you take a look at the recipe: +:: + + $ devtool edit-recipe cute-files + SUMMARY = "Turn any folder on your computer into a cute file browser, available on the local network." + LICENSE = "MIT & ISC & Unknown" + LIC_FILES_CHKSUM = "file://LICENSE;md5=71d98c0a1db42956787b1909c74a86ca \ + file://node_modules/toidentifier/LICENSE;md5=1a261071a044d02eb6f2bb47f51a3502 \ + file://node_modules/debug/LICENSE;md5=ddd815a475e7338b0be7a14d8ee35a99 \ + ... + SRC_URI = " \ + npm://registry.npmjs.org/;package=cute-files;version=${PV} \ + npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \ + " + S = "${WORKDIR}/npm" + inherit npm LICENSE_${PN} = "MIT" + LICENSE_${PN}-accepts = "MIT" + LICENSE_${PN}-array-flatten = "MIT" + ... + LICENSE_${PN}-vary = "MIT" + +Three key points exist in the previous example: + +- :term:`SRC_URI` uses the NPM + scheme so that the NPM fetcher is used. + +- ``recipetool`` collects all the license information. If a + sub-module's license is unavailable, the sub-module's name appears in + the comments. + +- The ``inherit npm`` statement causes the + :ref:`npm ` class to package + up all the modules. + +You can run the following command to build the ``cute-files`` package: +:: + + $ devtool build cute-files + +Remember that ``nodejs`` must be installed on +the target before your package. + +Assuming 192.168.7.2 for the target's IP address, use the following +command to deploy your package: +:: + + $ devtool deploy-target -s cute-files root@192.168.7.2 + +Once the package is installed on the target, you can +test the application: + +.. note:: + + Because of a know issue, you cannot simply run + cute-files + as you would if you had run + npm install + . + +:: + + $ cd /usr/lib/node_modules/cute-files + $ node cute-files.js + +On a browser, +go to ``http://192.168.7.2:3000`` and you see the following: + +.. image:: figures/cute-files-npm-example.png + :align: center + +You can find the recipe in ``workspace/recipes/cute-files``. You can use +the recipe in any layer you choose. + +.. _npm-using-the-npm-projects-method: + +Using the NPM Projects Code Method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Although it is useful to package modules already in the NPM registry, +adding ``node.js`` projects under development is a more common developer +use case. + +This section covers the NPM projects code method, which is very similar +to the "registry" approach described in the previous section. In the NPM +projects method, you provide ``devtool`` with an URL that points to the +source files. + +Replicating the same example, (i.e. ``cute-files``) use the following +command: +:: + + $ devtool add https://github.com/martinaglv/cute-files.git + +The +recipe this command generates is very similar to the recipe created in +the previous section. However, the ``SRC_URI`` looks like the following: +:: + + SRC_URI = " \ + git://github.com/martinaglv/cute-files.git;protocol=https \ + npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \ + " + +In this example, +the main module is taken from the Git repository and dependents are +taken from the NPM registry. Other than those differences, the recipe is +basically the same between the two methods. You can build and deploy the +package exactly as described in the previous section that uses the +registry modules method. + +Adding custom metadata to packages +---------------------------------- + +The variable +:term:`PACKAGE_ADD_METADATA` +can be used to add additional metadata to packages. 
This is reflected in
+the package control/spec file. To take the IPK format as an example, the
+CONTROL file stored inside would contain the additional metadata as
+additional lines.
+
+The variable can be used in multiple ways, including using suffixes to
+set it for a specific package type and/or package. Note that the order
+of precedence is the same as this list:
+
+- ``PACKAGE_ADD_METADATA_<PKGTYPE>_<PN>``
+
+- ``PACKAGE_ADD_METADATA_<PKGTYPE>``
+
+- ``PACKAGE_ADD_METADATA_<PN>``
+
+- ``PACKAGE_ADD_METADATA``
+
+``<PKGTYPE>`` is a parameter and expected to be a distinct name of a
+specific package type:
+
+- IPK for .ipk packages
+
+- DEB for .deb packages
+
+- RPM for .rpm packages
+
+``<PN>`` is a parameter and expected to be a package name.
+
+The variable can contain multiple [one-line] metadata fields separated
+by the literal sequence '\n'. The separator can be redefined using the
+variable flag ``separator``.
+
+The following is an example that adds two custom fields for ipk
+packages:
+::
+
+   PACKAGE_ADD_METADATA_IPK = "Vendor: CustomIpk\nGroup: Applications/Spreadsheets"
+
+Efficiently Fetching Source Files During a Build
+================================================
+
+The OpenEmbedded build system works with source files located through
+the :term:`SRC_URI` variable. When
+you build something using BitBake, a big part of the operation is
+locating and downloading all the source tarballs. For images,
+downloading all the source for various packages can take a significant
+amount of time.
+
+This section shows you how you can use mirrors to speed up fetching
+source files and how you can pre-fetch files, all of which leads to more
+efficient use of resources and time.
+
+Setting up Effective Mirrors
+----------------------------
+
+A good deal that goes into a Yocto Project build is simply downloading
+all of the source tarballs. Maybe you have been working with another
+build system (OpenEmbedded or Angstrom) for which you have built up a
+sizable directory of source tarballs. Or, perhaps someone else has such
+a directory for which you have read access. If so, you can save time by
+adding statements to your configuration file so that the build process
+checks local directories first for existing tarballs before checking the
+Internet.
+
+Here is an efficient way to set it up in your ``local.conf`` file:
+::
+
+   SOURCE_MIRROR_URL ?= "file:///home/you/your-download-dir/"
+   INHERIT += "own-mirrors"
+   BB_GENERATE_MIRROR_TARBALLS = "1"
+   # BB_NO_NETWORK = "1"
+
+In the previous example, the
+:term:`BB_GENERATE_MIRROR_TARBALLS`
+variable causes the OpenEmbedded build system to generate tarballs of
+the Git repositories and store them in the
+:term:`DL_DIR` directory. Due to
+performance reasons, generating and storing these tarballs is not the
+build system's default behavior.
+
+You can also use the
+:term:`PREMIRRORS` variable. For
+an example, see the variable's glossary entry in the Yocto Project
+Reference Manual.
+
+Getting Source Files and Suppressing the Build
+----------------------------------------------
+
+Another technique you can use to ready yourself for a successive string
+of build operations is to pre-fetch all the source files without
+actually starting a build. This technique lets you work through any
+download issues and ultimately gathers all the source files into your
+download directory :ref:`structure-build-downloads`,
+whose location is specified by :term:`DL_DIR`.
+ +Use the following BitBake command form to fetch all the necessary +sources without starting the build: +:: + + $ bitbake target --runall=fetch + +This +variation of the BitBake command guarantees that you have all the +sources for that BitBake target should you disconnect from the Internet +and want to do the build later offline. + +Selecting an Initialization Manager +=================================== + +By default, the Yocto Project uses SysVinit as the initialization +manager. However, support also exists for systemd, which is a full +replacement for init with parallel starting of services, reduced shell +overhead and other features that are used by many distributions. + +Within the system, SysVinit treats system components as services. These +services are maintained as shell scripts stored in the ``/etc/init.d/`` +directory. Services organize into different run levels. This +organization is maintained by putting links to the services in the +``/etc/rcN.d/`` directories, where N/ is one of the following options: +"S", "0", "1", "2", "3", "4", "5", or "6". + +.. note:: + + Each runlevel has a dependency on the previous runlevel. This + dependency allows the services to work properly. + +In comparison, systemd treats components as units. Using units is a +broader concept as compared to using a service. A unit includes several +different types of entities. Service is one of the types of entities. +The runlevel concept in SysVinit corresponds to the concept of a target +in systemd, where target is also a type of supported unit. + +In a SysVinit-based system, services load sequentially (i.e. one by one) +during and parallelization is not supported. With systemd, services +start in parallel. Needless to say, the method can have an impact on +system startup performance. + +If you want to use SysVinit, you do not have to do anything. But, if you +want to use systemd, you must take some steps as described in the +following sections. + +Using systemd Exclusively +------------------------- + +Set these variables in your distribution configuration file as follows: +:: + + DISTRO_FEATURES_append = " systemd" + VIRTUAL-RUNTIME_init_manager = "systemd" + +You can also prevent the SysVinit distribution feature from +being automatically enabled as follows: +:: + + DISTRO_FEATURES_BACKFILL_CONSIDERED = "sysvinit" + +Doing so removes any +redundant SysVinit scripts. + +To remove initscripts from your image altogether, set this variable +also: +:: + + VIRTUAL-RUNTIME_initscripts = "" + +For information on the backfill variable, see +:term:`DISTRO_FEATURES_BACKFILL_CONSIDERED`. + +Using systemd for the Main Image and Using SysVinit for the Rescue Image +------------------------------------------------------------------------ + +Set these variables in your distribution configuration file as follows: +:: + + DISTRO_FEATURES_append = " systemd" + VIRTUAL-RUNTIME_init_manager = "systemd" + +Doing so causes your main image to use the +``packagegroup-core-boot.bb`` recipe and systemd. The rescue/minimal +image cannot use this package group. However, it can install SysVinit +and the appropriate packages will have support for both systemd and +SysVinit. + +.. _selecting-dev-manager: + +Selecting a Device Manager +========================== + +The Yocto Project provides multiple ways to manage the device manager +(``/dev``): + +- Persistent and Pre-Populated\ ``/dev``: For this case, the ``/dev`` + directory is persistent and the required device nodes are created + during the build. 
+ +- Use ``devtmpfs`` with a Device Manager: For this case, the ``/dev`` + directory is provided by the kernel as an in-memory file system and + is automatically populated by the kernel at runtime. Additional + configuration of device nodes is done in user space by a device + manager like ``udev`` or ``busybox-mdev``. + +.. _static-dev-management: + +Using Persistent and Pre-Populated\ ``/dev`` +-------------------------------------------- + +To use the static method for device population, you need to set the +:term:`USE_DEVFS` variable to "0" +as follows: +:: + + USE_DEVFS = "0" + +The content of the resulting ``/dev`` directory is defined in a Device +Table file. The +:term:`IMAGE_DEVICE_TABLES` +variable defines the Device Table to use and should be set in the +machine or distro configuration file. Alternatively, you can set this +variable in your ``local.conf`` configuration file. + +If you do not define the ``IMAGE_DEVICE_TABLES`` variable, the default +``device_table-minimal.txt`` is used: +:: + + IMAGE_DEVICE_TABLES = "device_table-mymachine.txt" + +The population is handled by the ``makedevs`` utility during image +creation: + +.. _devtmpfs-dev-management: + +Using ``devtmpfs`` and a Device Manager +--------------------------------------- + +To use the dynamic method for device population, you need to use (or be +sure to set) the :term:`USE_DEVFS` +variable to "1", which is the default: +:: + + USE_DEVFS = "1" + +With this +setting, the resulting ``/dev`` directory is populated by the kernel +using ``devtmpfs``. Make sure the corresponding kernel configuration +variable ``CONFIG_DEVTMPFS`` is set when building you build a Linux +kernel. + +All devices created by ``devtmpfs`` will be owned by ``root`` and have +permissions ``0600``. + +To have more control over the device nodes, you can use a device manager +like ``udev`` or ``busybox-mdev``. You choose the device manager by +defining the ``VIRTUAL-RUNTIME_dev_manager`` variable in your machine or +distro configuration file. Alternatively, you can set this variable in +your ``local.conf`` configuration file: +:: + + VIRTUAL-RUNTIME_dev_manager = "udev" + + # Some alternative values + # VIRTUAL-RUNTIME_dev_manager = "busybox-mdev" + # VIRTUAL-RUNTIME_dev_manager = "systemd" + +.. _platdev-appdev-srcrev: + +Using an External SCM +===================== + +If you're working on a recipe that pulls from an external Source Code +Manager (SCM), it is possible to have the OpenEmbedded build system +notice new recipe changes added to the SCM and then build the resulting +packages that depend on the new recipes by using the latest versions. +This only works for SCMs from which it is possible to get a sensible +revision number for changes. Currently, you can do this with Apache +Subversion (SVN), Git, and Bazaar (BZR) repositories. + +To enable this behavior, the :term:`PV` of +the recipe needs to reference +:term:`SRCPV`. Here is an example: +:: + + PV = "1.2.3+git${SRCPV}" + +Then, you can add the following to your +``local.conf``: +:: + + SRCREV_pn-PN = "${AUTOREV}" + +:term:`PN` is the name of the recipe for +which you want to enable automatic source revision updating. 
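+
+For example, to track the latest revision of a hypothetical recipe named
+``my-app``, you would add the following to ``local.conf``:
+::
+
+   SRCREV_pn-my-app = "${AUTOREV}"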
+ +If you do not want to update your local configuration file, you can add +the following directly to the recipe to finish enabling the feature: +:: + + SRCREV = "${AUTOREV}" + +The Yocto Project provides a distribution named ``poky-bleeding``, whose +configuration file contains the line: +:: + + require conf/distro/include/poky-floating-revisions.inc + +This line pulls in the +listed include file that contains numerous lines of exactly that form: +:: + + #SRCREV_pn-opkg-native ?= "${AUTOREV}" + #SRCREV_pn-opkg-sdk ?= "${AUTOREV}" + #SRCREV_pn-opkg ?= "${AUTOREV}" + #SRCREV_pn-opkg-utils-native ?= "${AUTOREV}" + #SRCREV_pn-opkg-utils ?= "${AUTOREV}" + SRCREV_pn-gconf-dbus ?= "${AUTOREV}" + SRCREV_pn-matchbox-common ?= "${AUTOREV}" + SRCREV_pn-matchbox-config-gtk ?= "${AUTOREV}" + SRCREV_pn-matchbox-desktop ?= "${AUTOREV}" + SRCREV_pn-matchbox-keyboard ?= "${AUTOREV}" + SRCREV_pn-matchbox-panel-2 ?= "${AUTOREV}" + SRCREV_pn-matchbox-themes-extra ?= "${AUTOREV}" + SRCREV_pn-matchbox-terminal ?= "${AUTOREV}" + SRCREV_pn-matchbox-wm ?= "${AUTOREV}" + SRCREV_pn-settings-daemon ?= "${AUTOREV}" + SRCREV_pn-screenshot ?= "${AUTOREV}" + . . . + +These lines allow you to +experiment with building a distribution that tracks the latest +development source for numerous packages. + +.. note:: + + The + poky-bleeding + distribution is not tested on a regular basis. Keep this in mind if + you use it. + +Creating a Read-Only Root Filesystem +==================================== + +Suppose, for security reasons, you need to disable your target device's +root filesystem's write permissions (i.e. you need a read-only root +filesystem). Or, perhaps you are running the device's operating system +from a read-only storage device. For either case, you can customize your +image for that behavior. + +.. note:: + + Supporting a read-only root filesystem requires that the system and + applications do not try to write to the root filesystem. You must + configure all parts of the target system to write elsewhere, or to + gracefully fail in the event of attempting to write to the root + filesystem. + +Creating the Root Filesystem +---------------------------- + +To create the read-only root filesystem, simply add the +"read-only-rootfs" feature to your image, normally in one of two ways. +The first way is to add the "read-only-rootfs" image feature in the +image's recipe file via the ``IMAGE_FEATURES`` variable: +:: + + IMAGE_FEATURES += "read-only-rootfs" + +As an alternative, you can add the same feature +from within your build directory's ``local.conf`` file with the +associated ``EXTRA_IMAGE_FEATURES`` variable, as in: +:: + + EXTRA_IMAGE_FEATURES = "read-only-rootfs" + +For more information on how to use these variables, see the +":ref:`usingpoky-extend-customimage-imagefeatures`" +section. For information on the variables, see +:term:`IMAGE_FEATURES` and +:term:`EXTRA_IMAGE_FEATURES`. + +Post-Installation Scripts and Read-Only Root Filesystem +------------------------------------------------------- + +It is very important that you make sure all post-Installation +(``pkg_postinst``) scripts for packages that are installed into the +image can be run at the time when the root filesystem is created during +the build on the host system. These scripts cannot attempt to run during +first-boot on the target device. With the "read-only-rootfs" feature +enabled, the build system checks during root filesystem creation to make +sure all post-installation scripts succeed. 
If any of these scripts +still need to be run after the root filesystem is created, the build +immediately fails. These build-time checks ensure that the build fails +rather than the target device fails later during its initial boot +operation. + +Most of the common post-installation scripts generated by the build +system for the out-of-the-box Yocto Project are engineered so that they +can run during root filesystem creation (e.g. post-installation scripts +for caching fonts). However, if you create and add custom scripts, you +need to be sure they can be run during this file system creation. + +Here are some common problems that prevent post-installation scripts +from running during root filesystem creation: + +- *Not using $D in front of absolute paths:* The build system defines + ``$``\ :term:`D` when the root + filesystem is created. Furthermore, ``$D`` is blank when the script + is run on the target device. This implies two purposes for ``$D``: + ensuring paths are valid in both the host and target environments, + and checking to determine which environment is being used as a method + for taking appropriate actions. + +- *Attempting to run processes that are specific to or dependent on the + target architecture:* You can work around these attempts by using + native tools, which run on the host system, to accomplish the same + tasks, or by alternatively running the processes under QEMU, which + has the ``qemu_run_binary`` function. For more information, see the + :ref:`qemu ` class. + +Areas With Write Access +----------------------- + +With the "read-only-rootfs" feature enabled, any attempt by the target +to write to the root filesystem at runtime fails. Consequently, you must +make sure that you configure processes and applications that attempt +these types of writes do so to directories with write access (e.g. +``/tmp`` or ``/var/run``). + +Maintaining Build Output Quality +================================ + +Many factors can influence the quality of a build. For example, if you +upgrade a recipe to use a new version of an upstream software package or +you experiment with some new configuration options, subtle changes can +occur that you might not detect until later. Consider the case where +your recipe is using a newer version of an upstream package. In this +case, a new version of a piece of software might introduce an optional +dependency on another library, which is auto-detected. If that library +has already been built when the software is building, the software will +link to the built library and that library will be pulled into your +image along with the new software even if you did not want the library. + +The :ref:`buildhistory ` +class exists to help you maintain the quality of your build output. You +can use the class to highlight unexpected and possibly unwanted changes +in the build output. When you enable build history, it records +information about the contents of each package and image and then +commits that information to a local Git repository where you can examine +the information. + +The remainder of this section describes the following: + +- How you can enable and disable build history + +- How to understand what the build history contains + +- How to limit the information used for build history + +- How to examine the build history from both a command-line and web + interface + +Enabling and Disabling Build History +------------------------------------ + +Build history is disabled by default. 
To enable it, add the following +``INHERIT`` statement and set the +:term:`BUILDHISTORY_COMMIT` +variable to "1" at the end of your ``conf/local.conf`` file found in the +:term:`Build Directory`: +:: + + INHERIT += "buildhistory" + BUILDHISTORY_COMMIT = "1" + +Enabling build history as +previously described causes the OpenEmbedded build system to collect +build output information and commit it as a single commit to a local +:ref:`overview-manual/overview-manual-development-environment:git` repository. + +.. note:: + + Enabling build history increases your build times slightly, + particularly for images, and increases the amount of disk space used + during the build. + +You can disable build history by removing the previous statements from +your ``conf/local.conf`` file. + +Understanding What the Build History Contains +--------------------------------------------- + +Build history information is kept in +``${``\ :term:`TOPDIR`\ ``}/buildhistory`` +in the Build Directory as defined by the +:term:`BUILDHISTORY_DIR` +variable. The following is an example abbreviated listing: + +.. image:: figures/buildhistory.png + :align: center + +At the top level, a ``metadata-revs`` file exists that lists the +revisions of the repositories for the enabled layers when the build was +produced. The rest of the data splits into separate ``packages``, +``images`` and ``sdk`` directories, the contents of which are described +as follows. + +Build History Package Information +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The history for each package contains a text file that has name-value +pairs with information about the package. For example, +``buildhistory/packages/i586-poky-linux/busybox/busybox/latest`` +contains the following: +:: + + PV = 1.22.1 + PR = r32 + RPROVIDES = + RDEPENDS = glibc (>= 2.20) update-alternatives-opkg + RRECOMMENDS = busybox-syslog busybox-udhcpc update-rc.d + PKGSIZE = 540168 + FILES = /usr/bin/* /usr/sbin/* /usr/lib/busybox/* /usr/lib/lib*.so.* \ + /etc /com /var /bin/* /sbin/* /lib/*.so.* /lib/udev/rules.d \ + /usr/lib/udev/rules.d /usr/share/busybox /usr/lib/busybox/* \ + /usr/share/pixmaps /usr/share/applications /usr/share/idl \ + /usr/share/omf /usr/share/sounds /usr/lib/bonobo/servers + FILELIST = /bin/busybox /bin/busybox.nosuid /bin/busybox.suid /bin/sh \ + /etc/busybox.links.nosuid /etc/busybox.links.suid + +Most of these +name-value pairs correspond to variables used to produce the package. +The exceptions are ``FILELIST``, which is the actual list of files in +the package, and ``PKGSIZE``, which is the total size of files in the +package in bytes. + +A file also exists that corresponds to the recipe from which the package +came (e.g. ``buildhistory/packages/i586-poky-linux/busybox/latest``): +:: + + PV = 1.22.1 + PR = r32 + DEPENDS = initscripts kern-tools-native update-rc.d-native \ + virtual/i586-poky-linux-compilerlibs virtual/i586-poky-linux-gcc \ + virtual/libc virtual/update-alternatives + PACKAGES = busybox-ptest busybox-httpd busybox-udhcpd busybox-udhcpc \ + busybox-syslog busybox-mdev busybox-hwclock busybox-dbg \ + busybox-staticdev busybox-dev busybox-doc busybox-locale busybox + +Finally, for those recipes fetched from a version control system (e.g., +Git), a file exists that lists source revisions that are specified in +the recipe and lists the actual revisions used during the build. Listed +and actual revisions might differ when +:term:`SRCREV` is set to +${:term:`AUTOREV`}. 
Here is an
+example from
+``buildhistory/packages/qemux86-poky-linux/linux-yocto/latest_srcrev``:
+::
+
+   # SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
+   SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
+   # SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
+   SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
+
+You can use the
+``buildhistory-collect-srcrevs`` command with the ``-a`` option to
+collect the stored ``SRCREV`` values from build history and report them
+in a format suitable for use in global configuration (e.g.,
+``local.conf`` or a distro include file) to override floating
+``AUTOREV`` values to a fixed set of revisions. Here is some example
+output from this command:
+::
+
+   $ buildhistory-collect-srcrevs -a
+   # i586-poky-linux
+   SRCREV_pn-glibc = "b8079dd0d360648e4e8de48656c5c38972621072"
+   SRCREV_pn-glibc-initial = "b8079dd0d360648e4e8de48656c5c38972621072"
+   SRCREV_pn-opkg-utils = "53274f087565fd45d8452c5367997ba6a682a37a"
+   SRCREV_pn-kmod = "fd56638aed3fe147015bfa10ed4a5f7491303cb4"
+   # x86_64-linux
+   SRCREV_pn-gtk-doc-stub-native = "1dea266593edb766d6d898c79451ef193eb17cfa"
+   SRCREV_pn-dtc-native = "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf"
+   SRCREV_pn-update-rc.d-native = "eca680ddf28d024954895f59a241a622dd575c11"
+   SRCREV_glibc_pn-cross-localedef-native = "b8079dd0d360648e4e8de48656c5c38972621072"
+   SRCREV_localedef_pn-cross-localedef-native = "c833367348d39dad7ba018990bfdaffaec8e9ed3"
+   SRCREV_pn-prelink-native = "faa069deec99bf61418d0bab831c83d7c1b797ca"
+   SRCREV_pn-opkg-utils-native = "53274f087565fd45d8452c5367997ba6a682a37a"
+   SRCREV_pn-kern-tools-native = "23345b8846fe4bd167efdf1bd8a1224b2ba9a5ff"
+   SRCREV_pn-kmod-native = "fd56638aed3fe147015bfa10ed4a5f7491303cb4"
+   # qemux86-poky-linux
+   SRCREV_machine_pn-linux-yocto = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
+   SRCREV_meta_pn-linux-yocto = "a227f20eff056e511d504b2e490f3774ab260d6f"
+   # all-poky-linux
+   SRCREV_pn-update-rc.d = "eca680ddf28d024954895f59a241a622dd575c11"
+
+.. note::
+
+   Here are some notes on using the
+   buildhistory-collect-srcrevs
+   command:
+
+   -  By default, only values where the ``SRCREV`` was not hardcoded
+      (usually when ``AUTOREV`` is used) are reported. Use the ``-a``
+      option to see all ``SRCREV`` values.
+
+   -  The output statements might not have any effect if overrides are
+      applied elsewhere in the build system configuration. Use the
+      ``-f`` option to add the ``forcevariable`` override to each output
+      line if you need to work around this restriction.
+
+   -  The script does apply special handling when building for multiple
+      machines. However, the script does place a comment before each set
+      of values that specifies the triplet to which they belong, as
+      previously shown (e.g., ``i586-poky-linux``).
+
+Build History Image Information
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The files produced for each image are as follows:
+
+-  ``image-files:`` A directory containing selected files from the root
+   filesystem. The files are defined by
+   :term:`BUILDHISTORY_IMAGE_FILES`.
+
+-  ``build-id.txt:`` Human-readable information about the build
+   configuration and metadata source revisions. This file contains the
+   full build header as printed by BitBake.
+
+-  ``*.dot:`` Dependency graphs for the image that are compatible with
+   ``graphviz``.
+
+-  ``files-in-image.txt:`` A list of files in the image with
+   permissions, owner, group, size, and symlink information.
+ +- ``image-info.txt:`` A text file containing name-value pairs with + information about the image. See the following listing example for + more information. + +- ``installed-package-names.txt:`` A list of installed packages by name + only. + +- ``installed-package-sizes.txt:`` A list of installed packages ordered + by size. + +- ``installed-packages.txt:`` A list of installed packages with full + package filenames. + +.. note:: + + Installed package information is able to be gathered and produced + even if package management is disabled for the final image. + +Here is an example of ``image-info.txt``: +:: + + DISTRO = poky + DISTRO_VERSION = 1.7 + USER_CLASSES = buildstats image-mklibs image-prelink + IMAGE_CLASSES = image_types + IMAGE_FEATURES = debug-tweaks + IMAGE_LINGUAS = + IMAGE_INSTALL = packagegroup-core-boot run-postinsts + BAD_RECOMMENDATIONS = + NO_RECOMMENDATIONS = + PACKAGE_EXCLUDE = + ROOTFS_POSTPROCESS_COMMAND = write_package_manifest; license_create_manifest; \ + write_image_manifest ; buildhistory_list_installed_image ; \ + buildhistory_get_image_installed ; ssh_allow_empty_password; \ + postinst_enable_logging; rootfs_update_timestamp ; ssh_disable_dns_lookup ; + IMAGE_POSTPROCESS_COMMAND = buildhistory_get_imageinfo ; + IMAGESIZE = 6900 + +Other than ``IMAGESIZE``, +which is the total size of the files in the image in Kbytes, the +name-value pairs are variables that may have influenced the content of +the image. This information is often useful when you are trying to +determine why a change in the package or file listings has occurred. + +Using Build History to Gather Image Information Only +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As you can see, build history produces image information, including +dependency graphs, so you can see why something was pulled into the +image. If you are just interested in this information and not interested +in collecting specific package or SDK information, you can enable +writing only image information without any history by adding the +following to your ``conf/local.conf`` file found in the +:term:`Build Directory`: +:: + + INHERIT += "buildhistory" + BUILDHISTORY_COMMIT = "0" + BUILDHISTORY_FEATURES = "image" + +Here, you set the +:term:`BUILDHISTORY_FEATURES` +variable to use the image feature only. + +Build History SDK Information +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Build history collects similar information on the contents of SDKs (e.g. +``bitbake -c populate_sdk imagename``) as compared to information it +collects for images. Furthermore, this information differs depending on +whether an extensible or standard SDK is being produced. + +The following list shows the files produced for SDKs: + +- ``files-in-sdk.txt:`` A list of files in the SDK with permissions, + owner, group, size, and symlink information. This list includes both + the host and target parts of the SDK. + +- ``sdk-info.txt:`` A text file containing name-value pairs with + information about the SDK. See the following listing example for more + information. + +- ``sstate-task-sizes.txt:`` A text file containing name-value pairs + with information about task group sizes (e.g. ``do_populate_sysroot`` + tasks have a total size). The ``sstate-task-sizes.txt`` file exists + only when an extensible SDK is created. + +- ``sstate-package-sizes.txt:`` A text file containing name-value pairs + with information for the shared-state packages and sizes in the SDK. + The ``sstate-package-sizes.txt`` file exists only when an extensible + SDK is created. 
+ +- ``sdk-files:`` A folder that contains copies of the files mentioned + in ``BUILDHISTORY_SDK_FILES`` if the files are present in the output. + Additionally, the default value of ``BUILDHISTORY_SDK_FILES`` is + specific to the extensible SDK although you can set it differently if + you would like to pull in specific files from the standard SDK. + + The default files are ``conf/local.conf``, ``conf/bblayers.conf``, + ``conf/auto.conf``, ``conf/locked-sigs.inc``, and + ``conf/devtool.conf``. Thus, for an extensible SDK, these files get + copied into the ``sdk-files`` directory. + +- The following information appears under each of the ``host`` and + ``target`` directories for the portions of the SDK that run on the + host and on the target, respectively: + + .. note:: + + The following files for the most part are empty when producing an + extensible SDK because this type of SDK is not constructed from + packages as is the standard SDK. + + - ``depends.dot:`` Dependency graph for the SDK that is compatible + with ``graphviz``. + + - ``installed-package-names.txt:`` A list of installed packages by + name only. + + - ``installed-package-sizes.txt:`` A list of installed packages + ordered by size. + + - ``installed-packages.txt:`` A list of installed packages with full + package filenames. + +Here is an example of ``sdk-info.txt``: +:: + + DISTRO = poky + DISTRO_VERSION = 1.3+snapshot-20130327 + SDK_NAME = poky-glibc-i686-arm + SDK_VERSION = 1.3+snapshot + SDKMACHINE = + SDKIMAGE_FEATURES = dev-pkgs dbg-pkgs + BAD_RECOMMENDATIONS = + SDKSIZE = 352712 + +Other than ``SDKSIZE``, which is +the total size of the files in the SDK in Kbytes, the name-value pairs +are variables that might have influenced the content of the SDK. This +information is often useful when you are trying to determine why a +change in the package or file listings has occurred. + +Examining Build History Information +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can examine build history output from the command line or from a web +interface. + +To see any changes that have occurred (assuming you have +:term:`BUILDHISTORY_COMMIT` = "1"), +you can simply use any Git command that allows you to view the history +of a repository. Here is one method: +:: + + $ git log -p + +You need to realize, +however, that this method does show changes that are not significant +(e.g. a package's size changing by a few bytes). + +A command-line tool called ``buildhistory-diff`` does exist, though, +that queries the Git repository and prints just the differences that +might be significant in human-readable form. Here is an example: +:: + + $ ~/poky/poky/scripts/buildhistory-diff . HEAD^ + Changes to images/qemux86_64/glibc/core-image-minimal (files-in-image.txt): + /etc/anotherpkg.conf was added + /sbin/anotherpkg was added + * (installed-package-names.txt): + * anotherpkg was added + Changes to images/qemux86_64/glibc/core-image-minimal (installed-package-names.txt): + anotherpkg was added + packages/qemux86_64-poky-linux/v86d: PACKAGES: added "v86d-extras" + * PR changed from "r0" to "r1" + * PV changed from "0.1.10" to "0.1.12" + packages/qemux86_64-poky-linux/v86d/v86d: PKGSIZE changed from 110579 to 144381 (+30%) + * PR changed from "r0" to "r1" + * PV changed from "0.1.10" to "0.1.12" + +.. note:: + + The + buildhistory-diff + tool requires the + GitPython + package. 
Be sure to install it using Pip3 as follows:
+   ::
+
+      $ pip3 install GitPython --user
+
+   Alternatively, you can install
+   python3-git
+   using the appropriate distribution package manager (e.g.
+   apt-get
+   ,
+   dnf
+   , or
+   zypper
+   ).
+
+To see changes to the build history using a web interface, follow the
+instructions in the ``README`` file found here:
+http://git.yoctoproject.org/cgit/cgit.cgi/buildhistory-web/.
+
+Here is a sample screenshot of the interface:
+
+.. image:: figures/buildhistory-web.png
+   :align: center
+
+Performing Automated Runtime Testing
+====================================
+
+The OpenEmbedded build system makes available a series of automated
+tests for images to verify runtime functionality. You can run these
+tests on either QEMU or actual target hardware. Tests are written in
+Python making use of the ``unittest`` module, and the majority of them
+run commands on the target system over SSH. This section describes how
+you set up the environment to use these tests, run available tests, and
+write and add your own tests.
+
+For information on the test and QA infrastructure available within the
+Yocto Project, see the ":ref:`ref-manual/ref-release-process:testing and quality assurance`"
+section in the Yocto Project Reference Manual.
+
+Enabling Tests
+--------------
+
+Depending on whether you are planning to run tests using QEMU or on the
+hardware, you have to take different steps to enable the tests. See the
+following subsections for information on how to enable both types of
+tests.
+
+.. _qemu-image-enabling-tests:
+
+Enabling Runtime Tests on QEMU
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to run tests, you need to do the following:
+
+-  *Set up to avoid interaction with sudo for networking:* To
+   accomplish this, you must do one of the following:
+
+   -  Add ``NOPASSWD`` for your user in ``/etc/sudoers`` either for all
+      commands or just for ``runqemu-ifup``. You must provide the full
+      path as that can change if you are using multiple clones of the
+      source repository.
+
+      .. note::
+
+         On some distributions, you also need to comment out "Defaults
+         requiretty" in
+         /etc/sudoers
+         .
+
+   -  Manually configure a tap interface for your system.
+
+   -  Run the script in ``scripts/runqemu-gen-tapdevs`` as root, which
+      should generate a list of tap devices. This is the option
+      typically chosen for Autobuilder-type environments.
+
+      .. note::
+
+         -  Be sure to use an absolute path when calling this script
+            with sudo.
+
+         -  The package recipe ``qemu-helper-native`` is required to run
+            this script. Build the package using the following command:
+            $ bitbake qemu-helper-native
+
+-  *Set the DISPLAY variable:* You need to set this variable so that
+   you have an X server available (e.g. start ``vncserver`` for a
+   headless machine).
+
+-  *Be sure your host's firewall accepts incoming connections from
+   192.168.7.0/24:* Some of the tests (in particular DNF tests) start an
+   HTTP server on a random high number port, which is used to serve
+   files to the target. The DNF module serves
+   ``${WORKDIR}/oe-rootfs-repo`` so it can run DNF channel commands.
+   That means your host's firewall must accept incoming connections from
+   192.168.7.0/24, which is the default IP range used for tap devices by
+   ``runqemu``.
+
+-  *Be sure your host has the correct packages installed:* Depending on
+   your host's distribution, you need to have the following packages
+   installed:
+
+   -  Ubuntu and Debian: ``sysstat`` and ``iproute2``
+
+   -  OpenSUSE: ``sysstat`` and ``iproute2``
+
+   -  Fedora: ``sysstat`` and ``iproute``
+
+   -  CentOS: ``sysstat`` and ``iproute``
+
+Once you start running the tests, the following happens:
+
+1. A copy of the root filesystem is written to ``${WORKDIR}/testimage``.
+
+2. The image is booted under QEMU using the standard ``runqemu`` script.
+
+3. A default timeout of 500 seconds occurs to allow for the boot process
+   to reach the login prompt. You can change the timeout period by
+   setting
+   :term:`TEST_QEMUBOOT_TIMEOUT`
+   in the ``local.conf`` file.
+
+4. Once the boot process completes and the login prompt appears, the
+   tests run. The full boot log is written to
+   ``${WORKDIR}/testimage/qemu_boot_log``.
+
+5. Each test module loads in the order found in ``TEST_SUITES``. You can
+   find the full output of the commands run over SSH in
+   ``${WORKDIR}/testimage/ssh_target_log``.
+
+6. If no failures occur, the task running the tests ends successfully.
+   You can find the output from the ``unittest`` in the task log at
+   ``${WORKDIR}/temp/log.do_testimage``.
+
+.. _hardware-image-enabling-tests:
+
+Enabling Runtime Tests on Hardware
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The OpenEmbedded build system can run tests on real hardware, and for
+certain devices it can also deploy the image to be tested onto the
+device beforehand.
+
+For automated deployment, a "master image" is installed onto the
+hardware once as part of setup. Then, each time tests are to be run, the
+following occurs:
+
+1. The master image is booted into and used to write the image to be
+   tested to a second partition.
+
+2. The device is then rebooted using an external script that you need to
+   provide.
+
+3. The device boots into the image to be tested.
+
+When running tests (independent of whether the image has been deployed
+automatically or not), the device is expected to be connected to a
+network on a pre-determined IP address. You can either use static IP
+addresses written into the image, or set the image to use DHCP and have
+your DHCP server on the test network assign a known IP address based on
+the MAC address of the device.
+
+In order to run tests on hardware, you need to set ``TEST_TARGET`` to an
+appropriate value. For QEMU, you do not have to change anything; the
+default value is "qemu". For running tests on hardware, the following
+options exist:
+
+-  *"simpleremote":* Choose "simpleremote" if you are going to run tests
+   on a target system that is already running the image to be tested and
+   is available on the network. You can use "simpleremote" in
+   conjunction with either real hardware or an image running within a
+   separately started QEMU or any other virtual machine manager.
+
+-  *"SystemdbootTarget":* Choose "SystemdbootTarget" if your hardware is
+   an EFI-based machine with ``systemd-boot`` as bootloader and
+   ``core-image-testmaster`` (or something similar) is installed. Also,
+   your hardware under test must be in a DHCP-enabled network that gives
+   it the same IP address for each reboot.
+
+   If you choose "SystemdbootTarget", there are additional requirements
+   and considerations. See the "`Selecting
+   SystemdbootTarget <#selecting-systemdboottarget>`__" section, which
+   follows, for more information.
+
+-  *"BeagleBoneTarget":* Choose "BeagleBoneTarget" if you are deploying
+   images and running tests on the BeagleBone "Black" or original
+   "White" hardware. For information on how to use these tests, see the
+   comments at the top of the BeagleBoneTarget
+   ``meta-yocto-bsp/lib/oeqa/controllers/beaglebonetarget.py`` file.
+
+-  *"EdgeRouterTarget":* Choose "EdgeRouterTarget" if you are deploying
+   images and running tests on the Ubiquiti Networks EdgeRouter Lite.
+   For information on how to use these tests, see the comments at the
+   top of the EdgeRouterTarget
+   ``meta-yocto-bsp/lib/oeqa/controllers/edgeroutertarget.py`` file.
+
+-  *"GrubTarget":* Choose "GrubTarget" if you are deploying images and
+   running tests on any generic PC that boots using GRUB. For
+   information on how to use these tests, see the comments at the top of
+   the GrubTarget
+   ``meta-yocto-bsp/lib/oeqa/controllers/grubtarget.py`` file.
+
+-  *"your-target":* Create your own custom target if you want to deploy
+   images and run tests on a custom machine within your BSP layer. To do
+   this, you need to add a Python unit that defines the target class
+   under ``lib/oeqa/controllers/`` within your layer. You must also
+   provide an empty ``__init__.py``. For examples, see files in
+   ``meta-yocto-bsp/lib/oeqa/controllers/``.
+
+Selecting SystemdbootTarget
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you did not set ``TEST_TARGET`` to "SystemdbootTarget", then you do
+not need any information in this section. You can skip down to the
+"`Running Tests <#qemu-image-running-tests>`__" section.
+
+If you did set ``TEST_TARGET`` to "SystemdbootTarget", you also need to
+perform a one-time setup of your master image by doing the following:
+
+1. *Set EFI_PROVIDER:* Be sure that ``EFI_PROVIDER`` is as follows:
+   ::
+
+      EFI_PROVIDER = "systemd-boot"
+
+2. *Build the master image:* Build the ``core-image-testmaster`` image.
+   The ``core-image-testmaster`` recipe is provided as an example for a
+   "master" image and you can customize the image recipe as you would
+   any other recipe.
+
+   Here are the image recipe requirements:
+
+   -  Inherits ``core-image`` so that kernel modules are installed.
+
+   -  Installs normal Linux utilities, not BusyBox ones (e.g. ``bash``,
+      ``coreutils``, ``tar``, ``gzip``, and ``kmod``).
+
+   -  Uses a custom Initial RAM Disk (initramfs) image with a custom
+      installer. A normal image that you can install usually creates a
+      single rootfs partition. This image uses another installer that
+      creates a specific partition layout. Not all Board Support
+      Packages (BSPs) can use an installer. For such cases, you need to
+      manually create the following partition layout on the target:
+
+      -  First partition mounted under ``/boot``, labeled "boot".
+
+      -  The main rootfs partition where this image gets installed,
+         which is mounted under ``/``.
+
+      -  Another partition labeled "testrootfs" where test images get
+         deployed.
+
+3. *Install image:* Install the image that you just built on the target
+   system.
+
+The final thing you need to do when setting ``TEST_TARGET`` to
+"SystemdbootTarget" is to set up the test image:
+
+1. *Set up your local.conf file:* Make sure you have the following
+   statements in your ``local.conf`` file:
+   ::
+
+      IMAGE_FSTYPES += "tar.gz"
+      INHERIT += "testimage"
+      TEST_TARGET = "SystemdbootTarget"
+      TEST_TARGET_IP = "192.168.2.3"
+
+2.
*Build your test image:* Use BitBake to build the image: + :: + + $ bitbake core-image-sato + +Power Control +~~~~~~~~~~~~~ + +For most hardware targets other than "simpleremote", you can control +power: + +- You can use ``TEST_POWERCONTROL_CMD`` together with + ``TEST_POWERCONTROL_EXTRA_ARGS`` as a command that runs on the host + and does power cycling. The test code passes one argument to that + command: off, on or cycle (off then on). Here is an example that + could appear in your ``local.conf`` file: + :: + + TEST_POWERCONTROL_CMD = "powercontrol.exp test 10.11.12.1 nuc1" + + In this example, the expect + script does the following: + :: + + ssh test@10.11.12.1 "pyctl nuc1 arg" + + It then runs a Python script that controls power for a label called + ``nuc1``. + + .. note:: + + You need to customize + TEST_POWERCONTROL_CMD + and + TEST_POWERCONTROL_EXTRA_ARGS + for your own setup. The one requirement is that it accepts "on", + "off", and "cycle" as the last argument. + +- When no command is defined, it connects to the device over SSH and + uses the classic reboot command to reboot the device. Classic reboot + is fine as long as the machine actually reboots (i.e. the SSH test + has not failed). It is useful for scenarios where you have a simple + setup, typically with a single board, and where some manual + interaction is okay from time to time. + +If you have no hardware to automatically perform power control but still +wish to experiment with automated hardware testing, you can use the +dialog-power-control script that shows a dialog prompting you to perform +the required power action. This script requires either KDialog or Zenity +to be installed. To use this script, set the +:term:`TEST_POWERCONTROL_CMD` +variable as follows: +:: + + TEST_POWERCONTROL_CMD = "${COREBASE}/scripts/contrib/dialog-power-control" + +Serial Console Connection +~~~~~~~~~~~~~~~~~~~~~~~~~ + +For test target classes requiring a serial console to interact with the +bootloader (e.g. BeagleBoneTarget, EdgeRouterTarget, and GrubTarget), +you need to specify a command to use to connect to the serial console of +the target machine by using the +:term:`TEST_SERIALCONTROL_CMD` +variable and optionally the +:term:`TEST_SERIALCONTROL_EXTRA_ARGS` +variable. + +These cases could be a serial terminal program if the machine is +connected to a local serial port, or a ``telnet`` or ``ssh`` command +connecting to a remote console server. Regardless of the case, the +command simply needs to connect to the serial console and forward that +connection to standard input and output as any normal terminal program +does. For example, to use the picocom terminal program on serial device +``/dev/ttyUSB0`` at 115200bps, you would set the variable as follows: +:: + + TEST_SERIALCONTROL_CMD = "picocom /dev/ttyUSB0 -b 115200" + +For local +devices where the serial port device disappears when the device reboots, +an additional "serdevtry" wrapper script is provided. To use this +wrapper, simply prefix the terminal command with +``${COREBASE}/scripts/contrib/serdevtry``: +:: + + TEST_SERIALCONTROL_CMD = "${COREBASE}/scripts/contrib/serdevtry picocom -b 115200 /dev/ttyUSB0" + +.. 
_qemu-image-running-tests:
+
+Running Tests
+-------------
+
+You can start the tests automatically or manually:
+
+-  *Automatically running tests:* To run the tests automatically after
+   the OpenEmbedded build system successfully creates an image, first
+   set the
+   :term:`TESTIMAGE_AUTO`
+   variable to "1" in your ``local.conf`` file in the
+   :term:`Build Directory`:
+   ::
+
+      TESTIMAGE_AUTO = "1"
+
+   Next, build your image. If the image successfully builds, the
+   tests run:
+   ::
+
+      bitbake core-image-sato
+
+-  *Manually running tests:* To manually run the tests, first globally
+   inherit the
+   :ref:`testimage ` class
+   by editing your ``local.conf`` file:
+   ::
+
+      INHERIT += "testimage"
+
+   Next, use BitBake to run the tests:
+   ::
+
+      bitbake -c testimage image
+
+All test files reside in ``meta/lib/oeqa/runtime`` in the
+:term:`Source Directory`. A test name maps
+directly to a Python module. Each test module may contain a number of
+individual tests. Tests are usually grouped together by the area tested
+(e.g. tests for systemd reside in ``meta/lib/oeqa/runtime/systemd.py``).
+
+You can add tests to any layer provided you place them in the proper
+area and you extend :term:`BBPATH` in
+the ``local.conf`` file as normal. Be sure that tests reside in
+``layer/lib/oeqa/runtime``.
+
+.. note::
+
+   Be sure that module names do not collide with module names used in
+   the default set of test modules in
+   meta/lib/oeqa/runtime
+   .
+
+You can change the set of tests run by appending to or overriding the
+:term:`TEST_SUITES` variable in
+``local.conf``. Each name in ``TEST_SUITES`` represents a required test
+for the image. Test modules named within ``TEST_SUITES`` cannot be
+skipped even if a test is not suitable for an image (e.g. running the
+RPM tests on an image without ``rpm``). Appending "auto" to
+``TEST_SUITES`` causes the build system to try to run all tests that are
+suitable for the image (i.e. each test module may elect to skip itself).
+
+The order you list tests in ``TEST_SUITES`` is important and influences
+test dependencies. Consequently, tests that depend on other tests should
+be added after the test on which they depend. For example, since the
+``ssh`` test depends on the ``ping`` test, "ssh" needs to come after
+"ping" in the list. The test class provides no re-ordering or dependency
+handling.
+
+.. note::
+
+   Each module can have multiple classes with multiple test methods.
+   And, Python
+   unittest
+   rules apply.
+
+Here are some things to keep in mind when running tests:
+
+-  The default tests for the image are defined as:
+   ::
+
+      DEFAULT_TEST_SUITES_pn-image = "ping ssh df connman syslog xorg scp vnc date rpm dnf dmesg"
+
+-  Add your own test to the list of tests by using the following:
+   ::
+
+      TEST_SUITES_append = " mytest"
+
+-  Run a specific list of tests as follows:
+   ::
+
+      TEST_SUITES = "test1 test2 test3"
+
+   Remember, order is important. Be sure to place a test that is
+   dependent on another test later in the order.
+
+Exporting Tests
+---------------
+
+You can export tests so that they can run independently of the build
+system. Exporting tests is required if you want to be able to hand the
+test execution off to a scheduler. You can only export tests that are
+defined in :term:`TEST_SUITES`.
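+
+Because only the tests listed in ``TEST_SUITES`` are exported, you might
+want to pin that list explicitly in your ``local.conf`` file before
+exporting. The following is only a sketch; the test names are examples
+taken from the default suite and can be replaced with your own:
+::
+
+   TEST_SUITES = "ping ssh df date"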
+ +If your image is already built, make sure the following are set in your +``local.conf`` file: +:: + + INHERIT +="testexport" + TEST_TARGET_IP = "IP-address-for-the-test-target" + TEST_SERVER_IP = "IP-address-for-the-test-server" + +You can then export the tests with the +following BitBake command form: +:: + + $ bitbake image -c testexport + +Exporting the tests places them in the +:term:`Build Directory` in +``tmp/testexport/``\ image, which is controlled by the +``TEST_EXPORT_DIR`` variable. + +You can now run the tests outside of the build environment: +:: + + $ cd tmp/testexport/image + $ ./runexported.py testdata.json + +Here is a complete example that shows IP addresses and uses the +``core-image-sato`` image: +:: + + INHERIT +="testexport" + TEST_TARGET_IP = "192.168.7.2" + TEST_SERVER_IP = "192.168.7.1" + +Use BitBake to export the tests: +:: + + $ bitbake core-image-sato -c testexport + +Run the tests outside of +the build environment using the following: +:: + + $ cd tmp/testexport/core-image-sato + $ ./runexported.py testdata.json + +.. _qemu-image-writing-new-tests: + +Writing New Tests +----------------- + +As mentioned previously, all new test files need to be in the proper +place for the build system to find them. New tests for additional +functionality outside of the core should be added to the layer that adds +the functionality, in ``layer/lib/oeqa/runtime`` (as long as +:term:`BBPATH` is extended in the +layer's ``layer.conf`` file as normal). Just remember the following: + +- Filenames need to map directly to test (module) names. + +- Do not use module names that collide with existing core tests. + +- Minimally, an empty ``__init__.py`` file must exist in the runtime + directory. + +To create a new test, start by copying an existing module (e.g. +``syslog.py`` or ``gcc.py`` are good ones to use). Test modules can use +code from ``meta/lib/oeqa/utils``, which are helper classes. + +.. note:: + + Structure shell commands such that you rely on them and they return a + single code for success. Be aware that sometimes you will need to + parse the output. See the + df.py + and + date.py + modules for examples. + +You will notice that all test classes inherit ``oeRuntimeTest``, which +is found in ``meta/lib/oetest.py``. This base class offers some helper +attributes, which are described in the following sections: + +.. _qemu-image-writing-tests-class-methods: + +Class Methods +~~~~~~~~~~~~~ + +Class methods are as follows: + +- *hasPackage(pkg):* Returns "True" if ``pkg`` is in the installed + package list of the image, which is based on the manifest file that + is generated during the ``do_rootfs`` task. + +- *hasFeature(feature):* Returns "True" if the feature is in + :term:`IMAGE_FEATURES` or + :term:`DISTRO_FEATURES`. + +.. _qemu-image-writing-tests-class-attributes: + +Class Attributes +~~~~~~~~~~~~~~~~ + +Class attributes are as follows: + +- *pscmd:* Equals "ps -ef" if ``procps`` is installed in the image. + Otherwise, ``pscmd`` equals "ps" (busybox). + +- *tc:* The called test context, which gives access to the + following attributes: + + - *d:* The BitBake datastore, which allows you to use stuff such + as ``oeRuntimeTest.tc.d.getVar("VIRTUAL-RUNTIME_init_manager")``. + + - *testslist and testsrequired:* Used internally. The tests + do not need these. + + - *filesdir:* The absolute path to + ``meta/lib/oeqa/runtime/files``, which contains helper files for + tests meant for copying on the target such as small files written + in C for compilation. 
+ + - *target:* The target controller object used to deploy and + start an image on a particular target (e.g. Qemu, SimpleRemote, + and SystemdbootTarget). Tests usually use the following: + + - *ip:* The target's IP address. + + - *server_ip:* The host's IP address, which is usually used + by the DNF test suite. + + - *run(cmd, timeout=None):* The single, most used method. + This command is a wrapper for: ``ssh root@host "cmd"``. The + command returns a tuple: (status, output), which are what their + names imply - the return code of "cmd" and whatever output it + produces. The optional timeout argument represents the number + of seconds the test should wait for "cmd" to return. If the + argument is "None", the test uses the default instance's + timeout period, which is 300 seconds. If the argument is "0", + the test runs until the command returns. + + - *copy_to(localpath, remotepath):* + ``scp localpath root@ip:remotepath``. + + - *copy_from(remotepath, localpath):* + ``scp root@host:remotepath localpath``. + +.. _qemu-image-writing-tests-instance-attributes: + +Instance Attributes +~~~~~~~~~~~~~~~~~~~ + +A single instance attribute exists, which is ``target``. The ``target`` +instance attribute is identical to the class attribute of the same name, +which is described in the previous section. This attribute exists as +both an instance and class attribute so tests can use +``self.target.run(cmd)`` in instance methods instead of +``oeRuntimeTest.tc.target.run(cmd)``. + +Installing Packages in the DUT Without the Package Manager +---------------------------------------------------------- + +When a test requires a package built by BitBake, it is possible to +install that package. Installing the package does not require a package +manager be installed in the device under test (DUT). It does, however, +require an SSH connection and the target must be using the +``sshcontrol`` class. + +.. note:: + + This method uses + scp + to copy files from the host to the target, which causes permissions + and special attributes to be lost. + +A JSON file is used to define the packages needed by a test. This file +must be in the same path as the file used to define the tests. +Furthermore, the filename must map directly to the test module name with +a ``.json`` extension. + +The JSON file must include an object with the test name as keys of an +object or an array. This object (or array of objects) uses the following +data: + +- "pkg" - A mandatory string that is the name of the package to be + installed. + +- "rm" - An optional boolean, which defaults to "false", that specifies + to remove the package after the test. + +- "extract" - An optional boolean, which defaults to "false", that + specifies if the package must be extracted from the package format. + When set to "true", the package is not automatically installed into + the DUT. + +Following is an example JSON file that handles test "foo" installing +package "bar" and test "foobar" installing packages "foo" and "bar". +Once the test is complete, the packages are removed from the DUT. +:: + + { + "foo": { + "pkg": "bar" + }, + "foobar": [ + { + "pkg": "foo", + "rm": true + }, + { + "pkg": "bar", + "rm": true + } + ] + } + +.. _usingpoky-debugging-tools-and-techniques: + +Debugging Tools and Techniques +============================== + +The exact method for debugging build failures depends on the nature of +the problem and on the system's area from which the bug originates. 
+Standard debugging practices such as comparison against the last known +working version with examination of the changes and the re-application +of steps to identify the one causing the problem are valid for the Yocto +Project just as they are for any other system. Even though it is +impossible to detail every possible potential failure, this section +provides some general tips to aid in debugging given a variety of +situations. + +.. note:: + + A useful feature for debugging is the error reporting tool. + Configuring the Yocto Project to use this tool causes the + OpenEmbedded build system to produce error reporting commands as part + of the console output. You can enter the commands after the build + completes to log error information into a common database, that can + help you figure out what might be going wrong. For information on how + to enable and use this feature, see the " + Using the Error Reporting Tool + " section. + +The following list shows the debugging topics in the remainder of this +section: + +- "`Viewing Logs from Failed + Tasks <#dev-debugging-viewing-logs-from-failed-tasks>`__" describes + how to find and view logs from tasks that failed during the build + process. + +- "`Viewing Variable + Values <#dev-debugging-viewing-variable-values>`__" describes how to + use the BitBake ``-e`` option to examine variable values after a + recipe has been parsed. + +- ":ref:`dev-manual/dev-manual-common-tasks:viewing package information with \`\`oe-pkgdata-util\`\``" + describes how to use the ``oe-pkgdata-util`` utility to query + :term:`PKGDATA_DIR` and + display package-related information for built packages. + +- "`Viewing Dependencies Between Recipes and + Tasks <#dev-viewing-dependencies-between-recipes-and-tasks>`__" + describes how to use the BitBake ``-g`` option to display recipe + dependency information used during the build. + +- "`Viewing Task Variable + Dependencies <#dev-viewing-task-variable-dependencies>`__" describes + how to use the ``bitbake-dumpsig`` command in conjunction with key + subdirectories in the + :term:`Build Directory` to determine + variable dependencies. + +- "`Running Specific Tasks <#dev-debugging-taskrunning>`__" describes + how to use several BitBake options (e.g. ``-c``, ``-C``, and ``-f``) + to run specific tasks in the build chain. It can be useful to run + tasks "out-of-order" when trying isolate build issues. + +- "`General BitBake Problems <#dev-debugging-bitbake>`__" describes how + to use BitBake's ``-D`` debug output option to reveal more about what + BitBake is doing during the build. + +- "`Building with No Dependencies <#dev-debugging-buildfile>`__" + describes how to use the BitBake ``-b`` option to build a recipe + while ignoring dependencies. + +- "`Recipe Logging Mechanisms <#recipe-logging-mechanisms>`__" + describes how to use the many recipe logging functions to produce + debugging output and report errors and warnings. + +- "`Debugging Parallel Make Races <#debugging-parallel-make-races>`__" + describes how to debug situations where the build consists of several + parts that are run simultaneously and when the output or result of + one part is not ready for use with a different part of the build that + depends on that output. + +- "`Debugging With the GNU Project Debugger (GDB) + Remotely <#platdev-gdb-remotedebug>`__" describes how to use GDB to + allow you to examine running programs, which can help you fix + problems. 
+ +- "`Debugging with the GNU Project Debugger (GDB) on the + Target <#debugging-with-the-gnu-project-debugger-gdb-on-the-target>`__" + describes how to use GDB directly on target hardware for debugging. + +- "`Other Debugging Tips <#dev-other-debugging-others>`__" describes + miscellaneous debugging tips that can be useful. + +.. _dev-debugging-viewing-logs-from-failed-tasks: + +Viewing Logs from Failed Tasks +------------------------------ + +You can find the log for a task in the file +``${``\ :term:`WORKDIR`\ ``}/temp/log.do_``\ taskname. +For example, the log for the +:ref:`ref-tasks-compile` task of the +QEMU minimal image for the x86 machine (``qemux86``) might be in +``tmp/work/qemux86-poky-linux/core-image-minimal/1.0-r0/temp/log.do_compile``. +To see the commands :term:`BitBake` ran +to generate a log, look at the corresponding ``run.do_``\ taskname file +in the same directory. + +``log.do_``\ taskname and ``run.do_``\ taskname are actually symbolic +links to ``log.do_``\ taskname\ ``.``\ pid and +``log.run_``\ taskname\ ``.``\ pid, where pid is the PID the task had +when it ran. The symlinks always point to the files corresponding to the +most recent run. + +.. _dev-debugging-viewing-variable-values: + +Viewing Variable Values +----------------------- + +Sometimes you need to know the value of a variable as a result of +BitBake's parsing step. This could be because some unexpected behavior +occurred in your project. Perhaps an attempt to :ref:`modify a variable +` did not work out as expected. + +BitBake's ``-e`` option is used to display variable values after +parsing. The following command displays the variable values after the +configuration files (i.e. ``local.conf``, ``bblayers.conf``, +``bitbake.conf`` and so forth) have been parsed: +:: + + $ bitbake -e + +The following command displays variable values after a specific recipe has +been parsed. The variables include those from the configuration as well: +:: + + $ bitbake -e recipename + +.. note:: + + Each recipe has its own private set of variables (datastore). + Internally, after parsing the configuration, a copy of the resulting + datastore is made prior to parsing each recipe. This copying implies + that variables set in one recipe will not be visible to other + recipes. + + Likewise, each task within a recipe gets a private datastore based on + the recipe datastore, which means that variables set within one task + will not be visible to other tasks. + +In the output of ``bitbake -e``, each variable is preceded by a +description of how the variable got its value, including temporary +values that were later overriden. This description also includes +variable flags (varflags) set on the variable. The output can be very +helpful during debugging. + +Variables that are exported to the environment are preceded by +``export`` in the output of ``bitbake -e``. See the following example: +:: + + export CC="i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/ulf/poky/build/tmp/sysroots/qemux86" + +In addition to variable values, the output of the ``bitbake -e`` and +``bitbake -e`` recipe commands includes the following information: + +- The output starts with a tree listing all configuration files and + classes included globally, recursively listing the files they include + or inherit in turn. 
Much of the behavior of the OpenEmbedded build
+   system (including the behavior of the :ref:`ref-manual/ref-tasks:normal recipe build tasks`) is
+   implemented in the
+   :ref:`base ` class and the
+   classes it inherits, rather than being built into BitBake itself.
+
+-  After the variable values, all functions appear in the output. For
+   shell functions, variables referenced within the function body are
+   expanded. If a function has been modified using overrides or using
+   override-style operators like ``_append`` and ``_prepend``, then the
+   final assembled function body appears in the output.
+
+Viewing Package Information with ``oe-pkgdata-util``
+----------------------------------------------------
+
+You can use the ``oe-pkgdata-util`` command-line utility to query
+:term:`PKGDATA_DIR` and display
+various package-related information. Note that you can only use the
+utility to view information on packages that have already been built.
+
+Following are a few of the available ``oe-pkgdata-util`` subcommands.
+
+.. note::
+
+   You can use the standard \* and ? globbing wildcards as part of
+   package names and paths.
+
+-  ``oe-pkgdata-util list-pkgs [pattern]``: Lists all packages
+   that have been built, optionally limiting the match to packages that
+   match pattern.
+
+-  ``oe-pkgdata-util list-pkg-files package ...``: Lists the
+   files and directories contained in the given packages.
+
+   .. note::
+
+      A different way to view the contents of a package is to look at
+      the
+      ``${``\ :term:`WORKDIR`\ ``}/packages-split``
+      directory of the recipe that generates the package. This directory
+      is created by the
+      :ref:`ref-tasks-package` task
+      and has one subdirectory for each package the recipe generates,
+      which contains the files stored in that package.
+
+      If you want to inspect the ``${WORKDIR}/packages-split``
+      directory, make sure that
+      :ref:`rm_work ` is not
+      enabled when you build the recipe.
+
+-  ``oe-pkgdata-util find-path path ...``: Lists the names of
+   the packages that contain the given paths. For example, the following
+   tells us that ``/usr/share/man/man1/make.1`` is contained in the
+   ``make-doc`` package:
+   ::
+
+      $ oe-pkgdata-util find-path /usr/share/man/man1/make.1
+      make-doc: /usr/share/man/man1/make.1
+
+-  ``oe-pkgdata-util lookup-recipe package ...``: Lists the names
+   of the recipes that produce the given packages.
+
+For more information on the ``oe-pkgdata-util`` command, use the help
+facility:
+::
+
+   $ oe-pkgdata-util --help
+   $ oe-pkgdata-util subcommand --help
+
+.. _dev-viewing-dependencies-between-recipes-and-tasks:
+
+Viewing Dependencies Between Recipes and Tasks
+----------------------------------------------
+
+Sometimes it can be hard to see why BitBake wants to build other recipes
+before the one you have specified. Dependency information can help you
+understand why a recipe is built.
+
+To generate dependency information for a recipe, run the following
+command:
+::
+
+   $ bitbake -g recipename
+
+This command writes the following files in the current directory:
+
+-  ``pn-buildlist``: A list of recipes/targets involved in building
+   recipename. "Involved" here means that at least one task from the
+   recipe needs to run when building recipename from scratch. Targets
+   that are in
+   :term:`ASSUME_PROVIDED`
+   are not listed.
+
+-  ``task-depends.dot``: A graph showing dependencies between tasks.
+
+The graphs are in
+`DOT `__
+format and can be converted to images (e.g. using the ``dot`` tool from
+`Graphviz `__).
+
+..
note:: + + - DOT files use a plain text format. The graphs generated using the + ``bitbake -g`` command are often so large as to be difficult to + read without special pruning (e.g. with Bitbake's ``-I`` option) + and processing. Despite the form and size of the graphs, the + corresponding ``.dot`` files can still be possible to read and + provide useful information. + + As an example, the ``task-depends.dot`` file contains lines such + as the following: + :: + + "libxslt.do_configure" -> "libxml2.do_populate_sysroot" + + The above example line reveals that the + :ref:`ref-tasks-configure` + task in ``libxslt`` depends on the + :ref:`ref-tasks-populate_sysroot` + task in ``libxml2``, which is a normal + :term:`DEPENDS` dependency + between the two recipes. + + - For an example of how ``.dot`` files can be processed, see the + ``scripts/contrib/graph-tool`` Python script, which finds and + displays paths between graph nodes. + +You can use a different method to view dependency information by using +the following command: +:: + + $ bitbake -g -u taskexp recipename + +This command +displays a GUI window from which you can view build-time and runtime +dependencies for the recipes involved in building recipename. + +.. _dev-viewing-task-variable-dependencies: + +Viewing Task Variable Dependencies +---------------------------------- + +As mentioned in the +":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-execution:checksums (signatures)`" section of the BitBake +User Manual, BitBake tries to automatically determine what variables a +task depends on so that it can rerun the task if any values of the +variables change. This determination is usually reliable. However, if +you do things like construct variable names at runtime, then you might +have to manually declare dependencies on those variables using +``vardeps`` as described in the +":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section of the BitBake +User Manual. + +If you are unsure whether a variable dependency is being picked up +automatically for a given task, you can list the variable dependencies +BitBake has determined by doing the following: + +1. Build the recipe containing the task: +:: + + $ bitbake recipename + +2. Inside the :term:`STAMPS_DIR` + directory, find the signature data (``sigdata``) file that + corresponds to the task. The ``sigdata`` files contain a pickled + Python database of all the metadata that went into creating the input + checksum for the task. As an example, for the + :ref:`ref-tasks-fetch` task of the + ``db`` recipe, the ``sigdata`` file might be found in the following + location: + :: + + ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1 + + For tasks that are accelerated through the shared state + (:ref:`sstate `) cache, an + additional ``siginfo`` file is written into + :term:`SSTATE_DIR` along with + the cached task output. The ``siginfo`` files contain exactly the + same information as ``sigdata`` files. + +3. Run ``bitbake-dumpsig`` on the ``sigdata`` or ``siginfo`` file. Here + is an example: + :: + + $ bitbake-dumpsig ${BUILDDIR}/tmp/stamps/i586-poky-linux/db/6.0.30-r1.do_fetch.sigdata.7c048c18222b16ff0bcee2000ef648b1 + + In the output of the above command, you will find a line like the + following, which lists all the (inferred) variable dependencies for + the task. This list also includes indirect dependencies from + variables depending on other variables, recursively. 
+   ::
+
+      Task dependencies: ['PV', 'SRCREV', 'SRC_URI', 'SRC_URI[md5sum]', 'SRC_URI[sha256sum]', 'base_do_fetch']
+
+   .. note::
+
+      Functions (e.g.
+      base_do_fetch
+      ) also count as variable dependencies. These functions in turn
+      depend on the variables they reference.
+
+   The output of ``bitbake-dumpsig`` also includes the value each
+   variable had, a list of dependencies for each variable, and
+   :term:`bitbake:BB_HASHBASE_WHITELIST`
+   information.
+
+There is also a ``bitbake-diffsigs`` command for comparing two
+``siginfo`` or ``sigdata`` files. This command can be helpful when
+trying to figure out what changed between two versions of a task. If you
+call ``bitbake-diffsigs`` with just one file, the command behaves like
+``bitbake-dumpsig``.
+
+You can also use BitBake to dump out the signature construction
+information without executing tasks by using either of the following
+BitBake command-line options:
+::
+
+   --dump-signatures=SIGNATURE_HANDLER
+   -S SIGNATURE_HANDLER
+
+.. note::
+
+   Two common values for
+   SIGNATURE_HANDLER
+   are "none" and "printdiff", which dump only the signature or compare
+   the dumped signature with the cached one, respectively.
+
+Using BitBake with either of these options causes BitBake to dump out
+``sigdata`` files in the ``stamps`` directory for every task it would
+have executed instead of building the specified target package.
+
+.. _dev-viewing-metadata-used-to-create-the-input-signature-of-a-shared-state-task:
+
+Viewing Metadata Used to Create the Input Signature of a Shared State Task
+--------------------------------------------------------------------------
+
+Seeing what metadata went into creating the input signature of a shared
+state (sstate) task can be a useful debugging aid. This information is
+available in signature information (``siginfo``) files in
+:term:`SSTATE_DIR`. For
+information on how to view and interpret information in ``siginfo``
+files, see the "`Viewing Task Variable
+Dependencies <#dev-viewing-task-variable-dependencies>`__" section.
+
+For conceptual information on shared state, see the
+":ref:`overview-manual/overview-manual-concepts:shared state`"
+section in the Yocto Project Overview and Concepts Manual.
+
+.. _dev-invalidating-shared-state-to-force-a-task-to-run:
+
+Invalidating Shared State to Force a Task to Run
+------------------------------------------------
+
+The OpenEmbedded build system uses
+:ref:`checksums ` and
+:ref:`overview-manual/overview-manual-concepts:shared state` cache to avoid unnecessarily
+rebuilding tasks. Collectively, this scheme is known as "shared state
+code."
+
+As with all schemes, this one has some drawbacks. It is possible that
+you could make implicit changes to your code that the checksum
+calculations do not take into account. These implicit changes affect a
+task's output but do not trigger the shared state code into rebuilding a
+recipe. Consider an example in which a tool changes its output.
+Assume that the output of ``rpmdeps`` changes. The result of the change
+should be that all the ``package`` and ``package_write_rpm`` shared
+state cache items become invalid. However, because the change to the
+output is external to the code and therefore implicit, the associated
+shared state cache items do not become invalidated. In this case, the
+build process uses the cached items rather than running the task again.
+Obviously, these types of implicit changes can cause problems.
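+
+If you hit a situation like this, one pragmatic way to recover for an
+individual recipe is to discard its shared state output and rebuild it,
+as a heavier-handed alternative to the metadata change described below.
+The following is only a sketch; ``somerecipe`` is a placeholder recipe
+name:
+::
+
+   $ bitbake -c cleansstate somerecipe
+   $ bitbake somerecipe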
+ +To avoid these problems during the build, you need to understand the +effects of any changes you make. Realize that changes you make directly +to a function are automatically factored into the checksum calculation. +Thus, these explicit changes invalidate the associated area of shared +state cache. However, you need to be aware of any implicit changes that +are not obvious changes to the code and could affect the output of a +given task. + +When you identify an implicit change, you can easily take steps to +invalidate the cache and force the tasks to run. The steps you can take +are as simple as changing a function's comments in the source code. For +example, to invalidate package shared state files, change the comment +statements of +:ref:`ref-tasks-package` or the +comments of one of the functions it calls. Even though the change is +purely cosmetic, it causes the checksum to be recalculated and forces +the build system to run the task again. + +.. note:: + + For an example of a commit that makes a cosmetic change to invalidate + shared state, see this + commit + . + +.. _dev-debugging-taskrunning: + +Running Specific Tasks +---------------------- + +Any given recipe consists of a set of tasks. The standard BitBake +behavior in most cases is: ``do_fetch``, ``do_unpack``, ``do_patch``, +``do_configure``, ``do_compile``, ``do_install``, ``do_package``, +``do_package_write_*``, and ``do_build``. The default task is +``do_build`` and any tasks on which it depends build first. Some tasks, +such as ``do_devshell``, are not part of the default build chain. If you +wish to run a task that is not part of the default build chain, you can +use the ``-c`` option in BitBake. Here is an example: +:: + + $ bitbake matchbox-desktop -c devshell + +The ``-c`` option respects task dependencies, which means that all other +tasks (including tasks from other recipes) that the specified task +depends on will be run before the task. Even when you manually specify a +task to run with ``-c``, BitBake will only run the task if it considers +it "out of date". See the +":ref:`overview-manual/overview-manual-concepts:stamp files and the rerunning of tasks`" +section in the Yocto Project Overview and Concepts Manual for how +BitBake determines whether a task is "out of date". + +If you want to force an up-to-date task to be rerun (e.g. because you +made manual modifications to the recipe's +:term:`WORKDIR` that you want to try +out), then you can use the ``-f`` option. + +.. note:: + + The reason + -f + is never required when running the + do_devshell + task is because the + [ + nostamp + ] + variable flag is already set for the task. + +The following example shows one way you can use the ``-f`` option: +:: + + $ bitbake matchbox-desktop + . + . + make some changes to the source code in the work directory + . + . + $ bitbake matchbox-desktop -c compile -f + $ bitbake matchbox-desktop + +This sequence first builds and then recompiles ``matchbox-desktop``. The +last command reruns all tasks (basically the packaging tasks) after the +compile. BitBake recognizes that the ``do_compile`` task was rerun and +therefore understands that the other tasks also need to be run again. + +Another, shorter way to rerun a task and all +:ref:`ref-manual/ref-tasks:normal recipe build tasks` +that depend on it is to use the ``-C`` option. + +.. note:: + + This option is upper-cased and is separate from the + -c + option, which is lower-cased. 
+ +Using this option invalidates the given task and then runs the +:ref:`ref-tasks-build` task, which is +the default task if no task is given, and the tasks on which it depends. +You could replace the final two commands in the previous example with +the following single command: +:: + + $ bitbake matchbox-desktop -C compile + +Internally, the ``-f`` and ``-C`` options work by tainting (modifying) +the input checksum of the specified task. This tainting indirectly +causes the task and its dependent tasks to be rerun through the normal +task dependency mechanisms. + +.. note:: + + BitBake explicitly keeps track of which tasks have been tainted in + this fashion, and will print warnings such as the following for + builds involving such tasks: + :: + + WARNING: /home/ulf/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.1.bb.do_compile is tainted from a forced run + + + The purpose of the warning is to let you know that the work directory + and build output might not be in the clean state they would be in for + a "normal" build, depending on what actions you took. To get rid of + such warnings, you can remove the work directory and rebuild the + recipe, as follows: + :: + + $ bitbake matchbox-desktop -c clean + $ bitbake matchbox-desktop + + +You can view a list of tasks in a given package by running the +``do_listtasks`` task as follows: +:: + + $ bitbake matchbox-desktop -c listtasks + +The results appear as output to the console and are also in +the file ``${WORKDIR}/temp/log.do_listtasks``. + +.. _dev-debugging-bitbake: + +General BitBake Problems +------------------------ + +You can see debug output from BitBake by using the ``-D`` option. The +debug output gives more information about what BitBake is doing and the +reason behind it. Each ``-D`` option you use increases the logging +level. The most common usage is ``-DDD``. + +The output from ``bitbake -DDD -v`` targetname can reveal why BitBake +chose a certain version of a package or why BitBake picked a certain +provider. This command could also help you in a situation where you +think BitBake did something unexpected. + +.. _dev-debugging-buildfile: + +Building with No Dependencies +----------------------------- + +To build a specific recipe (``.bb`` file), you can use the following +command form: +:: + + $ bitbake -b somepath/somerecipe.bb + +This command form does +not check for dependencies. Consequently, you should use it only when +you know existing dependencies have been met. + +.. note:: + + You can also specify fragments of the filename. In this case, BitBake + checks for a unique match. + +Recipe Logging Mechanisms +------------------------- + +The Yocto Project provides several logging functions for producing +debugging output and reporting errors and warnings. For Python +functions, the following logging functions exist. All of these functions +log to ``${T}/log.do_``\ task, and can also log to standard output +(stdout) with the right settings: + +- ``bb.plain(msg)``: Writes msg as is to the log while also + logging to stdout. + +- ``bb.note(msg)``: Writes "NOTE: msg" to the log. Also logs to + stdout if BitBake is called with "-v". + +- ``bb.debug(level, msg)``: Writes "DEBUG: msg" to the + log. Also logs to stdout if the log level is greater than or equal to + level. See the ":ref:`-D `" option + in the BitBake User Manual for more information. + +- ``bb.warn(msg)``: Writes "WARNING: msg" to the log while also + logging to stdout. 
+ +- ``bb.error(msg)``: Writes "ERROR: msg" to the log while also + logging to standard out (stdout). + + .. note:: + + Calling this function does not cause the task to fail. + +- ``bb.fatal(``\ msg\ ``)``: This logging function is similar to + ``bb.error(``\ msg\ ``)`` but also causes the calling task to fail. + + .. note:: + + bb.fatal() + raises an exception, which means you do not need to put a "return" + statement after the function. + +The same logging functions are also available in shell functions, under +the names ``bbplain``, ``bbnote``, ``bbdebug``, ``bbwarn``, ``bberror``, +and ``bbfatal``. The +:ref:`logging ` class +implements these functions. See that class in the ``meta/classes`` +folder of the :term:`Source Directory` for information. + +Logging With Python +~~~~~~~~~~~~~~~~~~~ + +When creating recipes using Python and inserting code that handles build +logs, keep in mind the goal is to have informative logs while keeping +the console as "silent" as possible. Also, if you want status messages +in the log, use the "debug" loglevel. + +Following is an example written in Python. The code handles logging for +a function that determines the number of tasks needed to be run. See the +":ref:`ref-tasks-listtasks`" +section for additional information: +:: + + python do_listtasks() { + bb.debug(2, "Starting to figure out the task list") + if noteworthy_condition: + bb.note("There are 47 tasks to run") + bb.debug(2, "Got to point xyz") + if warning_trigger: + bb.warn("Detected warning_trigger, this might be a problem later.") + if recoverable_error: + bb.error("Hit recoverable_error, you really need to fix this!") + if fatal_error: + bb.fatal("fatal_error detected, unable to print the task list") + bb.plain("The tasks present are abc") + bb.debug(2, "Finished figuring out the tasklist") + } + +Logging With Bash +~~~~~~~~~~~~~~~~~ + +When creating recipes using Bash and inserting code that handles build +logs, you have the same goals - informative with minimal console output. +The syntax you use for recipes written in Bash is similar to that of +recipes written in Python described in the previous section. + +Following is an example written in Bash. The code logs the progress of +the ``do_my_function`` function. +:: + + do_my_function() { + bbdebug 2 "Running do_my_function" + if [ exceptional_condition ]; then + bbnote "Hit exceptional_condition" + fi + bbdebug 2 "Got to point xyz" + if [ warning_trigger ]; then + bbwarn "Detected warning_trigger, this might cause a problem later." + fi + if [ recoverable_error ]; then + bberror "Hit recoverable_error, correcting" + fi + if [ fatal_error ]; then + bbfatal "fatal_error detected" + fi + bbdebug 2 "Completed do_my_function" + } + + +Debugging Parallel Make Races +----------------------------- + +A parallel ``make`` race occurs when the build consists of several parts +that are run simultaneously and a situation occurs when the output or +result of one part is not ready for use with a different part of the +build that depends on that output. Parallel make races are annoying and +can sometimes be difficult to reproduce and fix. However, some simple +tips and tricks exist that can help you debug and fix them. This section +presents a real-world example of an error encountered on the Yocto +Project autobuilder and the process used to fix it. + +.. note:: + + If you cannot properly fix a + make + race condition, you can work around it by clearing either the + PARALLEL_MAKE + or + PARALLEL_MAKEINST + variables. 
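+
+As a sketch of that workaround, assuming the affected recipe is named
+``somerecipe``, you could clear the variables for just that one recipe
+by using a ``_pn-`` override in your ``local.conf`` file:
+::
+
+   PARALLEL_MAKE_pn-somerecipe = ""
+   PARALLEL_MAKEINST_pn-somerecipe = ""
+
+Keep in mind that this only hides the race by serializing that recipe's
+compile and install steps. The underlying Makefile dependency problem
+remains, so a proper fix such as the one developed in the rest of this
+section is still preferable.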
+ +The Failure +~~~~~~~~~~~ + +For this example, assume that you are building an image that depends on +the "neard" package. And, during the build, BitBake runs into problems +and creates the following output. + +.. note:: + + This example log file has longer lines artificially broken to make + the listing easier to read. + +If you examine the output or the log file, you see the failure during +``make``: +:: + + | DEBUG: SITE files ['endian-little', 'bit-32', 'ix86-common', 'common-linux', 'common-glibc', 'i586-linux', 'common'] + | DEBUG: Executing shell function do_compile + | NOTE: make -j 16 + | make --no-print-directory all-am + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/types.h include/near/types.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/log.h include/near/log.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/plugin.h include/near/plugin.h + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/tag.h include/near/tag.h + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/adapter.h include/near/adapter.h + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/ndef.h include/near/ndef.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/tlv.h include/near/tlv.h + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/setting.h include/near/setting.h + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | /bin/mkdir -p include/near + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/device.h include/near/device.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/nfc_copy.h include/near/nfc_copy.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/snep.h include/near/snep.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/version.h include/near/version.h + | ln -s /home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/work/i586-poky-linux/neard/ + 0.14-r0/neard-0.14/include/dbus.h include/near/dbus.h + | ./src/genbuiltin nfctype1 nfctype2 nfctype3 nfctype4 p2p > src/builtin.h + | i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/ + build/build/tmp/sysroots/qemux86 -DHAVE_CONFIG_H -I. 
-I./include -I./src -I./gdbus -I/home/pokybuild/ + yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/glib-2.0 + -I/home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/tmp/sysroots/qemux86/usr/ + lib/glib-2.0/include -I/home/pokybuild/yocto-autobuilder/yocto-slave/nightly-x86/build/build/ + tmp/sysroots/qemux86/usr/include/dbus-1.0 -I/home/pokybuild/yocto-autobuilder/yocto-slave/ + nightly-x86/build/build/tmp/sysroots/qemux86/usr/lib/dbus-1.0/include -I/home/pokybuild/yocto-autobuilder/ + yocto-slave/nightly-x86/build/build/tmp/sysroots/qemux86/usr/include/libnl3 + -DNEAR_PLUGIN_BUILTIN -DPLUGINDIR=\""/usr/lib/near/plugins"\" + -DCONFIGDIR=\""/etc/neard\"" -O2 -pipe -g -feliminate-unused-debug-types -c + -o tools/snep-send.o tools/snep-send.c + | In file included from tools/snep-send.c:16:0: + | tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory + | #include + | ^ + | compilation terminated. + | make[1]: *** [tools/snep-send.o] Error 1 + | make[1]: *** Waiting for unfinished jobs.... + | make: *** [all] Error 2 + | ERROR: oe_runmake failed + +Reproducing the Error +~~~~~~~~~~~~~~~~~~~~~ + +Because race conditions are intermittent, they do not manifest +themselves every time you do the build. In fact, most times the build +will complete without problems even though the potential race condition +exists. Thus, once the error surfaces, you need a way to reproduce it. + +In this example, compiling the "neard" package is causing the problem. +So the first thing to do is build "neard" locally. Before you start the +build, set the +:term:`PARALLEL_MAKE` variable +in your ``local.conf`` file to a high number (e.g. "-j 20"). Using a +high value for ``PARALLEL_MAKE`` increases the chances of the race +condition showing up: +:: + + $ bitbake neard + +Once the local build for "neard" completes, start a ``devshell`` build: +:: + + $ bitbake neard -c devshell + +For information on how to use a +``devshell``, see the "`Using a Development +Shell <#platdev-appdev-devshell>`__" section. + +In the ``devshell``, do the following: +:: + + $ make clean + $ make tools/snep-send.o + +The ``devshell`` commands cause the failure to clearly +be visible. In this case, a missing dependency exists for the "neard" +Makefile target. Here is some abbreviated, sample output with the +missing dependency clearly visible at the end: +:: + + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/home/scott-lenovo/...... + . + . + . + tools/snep-send.c + In file included from tools/snep-send.c:16:0: + tools/../src/near.h:41:23: fatal error: near/dbus.h: No such file or directory + #include + ^ + compilation terminated. + make: *** [tools/snep-send.o] Error 1 + $ + + +Creating a Patch for the Fix +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Because there is a missing dependency for the Makefile target, you need +to patch the ``Makefile.am`` file, which is generated from +``Makefile.in``. You can use Quilt to create the patch: +:: + + $ quilt new parallelmake.patch + Patch patches/parallelmake.patch is now on top + $ quilt add Makefile.am + File Makefile.am added to patch patches/parallelmake.patch + +For more information on using Quilt, see the +"`Using Quilt in Your Workflow <#using-a-quilt-workflow>`__" section. + +At this point you need to make the edits to ``Makefile.am`` to add the +missing dependency. 
For our example, you have to add the following line
+to the file:
+::
+
+   tools/snep-send.$(OBJEXT): include/near/dbus.h
+
+Once you have edited the file, use the ``refresh`` command to create the
+patch:
+::
+
+   $ quilt refresh
+   Refreshed patch patches/parallelmake.patch
+
+Once the patch file exists, you need to add it back to the originating
+recipe folder. Here is an example assuming a top-level
+:term:`Source Directory` named ``poky``:
+::
+
+   $ cp patches/parallelmake.patch poky/meta/recipes-connectivity/neard/neard
+
+The final thing you need to do to implement the fix in the build is to
+update the "neard" recipe (i.e. ``neard-0.14.bb``) so that the
+:term:`SRC_URI` statement includes
+the patch file. The recipe file is in the folder above the patch. Here
+is what the edited ``SRC_URI`` statement would look like:
+::
+
+   SRC_URI = "${KERNELORG_MIRROR}/linux/network/nfc/${BPN}-${PV}.tar.xz \
+              file://neard.in \
+              file://neard.service.in \
+              file://parallelmake.patch \
+              "
+
+With the patch complete and moved to the correct folder and the
+``SRC_URI`` statement updated, you can exit the ``devshell``:
+::
+
+   $ exit
+
+Testing the Build
+~~~~~~~~~~~~~~~~~
+
+With everything in place, you can get back to trying the build again
+locally:
+::
+
+   $ bitbake neard
+
+This build should succeed.
+
+Now you can open up a ``devshell`` again and repeat the clean and make
+operations as follows:
+::
+
+   $ bitbake neard -c devshell
+   $ make clean
+   $ make tools/snep-send.o
+
+The build should work without issue.
+
+As with all solved problems, if they originated upstream, you need to
+submit the fix for the recipe in OE-Core and upstream so that the
+problem is taken care of at its source. See the "`Submitting a Change to
+the Yocto Project <#how-to-submit-a-change>`__" section for more
+information.
+
+.. _platdev-gdb-remotedebug:
+
+Debugging With the GNU Project Debugger (GDB) Remotely
+------------------------------------------------------
+
+GDB allows you to examine running programs, which in turn helps you to
+understand and fix problems. It also allows you to perform post-mortem
+style analysis of program crashes. GDB is available as a package within
+the Yocto Project and is installed in SDK images by default. See the
+":ref:`ref-manual/ref-images:Images`" chapter in the Yocto
+Project Reference Manual for a description of these images. You can find
+information on GDB at http://sourceware.org/gdb/.
+
+.. note::
+
+   For best results, install debug (
+   -dbg
+   ) packages for the applications you are going to debug. Doing so
+   makes extra debug symbols available that give you more meaningful
+   output.
+
+Sometimes, due to memory or disk space constraints, it is not possible
+to use GDB directly on the remote target to debug applications. These
+constraints arise because GDB needs to load the debugging information
+and the binaries of the process being debugged. Additionally, GDB needs
+to perform many computations to locate information such as function
+names, variable names and values, stack traces and so forth - even
+before starting the debugging process. These extra computations place
+more load on the target system and can alter the characteristics of the
+program being debugged.
+
+To help get past the previously mentioned constraints, you can use
+gdbserver, which runs on the remote target and does not load any
+debugging information from the debugged process. Instead, a GDB instance
+running on a remote computer - the host GDB - processes the debugging
+information. 
The host GDB then sends control commands to gdbserver to +make it stop or start the debugged program, as well as read or write +memory regions of that debugged program. All the debugging information +loaded and processed as well as all the heavy debugging is done by the +host GDB. Offloading these processes gives the gdbserver running on the +target a chance to remain small and fast. + +Because the host GDB is responsible for loading the debugging +information and for doing the necessary processing to make actual +debugging happen, you have to make sure the host can access the +unstripped binaries complete with their debugging information and also +be sure the target is compiled with no optimizations. The host GDB must +also have local access to all the libraries used by the debugged +program. Because gdbserver does not need any local debugging +information, the binaries on the remote target can remain stripped. +However, the binaries must also be compiled without optimization so they +match the host's binaries. + +To remain consistent with GDB documentation and terminology, the binary +being debugged on the remote target machine is referred to as the +"inferior" binary. For documentation on GDB see the `GDB +site `__. + +The following steps show you how to debug using the GNU project +debugger. + +1. *Configure your build system to construct the companion debug + filesystem:* + + In your ``local.conf`` file, set the following: + :: + + IMAGE_GEN_DEBUGFS = "1" + IMAGE_FSTYPES_DEBUGFS = "tar.bz2" + + These options cause the + OpenEmbedded build system to generate a special companion filesystem + fragment, which contains the matching source and debug symbols to + your deployable filesystem. The build system does this by looking at + what is in the deployed filesystem, and pulling the corresponding + ``-dbg`` packages. + + The companion debug filesystem is not a complete filesystem, but only + contains the debug fragments. This filesystem must be combined with + the full filesystem for debugging. Subsequent steps in this procedure + show how to combine the partial filesystem with the full filesystem. + +2. *Configure the system to include gdbserver in the target filesystem:* + + Make the following addition in either your ``local.conf`` file or in + an image recipe: + :: + + IMAGE_INSTALL_append = " gdbserver" + + The change makes + sure the ``gdbserver`` package is included. + +3. *Build the environment:* + + Use the following command to construct the image and the companion + Debug Filesystem: + :: + + $ bitbake image + + Build the cross GDB component and + make it available for debugging. Build the SDK that matches the + image. Building the SDK is best for a production build that can be + used later for debugging, especially during long term maintenance: + :: + + $ bitbake -c populate_sdk image + + Alternatively, you can build the minimal toolchain components that + match the target. Doing so creates a smaller than typical SDK and + only contains a minimal set of components with which to build simple + test applications, as well as run the debugger: + :: + + $ bitbake meta-toolchain + + A final method is to build Gdb itself within the build system: + :: + + $ bitbake gdb-cross- + + Doing so produces a temporary copy of + ``cross-gdb`` you can use for debugging during development. While + this is the quickest approach, the two previous methods in this step + are better when considering long-term maintenance strategies. + + .. 
note::
+
+      If you run
+      bitbake gdb-cross
+      , the OpenEmbedded build system suggests the actual image (e.g.
+      gdb-cross-i586
+      ). The suggestion is usually the actual name you want to use.
+
+4. *Set up the* ``debugfs``
+
+   Run the following commands to set up the ``debugfs``:
+   ::
+
+      $ mkdir debugfs
+      $ cd debugfs
+      $ tar xvfj build-dir/tmp-glibc/deploy/images/machine/image.rootfs.tar.bz2
+      $ tar xvfj build-dir/tmp-glibc/deploy/images/machine/image-dbg.rootfs.tar.bz2
+
+5. *Set up GDB*
+
+   Install the SDK (if you built one) and then source the correct
+   environment file. Sourcing the environment file puts the SDK in your
+   ``PATH`` environment variable.
+
+   If you are using the build system, GDB is located in
+   build-dir/tmp/sysroots/host/usr/bin/architecture/architecture-gdb
+
+6. *Boot the target:*
+
+   For information on how to run QEMU, see the `QEMU
+   Documentation `__.
+
+   .. note::
+
+      Be sure to verify that your host can access the target via TCP.
+
+7. *Debug a program:*
+
+   Debugging a program involves running gdbserver on the target and then
+   running GDB on the host. The example in this step debugs ``gzip``:
+   ::
+
+      root@qemux86:~# gdbserver localhost:1234 /bin/gzip --help
+
+   For additional gdbserver options, see the `GDB Server
+   Documentation `__.
+
+   After running gdbserver on the target, you need to run GDB on the
+   host and configure it and connect to the target. Use these commands:
+   ::
+
+      $ cd directory-holding-the-debugfs-directory
+      $ arch-gdb
+      (gdb) set sysroot debugfs
+      (gdb) set substitute-path /usr/src/debug debugfs/usr/src/debug
+      (gdb) target remote IP-of-target:1234
+
+   At this point, everything should automatically load (i.e. matching
+   binaries, symbols and headers).
+
+   .. note::
+
+      The GDB
+      set
+      commands in the previous example can be placed into the user's
+      ~/.gdbinit
+      file. Upon starting, GDB automatically runs whatever commands are
+      in that file.
+
+8. *Deploying without a full image rebuild:*
+
+   In many cases, during development you want a quick method to deploy a
+   new binary to the target and debug it, without waiting for a full
+   image build.
+
+   One approach to solving this situation is to just build the component
+   you want to debug. Once you have built the component, copy the
+   executable directly to both the target and the host ``debugfs``.
+
+   If the binary is processed through the debug splitting in
+   OpenEmbedded, you should also copy the debug items (i.e. ``.debug``
+   contents and corresponding ``/usr/src/debug`` files) from the work
+   directory. Here is an example:
+   ::
+
+      $ bitbake bash
+      $ bitbake -c devshell bash
+      $ cd ..
+      $ scp packages-split/bash/bin/bash target:/bin/bash
+      $ cp -a packages-split/bash-dbg/\* path/debugfs
+
+Debugging with the GNU Project Debugger (GDB) on the Target
+-----------------------------------------------------------
+
+The previous section addressed using GDB remotely for debugging
+purposes, which is the most usual case due to the inherent hardware
+limitations on many embedded devices. However, debugging on the target
+hardware itself is also possible with more powerful devices. This
+section describes what you need to do in order to support using GDB to
+debug on the target hardware.
+
+To support this kind of debugging, you need to do the following:
+
+-  Ensure that GDB is on the target. 
You can do this by adding "gdb" to
+   :term:`IMAGE_INSTALL`:
+   ::
+
+      IMAGE_INSTALL_append = " gdb"
+
+   Alternatively, you can add "tools-debug" to
+   :term:`IMAGE_FEATURES`:
+   ::
+
+      IMAGE_FEATURES_append = " tools-debug"
+
+-  Ensure that debug symbols are present. You can make sure these
+   symbols are present by installing the corresponding ``-dbg`` package:
+   ::
+
+      IMAGE_INSTALL_append = " packagename-dbg"
+
+   Alternatively, you can do the following to include
+   all the debug symbols:
+   ::
+
+      IMAGE_FEATURES_append = " dbg-pkgs"
+
+.. note::
+
+   To improve the debug information accuracy, you can reduce the level
+   of optimization used by the compiler. For example, when adding the
+   following line to your
+   local.conf
+   file, you will reduce optimization from
+   FULL_OPTIMIZATION
+   of "-O2" to
+   DEBUG_OPTIMIZATION
+   of "-O -fno-omit-frame-pointer":
+   ::
+
+      DEBUG_BUILD = "1"
+
+   Consider that this will reduce the application's performance and is
+   recommended only for debugging purposes.
+
+.. _dev-other-debugging-others:
+
+Other Debugging Tips
+--------------------
+
+Here are some other tips that you might find useful:
+
+-  When adding new packages, it is worth watching for undesirable items
+   making their way into compiler command lines. For example, you do not
+   want references to local system files like ``/usr/lib/`` or
+   ``/usr/include/``.
+
+-  If you want to remove the ``psplash`` boot splashscreen, add
+   ``psplash=false`` to the kernel command line. Doing so prevents
+   ``psplash`` from loading and thus allows you to see the console. It
+   is also possible to switch out of the splashscreen by switching the
+   virtual console (e.g. Fn+Left or Fn+Right on a Zaurus).
+
+-  Removing :term:`TMPDIR` (usually
+   ``tmp/``, within the
+   :term:`Build Directory`) can often fix
+   temporary build issues. Removing ``TMPDIR`` is usually a relatively
+   cheap operation, because task output will be cached in
+   :term:`SSTATE_DIR` (usually
+   ``sstate-cache/``, which is also in the Build Directory).
+
+   .. note::
+
+      Removing
+      TMPDIR
+      might be a workaround rather than a fix. Consequently, trying to
+      determine the underlying cause of an issue before removing the
+      directory is a good idea.
+
+-  Understanding how a feature is used in practice within existing
+   recipes can be very helpful. It is recommended that you configure
+   some method that allows you to quickly search through files.
+
+   Using GNU Grep, you can use the following shell function to
+   recursively search through common recipe-related files, skipping
+   binary files, ``.git`` directories, and the Build Directory (assuming
+   its name starts with "build"):
+   ::
+
+      g() {
+          grep -Ir \
+               --exclude-dir=.git \
+               --exclude-dir='build*' \
+               --include='*.bb*' \
+               --include='*.inc*' \
+               --include='*.conf*' \
+               --include='*.py*' \
+               "$@"
+      }
+
+   Following are some usage examples:
+   ::
+
+      $ g FOO # Search recursively for "FOO"
+      $ g -i foo # Search recursively for "foo", ignoring case
+      $ g -w FOO # Search recursively for "FOO" as a word, ignoring e.g. "FOOBAR"
+
+   If figuring out how some feature works requires a lot of searching,
+   it might indicate that the documentation should be extended or
+   improved. In such cases, consider filing a documentation bug using
+   the Yocto Project implementation of
+   :yocto_bugs:`Bugzilla <>`. For information on
+   how to submit a bug against the Yocto Project, see the Yocto Project
+   Bugzilla :yocto_wiki:`wiki page `
+   and the "`Submitting a Defect Against the Yocto
+   Project <#submitting-a-defect-against-the-yocto-project>`__" section.
+
+   .. 
note::
+
+      The manuals might not be the right place to document variables
+      that are purely internal and have a limited scope (e.g. internal
+      variables used to implement a single
+      .bbclass
+      file).
+
+Making Changes to the Yocto Project
+===================================
+
+Because the Yocto Project is an open-source, community-based project,
+you can effect changes to the project. This section presents procedures
+that show you how to submit a defect against the project and how to
+submit a change.
+
+Submitting a Defect Against the Yocto Project
+---------------------------------------------
+
+Use the Yocto Project implementation of
+`Bugzilla `__ to submit a defect (bug)
+against the Yocto Project. For additional information on this
+implementation of Bugzilla, see the ":ref:`Yocto Project
+Bugzilla `" section in the
+Yocto Project Reference Manual. For more detail on any of the following
+steps, see the Yocto Project
+:yocto_wiki:`Bugzilla wiki page `.
+
+Use the following general steps to submit a bug:
+
+1. Open the Yocto Project implementation of :yocto_bugs:`Bugzilla <>`.
+
+2. Click "File a Bug" to enter a new bug.
+
+3. Choose the appropriate "Classification", "Product", and "Component"
+   for which the bug was found. Bugs for the Yocto Project fall into
+   one of several classifications, which in turn break down into
+   several products and components. For example, for a bug against the
+   ``meta-intel`` layer, you would choose "Build System, Metadata &
+   Runtime", "BSPs", and "bsps-meta-intel", respectively.
+
+4. Choose the "Version" of the Yocto Project for which you found the
+   bug (e.g. DISTRO).
+
+5. Determine and select the "Severity" of the bug. The severity
+   indicates how the bug impacted your work.
+
+6. Choose the "Hardware" that the bug impacts.
+
+7. Choose the "Architecture" that the bug impacts.
+
+8. Choose a "Documentation change" item for the bug. Fixing a bug might
+   or might not affect the Yocto Project documentation. If you are
+   unsure of the impact to the documentation, select "Don't Know".
+
+9. Provide a brief "Summary" of the bug. Try to limit your summary to
+   just a line or two and be sure to capture the essence of the bug.
+
+10. Provide a detailed "Description" of the bug. You should provide as
+    much detail as you can about the context, behavior, output, and so
+    forth that surrounds the bug. You can even attach supporting files
+    for output from logs by using the "Add an attachment" button.
+
+11. Click the "Submit Bug" button to submit the bug. A new Bugzilla
+    number is assigned to the bug and the defect is logged in the bug
+    tracking system.
+
+Once you file a bug, the bug is processed by the Yocto Project Bug
+Triage Team and further details concerning the bug are assigned (e.g.
+priority and owner). You are the "Submitter" of the bug and any further
+categorization, progress, or comments on the bug result in Bugzilla
+sending you an automated email concerning the particular change or
+progress to the bug.
+
+.. _how-to-submit-a-change:
+
+Submitting a Change to the Yocto Project
+----------------------------------------
+
+Contributions to the Yocto Project and OpenEmbedded are very welcome.
+Because the system is extremely configurable and flexible, we recognize
+that developers will want to extend, configure or optimize it for their
+specific uses.
+
+The Yocto Project uses a mailing list and a patch-based workflow that is
+similar to the Linux kernel but contains important differences. 
In +general, a mailing list exists through which you can submit patches. You +should send patches to the appropriate mailing list so that they can be +reviewed and merged by the appropriate maintainer. The specific mailing +list you need to use depends on the location of the code you are +changing. Each component (e.g. layer) should have a ``README`` file that +indicates where to send the changes and which process to follow. + +You can send the patch to the mailing list using whichever approach you +feel comfortable with to generate the patch. Once sent, the patch is +usually reviewed by the community at large. If somebody has concerns +with the patch, they will usually voice their concern over the mailing +list. If a patch does not receive any negative reviews, the maintainer +of the affected layer typically takes the patch, tests it, and then +based on successful testing, merges the patch. + +The "poky" repository, which is the Yocto Project's reference build +environment, is a hybrid repository that contains several individual +pieces (e.g. BitBake, Metadata, documentation, and so forth) built using +the combo-layer tool. The upstream location used for submitting changes +varies by component: + +- *Core Metadata:* Send your patch to the + `openembedded-core `__ + mailing list. For example, a change to anything under the ``meta`` or + ``scripts`` directories should be sent to this mailing list. + +- *BitBake:* For changes to BitBake (i.e. anything under the + ``bitbake`` directory), send your patch to the + `bitbake-devel `__ + mailing list. + +- *"meta-*" trees:* These trees contain Metadata. Use the + `poky `__ mailing list. + +For changes to other layers hosted in the Yocto Project source +repositories (i.e. ``yoctoproject.org``), tools, and the Yocto Project +documentation, use the `Yocto +Project `__ general +mailing list. + +.. note:: + + Sometimes a layer's documentation specifies to use a particular + mailing list. If so, use that list. + +For additional recipes that do not fit into the core Metadata, you +should determine which layer the recipe should go into and submit the +change in the manner recommended by the documentation (e.g. the +``README`` file) supplied with the layer. If in doubt, please ask on the +Yocto general mailing list or on the openembedded-devel mailing list. + +You can also push a change upstream and request a maintainer to pull the +change into the component's upstream repository. You do this by pushing +to a contribution repository that is upstream. See the ":ref:`gs-git-workflows-and-the-yocto-project`" +section in the Yocto Project Overview and Concepts Manual for additional +concepts on working in the Yocto Project development environment. + +Two commonly used testing repositories exist for OpenEmbedded-Core: + +- *"ross/mut" branch:* The "mut" (master-under-test) tree exists in the + ``poky-contrib`` repository in the + :yocto_git:`Yocto Project source repositories <>`. + +- *"master-next" branch:* This branch is part of the main "poky" + repository in the Yocto Project source repositories. + +Maintainers use these branches to test submissions prior to merging +patches. Thus, you can get an idea of the status of a patch based on +whether the patch has been merged into one of these branches. + +.. note:: + + This system is imperfect and changes can sometimes get lost in the + flow. Asking about the status of a patch or change is reasonable if + the change has been idle for a while with no feedback. 
The Yocto + Project does have plans to use + Patchwork + to track the status of patches and also to automatically preview + patches. + +The following sections provide procedures for submitting a change. + +.. _pushing-a-change-upstream: + +Using Scripts to Push a Change Upstream and Request a Pull +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Follow this procedure to push a change to an upstream "contrib" Git +repository: + +.. note:: + + You can find general Git information on how to push a change upstream + in the + Git Community Book + . + +1. *Make Your Changes Locally:* Make your changes in your local Git + repository. You should make small, controlled, isolated changes. + Keeping changes small and isolated aids review, makes + merging/rebasing easier and keeps the change history clean should + anyone need to refer to it in future. + +2. *Stage Your Changes:* Stage your changes by using the ``git add`` + command on each file you changed. + +3. *Commit Your Changes:* Commit the change by using the ``git commit`` + command. Make sure your commit information follows standards by + following these accepted conventions: + + - Be sure to include a "Signed-off-by:" line in the same style as + required by the Linux kernel. Adding this line signifies that you, + the submitter, have agreed to the Developer's Certificate of + Origin 1.1 as follows: + :: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + + - Provide a single-line summary of the change. and, if more + explanation is needed, provide more detail in the body of the + commit. This summary is typically viewable in the "shortlist" of + changes. Thus, providing something short and descriptive that + gives the reader a summary of the change is useful when viewing a + list of many commits. You should prefix this short description + with the recipe name (if changing a recipe), or else with the + short form path to the file being changed. + + - For the body of the commit message, provide detailed information + that describes what you changed, why you made the change, and the + approach you used. It might also be helpful if you mention how you + tested the change. Provide as much detail as you can in the body + of the commit message. + + .. note:: + + You do not need to provide a more detailed explanation of a + change if the change is minor to the point of the single line + summary providing all the information. 
+ + - If the change addresses a specific bug or issue that is associated + with a bug-tracking ID, include a reference to that ID in your + detailed description. For example, the Yocto Project uses a + specific convention for bug references - any commit that addresses + a specific bug should use the following form for the detailed + description. Be sure to use the actual bug-tracking ID from + Bugzilla for bug-id: + :: + + Fixes [YOCTO #bug-id] + + detailed description of change + +4. *Push Your Commits to a "Contrib" Upstream:* If you have arranged for + permissions to push to an upstream contrib repository, push the + change to that repository: + :: + + $ git push upstream_remote_repo local_branch_name + + For example, suppose you have permissions to push + into the upstream ``meta-intel-contrib`` repository and you are + working in a local branch named your_name\ ``/README``. The following + command pushes your local commits to the ``meta-intel-contrib`` + upstream repository and puts the commit in a branch named + your_name\ ``/README``: + :: + + $ git push meta-intel-contrib your_name/README + +5. *Determine Who to Notify:* Determine the maintainer or the mailing + list that you need to notify for the change. + + Before submitting any change, you need to be sure who the maintainer + is or what mailing list that you need to notify. Use either these + methods to find out: + + - *Maintenance File:* Examine the ``maintainers.inc`` file, which is + located in the :term:`Source Directory` at + ``meta/conf/distro/include``, to see who is responsible for code. + + - *Search by File:* Using :ref:`overview-manual/overview-manual-development-environment:git`, you can + enter the following command to bring up a short list of all + commits against a specific file: + :: + + git shortlog -- filename + + Just provide the name of the file for which you are interested. The + information returned is not ordered by history but does include a + list of everyone who has committed grouped by name. From the list, + you can see who is responsible for the bulk of the changes against + the file. + + - *Examine the List of Mailing Lists:* For a list of the Yocto + Project and related mailing lists, see the ":ref:`Mailing + lists `" section in + the Yocto Project Reference Manual. + +6. *Make a Pull Request:* Notify the maintainer or the mailing list that + you have pushed a change by making a pull request. + + The Yocto Project provides two scripts that conveniently let you + generate and send pull requests to the Yocto Project. These scripts + are ``create-pull-request`` and ``send-pull-request``. You can find + these scripts in the ``scripts`` directory within the + :term:`Source Directory` (e.g. + ``~/poky/scripts``). + + Using these scripts correctly formats the requests without + introducing any whitespace or HTML formatting. The maintainer that + receives your patches either directly or through the mailing list + needs to be able to save and apply them directly from your emails. + Using these scripts is the preferred method for sending patches. + + First, create the pull request. For example, the following command + runs the script, specifies the upstream repository in the contrib + directory into which you pushed the change, and provides a subject + line in the created patch files: + :: + + $ ~/poky/scripts/create-pull-request -u meta-intel-contrib -s "Updated Manual Section Reference in README" + + Running this script forms ``*.patch`` files in a folder named + ``pull-``\ PID in the current directory. 
One of the patch files is a + cover letter. + + Before running the ``send-pull-request`` script, you must edit the + cover letter patch to insert information about your change. After + editing the cover letter, send the pull request. For example, the + following command runs the script and specifies the patch directory + and email address. In this example, the email address is a mailing + list: + :: + + $ ~/poky/scripts/send-pull-request -p ~/meta-intel/pull-10565 -t meta-intel@yoctoproject.org + + You need to follow the prompts as the script is interactive. + + .. note:: + + For help on using these scripts, simply provide the + -h + argument as follows: + :: + + $ poky/scripts/create-pull-request -h + $ poky/scripts/send-pull-request -h + + +.. _submitting-a-patch: + +Using Email to Submit a Patch +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can submit patches without using the ``create-pull-request`` and +``send-pull-request`` scripts described in the previous section. +However, keep in mind, the preferred method is to use the scripts. + +Depending on the components changed, you need to submit the email to a +specific mailing list. For some guidance on which mailing list to use, +see the `list <#figuring-out-the-mailing-list-to-use>`__ at the +beginning of this section. For a description of all the available +mailing lists, see the ":ref:`Mailing Lists `" section in the +Yocto Project Reference Manual. + +Here is the general procedure on how to submit a patch through email +without using the scripts: + +1. *Make Your Changes Locally:* Make your changes in your local Git + repository. You should make small, controlled, isolated changes. + Keeping changes small and isolated aids review, makes + merging/rebasing easier and keeps the change history clean should + anyone need to refer to it in future. + +2. *Stage Your Changes:* Stage your changes by using the ``git add`` + command on each file you changed. + +3. *Commit Your Changes:* Commit the change by using the + ``git commit --signoff`` command. Using the ``--signoff`` option + identifies you as the person making the change and also satisfies the + Developer's Certificate of Origin (DCO) shown earlier. + + When you form a commit, you must follow certain standards established + by the Yocto Project development team. See `Step + 3 <#making-sure-you-have-correct-commit-information>`__ in the + previous section for information on how to provide commit information + that meets Yocto Project commit message standards. + +4. *Format the Commit:* Format the commit into an email message. To + format commits, use the ``git format-patch`` command. When you + provide the command, you must include a revision list or a number of + patches as part of the command. For example, either of these two + commands takes your most recent single commit and formats it as an + email message in the current directory: + :: + + $ git format-patch -1 + + or :: + + $ git format-patch HEAD~ + + After the command is run, the current directory contains a numbered + ``.patch`` file for the commit. + + If you provide several commits as part of the command, the + ``git format-patch`` command produces a series of numbered files in + the current directory – one for each commit. If you have more than + one patch, you should also use the ``--cover`` option with the + command, which generates a cover letter as the first "patch" in the + series. You can then edit the cover letter to provide a description + for the series of patches. 
For information on the + ``git format-patch`` command, see ``GIT_FORMAT_PATCH(1)`` displayed + using the ``man git-format-patch`` command. + + .. note:: + + If you are or will be a frequent contributor to the Yocto Project + or to OpenEmbedded, you might consider requesting a contrib area + and the necessary associated rights. + +5. *Import the Files Into Your Mail Client:* Import the files into your + mail client by using the ``git send-email`` command. + + .. note:: + + In order to use + git send-email + , you must have the proper Git packages installed on your host. + For Ubuntu, Debian, and Fedora the package is + git-email + . + + The ``git send-email`` command sends email by using a local or remote + Mail Transport Agent (MTA) such as ``msmtp``, ``sendmail``, or + through a direct ``smtp`` configuration in your Git ``~/.gitconfig`` + file. If you are submitting patches through email only, it is very + important that you submit them without any whitespace or HTML + formatting that either you or your mailer introduces. The maintainer + that receives your patches needs to be able to save and apply them + directly from your emails. A good way to verify that what you are + sending will be applicable by the maintainer is to do a dry run and + send them to yourself and then save and apply them as the maintainer + would. + + The ``git send-email`` command is the preferred method for sending + your patches using email since there is no risk of compromising + whitespace in the body of the message, which can occur when you use + your own mail client. The command also has several options that let + you specify recipients and perform further editing of the email + message. For information on how to use the ``git send-email`` + command, see ``GIT-SEND-EMAIL(1)`` displayed using the + ``man git-send-email`` command. + +Working With Licenses +===================== + +As mentioned in the ":ref:`overview-manual/overview-manual-development-environment:licensing`" +section in the Yocto Project Overview and Concepts Manual, open source +projects are open to the public and they consequently have different +licensing structures in place. This section describes the mechanism by +which the :term:`OpenEmbedded Build System` +tracks changes to +licensing text and covers how to maintain open source license compliance +during your project's lifecycle. The section also describes how to +enable commercially licensed recipes, which by default are disabled. + +.. _usingpoky-configuring-LIC_FILES_CHKSUM: + +Tracking License Changes +------------------------ + +The license of an upstream project might change in the future. In order +to prevent these changes going unnoticed, the +:term:`LIC_FILES_CHKSUM` +variable tracks changes to the license text. The checksums are validated +at the end of the configure step, and if the checksums do not match, the +build will fail. + +.. _usingpoky-specifying-LIC_FILES_CHKSUM: + +Specifying the ``LIC_FILES_CHKSUM`` Variable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``LIC_FILES_CHKSUM`` variable contains checksums of the license text +in the source code for the recipe. Following is an example of how to +specify ``LIC_FILES_CHKSUM``: +:: + + LIC_FILES_CHKSUM = "file://COPYING;md5=xxxx \ + file://licfile1.txt;beginline=5;endline=29;md5=yyyy \ + file://licfile2.txt;endline=50;md5=zzzz \ + ..." + +.. note:: + + - When using "beginline" and "endline", realize that line numbering + begins with one and not zero. Also, the included lines are + inclusive (i.e. 
lines five through and including 29 in the + previous example for ``licfile1.txt``). + + - When a license check fails, the selected license text is included + as part of the QA message. Using this output, you can determine + the exact start and finish for the needed license text. + +The build system uses the :term:`S` +variable as the default directory when searching files listed in +``LIC_FILES_CHKSUM``. The previous example employs the default +directory. + +Consider this next example: +:: + + LIC_FILES_CHKSUM = "file://src/ls.c;beginline=5;endline=16;\ + md5=bb14ed3c4cda583abc85401304b5cd4e" + LIC_FILES_CHKSUM = "file://${WORKDIR}/license.html;md5=5c94767cedb5d6987c902ac850ded2c6" + +The first line locates a file in ``${S}/src/ls.c`` and isolates lines +five through 16 as license text. The second line refers to a file in +:term:`WORKDIR`. + +Note that ``LIC_FILES_CHKSUM`` variable is mandatory for all recipes, +unless the ``LICENSE`` variable is set to "CLOSED". + +.. _usingpoky-LIC_FILES_CHKSUM-explanation-of-syntax: + +Explanation of Syntax +~~~~~~~~~~~~~~~~~~~~~ + +As mentioned in the previous section, the ``LIC_FILES_CHKSUM`` variable +lists all the important files that contain the license text for the +source code. It is possible to specify a checksum for an entire file, or +a specific section of a file (specified by beginning and ending line +numbers with the "beginline" and "endline" parameters, respectively). +The latter is useful for source files with a license notice header, +README documents, and so forth. If you do not use the "beginline" +parameter, then it is assumed that the text begins on the first line of +the file. Similarly, if you do not use the "endline" parameter, it is +assumed that the license text ends with the last line of the file. + +The "md5" parameter stores the md5 checksum of the license text. If the +license text changes in any way as compared to this parameter then a +mismatch occurs. This mismatch triggers a build failure and notifies the +developer. Notification allows the developer to review and address the +license text changes. Also note that if a mismatch occurs during the +build, the correct md5 checksum is placed in the build log and can be +easily copied to the recipe. + +There is no limit to how many files you can specify using the +``LIC_FILES_CHKSUM`` variable. Generally, however, every project +requires a few specifications for license tracking. Many projects have a +"COPYING" file that stores the license information for all the source +code files. This practice allows you to just track the "COPYING" file as +long as it is kept up to date. + +.. note:: + + - If you specify an empty or invalid "md5" parameter, + :term:`BitBake` returns an md5 + mis-match error and displays the correct "md5" parameter value + during the build. The correct parameter is also captured in the + build log. + + - If the whole file contains only license text, you do not need to + use the "beginline" and "endline" parameters. + +Enabling Commercially Licensed Recipes +-------------------------------------- + +By default, the OpenEmbedded build system disables components that have +commercial or other special licensing requirements. Such requirements +are defined on a recipe-by-recipe basis through the +:term:`LICENSE_FLAGS` variable +definition in the affected recipe. 
For instance, the +``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` recipe +contains the following statement: +:: + + LICENSE_FLAGS = "commercial" + +Here is a +slightly more complicated example that contains both an explicit recipe +name and version (after variable expansion): +:: + + LICENSE_FLAGS = "license_${PN}_${PV}" + +In order for a component restricted by a +``LICENSE_FLAGS`` definition to be enabled and included in an image, it +needs to have a matching entry in the global +:term:`LICENSE_FLAGS_WHITELIST` +variable, which is a variable typically defined in your ``local.conf`` +file. For example, to enable the +``poky/meta/recipes-multimedia/gstreamer/gst-plugins-ugly`` package, you +could add either the string "commercial_gst-plugins-ugly" or the more +general string "commercial" to ``LICENSE_FLAGS_WHITELIST``. See the +"`License Flag Matching <#license-flag-matching>`__" section for a full +explanation of how ``LICENSE_FLAGS`` matching works. Here is the +example: +:: + + LICENSE_FLAGS_WHITELIST = "commercial_gst-plugins-ugly" + +Likewise, to additionally enable the package built from the recipe +containing ``LICENSE_FLAGS = "license_${PN}_${PV}"``, and assuming that +the actual recipe name was ``emgd_1.10.bb``, the following string would +enable that package as well as the original ``gst-plugins-ugly`` +package: +:: + + LICENSE_FLAGS_WHITELIST = "commercial_gst-plugins-ugly license_emgd_1.10" + +As a convenience, you do not need to specify the +complete license string in the whitelist for every package. You can use +an abbreviated form, which consists of just the first portion or +portions of the license string before the initial underscore character +or characters. A partial string will match any license that contains the +given string as the first portion of its license. For example, the +following whitelist string will also match both of the packages +previously mentioned as well as any other packages that have licenses +starting with "commercial" or "license". +:: + + LICENSE_FLAGS_WHITELIST = "commercial license" + +License Flag Matching +~~~~~~~~~~~~~~~~~~~~~ + +License flag matching allows you to control what recipes the +OpenEmbedded build system includes in the build. Fundamentally, the +build system attempts to match ``LICENSE_FLAGS`` strings found in +recipes against ``LICENSE_FLAGS_WHITELIST`` strings found in the +whitelist. A match causes the build system to include a recipe in the +build, while failure to find a match causes the build system to exclude +a recipe. + +In general, license flag matching is simple. However, understanding some +concepts will help you correctly and effectively use matching. + +Before a flag defined by a particular recipe is tested against the +contents of the whitelist, the expanded string ``_${PN}`` is appended to +the flag. This expansion makes each ``LICENSE_FLAGS`` value +recipe-specific. After expansion, the string is then matched against the +whitelist. Thus, specifying ``LICENSE_FLAGS = "commercial"`` in recipe +"foo", for example, results in the string ``"commercial_foo"``. And, to +create a match, that string must appear in the whitelist. + +Judicious use of the ``LICENSE_FLAGS`` strings and the contents of the +``LICENSE_FLAGS_WHITELIST`` variable allows you a lot of flexibility for +including or excluding recipes based on licensing. For example, you can +broaden the matching capabilities by using license flags string subsets +in the whitelist. + +.. 
note:: + + When using a string subset, be sure to use the part of the expanded + string that precedes the appended underscore character (e.g. + usethispart_1.3 + , + usethispart_1.4 + , and so forth). + +For example, simply specifying the string "commercial" in the whitelist +matches any expanded ``LICENSE_FLAGS`` definition that starts with the +string "commercial" such as "commercial_foo" and "commercial_bar", which +are the strings the build system automatically generates for +hypothetical recipes named "foo" and "bar" assuming those recipes simply +specify the following: +:: + + LICENSE_FLAGS = "commercial" + +Thus, you can choose +to exhaustively enumerate each license flag in the whitelist and allow +only specific recipes into the image, or you can use a string subset +that causes a broader range of matches to allow a range of recipes into +the image. + +This scheme works even if the ``LICENSE_FLAGS`` string already has +``_${PN}`` appended. For example, the build system turns the license +flag "commercial_1.2_foo" into "commercial_1.2_foo_foo" and would match +both the general "commercial" and the specific "commercial_1.2_foo" +strings found in the whitelist, as expected. + +Here are some other scenarios: + +- You can specify a versioned string in the recipe such as + "commercial_foo_1.2" in a "foo" recipe. The build system expands this + string to "commercial_foo_1.2_foo". Combine this license flag with a + whitelist that has the string "commercial" and you match the flag + along with any other flag that starts with the string "commercial". + +- Under the same circumstances, you can use "commercial_foo" in the + whitelist and the build system not only matches "commercial_foo_1.2" + but also matches any license flag with the string "commercial_foo", + regardless of the version. + +- You can be very specific and use both the package and version parts + in the whitelist (e.g. "commercial_foo_1.2") to specifically match a + versioned recipe. + +Other Variables Related to Commercial Licenses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Other helpful variables related to commercial license handling exist and +are defined in the +``poky/meta/conf/distro/include/default-distrovars.inc`` file: +:: + + COMMERCIAL_AUDIO_PLUGINS ?= "" + COMMERCIAL_VIDEO_PLUGINS ?= "" + +If you +want to enable these components, you can do so by making sure you have +statements similar to the following in your ``local.conf`` configuration +file: +:: + + COMMERCIAL_AUDIO_PLUGINS = "gst-plugins-ugly-mad \ + gst-plugins-ugly-mpegaudioparse" + COMMERCIAL_VIDEO_PLUGINS = "gst-plugins-ugly-mpeg2dec \ + gst-plugins-ugly-mpegstream gst-plugins-bad-mpegvideoparse" + LICENSE_FLAGS_WHITELIST = "commercial_gst-plugins-ugly commercial_gst-plugins-bad commercial_qmmp" + + +Of course, you could also create a matching whitelist for those +components using the more general "commercial" in the whitelist, but +that would also enable all the other packages with ``LICENSE_FLAGS`` +containing "commercial", which you may or may not want: +:: + + LICENSE_FLAGS_WHITELIST = "commercial" + +Specifying audio and video plugins as part of the +``COMMERCIAL_AUDIO_PLUGINS`` and ``COMMERCIAL_VIDEO_PLUGINS`` statements +(along with the enabling ``LICENSE_FLAGS_WHITELIST``) includes the +plugins or components into built images, thus adding support for media +formats or components. 
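+
+If you are unsure which values are actually in effect for your build
+(for example, when several configuration files touch these variables),
+one convenient way to check is to dump the expanded BitBake environment
+and search for the variables. This is only a convenience sketch; any
+method of inspecting the environment works:
+::
+
+   $ bitbake -e | grep "^LICENSE_FLAGS_WHITELIST="
+   $ bitbake -e gst-plugins-ugly | grep "^LICENSE_FLAGS="
+
+The first command shows the whitelist the build is using. The second
+shows the flags a particular recipe declares; if the recipe is currently
+excluded because of those flags, BitBake instead reports that the recipe
+was skipped due to its restricted license, which also answers the
+question.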
+ +Maintaining Open Source License Compliance During Your Product's Lifecycle +-------------------------------------------------------------------------- + +One of the concerns for a development organization using open source +software is how to maintain compliance with various open source +licensing during the lifecycle of the product. While this section does +not provide legal advice or comprehensively cover all scenarios, it does +present methods that you can use to assist you in meeting the compliance +requirements during a software release. + +With hundreds of different open source licenses that the Yocto Project +tracks, it is difficult to know the requirements of each and every +license. However, the requirements of the major FLOSS licenses can begin +to be covered by assuming that three main areas of concern exist: + +- Source code must be provided. + +- License text for the software must be provided. + +- Compilation scripts and modifications to the source code must be + provided. + +There are other requirements beyond the scope of these three and the +methods described in this section (e.g. the mechanism through which +source code is distributed). + +As different organizations have different methods of complying with open +source licensing, this section is not meant to imply that there is only +one single way to meet your compliance obligations, but rather to +describe one method of achieving compliance. The remainder of this +section describes methods supported to meet the previously mentioned +three requirements. Once you take steps to meet these requirements, and +prior to releasing images, sources, and the build system, you should +audit all artifacts to ensure completeness. + +.. note:: + + The Yocto Project generates a license manifest during image creation + that is located in + ${DEPLOY_DIR}/licenses/ + image_name-datestamp + to assist with any audits. + +Providing the Source Code +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Compliance activities should begin before you generate the final image. +The first thing you should look at is the requirement that tops the list +for most compliance groups - providing the source. The Yocto Project has +a few ways of meeting this requirement. + +One of the easiest ways to meet this requirement is to provide the +entire :term:`DL_DIR` used by the +build. This method, however, has a few issues. The most obvious is the +size of the directory since it includes all sources used in the build +and not just the source used in the released image. It will include +toolchain source, and other artifacts, which you would not generally +release. However, the more serious issue for most companies is +accidental release of proprietary software. The Yocto Project provides +an :ref:`archiver ` class to +help avoid some of these concerns. + +Before you employ ``DL_DIR`` or the ``archiver`` class, you need to +decide how you choose to provide source. The source ``archiver`` class +can generate tarballs and SRPMs and can create them with various levels +of compliance in mind. + +One way of doing this (but certainly not the only way) is to release +just the source as a tarball. You can do this by adding the following to +the ``local.conf`` file found in the +:term:`Build Directory`: +:: + + INHERIT += "archiver" + ARCHIVER_MODE[src] = "original" + +During the creation of your +image, the source from all recipes that deploy packages to the image is +placed within subdirectories of ``DEPLOY_DIR/sources`` based on the +:term:`LICENSE` for each recipe. 
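+
+The ``archiver`` class supports modes other than "original" as well. If,
+for example, your compliance policy calls for shipping patched source
+together with the patches and the recipes that drive the build, a
+``local.conf`` along the following lines is one possible starting point.
+Treat it as a sketch and confirm the ``archiver`` class flags available
+in your release before relying on it:
+::
+
+   INHERIT += "archiver"
+   # Archive the source after the do_patch task rather than the pristine upstream source
+   ARCHIVER_MODE[src] = "patched"
+   # Also archive diffs between the unpacked and the patched source
+   ARCHIVER_MODE[diff] = "1"
+   # Include the recipe (.bb and .inc) files in the archive
+   ARCHIVER_MODE[recipe] = "1"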
+
+Releasing the entire ``DEPLOY_DIR/sources`` directory enables you to
+comply with requirements concerning providing the unmodified source. It
+is important to note that the size of the directory can get large.
+
+A way to help mitigate the size issue is to only release tarballs for
+licenses that require the release of source. Let us assume you are only
+concerned with GPL code as identified by running the following script:
+::
+
+   #!/bin/bash
+   # Script to archive a subset of packages matching specific license(s)
+   # Source and license files are copied into sub folders of package folder
+   # Must be run from build folder
+   src_release_dir="source-release"
+   mkdir -p $src_release_dir
+   for a in tmp/deploy/sources/*; do
+      for d in $a/*; do
+         # Get package name from path
+         p=`basename $d`
+         p=${p%-*}
+         p=${p%-*}
+         # Only archive GPL packages (update *GPL* regex for your license check)
+         numfiles=`ls tmp/deploy/licenses/$p/*GPL* 2> /dev/null | wc -l`
+         if [ $numfiles -gt 0 ]; then
+            echo Archiving $p
+            mkdir -p $src_release_dir/$p/source
+            cp $d/* $src_release_dir/$p/source 2> /dev/null
+            mkdir -p $src_release_dir/$p/license
+            cp tmp/deploy/licenses/$p/* $src_release_dir/$p/license 2> /dev/null
+         fi
+      done
+   done
+
+At this point, you could create a tarball from the ``source-release``
+directory and provide that to the end user. This method would be a step
+toward achieving compliance with section 3a of GPLv2 and with section 6
+of GPLv3.
+
+Providing License Text
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+One requirement that is often overlooked is inclusion of license text.
+This requirement also needs to be dealt with prior to generating the
+final image. Some licenses require the license text to accompany the
+binary. You can achieve this by adding the following to your
+``local.conf`` file:
+::
+
+   COPY_LIC_MANIFEST = "1"
+   COPY_LIC_DIRS = "1"
+   LICENSE_CREATE_PACKAGE = "1"
+
+Adding these statements to the configuration file ensures that the
+licenses collected during package generation are included on your image.
+
+.. note::
+
+   Setting all three variables to "1" results in the image having two
+   copies of the same license file. One copy resides in
+   ``/usr/share/common-licenses`` and the other resides in
+   ``/usr/share/license``.
+
+   The reason for this behavior is that
+   :term:`COPY_LIC_DIRS` and
+   :term:`COPY_LIC_MANIFEST`
+   add a copy of the license when the image is built but do not offer a
+   path for adding licenses for newly installed packages to an image.
+   :term:`LICENSE_CREATE_PACKAGE`
+   adds a separate package and an upgrade path for adding licenses to an
+   image.
+
+As the source ``archiver`` class has already archived the original
+unmodified source that contains the license files, you would have
+already met the requirements for inclusion of the license information
+with source as defined by the GPL and other open source licenses.
+
+Providing Compilation Scripts and Source Code Modifications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+At this point, we have addressed everything needed prior to generating
+the image. The next two requirements are addressed during the final
+packaging of the release.
+
+By releasing the version of the OpenEmbedded build system and the layers
+used during the build, you will be providing both compilation scripts
+and the source code modifications in one step.
+
+If the deployment team has a :ref:`overview-manual/overview-manual-concepts:bsp layer`
+and a distro layer, and those layers are used to patch, compile,
+package, or modify (in any way) any open source software included in
+your released images, you might be required to release those layers
+under section 3 of GPLv2 or section 1 of GPLv3. One way of doing that is
+with a clean checkout of the version of the Yocto Project and layers
+used during your build. Here is an example:
+::
+
+   # We built using the dunfell branch of the poky repo
+   $ git clone -b dunfell git://git.yoctoproject.org/poky
+   $ cd poky
+   # We built using the release_branch for our layers
+   $ git clone -b release_branch git://git.mycompany.com/meta-my-bsp-layer
+   $ git clone -b release_branch git://git.mycompany.com/meta-my-software-layer
+   # clean up the .git repos
+   $ find . -name ".git" -type d -exec rm -rf {} \;
+
+One thing a development organization might want to consider for
+end-user convenience is to modify ``meta-poky/conf/bblayers.conf.sample``
+to ensure that when the end user utilizes the released build system to
+build an image, the development organization's layers are included in
+the ``bblayers.conf`` file automatically:
+::
+
+   # POKY_BBLAYERS_CONF_VERSION is increased each time build/conf/bblayers.conf
+   # changes incompatibly
+   POKY_BBLAYERS_CONF_VERSION = "2"
+
+   BBPATH = "${TOPDIR}"
+   BBFILES ?= ""
+
+   BBLAYERS ?= " \
+       ##OEROOT##/meta \
+       ##OEROOT##/meta-poky \
+       ##OEROOT##/meta-yocto-bsp \
+       ##OEROOT##/meta-mylayer \
+       "
+
+Creating and providing an archive of the :term:`Metadata`
+layers (recipes, configuration files, and so forth) enables you to meet
+your requirements to include the scripts to control compilation as well
+as any modifications to the original source.
+
+Copying Licenses that Do Not Exist
+------------------------------------
+
+Some packages, such as the linux-firmware package, have many licenses
+that are not in any way common. You can avoid having to add each of
+these package-specific license files to the common licenses directory by
+using the
+:term:`NO_GENERIC_LICENSE`
+variable. Using this variable also avoids QA errors when you use a
+non-common, non-CLOSED license in a recipe.
+
+The following is an example that uses the ``LICENSE.Abilis.txt`` file as
+the license from the fetched source:
+::
+
+   NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENSE.Abilis.txt"
+
+Using the Error Reporting Tool
+================================
+
+The error reporting tool allows you to submit errors encountered during
+builds to a central database. Outside of the build environment, you can
+use a web interface to browse errors, view statistics, and query for
+errors. The tool works using a client-server system where the client
+portion is integrated with the installed Yocto Project
+:term:`Source Directory` (e.g. ``poky``).
+The server receives the information collected and saves it in a
+database.
+
+A live instance of the error reporting server exists at
+http://errors.yoctoproject.org. When you want help with a build
+failure, you can submit all of the information on the failure to this
+server and then point to the URL in your bug report or in an email to
+the mailing list.
+
+.. note::
+
+   If you send error reports to this server, the reports become publicly
+   visible.
+
+Enabling and Using the Tool
+-----------------------------
+
+By default, the error reporting tool is disabled.
You can enable it by +inheriting the +:ref:`report-error ` +class by adding the following statement to the end of your +``local.conf`` file in your +:term:`Build Directory`. +:: + + INHERIT += "report-error" + +By default, the error reporting feature stores information in +``${``\ :term:`LOG_DIR`\ ``}/error-report``. +However, you can specify a directory to use by adding the following to +your ``local.conf`` file: +:: + + ERR_REPORT_DIR = "path" + +Enabling error +reporting causes the build process to collect the errors and store them +in a file as previously described. When the build system encounters an +error, it includes a command as part of the console output. You can run +the command to send the error file to the server. For example, the +following command sends the errors to an upstream server: +:: + + $ send-error-report /home/brandusa/project/poky/build/tmp/log/error-report/error_report_201403141617.txt + +In the previous example, the errors are sent to a public database +available at http://errors.yoctoproject.org, which is used by the +entire community. If you specify a particular server, you can send the +errors to a different database. Use the following command for more +information on available options: +:: + + $ send-error-report --help + +When sending the error file, you are prompted to review the data being +sent as well as to provide a name and optional email address. Once you +satisfy these prompts, the command returns a link from the server that +corresponds to your entry in the database. For example, here is a +typical link: http://errors.yoctoproject.org/Errors/Details/9522/ + +Following the link takes you to a web interface where you can browse, +query the errors, and view statistics. + +Disabling the Tool +------------------ + +To disable the error reporting feature, simply remove or comment out the +following statement from the end of your ``local.conf`` file in your +:term:`Build Directory`. +:: + + INHERIT += "report-error" + +Setting Up Your Own Error Reporting Server +------------------------------------------ + +If you want to set up your own error reporting server, you can obtain +the code from the Git repository at +http://git.yoctoproject.org/cgit/cgit.cgi/error-report-web/. +Instructions on how to set it up are in the README document. + +.. _dev-using-wayland-and-weston: + +Using Wayland and Weston +======================== + +`Wayland `__ +is a computer display server protocol that provides a method for +compositing window managers to communicate directly with applications +and video hardware and expects them to communicate with input hardware +using other libraries. Using Wayland with supporting targets can result +in better control over graphics frame rendering than an application +might otherwise achieve. + +The Yocto Project provides the Wayland protocol libraries and the +reference +`Weston `__ +compositor as part of its release. You can find the integrated packages +in the ``meta`` layer of the :term:`Source Directory`. +Specifically, you +can find the recipes that build both Wayland and Weston at +``meta/recipes-graphics/wayland``. + +You can build both the Wayland and Weston packages for use only with +targets that accept the `Mesa 3D and Direct Rendering +Infrastructure `__, +which is also known as Mesa DRI. This implies that you cannot build and +use the packages if your target uses, for example, the Intel Embedded +Media and Graphics Driver (Intel EMGD) that overrides Mesa DRI. + +.. 
note:: + + Due to lack of EGL support, Weston 1.0.3 will not run directly on the + emulated QEMU hardware. However, this version of Weston will run + under X emulation without issues. + +This section describes what you need to do to implement Wayland and use +the Weston compositor when building an image for a supporting target. + +Enabling Wayland in an Image +---------------------------- + +To enable Wayland, you need to enable it to be built and enable it to be +included (installed) in the image. + +.. _enable-building: + +Building Wayland +~~~~~~~~~~~~~~~~ + +To cause Mesa to build the ``wayland-egl`` platform and Weston to build +Wayland with Kernel Mode Setting +(`KMS `__) +support, include the "wayland" flag in the +:term:`DISTRO_FEATURES` +statement in your ``local.conf`` file: +:: + + DISTRO_FEATURES_append = " wayland" + +.. note:: + + If X11 has been enabled elsewhere, Weston will build Wayland with X11 + support + +.. _enable-installation-in-an-image: + +Installing Wayland and Weston +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +To install the Wayland feature into an image, you must include the +following +:term:`CORE_IMAGE_EXTRA_INSTALL` +statement in your ``local.conf`` file: +:: + + CORE_IMAGE_EXTRA_INSTALL += "wayland weston" + +Running Weston +-------------- + +To run Weston inside X11, enabling it as described earlier and building +a Sato image is sufficient. If you are running your image under Sato, a +Weston Launcher appears in the "Utility" category. + +Alternatively, you can run Weston through the command-line interpretor +(CLI), which is better suited for development work. To run Weston under +the CLI, you need to do the following after your image is built: + +1. Run these commands to export ``XDG_RUNTIME_DIR``: + :: + + mkdir -p /tmp/$USER-weston + chmod 0700 /tmp/$USER-weston + export XDG_RUNTIME_DIR=/tmp/$USER-weston + +2. Launch Weston in the shell: + :: + + weston diff --git a/poky/documentation/dev-manual/dev-manual-common-tasks.xml b/poky/documentation/dev-manual/dev-manual-common-tasks.xml index 1f24c7343..247f6abfd 100644 --- a/poky/documentation/dev-manual/dev-manual-common-tasks.xml +++ b/poky/documentation/dev-manual/dev-manual-common-tasks.xml @@ -8384,7 +8384,7 @@ If you see the following error, you need to update or create a ~/.mtoolsrc file and - be sure to have the line “mtools_skip_check=1“ + be sure to have the line "mtools_skip_check=1" in the file. Then, run the Wic command again: @@ -9837,7 +9837,7 @@ Select the desired package format as follows: - PACKAGE_CLASSES ?= “package_packageformat” + PACKAGE_CLASSES ?= "package_packageformat" where packageformat can be "ipk", "rpm", "deb", or "tar" which are the @@ -14193,7 +14193,7 @@ local.conf file or in an image recipe: - IMAGE_INSTALL_append = “ gdbserver" + IMAGE_INSTALL_append = " gdbserver" The change makes sure the gdbserver package is included. diff --git a/poky/documentation/dev-manual/dev-manual-intro.rst b/poky/documentation/dev-manual/dev-manual-intro.rst new file mode 100644 index 000000000..3225c6ca4 --- /dev/null +++ b/poky/documentation/dev-manual/dev-manual-intro.rst @@ -0,0 +1,61 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************************************** +The Yocto Project Development Tasks Manual +****************************************** + +.. _dev-welcome: + +Welcome +======= + +Welcome to the Yocto Project Development Tasks Manual! This manual +provides relevant procedures necessary for developing in the Yocto +Project environment (i.e. 
developing embedded Linux images and +user-space applications that run on targeted devices). The manual groups +related procedures into higher-level sections. Procedures can consist of +high-level steps or low-level steps depending on the topic. + +This manual provides the following: + +- Procedures that help you get going with the Yocto Project. For + example, procedures that show you how to set up a build host and work + with the Yocto Project source repositories. + +- Procedures that show you how to submit changes to the Yocto Project. + Changes can be improvements, new features, or bug fixes. + +- Procedures related to "everyday" tasks you perform while developing + images and applications using the Yocto Project. For example, + procedures to create a layer, customize an image, write a new recipe, + and so forth. + +This manual does not provide the following: + +- Redundant Step-by-step Instructions: For example, the + :doc:`../sdk-manual/sdk-manual` manual contains detailed + instructions on how to install an SDK, which is used to develop + applications for target hardware. + +- Reference or Conceptual Material: This type of material resides in an + appropriate reference manual. For example, system variables are + documented in the :doc`../ref-manual/ref-manual`. + +- Detailed Public Information Not Specific to the Yocto Project: For + example, exhaustive information on how to use the Source Control + Manager Git is better covered with Internet searches and official Git + Documentation than through the Yocto Project documentation. + +Other Information +================= + +Because this manual presents information for many different topics, +supplemental information is recommended for full comprehension. For +introductory information on the Yocto Project, see the +:yocto_home:`Yocto Project Website <>`. If you want to build an image with no +knowledge of Yocto Project as a way of quickly testing it out, see the +:doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document. + +For a comprehensive list of links and other documentation, see the +":ref:`ref-manual/resources:links and related documentation`" +section in the Yocto Project Reference Manual. diff --git a/poky/documentation/dev-manual/dev-manual-qemu.rst b/poky/documentation/dev-manual/dev-manual-qemu.rst new file mode 100644 index 000000000..88b03745f --- /dev/null +++ b/poky/documentation/dev-manual/dev-manual-qemu.rst @@ -0,0 +1,470 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************************* +Using the Quick EMUlator (QEMU) +******************************* + +The Yocto Project uses an implementation of the Quick EMUlator (QEMU) +Open Source project as part of the Yocto Project development "tool set". +This chapter provides both procedures that show you how to use the Quick +EMUlator (QEMU) and other QEMU information helpful for development +purposes. + +.. _qemu-dev-overview: + +Overview +======== + +Within the context of the Yocto Project, QEMU is an emulator and +virtualization machine that allows you to run a complete image you have +built using the Yocto Project as just another task on your build system. +QEMU is useful for running and testing images and applications on +supported Yocto Project architectures without having actual hardware. +Among other things, the Yocto Project uses QEMU to run automated Quality +Assurance (QA) tests on final images shipped with each release. + +.. note:: + + This implementation is not the same as QEMU in general. 
+ +This section provides a brief reference for the Yocto Project +implementation of QEMU. + +For official information and documentation on QEMU in general, see the +following references: + +- `QEMU Website `__\ *:* The official + website for the QEMU Open Source project. + +- `Documentation `__\ *:* The QEMU user + manual. + +.. _qemu-running-qemu: + +Running QEMU +============ + +To use QEMU, you need to have QEMU installed and initialized as well as +have the proper artifacts (i.e. image files and root filesystems) +available. Follow these general steps to run QEMU: + +1. *Install QEMU:* QEMU is made available with the Yocto Project a + number of ways. One method is to install a Software Development Kit + (SDK). See ":ref:`sdk-manual/sdk-intro:the qemu emulator`" section in the + Yocto Project Application Development and the Extensible Software + Development Kit (eSDK) manual for information on how to install QEMU. + +2. *Setting Up the Environment:* How you set up the QEMU environment + depends on how you installed QEMU: + + - If you cloned the ``poky`` repository or you downloaded and + unpacked a Yocto Project release tarball, you can source the build + environment script (i.e. :ref:`structure-core-script`): + :: + + $ cd ~/poky + $ source oe-init-build-env + + - If you installed a cross-toolchain, you can run the script that + initializes the toolchain. For example, the following commands run + the initialization script from the default ``poky_sdk`` directory: + :: + + . ~/poky_sdk/environment-setup-core2-64-poky-linux + +3. *Ensure the Artifacts are in Place:* You need to be sure you have a + pre-built kernel that will boot in QEMU. You also need the target + root filesystem for your target machine's architecture: + + - If you have previously built an image for QEMU (e.g. ``qemux86``, + ``qemuarm``, and so forth), then the artifacts are in place in + your :term:`Build Directory`. + + - If you have not built an image, you can go to the + :yocto_dl:`machines/qemu ` area and download a + pre-built image that matches your architecture and can be run on + QEMU. + + See the ":ref:`sdk-manual/sdk-appendix-obtain:extracting the root filesystem`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual for information on + how to extract a root filesystem. + +4. *Run QEMU:* The basic ``runqemu`` command syntax is as follows: + :: + + $ runqemu [option ] [...] + + Based on what you provide on the command + line, ``runqemu`` does a good job of figuring out what you are trying + to do. For example, by default, QEMU looks for the most recently + built image according to the timestamp when it needs to look for an + image. Minimally, through the use of options, you must provide either + a machine name, a virtual machine image (``*wic.vmdk``), or a kernel + image (``*.bin``). + + Here are some additional examples to help illustrate further QEMU: + + - This example starts QEMU with MACHINE set to "qemux86-64". + Assuming a standard + :term:`Build Directory`, ``runqemu`` + automatically finds the ``bzImage-qemux86-64.bin`` image file and + the ``core-image-minimal-qemux86-64-20200218002850.rootfs.ext4`` + (assuming the current build created a ``core-image-minimal`` + image). + + .. note:: + + When more than one image with the same name exists, QEMU finds + and uses the most recently built image according to the + timestamp. + + :: + + $ runqemu qemux86-64 + + - This example produces the exact same results as the previous + example. 
This command, however, specifically provides the image + and root filesystem type. + :: + + $ runqemu qemux86-64 core-image-minimal ext4 + + - This example specifies to boot an initial RAM disk image and to + enable audio in QEMU. For this case, ``runqemu`` set the internal + variable ``FSTYPE`` to "cpio.gz". Also, for audio to be enabled, + an appropriate driver must be installed (see the previous + description for the ``audio`` option for more information). + :: + + $ runqemu qemux86-64 ramfs audio + + - This example does not provide enough information for QEMU to + launch. While the command does provide a root filesystem type, it + must also minimally provide a MACHINE, KERNEL, or VM option. + :: + + $ runqemu ext4 + + - This example specifies to boot a virtual machine image + (``.wic.vmdk`` file). From the ``.wic.vmdk``, ``runqemu`` + determines the QEMU architecture (MACHINE) to be "qemux86-64" and + the root filesystem type to be "vmdk". + :: + + $ runqemu /home/scott-lenovo/vm/core-image-minimal-qemux86-64.wic.vmdk + +Switching Between Consoles +========================== + +When booting or running QEMU, you can switch between supported consoles +by using Ctrl+Alt+number. For example, Ctrl+Alt+3 switches you to the +serial console as long as that console is enabled. Being able to switch +consoles is helpful, for example, if the main QEMU console breaks for +some reason. + +.. note:: + + Usually, "2" gets you to the main console and "3" gets you to the + serial console. + +Removing the Splash Screen +========================== + +You can remove the splash screen when QEMU is booting by using Alt+left. +Removing the splash screen allows you to see what is happening in the +background. + +Disabling the Cursor Grab +========================= + +The default QEMU integration captures the cursor within the main window. +It does this since standard mouse devices only provide relative input +and not absolute coordinates. You then have to break out of the grab +using the "Ctrl+Alt" key combination. However, the Yocto Project's +integration of QEMU enables the wacom USB touch pad driver by default to +allow input of absolute coordinates. This default means that the mouse +can enter and leave the main window without the grab taking effect +leading to a better user experience. + +.. _qemu-running-under-a-network-file-system-nfs-server: + +Running Under a Network File System (NFS) Server +================================================ + +One method for running QEMU is to run it on an NFS server. This is +useful when you need to access the same file system from both the build +and the emulated system at the same time. It is also worth noting that +the system does not need root privileges to run. It uses a user space +NFS server to avoid that. Follow these steps to set up for running QEMU +using an NFS server. + +1. *Extract a Root Filesystem:* Once you are able to run QEMU in your + environment, you can use the ``runqemu-extract-sdk`` script, which is + located in the ``scripts`` directory along with the ``runqemu`` + script. + + The ``runqemu-extract-sdk`` takes a root filesystem tarball and + extracts it into a location that you specify. Here is an example that + takes a file system and extracts it to a directory named + ``test-nfs``: + :: + + runqemu-extract-sdk ./tmp/deploy/images/qemux86-64/core-image-sato-qemux86-64.tar.bz2 test-nfs + +2. *Start QEMU:* Once you have extracted the file system, you can run + ``runqemu`` normally with the additional location of the file system. 
+ You can then also make changes to the files within ``./test-nfs`` and + see those changes appear in the image in real time. Here is an + example using the ``qemux86`` image: + :: + + runqemu qemux86-64 ./test-nfs + +.. note:: + + Should you need to start, stop, or restart the NFS share, you can use + the following commands: + + - The following command starts the NFS share: runqemu-export-rootfs + start file-system-location + + - The following command stops the NFS share: runqemu-export-rootfs + stop file-system-location + + - The following command restarts the NFS share: + runqemu-export-rootfs restart file-system-location + +.. _qemu-kvm-cpu-compatibility: + +QEMU CPU Compatibility Under KVM +================================ + +By default, the QEMU build compiles for and targets 64-bit and x86 Intel +Core2 Duo processors and 32-bit x86 Intel Pentium II processors. QEMU +builds for and targets these CPU types because they display a broad +range of CPU feature compatibility with many commonly used CPUs. + +Despite this broad range of compatibility, the CPUs could support a +feature that your host CPU does not support. Although this situation is +not a problem when QEMU uses software emulation of the feature, it can +be a problem when QEMU is running with KVM enabled. Specifically, +software compiled with a certain CPU feature crashes when run on a CPU +under KVM that does not support that feature. To work around this +problem, you can override QEMU's runtime CPU setting by changing the +``QB_CPU_KVM`` variable in ``qemuboot.conf`` in the +:term:`Build Directory` ``deploy/image`` +directory. This setting specifies a ``-cpu`` option passed into QEMU in +the ``runqemu`` script. Running ``qemu -cpu help`` returns a list of +available supported CPU types. + +.. _qemu-dev-performance: + +QEMU Performance +================ + +Using QEMU to emulate your hardware can result in speed issues depending +on the target and host architecture mix. For example, using the +``qemux86`` image in the emulator on an Intel-based 32-bit (x86) host +machine is fast because the target and host architectures match. On the +other hand, using the ``qemuarm`` image on the same Intel-based host can +be slower. But, you still achieve faithful emulation of ARM-specific +issues. + +To speed things up, the QEMU images support using ``distcc`` to call a +cross-compiler outside the emulated system. If you used ``runqemu`` to +start QEMU, and the ``distccd`` application is present on the host +system, any BitBake cross-compiling toolchain available from the build +system is automatically used from within QEMU simply by calling +``distcc``. You can accomplish this by defining the cross-compiler +variable (e.g. ``export CC="distcc"``). Alternatively, if you are using +a suitable SDK image or the appropriate stand-alone toolchain is +present, the toolchain is also automatically used. + +.. note:: + + Several mechanisms exist that let you connect to the system running + on the QEMU emulator: + + - QEMU provides a framebuffer interface that makes standard consoles + available. + + - Generally, headless embedded devices have a serial port. If so, + you can configure the operating system of the running image to use + that port to run a console. The connection uses standard IP + networking. + + - SSH servers exist in some QEMU images. The ``core-image-sato`` + QEMU image has a Dropbear secure shell (SSH) server that runs with + the root password disabled. 
The ``core-image-full-cmdline`` and + ``core-image-lsb`` QEMU images have OpenSSH instead of Dropbear. + Including these SSH servers allow you to use standard ``ssh`` and + ``scp`` commands. The ``core-image-minimal`` QEMU image, however, + contains no SSH server. + + - You can use a provided, user-space NFS server to boot the QEMU + session using a local copy of the root filesystem on the host. In + order to make this connection, you must extract a root filesystem + tarball by using the ``runqemu-extract-sdk`` command. After + running the command, you must then point the ``runqemu`` script to + the extracted directory instead of a root filesystem image file. + See the "`Running Under a Network File System (NFS) + Server <#qemu-running-under-a-network-file-system-nfs-server>`__" + section for more information. + +.. _qemu-dev-command-line-syntax: + +QEMU Command-Line Syntax +======================== + +The basic ``runqemu`` command syntax is as follows: +:: + + $ runqemu [option ] [...] + +Based on what you provide on the command line, ``runqemu`` does a +good job of figuring out what you are trying to do. For example, by +default, QEMU looks for the most recently built image according to the +timestamp when it needs to look for an image. Minimally, through the use +of options, you must provide either a machine name, a virtual machine +image (``*wic.vmdk``), or a kernel image (``*.bin``). + +Following is the command-line help output for the ``runqemu`` command: +:: + + $ runqemu --help + + Usage: you can run this script with any valid combination + of the following environment variables (in any order): + KERNEL - the kernel image file to use + ROOTFS - the rootfs image file or nfsroot directory to use + MACHINE - the machine name (optional, autodetected from KERNEL filename if unspecified) + Simplified QEMU command-line options can be passed with: + nographic - disable video console + serial - enable a serial console on /dev/ttyS0 + slirp - enable user networking, no root privileges is required + kvm - enable KVM when running x86/x86_64 (VT-capable CPU required) + kvm-vhost - enable KVM with vhost when running x86/x86_64 (VT-capable CPU required) + publicvnc - enable a VNC server open to all hosts + audio - enable audio + [*/]ovmf* - OVMF firmware file or base name for booting with UEFI + tcpserial= - specify tcp serial port number + biosdir= - specify custom bios dir + biosfilename= - specify bios filename + qemuparams= - specify custom parameters to QEMU + bootparams= - specify custom kernel parameters during boot + help, -h, --help: print this text + + Examples: + runqemu + runqemu qemuarm + runqemu tmp/deploy/images/qemuarm + runqemu tmp/deploy/images/qemux86/ + runqemu qemux86-64 core-image-sato ext4 + runqemu qemux86-64 wic-image-minimal wic + runqemu path/to/bzImage-qemux86.bin path/to/nfsrootdir/ serial + runqemu qemux86 iso/hddimg/wic.vmdk/wic.qcow2/wic.vdi/ramfs/cpio.gz... + runqemu qemux86 qemuparams="-m 256" + runqemu qemux86 bootparams="psplash=false" + runqemu path/to/-.wic + runqemu path/to/-.wic.vmdk + +.. _qemu-dev-runqemu-command-line-options: + +``runqemu`` Command-Line Options +================================ + +Following is a description of ``runqemu`` options you can provide on the +command line: + +.. note:: + + If you do provide some "illegal" option combination or perhaps you do + not provide enough in the way of options, + runqemu + provides appropriate error messaging to help you correct the problem. 
+
+- QEMUARCH: The QEMU machine architecture, which must be "qemuarm",
+  "qemuarm64", "qemumips", "qemumips64", "qemuppc", "qemux86", or
+  "qemux86-64".
+
+- ``VM``: The virtual machine image, which must be a ``.wic.vmdk``
+  file. Use this option when you want to boot a ``.wic.vmdk`` image.
+  The image filename you provide must contain one of the following
+  strings: "qemux86-64", "qemux86", "qemuarm", "qemumips64",
+  "qemumips", "qemuppc", or "qemush4".
+
+- ROOTFS: A root filesystem that has one of the following filetype
+  extensions: "ext2", "ext3", "ext4", "jffs2", "nfs", or "btrfs". If
+  the filename you provide for this option uses "nfs", it must provide
+  an explicit root filesystem path.
+
+- KERNEL: A kernel image, which is a ``.bin`` file. When you provide a
+  ``.bin`` file, ``runqemu`` detects it and assumes the file is a
+  kernel image.
+
+- MACHINE: The architecture of the QEMU machine, which must be one of
+  the following: "qemux86", "qemux86-64", "qemuarm", "qemuarm64",
+  "qemumips", "qemumips64", or "qemuppc". The MACHINE and QEMUARCH
+  options are basically identical. If you do not provide a MACHINE
+  option, ``runqemu`` tries to determine it based on other options.
+
+- ``ramfs``: Indicates you are booting an initial RAM disk (initramfs)
+  image, which means the ``FSTYPE`` is ``cpio.gz``.
+
+- ``iso``: Indicates you are booting an ISO image, which means the
+  ``FSTYPE`` is ``.iso``.
+
+- ``nographic``: Disables the video console, which sets the console to
+  "ttys0". This option is useful when you have logged into a server and
+  you do not want to disable forwarding from the X Window System (X11)
+  to your workstation or laptop.
+
+- ``serial``: Enables a serial console on ``/dev/ttyS0``.
+
+- ``biosdir``: Establishes a custom directory for BIOS, VGA BIOS and
+  keymaps.
+
+- ``biosfilename``: Establishes a custom BIOS name.
+
+- ``qemuparams=\"xyz\"``: Specifies custom QEMU parameters. Use this
+  option to pass options other than the simple "kvm" and "serial"
+  options.
+
+- ``bootparams=\"xyz\"``: Specifies custom boot parameters for the
+  kernel.
+
+- ``audio``: Enables audio in QEMU. The MACHINE option must be either
+  "qemux86" or "qemux86-64" in order for audio to be enabled.
+  Additionally, the ``snd_intel8x0`` or ``snd_ens1370`` driver must be
+  installed in the Linux guest.
+
+- ``slirp``: Enables "slirp" networking, which is a different way of
+  networking that does not need root access but also is not as easy to
+  use or comprehensive as the default.
+
+- ``kvm``: Enables KVM when running "qemux86" or "qemux86-64" QEMU
+  architectures. For KVM to work, all the following conditions must be
+  met:
+
+  - Your MACHINE must be either "qemux86" or "qemux86-64".
+
+  - Your build host has to have the KVM modules installed, which
+    provide ``/dev/kvm``.
+
+  - The build host ``/dev/kvm`` device node has to be both writable and
+    readable.
+
+- ``kvm-vhost``: Enables KVM with VHOST support when running "qemux86"
+  or "qemux86-64" QEMU architectures. For KVM with VHOST to work, the
+  following conditions must be met:
+
+  - `kvm <#kvm-cond>`__ option conditions must be met.
+
+  - Your build host has to have the virtio net device, which is
+    ``/dev/vhost-net``.
+
+  - The build host ``/dev/vhost-net`` device node has to be either
+    readable or writable and "slirp-enabled".
+
+- ``publicvnc``: Enables a VNC server open to all hosts.
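+
+As a quick illustration of how these options combine, the following
+invocation (assuming an image has already been built for "qemux86-64")
+boots the most recently built image for that machine without a video
+console, enables KVM, and passes extra memory straight through to QEMU:
+::
+
+   $ runqemu qemux86-64 nographic kvm qemuparams="-m 2048"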
diff --git a/poky/documentation/dev-manual/dev-manual-qemu.xml b/poky/documentation/dev-manual/dev-manual-qemu.xml index 46fe67bab..1a526dd2f 100644 --- a/poky/documentation/dev-manual/dev-manual-qemu.xml +++ b/poky/documentation/dev-manual/dev-manual-qemu.xml @@ -106,7 +106,7 @@ You need to be sure you have a pre-built kernel that will boot in QEMU. You also need the target root filesystem for your target - machine’s architecture: + machine's architecture: If you have previously built an image for QEMU @@ -553,7 +553,7 @@ A root filesystem that has one of the following filetype extensions: "ext2", "ext3", "ext4", "jffs2", "nfs", or "btrfs". - If the filename you provide for this option uses “nfs”, it + If the filename you provide for this option uses "nfs", it must provide an explicit root filesystem path. @@ -567,7 +567,7 @@ MACHINE: The architecture of the QEMU machine, which must be one of the following: "qemux86", "qemux86-64", "qemuarm", - "qemuarm64", "qemumips", “qemumips64", or "qemuppc". + "qemuarm64", "qemumips", "qemumips64", or "qemuppc". The MACHINE and QEMUARCH options are basically identical. @@ -674,7 +674,7 @@ qemux86" or "qemux86-64". The build host /dev/vhost-net directory has to be either readable or writable - and “slirp-enabled”. + and "slirp-enabled". diff --git a/poky/documentation/dev-manual/dev-manual-start.rst b/poky/documentation/dev-manual/dev-manual-start.rst new file mode 100644 index 000000000..536d5a9cd --- /dev/null +++ b/poky/documentation/dev-manual/dev-manual-start.rst @@ -0,0 +1,940 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************************** +Setting Up to Use the Yocto Project +*********************************** + +This chapter provides guidance on how to prepare to use the Yocto +Project. You can learn about creating a team environment to develop +using the Yocto Project, how to set up a :ref:`build +host `, how to locate +Yocto Project source repositories, and how to create local Git +repositories. + +.. _usingpoky-changes-collaborate: + +Creating a Team Development Environment +======================================= + +It might not be immediately clear how you can use the Yocto Project in a +team development environment, or how to scale it for a large team of +developers. You can adapt the Yocto Project to many different use cases +and scenarios; however, this flexibility could cause difficulties if you +are trying to create a working setup that scales effectively. + +To help you understand how to set up this type of environment, this +section presents a procedure that gives you information that can help +you get the results you want. The procedure is high-level and presents +some of the project's most successful experiences, practices, solutions, +and available technologies that have proved to work well in the past; +however, keep in mind, the procedure here is simply a starting point. +You can build off these steps and customize the procedure to fit any +particular working environment and set of practices. + +1. *Determine Who is Going to be Developing:* You first need to + understand who is going to be doing anything related to the Yocto + Project and determine their roles. Making this determination is + essential to completing subsequent steps, which are to get your + equipment together and set up your development environment's + hardware topology. + + The following roles exist: + + - *Application Developer:* This type of developer does application + level work on top of an existing software stack. 
+ + - *Core System Developer:* This type of developer works on the + contents of the operating system image itself. + + - *Build Engineer:* This type of developer manages Autobuilders and + releases. Depending on the specifics of the environment, not all + situations might need a Build Engineer. + + - *Test Engineer:* This type of developer creates and manages + automated tests that are used to ensure all application and core + system development meets desired quality standards. + +2. *Gather the Hardware:* Based on the size and make-up of the team, + get the hardware together. Ideally, any development, build, or test + engineer uses a system that runs a supported Linux distribution. + These systems, in general, should be high performance (e.g. dual, + six-core Xeons with 24 Gbytes of RAM and plenty of disk space). You + can help ensure efficiency by having any machines used for testing + or that run Autobuilders be as high performance as possible. + + .. note:: + + Given sufficient processing power, you might also consider + building Yocto Project development containers to be run under + Docker, which is described later. + +3. *Understand the Hardware Topology of the Environment:* Once you + understand the hardware involved and the make-up of the team, you + can understand the hardware topology of the development environment. + You can get a visual idea of the machines and their roles across the + development environment. + +4. *Use Git as Your Source Control Manager (SCM):* Keeping your + :term:`Metadata` (i.e. recipes, + configuration files, classes, and so forth) and any software you are + developing under the control of an SCM system that is compatible + with the OpenEmbedded build system is advisable. Of all of the SCMs + supported by BitBake, the Yocto Project team strongly recommends using + :ref:`overview-manual/overview-manual-development-environment:git`. + Git is a distributed system + that is easy to back up, allows you to work remotely, and then + connects back to the infrastructure. + + .. note:: + + For information about BitBake, see the + BitBake User Manual + . + + It is relatively easy to set up Git services and create + infrastructure like + :yocto_git:`http://git.yoctoproject.org <>`, which is based on + server software called ``gitolite`` with ``cgit`` being used to + generate the web interface that lets you view the repositories. The + ``gitolite`` software identifies users using SSH keys and allows + branch-based access controls to repositories that you can control as + little or as much as necessary. + + .. note:: + + The setup of these services is beyond the scope of this manual. + However, sites such as the following exist that describe how to + perform setup: + + - `Git documentation `__: + Describes how to install ``gitolite`` on the server. + + - `Gitolite `__: Information for + ``gitolite``. + + - `Interfaces, frontends, and + tools `__: + Documentation on how to create interfaces and frontends for + Git. + +5. *Set up the Application Development Machines:* As mentioned earlier, + application developers are creating applications on top of existing + software stacks. Following are some best practices for setting up + machines used for application development: + + - Use a pre-built toolchain that contains the software stack + itself. Then, develop the application code on top of the stack. + This method works well for small numbers of relatively isolated + applications. + + - Keep your cross-development toolchains updated. 
You can do this + through provisioning either as new toolchain downloads or as + updates through a package update mechanism using ``opkg`` to + provide updates to an existing toolchain. The exact mechanics of + how and when to do this depend on local policy. + + - Use multiple toolchains installed locally into different + locations to allow development across versions. + +6. *Set up the Core Development Machines:* As mentioned earlier, core + developers work on the contents of the operating system itself. + Following are some best practices for setting up machines used for + developing images: + + - Have the :term:`OpenEmbedded Build System` available on + the developer workstations so developers can run their own builds + and directly rebuild the software stack. + + - Keep the core system unchanged as much as possible and do your + work in layers on top of the core system. Doing so gives you a + greater level of portability when upgrading to new versions of + the core system or Board Support Packages (BSPs). + + - Share layers amongst the developers of a particular project and + contain the policy configuration that defines the project. + +7. *Set up an Autobuilder:* Autobuilders are often the core of the + development environment. It is here that changes from individual + developers are brought together and centrally tested. Based on this + automated build and test environment, subsequent decisions about + releases can be made. Autobuilders also allow for "continuous + integration" style testing of software components and regression + identification and tracking. + + See "`Yocto Project + Autobuilder `__" for more + information and links to buildbot. The Yocto Project team has found + this implementation works well in this role. A public example of + this is the Yocto Project Autobuilders, which the Yocto Project team + uses to test the overall health of the project. + + The features of this system are: + + - Highlights when commits break the build. + + - Populates an :ref:`sstate + cache ` from which + developers can pull rather than requiring local builds. + + - Allows commit hook triggers, which trigger builds when commits + are made. + + - Allows triggering of automated image booting and testing under + the QuickEMUlator (QEMU). + + - Supports incremental build testing and from-scratch builds. + + - Shares output that allows developer testing and historical + regression investigation. + + - Creates output that can be used for releases. + + - Allows scheduling of builds so that resources can be used + efficiently. + +8. *Set up Test Machines:* Use a small number of shared, high + performance systems for testing purposes. Developers can use these + systems for wider, more extensive testing while they continue to + develop locally using their primary development system. + +9. *Document Policies and Change Flow:* The Yocto Project uses a + hierarchical structure and a pull model. Scripts exist to create and + send pull requests (i.e. ``create-pull-request`` and + ``send-pull-request``). This model is in line with other open source + projects where maintainers are responsible for specific areas of the + project and a single maintainer handles the final "top-of-tree" + merges. + + .. note:: + + You can also use a more collective push model. The + gitolite + software supports both the push and pull models quite easily. + + As with any development environment, it is important to document the + policy used as well as any main project guidelines so they are + understood by everyone. 
It is also a good idea to have + well-structured commit messages, which are usually a part of a + project's guidelines. Good commit messages are essential when + looking back in time and trying to understand why changes were made. + + If you discover that changes are needed to the core layer of the + project, it is worth sharing those with the community as soon as + possible. Chances are if you have discovered the need for changes, + someone else in the community needs them also. + +10. *Development Environment Summary:* Aside from the previous steps, + some best practices exist within the Yocto Project development + environment. Consider the following: + + - Use :ref:`overview-manual/overview-manual-development-environment:git` as the source control + system. + + - Maintain your Metadata in layers that make sense for your + situation. See the ":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`" + section in the Yocto Project Overview and Concepts Manual and the + ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" + section for more information on layers. + + - Separate the project's Metadata and code by using separate Git + repositories. See the ":ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`" + section in the Yocto Project Overview and Concepts Manual for + information on these repositories. See the "`Locating Yocto + Project Source Files <#locating-yocto-project-source-files>`__" + section for information on how to set up local Git repositories + for related upstream Yocto Project Git repositories. + + - Set up the directory for the shared state cache + (:term:`SSTATE_DIR`) where + it makes sense. For example, set up the sstate cache on a system + used by developers in the same organization and share the same + source directories on their machines. + + - Set up an Autobuilder and have it populate the sstate cache and + source directories. + + - The Yocto Project community encourages you to send patches to the + project to fix bugs or add features. If you do submit patches, + follow the project commit guidelines for writing good commit + messages. See the + ":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" + section. + + - Send changes to the core sooner than later as others are likely + to run into the same issues. For some guidance on mailing lists + to use, see the list in the + ":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" + section. For a description + of the available mailing lists, see the ":ref:`resources-mailinglist`" section in + the Yocto Project Reference Manual. + +.. _dev-preparing-the-build-host: + +Preparing the Build Host +======================== + +This section provides procedures to set up a system to be used as your +:term:`Build Host` for +development using the Yocto Project. Your build host can be a native +Linux machine (recommended), it can be a machine (Linux, Mac, or +Windows) that uses `CROPS `__, +which leverages `Docker Containers `__ or it +can be a Windows machine capable of running Windows Subsystem For Linux +v2 (WSL). + +.. note:: + + The Yocto Project is not compatible with + Windows Subsystem for Linux v1 + . It is compatible but not officially supported nor validated with + WSLv2. If you still decide to use WSL please upgrade to + WSLv2 + . + +Once your build host is set up to use the Yocto Project, further steps +are necessary depending on what you want to accomplish. 
See the
+following references for information on how to prepare for Board Support
+Package (BSP) development and kernel development:
+
+- *BSP Development:* See the ":ref:`bsp-guide/bsp:preparing your build host to work with bsp layers`"
+  section in the Yocto Project Board Support Package (BSP) Developer's
+  Guide.
+
+- *Kernel Development:* See the ":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`"
+  section in the Yocto Project Linux Kernel Development Manual.
+
+Setting Up a Native Linux Host
+--------------------------------
+
+Follow these steps to prepare a native Linux machine as your Yocto
+Project Build Host:
+
+1. *Use a Supported Linux Distribution:* You should have a reasonably
+   current Linux-based host system. You will have the best results with
+   a recent release of Fedora, openSUSE, Debian, Ubuntu, RHEL or CentOS
+   as these releases are frequently tested against the Yocto Project and
+   officially supported. For a list of the distributions under
+   validation and their status, see the ":ref:`Supported Linux
+   Distributions `"
+   section in the Yocto Project Reference Manual and the wiki page at
+   :yocto_wiki:`Distribution Support `.
+
+2. *Have Enough Free Disk Space:* Your system should have at least 50
+   Gbytes of free disk space for building images.
+
+3. *Meet Minimal Version Requirements:* The OpenEmbedded build system
+   should be able to run on any modern distribution that has the
+   following versions of Git, tar, Python and gcc:
+
+   - Git 1.8.3.1 or greater
+
+   - tar 1.28 or greater
+
+   - Python 3.5.0 or greater
+
+   - gcc 5.0 or greater
+
+   If your build host does not meet any of these listed version
+   requirements, you can take steps to prepare the system so that you
+   can still use the Yocto Project. See the
+   ":ref:`ref-manual/ref-system-requirements:required git, tar, python and gcc versions`"
+   section in the Yocto Project Reference Manual for information.
+
+4. *Install Development Host Packages:* Required development host
+   packages vary depending on your build host and what you want to do
+   with the Yocto Project. Collectively, the number of required packages
+   is large if you want to be able to cover all cases.
+
+   For lists of required packages for all scenarios, see the
+   ":ref:`ref-manual/ref-system-requirements:required packages for the build host`"
+   section in the Yocto Project Reference Manual.
+
+Once you have completed the previous steps, you are ready to continue
+using a given development path on your native Linux machine. If you are
+going to use BitBake, see the
+":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
+section. If you are going to use the Extensible SDK, see the
+":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto
+Project Application Development and the Extensible Software Development
+Kit (eSDK) manual. If you want to work on the kernel, see the
+:doc:`../kernel-dev/kernel-dev`. If you are going to use
+Toaster, see the ":doc:`../toaster-manual/toaster-manual-setup-and-use`"
+section in the Toaster User Manual.
+
+.. _setting-up-to-use-crops:
+
+Setting Up to Use CROss PlatformS (CROPS)
+------------------------------------------
+
+With `CROPS `__, which
+leverages `Docker Containers `__, you can
+create a Yocto Project development environment that is operating system
+agnostic. You can set up a container in which you can develop using the
+Yocto Project on a Windows, Mac, or Linux machine.
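+
+To give a sense of what the end result looks like: once Docker is
+installed and the ``crops/poky`` container image has been pulled, a
+build shell is typically entered with a single command similar to the
+one below. The host directory shown is only an example and becomes the
+work area inside the container; see the CROPS documentation referenced
+in the following steps for the exact invocation for your setup:
+::
+
+   $ docker run --rm -it -v /home/myuser/yocto:/workdir crops/poky --workdir=/workdir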
+ +Follow these general steps to prepare a Windows, Mac, or Linux machine +as your Yocto Project build host: + +1. *Determine What Your Build Host Needs:* + `Docker `__ is a software + container platform that you need to install on the build host. + Depending on your build host, you might have to install different + software to support Docker containers. Go to the Docker installation + page and read about the platform requirements in "`Supported + Platforms `__" + your build host needs to run containers. + +2. *Choose What To Install:* Depending on whether or not your build host + meets system requirements, you need to install "Docker CE Stable" or + the "Docker Toolbox". Most situations call for Docker CE. However, if + you have a build host that does not meet requirements (e.g. + Pre-Windows 10 or Windows 10 "Home" version), you must install Docker + Toolbox instead. + +3. *Go to the Install Site for Your Platform:* Click the link for the + Docker edition associated with your build host's native software. For + example, if your build host is running Microsoft Windows Version 10 + and you want the Docker CE Stable edition, click that link under + "Supported Platforms". + +4. *Install the Software:* Once you have understood all the + pre-requisites, you can download and install the appropriate + software. Follow the instructions for your specific machine and the + type of the software you need to install: + + - Install `Docker CE for + Windows `__ + for Windows build hosts that meet requirements. + + - Install `Docker CE for + Macs `__ + for Mac build hosts that meet requirements. + + - Install `Docker Toolbox for + Windows `__ + for Windows build hosts that do not meet Docker requirements. + + - Install `Docker Toolbox for + MacOS `__ + for Mac build hosts that do not meet Docker requirements. + + - Install `Docker CE for + CentOS `__ + for Linux build hosts running the CentOS distribution. + + - Install `Docker CE for + Debian `__ + for Linux build hosts running the Debian distribution. + + - Install `Docker CE for + Fedora `__ + for Linux build hosts running the Fedora distribution. + + - Install `Docker CE for + Ubuntu `__ + for Linux build hosts running the Ubuntu distribution. + +5. *Optionally Orient Yourself With Docker:* If you are unfamiliar with + Docker and the container concept, you can learn more here - + https://docs.docker.com/get-started/. + +6. *Launch Docker or Docker Toolbox:* You should be able to launch + Docker or the Docker Toolbox and have a terminal shell on your + development host. + +7. *Set Up the Containers to Use the Yocto Project:* Go to + https://github.com/crops/docker-win-mac-docs/wiki and follow + the directions for your particular build host (i.e. Linux, Mac, or + Windows). + + Once you complete the setup instructions for your machine, you have + the Poky, Extensible SDK, and Toaster containers available. You can + click those links from the page and learn more about using each of + those containers. + +Once you have a container set up, everything is in place to develop just +as if you were running on a native Linux machine. If you are going to +use the Poky container, see the "`Cloning the ``poky`` +Repository <#cloning-the-poky-repository>`__" section. If you are going +to use the Extensible SDK container, see the +":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto +Project Application Development and the Extensible Software Development +Kit (eSDK) manual. 
If you are going to use the Toaster container, see
+the ":doc:`../toaster-manual/toaster-manual-setup-and-use`"
+section in the Toaster User Manual.
+
+.. _setting-up-to-use-wsl:
+
+Setting Up to Use Windows Subsystem For Linux (WSLv2)
+-----------------------------------------------------
+
+With `Windows Subsystem for Linux
+(WSLv2) `__,
+you can create a Yocto Project development environment that allows you
+to build on Windows. You can set up a Linux distribution inside Windows
+in which you can develop using the Yocto Project.
+
+Follow these general steps to prepare a Windows machine using WSLv2 as
+your Yocto Project build host:
+
+1. *Make sure your Windows 10 machine is capable of running WSLv2:*
+   WSLv2 is only available for Windows 10 builds > 18917. To check which
+   build version you are running, you may open a command prompt on
+   Windows and execute the command "ver".
+   ::
+
+      C:\Users\myuser> ver
+
+      Microsoft Windows [Version 10.0.19041.153]
+
+   If your build is capable of running WSLv2, you may continue. For more
+   information on this subject, or for instructions on how to upgrade to
+   WSLv2, visit `Windows 10 WSLv2 `__.
+
+2. *Install the Linux distribution of your choice inside Windows 10:*
+   Once you know your version of Windows 10 supports WSLv2, you can
+   install the distribution of your choice from the Microsoft Store.
+   Open the Microsoft Store and search for Linux. While there are
+   several Linux distributions available, the assumption is that your
+   pick will be one of the distributions supported by the Yocto Project
+   as stated in the instructions for using a native Linux host. After
+   making your selection, simply click "Get" to download and install the
+   distribution.
+
+3. *Check that your Linux distribution is using WSLv2:* Open a Windows
+   PowerShell and run:
+   ::
+
+      C:\WINDOWS\system32> wsl -l -v
+      NAME    STATE   VERSION
+      *Ubuntu Running 2
+
+   Note the VERSION column, which shows the WSL version being used by
+   your distribution. On compatible systems, the WSL version can be
+   changed at any point in time.
+
+4. *Optionally Orient Yourself on WSL:* If you are unfamiliar with WSL,
+   you can learn more here -
+   https://docs.microsoft.com/en-us/windows/wsl/wsl2-about.
+
+5. *Launch your WSL Distribution:* From the Windows Start menu, simply
+   launch your WSL distribution just like any other application.
+
+6. *Optimize your WSLv2 storage often:* Due to the way storage is
+   handled on WSLv2, the storage space used by the underlying Linux
+   distribution is not reflected immediately, and since BitBake heavily
+   uses storage, after several builds you may be unaware that you are
+   running out of space. WSLv2 uses a VHDX file for storage; this issue
+   can be easily avoided by manually optimizing this file often, which
+   can be done in the following way:
+
+   1.
*Find the location of your VHDX file:* First you need to find the + distro app package directory, to achieve this open a Windows + Powershell as Administrator and run: + :: + + C:\WINDOWS\system32> Get-AppxPackage -Name "*Ubuntu*" | Select PackageFamilyName + PackageFamilyName + ----------------- + CanonicalGroupLimited.UbuntuonWindows_79abcdefgh + + + You should now + replace the PackageFamilyName and your user on the following path + to find your VHDX file: + :: + + ls C:\Users\myuser\AppData\Local\Packages\CanonicalGroupLimited.UbuntuonWindows_79abcdefgh\LocalState\ + Mode LastWriteTime Length Name + -a---- 3/14/2020 9:52 PM 57418973184 ext4.vhdx + + Your VHDX file path is: + ``C:\Users\myuser\AppData\Local\Packages\CanonicalGroupLimited.UbuntuonWindows_79abcdefgh\LocalState\ext4.vhdx`` + + 2. *Optimize your VHDX file:* Open a Windows Powershell as + Administrator to optimize your VHDX file, shutting down WSL first: + :: + + C:\WINDOWS\system32> wsl --shutdown + C:\WINDOWS\system32> optimize-vhd -Path C:\Users\myuser\AppData\Local\Packages\CanonicalGroupLimited.UbuntuonWindows_79abcdefgh\LocalState\ext4.vhdx -Mode full + + A progress bar should be shown while optimizing the + VHDX file, and storage should now be reflected correctly on the + Windows Explorer. + +.. note:: + + The current implementation of WSLv2 does not have out-of-the-box + access to external devices such as those connected through a USB + port, but it automatically mounts your + C: + drive on + /mnt/c/ + (and others), which you can use to share deploy artifacts to be later + flashed on hardware through Windows, but your build directory should + not reside inside this mountpoint. + +Once you have WSLv2 set up, everything is in place to develop just as if +you were running on a native Linux machine. If you are going to use the +Extensible SDK container, see the ":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto +Project Application Development and the Extensible Software Development +Kit (eSDK) manual. If you are going to use the Toaster container, see +the ":doc:`../toaster-manual/toaster-manual-setup-and-use`" +section in the Toaster User Manual. + +Locating Yocto Project Source Files +=================================== + +This section shows you how to locate, fetch and configure the source +files you'll need to work with the Yocto Project. + +.. note:: + + - For concepts and introductory information about Git as it is used + in the Yocto Project, see the ":ref:`overview-manual/overview-manual-development-environment:git`" + section in the Yocto Project Overview and Concepts Manual. + + - For concepts on Yocto Project source repositories, see the + ":ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`" + section in the Yocto Project Overview and Concepts Manual." + +Accessing Source Repositories +----------------------------- + +Working from a copy of the upstream :ref:`dev-manual/dev-manual-start:accessing source repositories` is the +preferred method for obtaining and using a Yocto Project release. You +can view the Yocto Project Source Repositories at +:yocto_git:`/`. In particular, you can find the ``poky`` +repository at :yocto_git:`/cgit.cgi/poky`. + +Use the following procedure to locate the latest upstream copy of the +``poky`` Git repository: + +1. *Access Repositories:* Open a browser and go to + :yocto_git:`/` to access the GUI-based interface into the + Yocto Project source repositories. + +2. 
*Select the Repository:* Click on the repository in which you are + interested (e.g. ``poky``). + +3. *Find the URL Used to Clone the Repository:* At the bottom of the + page, note the URL used to clone that repository + (e.g. :yocto_git:`/git/poky`). + + .. note:: + + For information on cloning a repository, see the " + Cloning the + poky + Repository + " section. + +Accessing Index of Releases +--------------------------- + +Yocto Project maintains an Index of Releases area that contains related +files that contribute to the Yocto Project. Rather than Git +repositories, these files are tarballs that represent snapshots in time +of a given component. + +.. note:: + + The recommended method for accessing Yocto Project components is to + use Git to clone the upstream repository and work from within that + locally cloned repository. The procedure in this section exists + should you desire a tarball snapshot of any given component. + +Follow these steps to locate and download a particular tarball: + +1. *Access the Index of Releases:* Open a browser and go to + :yocto_dl:`Index of Releases `. The + list represents released components (e.g. ``bitbake``, ``sato``, and + so on). + + .. note:: + + The + yocto + directory contains the full array of released Poky tarballs. The + poky + directory in the Index of Releases was historically used for very + early releases and exists now only for retroactive completeness. + +2. *Select a Component:* Click on any released component in which you + are interested (e.g. ``yocto``). + +3. *Find the Tarball:* Drill down to find the associated tarball. For + example, click on ``yocto-&DISTRO;`` to view files associated with the + Yocto Project &DISTRO; release (e.g. + ``&YOCTO_POKY;.tar.bz2``, which is the + released Poky tarball). + +4. *Download the Tarball:* Click the tarball to download and save a + snapshot of the given component. + +Using the Downloads Page +------------------------ + +The :yocto_home:`Yocto Project Website <>` uses a "DOWNLOADS" page +from which you can locate and download tarballs of any Yocto Project +release. Rather than Git repositories, these files represent snapshot +tarballs similar to the tarballs located in the Index of Releases +described in the "`Accessing Index of +Releases <#accessing-index-of-releases>`__" section. + +.. note:: + + The recommended method for accessing Yocto Project components is to + use Git to clone a repository and work from within that local + repository. The procedure in this section exists should you desire a + tarball snapshot of any given component. + +1. *Go to the Yocto Project Website:* Open The + :yocto_home:`Yocto Project Website <>` in your browser. + +2. *Get to the Downloads Area:* Select the "DOWNLOADS" item from the + pull-down "SOFTWARE" tab menu near the top of the page. + +3. *Select a Yocto Project Release:* Use the menu next to "RELEASE" to + display and choose a recent or past supported Yocto Project release + (e.g. &DISTRO_NAME_NO_CAP;, &DISTRO_NAME_NO_CAP_MINUS_ONE;, and so forth). + + .. note:: + + For a "map" of Yocto Project releases to version numbers, see the + Releases + wiki page. + + You can use the "RELEASE ARCHIVE" link to reveal a menu of all Yocto + Project releases. + +4. *Download Tools or Board Support Packages (BSPs):* From the + "DOWNLOADS" page, you can download tools or BSPs as well. Just scroll + down the page and look for what you need. 
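+
+If you prefer the command line, the same release tarballs can be fetched
+directly. The following is a sketch that assumes the usual release
+download layout; the exact URL depends on the release you select on the
+"DOWNLOADS" page:
+::
+
+   $ wget https://downloads.yoctoproject.org/releases/yocto/yocto-&DISTRO;/&YOCTO_POKY;.tar.bz2
+   $ tar -xjf &YOCTO_POKY;.tar.bz2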
+ +Accessing Nightly Builds +------------------------ + +Yocto Project maintains an area for nightly builds that contains tarball +releases at https://autobuilder.yocto.io//pub/nightly/. These builds include Yocto +Project releases ("poky"), toolchains, and builds for supported +machines. + +Should you ever want to access a nightly build of a particular Yocto +Project component, use the following procedure: + +1. *Locate the Index of Nightly Builds:* Open a browser and go to + https://autobuilder.yocto.io//pub/nightly/ to access the Nightly Builds. + +2. *Select a Date:* Click on the date in which you are interested. If + you want the latest builds, use "CURRENT". + +3. *Select a Build:* Choose the area in which you are interested. For + example, if you are looking for the most recent toolchains, select + the "toolchain" link. + +4. *Find the Tarball:* Drill down to find the associated tarball. + +5. *Download the Tarball:* Click the tarball to download and save a + snapshot of the given component. + +Cloning and Checking Out Branches +================================= + +To use the Yocto Project for development, you need a release locally +installed on your development system. This locally installed set of +files is referred to as the :term:`Source Directory` +in the Yocto Project documentation. + +The preferred method of creating your Source Directory is by using +:ref:`overview-manual/overview-manual-development-environment:git` to clone a local copy of the upstream +``poky`` repository. Working from a cloned copy of the upstream +repository allows you to contribute back into the Yocto Project or to +simply work with the latest software on a development branch. Because +Git maintains and creates an upstream repository with a complete history +of changes and you are working with a local clone of that repository, +you have access to all the Yocto Project development branches and tag +names used in the upstream repository. + +Cloning the ``poky`` Repository +------------------------------- + +Follow these steps to create a local version of the upstream +:term:`Poky` Git repository. + +1. *Set Your Directory:* Change your working directory to where you want + to create your local copy of ``poky``. + +2. *Clone the Repository:* The following example command clones the + ``poky`` repository and uses the default name "poky" for your local + repository: + :: + + $ git clone git://git.yoctoproject.org/poky + Cloning into 'poky'... + remote: Counting objects: 432160, done. + remote: Compressing objects: 100% (102056/102056), done. + remote: Total 432160 (delta 323116), reused 432037 (delta 323000) + Receiving objects: 100% (432160/432160), 153.81 MiB | 8.54 MiB/s, done. + Resolving deltas: 100% (323116/323116), done. + Checking connectivity... done. + + Unless you + specify a specific development branch or tag name, Git clones the + "master" branch, which results in a snapshot of the latest + development changes for "master". For information on how to check out + a specific development branch or on how to check out a local branch + based on a tag name, see the "`Checking Out By Branch in + Poky <#checking-out-by-branch-in-poky>`__" and `Checking Out By Tag + in Poky <#checkout-out-by-tag-in-poky>`__" sections, respectively. + + Once the local repository is created, you can change to that + directory and check its status. 
Here, the single "master" branch
+   exists on your system and, by default, it is checked out:
+   ::
+
+      $ cd ~/poky
+      $ git status
+      On branch master
+      Your branch is up-to-date with 'origin/master'.
+      nothing to commit, working directory clean
+      $ git branch
+      * master
+
+   Your local repository of poky is identical to the
+   upstream poky repository at the time from which it was cloned. As you
+   work with the local branch, you can periodically use the
+   ``git pull --rebase`` command to be sure you are up-to-date
+   with the upstream branch.
+
+Checking Out by Branch in Poky
+------------------------------
+
+When you clone the upstream poky repository, you have access to all its
+development branches. Each development branch in a repository is unique
+as it forks off the "master" branch. To see and use the files of a
+particular development branch locally, you need to know the branch name
+and then specifically check out that development branch.
+
+.. note::
+
+   Checking out an active development branch by branch name gives you a
+   snapshot of that particular branch at the time you check it out.
+   Further development on top of the branch can occur after you check
+   it out.
+
+1. *Switch to the Poky Directory:* If you have a local poky Git
+   repository, switch to that directory. If you do not have the local
+   copy of poky, see the "`Cloning the ``poky``
+   Repository <#cloning-the-poky-repository>`__" section.
+
+2. *Determine Existing Branch Names:*
+   ::
+
+      $ git branch -a
+      * master
+      remotes/origin/1.1_M1
+      remotes/origin/1.1_M2
+      remotes/origin/1.1_M3
+      remotes/origin/1.1_M4
+      remotes/origin/1.2_M1
+      remotes/origin/1.2_M2
+      remotes/origin/1.2_M3
+      . . .
+      remotes/origin/thud
+      remotes/origin/thud-next
+      remotes/origin/warrior
+      remotes/origin/warrior-next
+      remotes/origin/zeus
+      remotes/origin/zeus-next
+      ... and so on ...
+
+3. *Check out the Branch:* Check out the development branch in which you
+   want to work. For example, to access the files for the Yocto Project
+   &DISTRO; Release (&DISTRO_NAME;), use the following command:
+   ::
+
+      $ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
+      Branch &DISTRO_NAME; set up to track remote branch &DISTRO_NAME; from origin.
+      Switched to a new branch '&DISTRO_NAME;'
+
+   The previous command checks out the "&DISTRO_NAME;" development
+   branch and reports that the branch is tracking the upstream
+   "origin/&DISTRO_NAME;" branch.
+
+   The following command displays the branches that are now part of your
+   local poky repository. The asterisk character indicates the branch
+   that is currently checked out for work:
+   ::
+
+      $ git branch
+        master
+      * &DISTRO_NAME;
+
+.. _checkout-out-by-tag-in-poky:
+
+Checking Out by Tag in Poky
+---------------------------
+
+Similar to branches, the upstream repository uses tags to mark specific
+commits associated with significant points in a development branch (i.e.
+a release point or stage of a release). You might want to set up a local
+branch based on one of those points in the repository. The process is
+similar to checking out by branch name except you use tag names.
+
+.. note::
+
+   Checking out a branch based on a tag gives you a stable set of files
+   not affected by development on the branch above the tag.
+
+1. *Switch to the Poky Directory:* If you have a local poky Git
+   repository, switch to that directory. If you do not have the local
+   copy of poky, see the "`Cloning the ``poky``
+   Repository <#cloning-the-poky-repository>`__" section.
+
+2.
*Fetch the Tag Names:* To checkout the branch based on a tag name, + you need to fetch the upstream tags into your local repository: + :: + + $ git fetch --tags + $ + +3. *List the Tag Names:* You can list the tag names now: + :: + + $ git tag + 1.1_M1.final + 1.1_M1.rc1 + 1.1_M1.rc2 + 1.1_M2.final + 1.1_M2.rc1 + . + . + . + yocto-2.5 + yocto-2.5.1 + yocto-2.5.2 + yocto-2.5.3 + yocto-2.6 + yocto-2.6.1 + yocto-2.6.2 + yocto-2.7 + yocto_1.5_M5.rc8 + + +4. *Check out the Branch:* + :: + + $ git checkout tags/yocto-&DISTRO; -b my_yocto_&DISTRO; + Switched to a new branch 'my_yocto_&DISTRO;' + $ git branch + master + * my_yocto_&DISTRO; + + The previous command creates and + checks out a local branch named "my_yocto_&DISTRO;", which is based on + the commit in the upstream poky repository that has the same tag. In + this example, the files you have available locally as a result of the + ``checkout`` command are a snapshot of the "&DISTRO_NAME_NO_CAP;" + development branch at the point where Yocto Project &DISTRO; was + released. diff --git a/poky/documentation/dev-manual/dev-manual.rst b/poky/documentation/dev-manual/dev-manual.rst new file mode 100644 index 000000000..c62906715 --- /dev/null +++ b/poky/documentation/dev-manual/dev-manual.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +====================================== +Yocto Project Development Tasks Manual +====================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + dev-manual-intro + dev-manual-start + dev-manual-common-tasks + dev-manual-qemu + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/dev-manual/history.rst b/poky/documentation/dev-manual/history.rst new file mode 100644 index 000000000..8b149a6ef --- /dev/null +++ b/poky/documentation/dev-manual/history.rst @@ -0,0 +1,67 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 1.1 + - October 2011 + - The initial document released with the Yocto Project 1.1 Release + * - 1.2 + - April 2012 + - Released with the Yocto Project 1.2 Release. + * - 1.3 + - October 2012 + - Released with the Yocto Project 1.3 Release. + * - 1.4 + - April 2013 + - Released with the Yocto Project 1.4 Release. + * - 1.5 + - October 2013 + - Released with the Yocto Project 1.5 Release. + * - 1.6 + - April 2014 + - Released with the Yocto Project 1.6 Release. + * - 1.7 + - October 2014 + - Released with the Yocto Project 1.7 Release. + * - 1.8 + - April 2015 + - Released with the Yocto Project 1.8 Release. + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. 
diff --git a/poky/documentation/figures/yp-how-it-works-new-diagram.png b/poky/documentation/figures/yp-how-it-works-new-diagram.png new file mode 100644 index 000000000..2ce076f3c Binary files /dev/null and b/poky/documentation/figures/yp-how-it-works-new-diagram.png differ diff --git a/poky/documentation/genindex.rst b/poky/documentation/genindex.rst new file mode 100644 index 000000000..a4af06f65 --- /dev/null +++ b/poky/documentation/genindex.rst @@ -0,0 +1,3 @@ +===== +Index +===== diff --git a/poky/documentation/index.rst b/poky/documentation/index.rst new file mode 100644 index 000000000..258ecb81a --- /dev/null +++ b/poky/documentation/index.rst @@ -0,0 +1,53 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +.. The Yocto Project documentation master file, created by + sphinx-quickstart on Mon Apr 13 09:38:33 2020. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to The Yocto Project's documentation! +============================================= + +| + +.. toctree:: + :maxdepth: 1 + :caption: Introduction and Overview + + Quick Build + what-i-wish-id-known + transitioning-to-a-custom-environment + Yocto Project Software Overview + Tips and Tricks Wiki + + +.. toctree:: + :maxdepth: 1 + :caption: Manuals + + Overview and Concepts Manual + Reference Manual + Board Support Package (BSP) Developer's guide + Development Tasks Manual + Linux Kernel Development Manual + Profile and Tracing Manual + Application Development and the Extensible SDK (eSDK) + Toaster Manual + Test Environment Manual + Bitbake User Manual + +.. toctree:: + :maxdepth: 1 + :caption: 'Mega' Manual + + All-in-one 'Mega' Manual + +.. toctree:: + :maxdepth: 1 + :caption: Manuals/Variable Index + + genindex + Current/Previous Version Specific Manuals + + + diff --git a/poky/documentation/kernel-dev/history.rst b/poky/documentation/kernel-dev/history.rst new file mode 100644 index 000000000..3ffb7eacb --- /dev/null +++ b/poky/documentation/kernel-dev/history.rst @@ -0,0 +1,58 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 1.4 + - April 2013 + - The initial document released with the Yocto Project 1.4 Release + * - 1.5 + - October 2013 + - Released with the Yocto Project 1.5 Release. + * - 1.6 + - April 2014 + - Released with the Yocto Project 1.6 Release. + * - 1.7 + - October 2014 + - Released with the Yocto Project 1.7 Release. + * - 1.8 + - April 2015 + - Released with the Yocto Project 1.8 Release. + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. 
diff --git a/poky/documentation/kernel-dev/kernel-dev-advanced.rst b/poky/documentation/kernel-dev/kernel-dev-advanced.rst new file mode 100644 index 000000000..36133caae --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-advanced.rst @@ -0,0 +1,983 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************************************************* +Working with Advanced Metadata (``yocto-kernel-cache``) +******************************************************* + +.. _kernel-dev-advanced-overview: + +Overview +======== + +In addition to supporting configuration fragments and patches, the Yocto +Project kernel tools also support rich +:term:`Metadata` that you can use to define +complex policies and Board Support Package (BSP) support. The purpose of +the Metadata and the tools that manage it is to help you manage the +complexity of the configuration and sources used to support multiple +BSPs and Linux kernel types. + +Kernel Metadata exists in many places. One area in the +:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories` +is the ``yocto-kernel-cache`` Git repository. You can find this repository +grouped under the "Yocto Linux Kernel" heading in the +:yocto_git:`Yocto Project Source Repositories <>`. + +Kernel development tools ("kern-tools") exist also in the Yocto Project +Source Repositories under the "Yocto Linux Kernel" heading in the +``yocto-kernel-tools`` Git repository. The recipe that builds these +tools is ``meta/recipes-kernel/kern-tools/kern-tools-native_git.bb`` in +the :term:`Source Directory` (e.g. +``poky``). + +Using Kernel Metadata in a Recipe +================================= + +As mentioned in the introduction, the Yocto Project contains kernel +Metadata, which is located in the ``yocto-kernel-cache`` Git repository. +This Metadata defines Board Support Packages (BSPs) that correspond to +definitions in linux-yocto recipes for corresponding BSPs. A BSP +consists of an aggregation of kernel policy and enabled +hardware-specific features. The BSP can be influenced from within the +linux-yocto recipe. + +.. note:: + + A Linux kernel recipe that contains kernel Metadata (e.g. inherits + from the + linux-yocto.inc + file) is said to be a "linux-yocto style" recipe. + +Every linux-yocto style recipe must define the +:term:`KMACHINE` variable. This +variable is typically set to the same value as the ``MACHINE`` variable, +which is used by :term:`BitBake`. +However, in some cases, the variable might instead refer to the +underlying platform of the ``MACHINE``. + +Multiple BSPs can reuse the same ``KMACHINE`` name if they are built +using the same BSP description. Multiple Corei7-based BSPs could share +the same "intel-corei7-64" value for ``KMACHINE``. It is important to +realize that ``KMACHINE`` is just for kernel mapping, while ``MACHINE`` +is the machine type within a BSP Layer. Even with this distinction, +however, these two variables can hold the same value. See the `BSP +Descriptions <#bsp-descriptions>`__ section for more information. + +Every linux-yocto style recipe must also indicate the Linux kernel +source repository branch used to build the Linux kernel. The +:term:`KBRANCH` variable must be set +to indicate the branch. + +.. 
note:: + + You can use the + KBRANCH + value to define an alternate branch typically with a machine override + as shown here from the + meta-yocto-bsp + layer: + :: + + KBRANCH_edgerouter = "standard/edgerouter" + + +The linux-yocto style recipes can optionally define the following +variables: + + - :term:`KERNEL_FEATURES` + + - :term:`LINUX_KERNEL_TYPE` + +:term:`LINUX_KERNEL_TYPE` +defines the kernel type to be used in assembling the configuration. If +you do not specify a ``LINUX_KERNEL_TYPE``, it defaults to "standard". +Together with ``KMACHINE``, ``LINUX_KERNEL_TYPE`` defines the search +arguments used by the kernel tools to find the appropriate description +within the kernel Metadata with which to build out the sources and +configuration. The linux-yocto recipes define "standard", "tiny", and +"preempt-rt" kernel types. See the "`Kernel Types <#kernel-types>`__" +section for more information on kernel types. + +During the build, the kern-tools search for the BSP description file +that most closely matches the ``KMACHINE`` and ``LINUX_KERNEL_TYPE`` +variables passed in from the recipe. The tools use the first BSP +description it finds that match both variables. If the tools cannot find +a match, they issue a warning. + +The tools first search for the ``KMACHINE`` and then for the +``LINUX_KERNEL_TYPE``. If the tools cannot find a partial match, they +will use the sources from the ``KBRANCH`` and any configuration +specified in the :term:`SRC_URI`. + +You can use the +:term:`KERNEL_FEATURES` +variable to include features (configuration fragments, patches, or both) +that are not already included by the ``KMACHINE`` and +``LINUX_KERNEL_TYPE`` variable combination. For example, to include a +feature specified as "features/netfilter/netfilter.scc", specify: +:: + + KERNEL_FEATURES += "features/netfilter/netfilter.scc" + +To include a +feature called "cfg/sound.scc" just for the ``qemux86`` machine, +specify: +:: + + KERNEL_FEATURES_append_qemux86 = " cfg/sound.scc" + +The value of +the entries in ``KERNEL_FEATURES`` are dependent on their location +within the kernel Metadata itself. The examples here are taken from the +``yocto-kernel-cache`` repository. Each branch of this repository +contains "features" and "cfg" subdirectories at the top-level. For more +information, see the "`Kernel Metadata +Syntax <#kernel-metadata-syntax>`__" section. + +Kernel Metadata Syntax +====================== + +The kernel Metadata consists of three primary types of files: ``scc`` +[1]_ description files, configuration fragments, and patches. The +``scc`` files define variables and include or otherwise reference any of +the three file types. The description files are used to aggregate all +types of kernel Metadata into what ultimately describes the sources and +the configuration required to build a Linux kernel tailored to a +specific machine. + +The ``scc`` description files are used to define two fundamental types +of kernel Metadata: + +- Features + +- Board Support Packages (BSPs) + +Features aggregate sources in the form of patches and configuration +fragments into a modular reusable unit. You can use features to +implement conceptually separate kernel Metadata descriptions such as +pure configuration fragments, simple patches, complex features, and +kernel types. `Kernel types <#kernel-types>`__ define general kernel +features and policy to be reused in the BSPs. 
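+
+To make the shape of a feature description concrete before the detailed
+sections that follow, here is a minimal, purely illustrative example;
+the file and patch names are hypothetical:
+::
+
+   # features/myturbo/myturbo.scc (illustrative only)
+   define KFEATURE_DESCRIPTION "Enable the hypothetical myturbo driver"
+
+   patch 0001-add-myturbo-driver.patch
+   kconf hardware myturbo.cfg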
+ +BSPs define hardware-specific features and aggregate them with kernel +types to form the final description of what will be assembled and built. + +While the kernel Metadata syntax does not enforce any logical separation +of configuration fragments, patches, features or kernel types, best +practices dictate a logical separation of these types of Metadata. The +following Metadata file hierarchy is recommended: +:: + + base/ + bsp/ + cfg/ + features/ + ktypes/ + patches/ + +The ``bsp`` directory contains the `BSP +descriptions <#bsp-descriptions>`__. The remaining directories all +contain "features". Separating ``bsp`` from the rest of the structure +aids conceptualizing intended usage. + +Use these guidelines to help place your ``scc`` description files within +the structure: + +- If your file contains only configuration fragments, place the file in + the ``cfg`` directory. + +- If your file contains only source-code fixes, place the file in the + ``patches`` directory. + +- If your file encapsulates a major feature, often combining sources + and configurations, place the file in ``features`` directory. + +- If your file aggregates non-hardware configuration and patches in + order to define a base kernel policy or major kernel type to be + reused across multiple BSPs, place the file in ``ktypes`` directory. + +These distinctions can easily become blurred - especially as out-of-tree +features slowly merge upstream over time. Also, remember that how the +description files are placed is a purely logical organization and has no +impact on the functionality of the kernel Metadata. There is no impact +because all of ``cfg``, ``features``, ``patches``, and ``ktypes``, +contain "features" as far as the kernel tools are concerned. + +Paths used in kernel Metadata files are relative to base, which is +either +:term:`FILESEXTRAPATHS` if +you are creating Metadata in `recipe-space <#recipe-space-metadata>`__, +or the top level of +:yocto_git:`yocto-kernel-cache ` +if you are creating `Metadata outside of the +recipe-space <#metadata-outside-the-recipe-space>`__. + +.. [1] + ``scc`` stands for Series Configuration Control, but the naming has + less significance in the current implementation of the tooling than + it had in the past. Consider ``scc`` files to be description files. + +Configuration +------------- + +The simplest unit of kernel Metadata is the configuration-only feature. +This feature consists of one or more Linux kernel configuration +parameters in a configuration fragment file (``.cfg``) and a ``.scc`` +file that describes the fragment. + +As an example, consider the Symmetric Multi-Processing (SMP) fragment +used with the ``linux-yocto-4.12`` kernel as defined outside of the +recipe space (i.e. ``yocto-kernel-cache``). This Metadata consists of +two files: ``smp.scc`` and ``smp.cfg``. 
You can find these files in the +``cfg`` directory of the ``yocto-4.12`` branch in the +``yocto-kernel-cache`` Git repository: +:: + + cfg/smp.scc: + define KFEATURE_DESCRIPTION "Enable SMP for 32 bit builds" + define KFEATURE_COMPATIBILITY all + + kconf hardware smp.cfg + + cfg/smp.cfg: + CONFIG_SMP=y + CONFIG_SCHED_SMT=y + # Increase default NR_CPUS from 8 to 64 so that platform with + # more than 8 processors can be all activated at boot time + CONFIG_NR_CPUS=64 + # The following is needed when setting NR_CPUS to something + # greater than 8 on x86 architectures, it should be automatically + # disregarded by Kconfig when using a different arch + CONFIG_X86_BIGSMP=y + +You can find general information on configuration +fragment files in the "`Creating Configuration +Fragments <#creating-config-fragments>`__" section. + +Within the ``smp.scc`` file, the +:term:`KFEATURE_DESCRIPTION` +statement provides a short description of the fragment. Higher level +kernel tools use this description. + +Also within the ``smp.scc`` file, the ``kconf`` command includes the +actual configuration fragment in an ``.scc`` file, and the "hardware" +keyword identifies the fragment as being hardware enabling, as opposed +to general policy, which would use the "non-hardware" keyword. The +distinction is made for the benefit of the configuration validation +tools, which warn you if a hardware fragment overrides a policy set by a +non-hardware fragment. + +.. note:: + + The description file can include multiple + kconf + statements, one per fragment. + +As described in the "`Validating +Configuration <#validating-configuration>`__" section, you can use the +following BitBake command to audit your configuration: +:: + + $ bitbake linux-yocto -c kernel_configcheck -f + +Patches +------- + +Patch descriptions are very similar to configuration fragment +descriptions, which are described in the previous section. However, +instead of a ``.cfg`` file, these descriptions work with source patches +(i.e. ``.patch`` files). + +A typical patch includes a description file and the patch itself. As an +example, consider the build patches used with the ``linux-yocto-4.12`` +kernel as defined outside of the recipe space (i.e. +``yocto-kernel-cache``). This Metadata consists of several files: +``build.scc`` and a set of ``*.patch`` files. You can find these files +in the ``patches/build`` directory of the ``yocto-4.12`` branch in the +``yocto-kernel-cache`` Git repository. + +The following listings show the ``build.scc`` file and part of the +``modpost-mask-trivial-warnings.patch`` file: +:: + + patches/build/build.scc: + patch arm-serialize-build-targets.patch + patch powerpc-serialize-image-targets.patch + patch kbuild-exclude-meta-directory-from-distclean-processi.patch + + # applied by kgit + # patch kbuild-add-meta-files-to-the-ignore-li.patch + + patch modpost-mask-trivial-warnings.patch + patch menuconfig-check-lxdiaglog.sh-Allow-specification-of.patch + + patches/build/modpost-mask-trivial-warnings.patch: + From bd48931bc142bdd104668f3a062a1f22600aae61 Mon Sep 17 00:00:00 2001 + From: Paul Gortmaker + Date: Sun, 25 Jan 2009 17:58:09 -0500 + Subject: [PATCH] modpost: mask trivial warnings + + Newer HOSTCC will complain about various stdio fcns because + . + . + . + char *dump_write = NULL, *files_source = NULL; + int opt; + -- + 2.10.1 + + generated by cgit v0.10.2 at 2017-09-28 15:23:23 (GMT) + +The description file can +include multiple patch statements where each statement handles a single +patch. 
In the example ``build.scc`` file, five patch statements exist +for the five patches in the directory. + +You can create a typical ``.patch`` file using ``diff -Nurp`` or +``git format-patch`` commands. For information on how to create patches, +see the "`Using ``devtool`` to Patch the +Kernel <#using-devtool-to-patch-the-kernel>`__" and "`Using Traditional +Kernel Development to Patch the +Kernel <#using-traditional-kernel-development-to-patch-the-kernel>`__" +sections. + +Features +-------- + +Features are complex kernel Metadata types that consist of configuration +fragments, patches, and possibly other feature description files. As an +example, consider the following generic listing: +:: + + features/myfeature.scc + define KFEATURE_DESCRIPTION "Enable myfeature" + + patch 0001-myfeature-core.patch + patch 0002-myfeature-interface.patch + + include cfg/myfeature_dependency.scc + kconf non-hardware myfeature.cfg + +This example shows how the ``patch`` and ``kconf`` commands are used as well +as how an additional feature description file is included with the +``include`` command. + +Typically, features are less granular than configuration fragments and +are more likely than configuration fragments and patches to be the types +of things you want to specify in the ``KERNEL_FEATURES`` variable of the +Linux kernel recipe. See the "`Using Kernel Metadata in a +Recipe <#using-kernel-metadata-in-a-recipe>`__" section earlier in the +manual. + +Kernel Types +------------ + +A kernel type defines a high-level kernel policy by aggregating +non-hardware configuration fragments with patches you want to use when +building a Linux kernel of a specific type (e.g. a real-time kernel). +Syntactically, kernel types are no different than features as described +in the "`Features <#features>`__" section. The +:term:`LINUX_KERNEL_TYPE` +variable in the kernel recipe selects the kernel type. For example, in +the ``linux-yocto_4.12.bb`` kernel recipe found in +``poky/meta/recipes-kernel/linux``, a +:ref:`require ` directive +includes the ``poky/meta/recipes-kernel/linux/linux-yocto.inc`` file, +which has the following statement that defines the default kernel type: +:: + + LINUX_KERNEL_TYPE ??= "standard" + +Another example would be the real-time kernel (i.e. +``linux-yocto-rt_4.12.bb``). This kernel recipe directly sets the kernel +type as follows: +:: + + LINUX_KERNEL_TYPE = "preempt-rt" + +.. note:: + + You can find kernel recipes in the + meta/recipes-kernel/linux + directory of the + Source Directory + (e.g. + poky/meta/recipes-kernel/linux/linux-yocto_4.12.bb + ). See the " + Using Kernel Metadata in a Recipe + " section for more information. + +Three kernel types ("standard", "tiny", and "preempt-rt") are supported +for Linux Yocto kernels: + +- "standard": Includes the generic Linux kernel policy of the Yocto + Project linux-yocto kernel recipes. This policy includes, among other + things, which file systems, networking options, core kernel features, + and debugging and tracing options are supported. + +- "preempt-rt": Applies the ``PREEMPT_RT`` patches and the + configuration options required to build a real-time Linux kernel. + This kernel type inherits from the "standard" kernel type. + +- "tiny": Defines a bare minimum configuration meant to serve as a base + for very small Linux kernels. The "tiny" kernel type is independent + from the "standard" configuration. Although the "tiny" kernel type + does not currently include any source changes, it might in the + future. 
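+
+For instance, a BSP layer might pin the real-time kernel type for one of
+its machines from a kernel append file. The following is a minimal
+sketch; the machine name and branch are hypothetical, and the override
+syntax matches the other examples in this manual:
+::
+
+   # hypothetical linux-yocto_%.bbappend in a BSP layer
+   LINUX_KERNEL_TYPE_mymachine = "preempt-rt"
+   KBRANCH_mymachine = "standard/preempt-rt/base"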
+ +For any given kernel type, the Metadata is defined by the ``.scc`` (e.g. +``standard.scc``). Here is a partial listing for the ``standard.scc`` +file, which is found in the ``ktypes/standard`` directory of the +``yocto-kernel-cache`` Git repository: +:: + + # Include this kernel type fragment to get the standard features and + # configuration values. + + # Note: if only the features are desired, but not the configuration + # then this should be included as: + # include ktypes/standard/standard.scc nocfg + # if no chained configuration is desired, include it as: + # include ktypes/standard/standard.scc nocfg inherit + + + + include ktypes/base/base.scc + branch standard + + kconf non-hardware standard.cfg + + include features/kgdb/kgdb.scc + . + . + . + + include cfg/net/ip6_nf.scc + include cfg/net/bridge.scc + + include cfg/systemd.scc + + include features/rfkill/rfkill.scc + +As with any ``.scc`` file, a kernel type definition can aggregate other +``.scc`` files with ``include`` commands. These definitions can also +directly pull in configuration fragments and patches with the ``kconf`` +and ``patch`` commands, respectively. + +.. note:: + + It is not strictly necessary to create a kernel type + .scc + file. The Board Support Package (BSP) file can implicitly define the + kernel type using a + define + KTYPE + myktype + line. See the " + BSP Descriptions + " section for more information. + +BSP Descriptions +---------------- + +BSP descriptions (i.e. ``*.scc`` files) combine kernel types with +hardware-specific features. The hardware-specific Metadata is typically +defined independently in the BSP layer, and then aggregated with each +supported kernel type. + +.. note:: + + For BSPs supported by the Yocto Project, the BSP description files + are located in the + bsp + directory of the + yocto-kernel-cache + repository organized under the "Yocto Linux Kernel" heading in the + Yocto Project Source Repositories + . + +This section overviews the BSP description structure, the aggregation +concepts, and presents a detailed example using a BSP supported by the +Yocto Project (i.e. BeagleBone Board). For complete information on BSP +layer file hierarchy, see the :doc:`../bsp-guide/bsp-guide`. + +.. _bsp-description-file-overview: + +Description Overview +~~~~~~~~~~~~~~~~~~~~ + +For simplicity, consider the following root BSP layer description files +for the BeagleBone board. These files employ both a structure and naming +convention for consistency. The naming convention for the file is as +follows: +:: + + bsp_root_name-kernel_type.scc + +Here are some example root layer +BSP filenames for the BeagleBone Board BSP, which is supported by the +Yocto Project: +:: + + beaglebone-standard.scc + beaglebone-preempt-rt.scc + +Each file uses the root name (i.e "beaglebone") BSP name followed by the +kernel type. + +Examine the ``beaglebone-standard.scc`` file: +:: + + define KMACHINE beaglebone + define KTYPE standard + define KARCH arm + + include ktypes/standard/standard.scc + branch beaglebone + + include beaglebone.scc + + # default policy for standard kernels + include features/latencytop/latencytop.scc + include features/profiling/profiling.scc + +Every top-level BSP description file +should define the :term:`KMACHINE`, +:term:`KTYPE`, and +:term:`KARCH` variables. These +variables allow the OpenEmbedded build system to identify the +description as meeting the criteria set by the recipe being built. 
This +example supports the "beaglebone" machine for the "standard" kernel and +the "arm" architecture. + +Be aware that a hard link between the ``KTYPE`` variable and a kernel +type description file does not exist. Thus, if you do not have the +kernel type defined in your kernel Metadata as it is here, you only need +to ensure that the +:term:`LINUX_KERNEL_TYPE` +variable in the kernel recipe and the ``KTYPE`` variable in the BSP +description file match. + +To separate your kernel policy from your hardware configuration, you +include a kernel type (``ktype``), such as "standard". In the previous +example, this is done using the following: +:: + + include ktypes/standard/standard.scc + +This file aggregates all the configuration +fragments, patches, and features that make up your standard kernel +policy. See the "`Kernel Types <#kernel-types>`__" section for more +information. + +To aggregate common configurations and features specific to the kernel +for mybsp, use the following: +:: + + include mybsp.scc + +You can see that in the BeagleBone example with the following: +:: + + include beaglebone.scc + +For information on how to break a complete ``.config`` file into the various +configuration fragments, see the "`Creating Configuration +Fragments <#creating-config-fragments>`__" section. + +Finally, if you have any configurations specific to the hardware that +are not in a ``*.scc`` file, you can include them as follows: +:: + + kconf hardware mybsp-extra.cfg + +The BeagleBone example does not include these +types of configurations. However, the Malta 32-bit board does +("mti-malta32"). Here is the ``mti-malta32-le-standard.scc`` file: +:: + + define KMACHINE mti-malta32-le + define KMACHINE qemumipsel + define KTYPE standard + define KARCH mips + + include ktypes/standard/standard.scc + branch mti-malta32 + + include mti-malta32.scc + kconf hardware mti-malta32-le.cfg + +.. _bsp-description-file-example-minnow: + +Example +~~~~~~~ + +Many real-world examples are more complex. Like any other ``.scc`` file, +BSP descriptions can aggregate features. Consider the Minnow BSP +definition given the ``linux-yocto-4.4`` branch of the +``yocto-kernel-cache`` (i.e. +``yocto-kernel-cache/bsp/minnow/minnow.scc``): + +.. note:: + + Although the Minnow Board BSP is unused, the Metadata remains and is + being used here just as an example. + +:: + + include cfg/x86.scc + include features/eg20t/eg20t.scc + include cfg/dmaengine.scc + include features/power/intel.scc + include cfg/efi.scc + include features/usb/ehci-hcd.scc + include features/usb/ohci-hcd.scc + include features/usb/usb-gadgets.scc + include features/usb/touchscreen-composite.scc + include cfg/timer/hpet.scc + include features/leds/leds.scc + include features/spi/spidev.scc + include features/i2c/i2cdev.scc + include features/mei/mei-txe.scc + + # Earlyprintk and port debug requires 8250 + kconf hardware cfg/8250.cfg + + kconf hardware minnow.cfg + kconf hardware minnow-dev.cfg + +The ``minnow.scc`` description file includes a hardware configuration +fragment (``minnow.cfg``) specific to the Minnow BSP as well as several +more general configuration fragments and features enabling hardware +found on the machine. This ``minnow.scc`` description file is then +included in each of the three "minnow" description files for the +supported kernel types (i.e. "standard", "preempt-rt", and "tiny"). +Consider the "minnow" description for the "standard" kernel type (i.e. 
+``minnow-standard.scc``: +:: + + define KMACHINE minnow + define KTYPE standard + define KARCH i386 + + include ktypes/standard + + include minnow.scc + + # Extra minnow configs above the minimal defined in minnow.scc + include cfg/efi-ext.scc + include features/media/media-all.scc + include features/sound/snd_hda_intel.scc + + # The following should really be in standard.scc + # USB live-image support + include cfg/usb-mass-storage.scc + include cfg/boot-live.scc + + # Basic profiling + include features/latencytop/latencytop.scc + include features/profiling/profiling.scc + + # Requested drivers that don't have an existing scc + kconf hardware minnow-drivers-extra.cfg + +The ``include`` command midway through the file includes the ``minnow.scc`` description +that defines all enabled hardware for the BSP that is common to all +kernel types. Using this command significantly reduces duplication. + +Now consider the "minnow" description for the "tiny" kernel type (i.e. +``minnow-tiny.scc``): +:: + + define KMACHINE minnow + define KTYPE tiny + define KARCH i386 + + include ktypes/tiny + + include minnow.scc + +As you might expect, +the "tiny" description includes quite a bit less. In fact, it includes +only the minimal policy defined by the "tiny" kernel type and the +hardware-specific configuration required for booting the machine along +with the most basic functionality of the system as defined in the base +"minnow" description file. + +Notice again the three critical variables: +:term:`KMACHINE`, +:term:`KTYPE`, and +:term:`KARCH`. Of these variables, only +``KTYPE`` has changed to specify the "tiny" kernel type. + +Kernel Metadata Location +======================== + +Kernel Metadata always exists outside of the kernel tree either defined +in a kernel recipe (recipe-space) or outside of the recipe. Where you +choose to define the Metadata depends on what you want to do and how you +intend to work. Regardless of where you define the kernel Metadata, the +syntax used applies equally. + +If you are unfamiliar with the Linux kernel and only wish to apply a +configuration and possibly a couple of patches provided to you by +others, the recipe-space method is recommended. This method is also a +good approach if you are working with Linux kernel sources you do not +control or if you just do not want to maintain a Linux kernel Git +repository on your own. For partial information on how you can define +kernel Metadata in the recipe-space, see the "`Modifying an Existing +Recipe <#modifying-an-existing-recipe>`__" section. + +Conversely, if you are actively developing a kernel and are already +maintaining a Linux kernel Git repository of your own, you might find it +more convenient to work with kernel Metadata kept outside the +recipe-space. Working with Metadata in this area can make iterative +development of the Linux kernel more efficient outside of the BitBake +environment. + +Recipe-Space Metadata +--------------------- + +When stored in recipe-space, the kernel Metadata files reside in a +directory hierarchy below +:term:`FILESEXTRAPATHS`. For +a linux-yocto recipe or for a Linux kernel recipe derived by copying and +modifying +``oe-core/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb`` to +a recipe in your layer, ``FILESEXTRAPATHS`` is typically set to +``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}``. +See the "`Modifying an Existing +Recipe <#modifying-an-existing-recipe>`__" section for more information. 
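+
+For reference, an append file that introduces recipe-space Metadata
+typically extends the search path with the common ``FILESEXTRAPATHS``
+idiom so that files in a tree like the one shown next can be found. This
+is a sketch rather than a complete append file:
+::
+
+   # hypothetical linux-yocto_%.bbappend in meta-my_bsp_layer
+   FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"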
+ +Here is an example that shows a trivial tree of kernel Metadata stored +in recipe-space within a BSP layer: +:: + + meta-my_bsp_layer/ + `-- recipes-kernel + `-- linux + `-- linux-yocto + |-- bsp-standard.scc + |-- bsp.cfg + `-- standard.cfg + +When the Metadata is stored in recipe-space, you must take steps to +ensure BitBake has the necessary information to decide what files to +fetch and when they need to be fetched again. It is only necessary to +specify the ``.scc`` files on the +:term:`SRC_URI`. BitBake parses them +and fetches any files referenced in the ``.scc`` files by the +``include``, ``patch``, or ``kconf`` commands. Because of this, it is +necessary to bump the recipe :term:`PR` +value when changing the content of files not explicitly listed in the +``SRC_URI``. + +If the BSP description is in recipe space, you cannot simply list the +``*.scc`` in the ``SRC_URI`` statement. You need to use the following +form from your kernel append file: +:: + + SRC_URI_append_myplatform = " \ + file://myplatform;type=kmeta;destsuffix=myplatform \ + " + +Metadata Outside the Recipe-Space +--------------------------------- + +When stored outside of the recipe-space, the kernel Metadata files +reside in a separate repository. The OpenEmbedded build system adds the +Metadata to the build as a "type=kmeta" repository through the +:term:`SRC_URI` variable. As an +example, consider the following ``SRC_URI`` statement from the +``linux-yocto_4.12.bb`` kernel recipe: +:: + + SRC_URI = "git://git.yoctoproject.org/linux-yocto-4.12.git;name=machine;branch=${KBRANCH}; \ + git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-4.12;destsuffix=${KMETA}" + + +``${KMETA}``, in this context, is simply used to name the directory into +which the Git fetcher places the Metadata. This behavior is no different +than any multi-repository ``SRC_URI`` statement used in a recipe (e.g. +see the previous section). + +You can keep kernel Metadata in a "kernel-cache", which is a directory +containing configuration fragments. As with any Metadata kept outside +the recipe-space, you simply need to use the ``SRC_URI`` statement with +the "type=kmeta" attribute. Doing so makes the kernel Metadata available +during the configuration phase. + +If you modify the Metadata, you must not forget to update the ``SRCREV`` +statements in the kernel's recipe. In particular, you need to update the +``SRCREV_meta`` variable to match the commit in the ``KMETA`` branch you +wish to use. Changing the data in these branches and not updating the +``SRCREV`` statements to match will cause the build to fetch an older +commit. + +Organizing Your Source +====================== + +Many recipes based on the ``linux-yocto-custom.bb`` recipe use Linux +kernel sources that have only a single branch - "master". This type of +repository structure is fine for linear development supporting a single +machine and architecture. However, if you work with multiple boards and +architectures, a kernel source repository with multiple branches is more +efficient. For example, suppose you need a series of patches for one +board to boot. Sometimes, these patches are works-in-progress or +fundamentally wrong, yet they are still necessary for specific boards. +In these situations, you most likely do not want to include these +patches in every kernel you build (i.e. have the patches as part of the +lone "master" branch). It is situations like these that give rise to +multiple branches used within a Linux kernel sources Git repository. 
+
+Repository organization strategies exist that maximize source reuse,
+remove redundancy, and logically order your changes. This section
+presents strategies for the following cases:
+
+-  Encapsulating patches in a feature description and only including the
+   patches in the BSP descriptions of the applicable boards.
+
+-  Creating a machine branch in your kernel source repository and
+   applying the patches on that branch only.
+
+-  Creating a feature branch in your kernel source repository and
+   merging that branch into your BSP when needed.
+
+The approach you take is entirely up to you and depends on what works
+best for your development model.
+
+Encapsulating Patches
+---------------------
+
+If you are reusing patches from an external tree and are not working on
+the patches, you might find the encapsulated feature to be appropriate.
+Given this scenario, you do not need to create any branches in the
+source repository. Rather, you just take the static patches you need and
+encapsulate them within a feature description. Once you have the feature
+description, you simply include that into the BSP description as
+described in the "`BSP Descriptions <#bsp-descriptions>`__" section.
+
+You can find information on how to create patches and BSP descriptions
+in the "`Patches <#patches>`__" and "`BSP
+Descriptions <#bsp-descriptions>`__" sections.
+
+Machine Branches
+----------------
+
+When you have multiple machines and architectures to support, or you are
+actively working on board support, it is more efficient to create
+branches in the repository based on individual machines. Having machine
+branches allows common source to remain in the "master" branch with any
+features specific to a machine stored in the appropriate machine branch.
+This organization method frees you from continually reintegrating your
+patches into a feature.
+
+Once you have a new branch, you can set up your kernel Metadata to use
+the branch in a couple of different ways. In the recipe, you can specify
+the new branch as the ``KBRANCH`` to use for the board as follows:
+::
+
+   KBRANCH = "mynewbranch"
+
+Another method is to use the ``branch`` command in the BSP
+description:
+::
+
+   mybsp.scc:
+      define KMACHINE mybsp
+      define KTYPE standard
+      define KARCH i386
+      include standard.scc
+
+      branch mynewbranch
+
+      include mybsp-hw.scc
+
+If you find yourself with numerous branches, you might consider using a
+hierarchical branching system similar to what the Yocto Linux Kernel Git
+repositories use:
+::
+
+   common/kernel_type/machine
+
+If you had two kernel types, "standard" and "small" for instance, three
+machines, and common as ``mydir``, the branches in your Git repository
+might look like this:
+::
+
+   mydir/base
+   mydir/standard/base
+   mydir/standard/machine_a
+   mydir/standard/machine_b
+   mydir/standard/machine_c
+   mydir/small/base
+   mydir/small/machine_a
+
+This organization can help clarify the branch relationships. In this
+case, ``mydir/standard/machine_a`` includes everything in ``mydir/base``
+and ``mydir/standard/base``. The "standard" and "small" branches add
+sources specific to those kernel types that, for whatever reason, are not
+appropriate for the other branches.
+
+.. note::
+
+   The "base" branches are an artifact of the way Git manages its data
+   internally on the filesystem: Git will not allow you to use
+   mydir/standard
+   and
+   mydir/standard/machine_a
+   because it would have to create a file and a directory named
+   "standard".
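+
+As a sketch of how such a hierarchy might be created (the branch names
+below are illustrative, matching the layout above), you could base each
+machine branch on the corresponding "base" branch in your kernel source
+repository:
+::
+
+   $ cd linux-yocto
+   $ git checkout -b mydir/standard/machine_a mydir/standard/base
+   Switched to a new branch 'mydir/standard/machine_a'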
+ +Feature Branches +---------------- + +When you are actively developing new features, it can be more efficient +to work with that feature as a branch, rather than as a set of patches +that have to be regularly updated. The Yocto Project Linux kernel tools +provide for this with the ``git merge`` command. + +To merge a feature branch into a BSP, insert the ``git merge`` command +after any ``branch`` commands: +:: + + mybsp.scc: + define KMACHINE mybsp + define KTYPE standard + define KARCH i386 + include standard.scc + + branch mynewbranch + git merge myfeature + + include mybsp-hw.scc + +.. _scc-reference: + +SCC Description File Reference +============================== + +This section provides a brief reference for the commands you can use +within an SCC description file (``.scc``): + +- ``branch [ref]``: Creates a new branch relative to the current branch + (typically ``${KTYPE}``) using the currently checked-out branch, or + "ref" if specified. + +- ``define``: Defines variables, such as + :term:`KMACHINE`, + :term:`KTYPE`, + :term:`KARCH`, and + :term:`KFEATURE_DESCRIPTION`. + +- ``include SCC_FILE``: Includes an SCC file in the current file. The + file is parsed as if you had inserted it inline. + +- ``kconf [hardware|non-hardware] CFG_FILE``: Queues a configuration + fragment for merging into the final Linux ``.config`` file. + +- ``git merge GIT_BRANCH``: Merges the feature branch into the current + branch. + +- ``patch PATCH_FILE``: Applies the patch to the current Git branch. + + diff --git a/poky/documentation/kernel-dev/kernel-dev-common.rst b/poky/documentation/kernel-dev/kernel-dev-common.rst new file mode 100644 index 000000000..d4b60a9dc --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-common.rst @@ -0,0 +1,2078 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Common Tasks +************ + +This chapter presents several common tasks you perform when you work +with the Yocto Project Linux kernel. These tasks include preparing your +host development system for kernel development, preparing a layer, +modifying an existing recipe, patching the kernel, configuring the +kernel, iterative development, working with your own sources, and +incorporating out-of-tree modules. + +.. note:: + + The examples presented in this chapter work with the Yocto Project + 2.4 Release and forward. + +Preparing the Build Host to Work on the Kernel +============================================== + +Before you can do any kernel development, you need to be sure your build +host is set up to use the Yocto Project. For information on how to get +set up, see the ":doc:`../dev-manual/dev-manual-start`" section in +the Yocto Project Development Tasks Manual. Part of preparing the system +is creating a local Git repository of the +:term:`Source Directory` (``poky``) on your system. Follow the steps in the +":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`" +section in the Yocto Project Development Tasks Manual to set up your +Source Directory. + +.. note:: + + Be sure you check out the appropriate development branch or you + create your local branch by checking out a specific tag to get the + desired version of Yocto Project. See the " + Checking Out by Branch in Poky + " and " + Checking Out by Tag in Poky + " sections in the Yocto Project Development Tasks Manual for more + information. + +Kernel development is best accomplished using +:ref:`devtool ` +and not through traditional kernel workflow methods. 
The remainder of +this section provides information for both scenarios. + +Getting Ready to Develop Using ``devtool`` +------------------------------------------ + +Follow these steps to prepare to update the kernel image using +``devtool``. Completing this procedure leaves you with a clean kernel +image and ready to make modifications as described in the " +:ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" +section: + +1. *Initialize the BitBake Environment:* Before building an extensible + SDK, you need to initialize the BitBake build environment by sourcing + the build environment script (i.e. :ref:`structure-core-script`): + :: + + $ cd ~/poky + $ source oe-init-build-env + + .. note:: + + The previous commands assume the + Source Repositories + (i.e. + poky + ) have been cloned using Git and the local repository is named + "poky". + +2. *Prepare Your local.conf File:* By default, the + :term:`MACHINE` variable is set to + "qemux86-64", which is fine if you are building for the QEMU emulator + in 64-bit mode. However, if you are not, you need to set the + ``MACHINE`` variable appropriately in your ``conf/local.conf`` file + found in the + :term:`Build Directory` (i.e. + ``~/poky/build`` in this example). + + Also, since you are preparing to work on the kernel image, you need + to set the + :term:`MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS` + variable to include kernel modules. + + In this example we wish to build for qemux86 so we must set the + ``MACHINE`` variable to "qemux86" and also add the "kernel-modules". + As described we do this by appending to ``conf/local.conf``: + :: + + MACHINE = "qemux86" + MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS += "kernel-modules" + +3. *Create a Layer for Patches:* You need to create a layer to hold + patches created for the kernel image. You can use the + ``bitbake-layers create-layer`` command as follows: + :: + + $ cd ~/poky/build + $ bitbake-layers create-layer ../../meta-mylayer + NOTE: Starting bitbake server... + Add your new layer with 'bitbake-layers add-layer ../../meta-mylayer' + $ + + .. note:: + + For background information on working with common and BSP layers, + see the " + Understanding and Creating Layers + " section in the Yocto Project Development Tasks Manual and the " + BSP Layers + " section in the Yocto Project Board Support (BSP) Developer's + Guide, respectively. For information on how to use the + bitbake-layers create-layer + command to quickly set up a layer, see the " + Creating a General Layer Using the + bitbake-layers + Script + " section in the Yocto Project Development Tasks Manual. + +4. *Inform the BitBake Build Environment About Your Layer:* As directed + when you created your layer, you need to add the layer to the + :term:`BBLAYERS` variable in the + ``bblayers.conf`` file as follows: + :: + + $ cd ~/poky/build + $ bitbake-layers add-layer ../../meta-mylayer + NOTE: Starting bitbake server... + $ + +5. *Build the Extensible SDK:* Use BitBake to build the extensible SDK + specifically for use with images to be run using QEMU: + :: + + $ cd ~/poky/build + $ bitbake core-image-minimal -c populate_sdk_ext + + Once + the build finishes, you can find the SDK installer file (i.e. + ``*.sh`` file) in the following directory: + ~/poky/build/tmp/deploy/sdk For this example, the installer file is + named + ``poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-DISTRO.sh`` + +6. *Install the Extensible SDK:* Use the following command to install + the SDK. 
For this example, install the SDK in the default + ``~/poky_sdk`` directory: + :: + + $ cd ~/poky/build/tmp/deploy/sdk + $ ./poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-3.1.2.sh + Poky (Yocto Project Reference Distro) Extensible SDK installer version 3.1.2 + ============================================================================ + Enter target directory for SDK (default: ~/poky_sdk): + You are about to install the SDK to "/home/scottrif/poky_sdk". Proceed [Y/n]? Y + Extracting SDK......................................done + Setting it up... + Extracting buildtools... + Preparing build system... + Parsing recipes: 100% |#################################################################| Time: 0:00:52 + Initializing tasks: 100% |############## ###############################################| Time: 0:00:04 + Checking sstate mirror object availability: 100% |######################################| Time: 0:00:00 + Parsing recipes: 100% |#################################################################| Time: 0:00:33 + Initializing tasks: 100% |##############################################################| Time: 0:00:00 + done + SDK has been successfully set up and is ready to be used. + Each time you wish to use the SDK in a new shell session, you need to source the environment setup script e.g. + $ . /home/scottrif/poky_sdk/environment-setup-i586-poky-linux + + +7. *Set Up a New Terminal to Work With the Extensible SDK:* You must set + up a new terminal to work with the SDK. You cannot use the same + BitBake shell used to build the installer. + + After opening a new shell, run the SDK environment setup script as + directed by the output from installing the SDK: + :: + + $ source ~/poky_sdk/environment-setup-i586-poky-linux + "SDK environment now set up; additionally you may now run devtool to perform development tasks. + Run devtool --help for further details. + + .. note:: + + If you get a warning about attempting to use the extensible SDK in + an environment set up to run BitBake, you did not use a new shell. + +8. *Build the Clean Image:* The final step in preparing to work on the + kernel is to build an initial image using ``devtool`` in the new + terminal you just set up and initialized for SDK work: + :: + + $ devtool build-image + Parsing recipes: 100% |##########################################| Time: 0:00:05 + Parsing of 830 .bb files complete (0 cached, 830 parsed). 1299 targets, 47 skipped, 0 masked, 0 errors. + WARNING: No packages to add, building image core-image-minimal unmodified + Loading cache: 100% |############################################| Time: 0:00:00 + Loaded 1299 entries from dependency cache. + NOTE: Resolving any missing task queue dependencies + Initializing tasks: 100% |#######################################| Time: 0:00:07 + Checking sstate mirror object availability: 100% |###############| Time: 0:00:00 + NOTE: Executing SetScene Tasks + NOTE: Executing RunQueue Tasks + NOTE: Tasks Summary: Attempted 2866 tasks of which 2604 didn't need to be rerun and all succeeded. + NOTE: Successfully built core-image-minimal. You can find output files in /home/scottrif/poky_sdk/tmp/deploy/images/qemux86 + + If you were + building for actual hardware and not for emulation, you could flash + the image to a USB stick on ``/dev/sdd`` and boot your device. For an + example that uses a Minnowboard, see the + `TipsAndTricks/KernelDevelopmentWithEsdk `__ + Wiki page. 
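+
+If you do try such an image on real hardware, here is a minimal, hedged
+sketch of writing it to a USB stick (the machine name, image name, and
+the ``/dev/sdd`` device node are examples only; double-check the device
+node first, because ``dd`` overwrites whatever it is pointed at):
+::
+
+   $ cd ~/poky_sdk/tmp/deploy/images/<machine>
+   # Confirm which device node corresponds to the USB stick
+   $ lsblk
+   # Write the image and flush it to the device
+   $ sudo dd if=core-image-minimal-<machine>.wic of=/dev/sdd bs=4M status=progress conv=fsync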
+ +At this point you have set up to start making modifications to the +kernel by using the extensible SDK. For a continued example, see the +":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" +section. + +Getting Ready for Traditional Kernel Development +------------------------------------------------ + +Getting ready for traditional kernel development using the Yocto Project +involves many of the same steps as described in the previous section. +However, you need to establish a local copy of the kernel source since +you will be editing these files. + +Follow these steps to prepare to update the kernel image using +traditional kernel development flow with the Yocto Project. Completing +this procedure leaves you ready to make modifications to the kernel +source as described in the ":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`" +section: + +1. *Initialize the BitBake Environment:* Before you can do anything + using BitBake, you need to initialize the BitBake build environment + by sourcing the build environment script (i.e. + :ref:`structure-core-script`). + Also, for this example, be sure that the local branch you have + checked out for ``poky`` is the Yocto Project &DISTRO_NAME; branch. If + you need to checkout out the &DISTRO_NAME; branch, see the + ":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" + section in the Yocto Project Development Tasks Manual. + :: + + $ cd ~/poky + $ git branch + master + * &DISTRO_NAME; + $ source oe-init-build-env + + .. note:: + + The previous commands assume the + Source Repositories + (i.e. + poky + ) have been cloned using Git and the local repository is named + "poky". + +2. *Prepare Your local.conf File:* By default, the + :term:`MACHINE` variable is set to + "qemux86-64", which is fine if you are building for the QEMU emulator + in 64-bit mode. However, if you are not, you need to set the + ``MACHINE`` variable appropriately in your ``conf/local.conf`` file + found in the + :term:`Build Directory` (i.e. + ``~/poky/build`` in this example). + + Also, since you are preparing to work on the kernel image, you need + to set the + :term:`MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS` + variable to include kernel modules. + + In this example we wish to build for qemux86 so we must set the + ``MACHINE`` variable to "qemux86" and also add the "kernel-modules". + As described we do this by appending to ``conf/local.conf``: + :: + + MACHINE = "qemux86" + MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS += "kernel-modules" + +3. *Create a Layer for Patches:* You need to create a layer to hold + patches created for the kernel image. You can use the + ``bitbake-layers create-layer`` command as follows: + :: + + $ cd ~/poky/build + $ bitbake-layers create-layer ../../meta-mylayer + NOTE: Starting bitbake server... + Add your new layer with 'bitbake-layers add-layer ../../meta-mylayer' + + .. note:: + + For background information on working with common and BSP layers, + see the " + Understanding and Creating Layers + " section in the Yocto Project Development Tasks Manual and the " + BSP Layers + " section in the Yocto Project Board Support (BSP) Developer's + Guide, respectively. For information on how to use the + bitbake-layers create-layer + command to quickly set up a layer, see the " + Creating a General Layer Using the + bitbake-layers + Script + " section in the Yocto Project Development Tasks Manual. + +4. 
*Inform the BitBake Build Environment About Your Layer:* As directed + when you created your layer, you need to add the layer to the + :term:`BBLAYERS` variable in the + ``bblayers.conf`` file as follows: + :: + + $ cd ~/poky/build + $ bitbake-layers add-layer ../../meta-mylayer + NOTE: Starting bitbake server ... + $ + +5. *Create a Local Copy of the Kernel Git Repository:* You can find Git + repositories of supported Yocto Project kernels organized under + "Yocto Linux Kernel" in the Yocto Project Source Repositories at + :yocto_git:`/`. + + For simplicity, it is recommended that you create your copy of the + kernel Git repository outside of the + :term:`Source Directory`, which is + usually named ``poky``. Also, be sure you are in the + ``standard/base`` branch. + + The following commands show how to create a local copy of the + ``linux-yocto-4.12`` kernel and be in the ``standard/base`` branch. + + .. note:: + + The + linux-yocto-4.12 + kernel can be used with the Yocto Project 2.4 release and forward. + You cannot use the + linux-yocto-4.12 + kernel with releases prior to Yocto Project 2.4: + + :: + + $ cd ~ + $ git clone git://git.yoctoproject.org/linux-yocto-4.12 --branch standard/base + Cloning into 'linux-yocto-4.12'... + remote: Counting objects: 6097195, done. + remote: Compressing objects: 100% (901026/901026), done. + remote: Total 6097195 (delta 5152604), reused 6096847 (delta 5152256) + Receiving objects: 100% (6097195/6097195), 1.24 GiB | 7.81 MiB/s, done. + Resolving deltas: 100% (5152604/5152604), done. Checking connectivity... done. + Checking out files: 100% (59846/59846), done. + +6. *Create a Local Copy of the Kernel Cache Git Repository:* For + simplicity, it is recommended that you create your copy of the kernel + cache Git repository outside of the + :term:`Source Directory`, which is + usually named ``poky``. Also, for this example, be sure you are in + the ``yocto-4.12`` branch. + + The following commands show how to create a local copy of the + ``yocto-kernel-cache`` and be in the ``yocto-4.12`` branch: + :: + + $ cd ~ + $ git clone git://git.yoctoproject.org/yocto-kernel-cache --branch yocto-4.12 + Cloning into 'yocto-kernel-cache'... + remote: Counting objects: 22639, done. + remote: Compressing objects: 100% (9761/9761), done. + remote: Total 22639 (delta 12400), reused 22586 (delta 12347) + Receiving objects: 100% (22639/22639), 22.34 MiB | 6.27 MiB/s, done. + Resolving deltas: 100% (12400/12400), done. + Checking connectivity... done. + +At this point, you are ready to start making modifications to the kernel +using traditional kernel development steps. For a continued example, see +the "`Using Traditional Kernel Development to Patch the +Kernel <#using-traditional-kernel-development-to-patch-the-kernel>`__" +section. + +Creating and Preparing a Layer +============================== + +If you are going to be modifying kernel recipes, it is recommended that +you create and prepare your own layer in which to do your work. Your +layer contains its own :term:`BitBake` +append files (``.bbappend``) and provides a convenient mechanism to +create your own recipe files (``.bb``) as well as store and use kernel +patch files. For background information on working with layers, see the +":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section in the Yocto Project Development Tasks Manual. + +.. note:: + + The Yocto Project comes with many tools that simplify tasks you need + to perform. 
One such tool is the + bitbake-layers create-layer + command, which simplifies creating a new layer. See the " + Creating a General Layer Using the + bitbake-layers + Script + " section in the Yocto Project Development Tasks Manual for + information on how to use this script to quick set up a new layer. + +To better understand the layer you create for kernel development, the +following section describes how to create a layer without the aid of +tools. These steps assume creation of a layer named ``mylayer`` in your +home directory: + +1. *Create Structure*: Create the layer's structure: + :: + + $ cd $HOME + $ mkdir meta-mylayer + $ mkdir meta-mylayer/conf + $ mkdir meta-mylayer/recipes-kernel + $ mkdir meta-mylayer/recipes-kernel/linux + $ mkdir meta-mylayer/recipes-kernel/linux/linux-yocto + + The ``conf`` directory holds your configuration files, while the + ``recipes-kernel`` directory holds your append file and eventual + patch files. + +2. *Create the Layer Configuration File*: Move to the + ``meta-mylayer/conf`` directory and create the ``layer.conf`` file as + follows: + :: + + # We have a conf and classes directory, add to BBPATH + BBPATH .= ":${LAYERDIR}" + + # We have recipes-* directories, add to BBFILES + BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \ + ${LAYERDIR}/recipes-*/*/*.bbappend" + + BBFILE_COLLECTIONS += "mylayer" + BBFILE_PATTERN_mylayer = "^${LAYERDIR}/" + BBFILE_PRIORITY_mylayer = "5" + + Notice ``mylayer`` as part of the last three statements. + +3. *Create the Kernel Recipe Append File*: Move to the + ``meta-mylayer/recipes-kernel/linux`` directory and create the + kernel's append file. This example uses the ``linux-yocto-4.12`` + kernel. Thus, the name of the append file is + ``linux-yocto_4.12.bbappend``: + :: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + + SRC_URI_append = " file://patch-file-one" + SRC_URI_append = " file://patch-file-two" + SRC_URI_append = " file://patch-file-three" + + The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements + enable the OpenEmbedded build system to find patch files. For more + information on using append files, see the + ":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`" + section in the Yocto Project Development Tasks Manual. + +Modifying an Existing Recipe +============================ + +In many cases, you can customize an existing linux-yocto recipe to meet +the needs of your project. Each release of the Yocto Project provides a +few Linux kernel recipes from which you can choose. These are located in +the :term:`Source Directory` in +``meta/recipes-kernel/linux``. + +Modifying an existing recipe can consist of the following: + +- Creating the append file + +- Applying patches + +- Changing the configuration + +Before modifying an existing recipe, be sure that you have created a +minimal, custom layer from which you can work. See the "`Creating and +Preparing a Layer <#creating-and-preparing-a-layer>`__" section for +information. + +Creating the Append File +------------------------ + +You create this file in your custom layer. You also name it accordingly +based on the linux-yocto recipe you are using. 
For example, if you are +modifying the ``meta/recipes-kernel/linux/linux-yocto_4.12.bb`` recipe, +the append file will typically be located as follows within your custom +layer: +:: + + your-layer/recipes-kernel/linux/linux-yocto_4.12.bbappend + +The append file should initially extend the +:term:`FILESPATH` search path by +prepending the directory that contains your files to the +:term:`FILESEXTRAPATHS` +variable as follows: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + +The path ``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}`` +expands to "linux-yocto" in the current directory for this example. If +you add any new files that modify the kernel recipe and you have +extended ``FILESPATH`` as described above, you must place the files in +your layer in the following area: +:: + + your-layer/recipes-kernel/linux/linux-yocto/ + +.. note:: + + If you are working on a new machine Board Support Package (BSP), be + sure to refer to the + Yocto Project Board Support Package (BSP) Developer's Guide + . + +As an example, consider the following append file used by the BSPs in +``meta-yocto-bsp``: +:: + + meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.12.bbappend + +The following listing shows the file. Be aware that the actual commit ID +strings in this example listing might be different than the actual +strings in the file from the ``meta-yocto-bsp`` layer upstream. +:: + + KBRANCH_genericx86 = "standard/base" + KBRANCH_genericx86-64 = "standard/base" + + KMACHINE_genericx86 ?= "common-pc" + KMACHINE_genericx86-64 ?= "common-pc-64" + KBRANCH_edgerouter = "standard/edgerouter" + KBRANCH_beaglebone = "standard/beaglebone" + + SRCREV_machine_genericx86 ?= "d09f2ce584d60ecb7890550c22a80c48b83c2e19" + SRCREV_machine_genericx86-64 ?= "d09f2ce584d60ecb7890550c22a80c48b83c2e19" + SRCREV_machine_edgerouter ?= "b5c8cfda2dfe296410d51e131289fb09c69e1e7d" + SRCREV_machine_beaglebone ?= "b5c8cfda2dfe296410d51e131289fb09c69e1e7d" + + + COMPATIBLE_MACHINE_genericx86 = "genericx86" + COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" + COMPATIBLE_MACHINE_edgerouter = "edgerouter" + COMPATIBLE_MACHINE_beaglebone = "beaglebone" + + LINUX_VERSION_genericx86 = "4.12.7" + LINUX_VERSION_genericx86-64 = "4.12.7" + LINUX_VERSION_edgerouter = "4.12.10" + LINUX_VERSION_beaglebone = "4.12.10" + +This append file +contains statements used to support several BSPs that ship with the +Yocto Project. The file defines machines using the +:term:`COMPATIBLE_MACHINE` +variable and uses the +:term:`KMACHINE` variable to ensure +the machine name used by the OpenEmbedded build system maps to the +machine name used by the Linux Yocto kernel. The file also uses the +optional :term:`KBRANCH` variable to +ensure the build process uses the appropriate kernel branch. + +Although this particular example does not use it, the +:term:`KERNEL_FEATURES` +variable could be used to enable features specific to the kernel. The +append file points to specific commits in the +:term:`Source Directory` Git repository and +the ``meta`` Git repository branches to identify the exact kernel needed +to build the BSP. + +One thing missing in this particular BSP, which you will typically need +when developing a BSP, is the kernel configuration file (``.config``) +for your BSP. When developing a BSP, you probably have a kernel +configuration file or a set of kernel configuration files that, when +taken together, define the kernel configuration for your BSP. 
You can +accomplish this definition by putting the configurations in a file or a +set of files inside a directory located at the same level as your +kernel's append file and having the same name as the kernel's main +recipe file. With all these conditions met, simply reference those files +in the :term:`SRC_URI` statement in +the append file. + +For example, suppose you had some configuration options in a file called +``network_configs.cfg``. You can place that file inside a directory +named ``linux-yocto`` and then add a ``SRC_URI`` statement such as the +following to the append file. When the OpenEmbedded build system builds +the kernel, the configuration options are picked up and applied. +:: + + SRC_URI += "file://network_configs.cfg" + +To group related configurations into multiple files, you perform a +similar procedure. Here is an example that groups separate +configurations specifically for Ethernet and graphics into their own +files and adds the configurations by using a ``SRC_URI`` statement like +the following in your append file: +:: + + SRC_URI += "file://myconfig.cfg \ + file://eth.cfg \ + file://gfx.cfg" + +Another variable you can use in your kernel recipe append file is the +:term:`FILESEXTRAPATHS` +variable. When you use this statement, you are extending the locations +used by the OpenEmbedded system to look for files and patches as the +recipe is processed. + +.. note:: + + Other methods exist to accomplish grouping and defining configuration + options. For example, if you are working with a local clone of the + kernel repository, you could checkout the kernel's ``meta`` branch, + make your changes, and then push the changes to the local bare clone + of the kernel. The result is that you directly add configuration + options to the ``meta`` branch for your BSP. The configuration + options will likely end up in that location anyway if the BSP gets + added to the Yocto Project. + + In general, however, the Yocto Project maintainers take care of + moving the ``SRC_URI``-specified configuration options to the + kernel's ``meta`` branch. Not only is it easier for BSP developers to + not have to worry about putting those configurations in the branch, + but having the maintainers do it allows them to apply 'global' + knowledge about the kinds of common configuration options multiple + BSPs in the tree are typically using. This allows for promotion of + common configurations into common features. + +Applying Patches +---------------- + +If you have a single patch or a small series of patches that you want to +apply to the Linux kernel source, you can do so just as you would with +any other recipe. You first copy the patches to the path added to +:term:`FILESEXTRAPATHS` in +your ``.bbappend`` file as described in the previous section, and then +reference them in :term:`SRC_URI` +statements. + +For example, you can apply a three-patch series by adding the following +lines to your linux-yocto ``.bbappend`` file in your layer: +:: + + SRC_URI += "file://0001-first-change.patch" + SRC_URI += "file://0002-second-change.patch" + SRC_URI += "file://0003-third-change.patch" + +The next time you run BitBake to build +the Linux kernel, BitBake detects the change in the recipe and fetches +and applies the patches before building the kernel. 
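+
+For clarity, here is a sketch of how the layer from the earlier
+examples might be laid out for this hypothetical three-patch series,
+assuming the ``FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"``
+statement from the previous section (``${PN}`` expands to
+"linux-yocto" in this case):
+::
+
+   meta-mylayer/
+   `-- recipes-kernel
+       `-- linux
+           |-- linux-yocto_4.12.bbappend
+           `-- linux-yocto
+               |-- 0001-first-change.patch
+               |-- 0002-second-change.patch
+               `-- 0003-third-change.patch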
+ +For a detailed example showing how to patch the kernel using +``devtool``, see the +":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" +and +":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`" +sections. + +Changing the Configuration +-------------------------- + +You can make wholesale or incremental changes to the final ``.config`` +file used for the eventual Linux kernel configuration by including a +``defconfig`` file and by specifying configuration fragments in the +:term:`SRC_URI` to be applied to that +file. + +If you have a complete, working Linux kernel ``.config`` file you want +to use for the configuration, as before, copy that file to the +appropriate ``${PN}`` directory in your layer's ``recipes-kernel/linux`` +directory, and rename the copied file to "defconfig". Then, add the +following lines to the linux-yocto ``.bbappend`` file in your layer: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + SRC_URI += "file://defconfig" + +The ``SRC_URI`` tells the build system how to search +for the file, while the +:term:`FILESEXTRAPATHS` +extends the :term:`FILESPATH` +variable (search directories) to include the ``${PN}`` directory you +created to hold the configuration changes. + +.. note:: + + The build system applies the configurations from the + defconfig + file before applying any subsequent configuration fragments. The + final kernel configuration is a combination of the configurations in + the + defconfig + file and any configuration fragments you provide. You need to realize + that if you have any configuration fragments, the build system + applies these on top of and after applying the existing + defconfig + file configurations. + +Generally speaking, the preferred approach is to determine the +incremental change you want to make and add that as a configuration +fragment. For example, if you want to add support for a basic serial +console, create a file named ``8250.cfg`` in the ``${PN}`` directory +with the following content (without indentation): +:: + + CONFIG_SERIAL_8250=y + CONFIG_SERIAL_8250_CONSOLE=y + CONFIG_SERIAL_8250_PCI=y + CONFIG_SERIAL_8250_NR_UARTS=4 + CONFIG_SERIAL_8250_RUNTIME_UARTS=4 + CONFIG_SERIAL_CORE=y + CONFIG_SERIAL_CORE_CONSOLE=y + +Next, include this +configuration fragment and extend the ``FILESPATH`` variable in your +``.bbappend`` file: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + SRC_URI += "file://8250.cfg" + +The next time you run BitBake to build the +Linux kernel, BitBake detects the change in the recipe and fetches and +applies the new configuration before building the kernel. + +For a detailed example showing how to configure the kernel, see the +"`Configuring the Kernel <#configuring-the-kernel>`__" section. + +Using an "In-Tree"  ``defconfig`` File +-------------------------------------- + +It might be desirable to have kernel configuration fragment support +through a ``defconfig`` file that is pulled from the kernel source tree +for the configured machine. 
By default, the OpenEmbedded build system +looks for ``defconfig`` files in the layer used for Metadata, which is +"out-of-tree", and then configures them using the following: +:: + + SRC_URI += "file://defconfig" + +If you do not want to maintain copies of +``defconfig`` files in your layer but would rather allow users to use +the default configuration from the kernel tree and still be able to add +configuration fragments to the +:term:`SRC_URI` through, for example, +append files, you can direct the OpenEmbedded build system to use a +``defconfig`` file that is "in-tree". + +To specify an "in-tree" ``defconfig`` file, use the following statement +form: +:: + + KBUILD_DEFCONFIG_KMACHINE ?= defconfig_file + +Here is an example +that assigns the ``KBUILD_DEFCONFIG`` variable based on "raspberrypi2" +and provides the path to the "in-tree" ``defconfig`` file to be used for +a Raspberry Pi 2, which is based on the Broadcom 2708/2709 chipset: +:: + + KBUILD_DEFCONFIG_raspberrypi2 ?= "bcm2709_defconfig" + +Aside from modifying your kernel recipe and providing your own +``defconfig`` file, you need to be sure no files or statements set +``SRC_URI`` to use a ``defconfig`` other than your "in-tree" file (e.g. +a kernel's ``linux-``\ machine\ ``.inc`` file). In other words, if the +build system detects a statement that identifies an "out-of-tree" +``defconfig`` file, that statement will override your +``KBUILD_DEFCONFIG`` variable. + +See the +:term:`KBUILD_DEFCONFIG` +variable description for more information. + +Using ``devtool`` to Patch the Kernel +===================================== + +The steps in this procedure show you how you can patch the kernel using +the extensible SDK and ``devtool``. + +.. note:: + + Before attempting this procedure, be sure you have performed the + steps to get ready for updating the kernel as described in the " + Getting Ready to Develop Using + devtool + " section. + +Patching the kernel involves changing or adding configurations to an +existing kernel, changing or adding recipes to the kernel that are +needed to support specific hardware features, or even altering the +source code itself. + +This example creates a simple patch by adding some QEMU emulator console +output at boot time through ``printk`` statements in the kernel's +``calibrate.c`` source code file. Applying the patch and booting the +modified image causes the added messages to appear on the emulator's +console. The example is a continuation of the setup procedure found in +the ":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``" Section. + +1. *Check Out the Kernel Source Files:* First you must use ``devtool`` + to checkout the kernel source code in its workspace. Be sure you are + in the terminal set up to do work with the extensible SDK. + + .. note:: + + See this + step + in the " + Getting Ready to Develop Using + devtool + " section for more information. + + Use the following ``devtool`` command to check out the code: + :: + + $ devtool modify linux-yocto + + .. note:: + + During the checkout operation, a bug exists that could cause + errors such as the following to appear: + :: + + ERROR: Taskhash mismatch 2c793438c2d9f8c3681fd5f7bc819efa versus + be3a89ce7c47178880ba7bf6293d7404 for + /path/to/esdk/layers/poky/meta/recipes-kernel/linux/linux-yocto_4.10.bb.do_unpack + + + You can safely ignore these messages. The source code is correctly + checked out. + +2. *Edit the Source Files* Follow these steps to make some simple + changes to the source files: + + 1. 
*Change the working directory*: In the previous step, the output + noted where you can find the source files (e.g. + ``~/poky_sdk/workspace/sources/linux-yocto``). Change to where the + kernel source code is before making your edits to the + ``calibrate.c`` file: + :: + + $ cd ~/poky_sdk/workspace/sources/linux-yocto + + 2. *Edit the source file*: Edit the ``init/calibrate.c`` file to have + the following changes: + :: + + void calibrate_delay(void) + { + unsigned long lpj; + static bool printed; + int this_cpu = smp_processor_id(); + + printk("*************************************\n"); + printk("* *\n"); + printk("* HELLO YOCTO KERNEL *\n"); + printk("* *\n"); + printk("*************************************\n"); + + if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { + . + . + . + +3. *Build the Updated Kernel Source:* To build the updated kernel + source, use ``devtool``: + :: + + $ devtool build linux-yocto + +4. *Create the Image With the New Kernel:* Use the + ``devtool build-image`` command to create a new image that has the + new kernel. + + .. note:: + + If the image you originally created resulted in a Wic file, you + can use an alternate method to create the new image with the + updated kernel. For an example, see the steps in the + TipsAndTricks/KernelDevelopmentWithEsdk + Wiki Page. + + :: + + $ cd ~ + $ devtool build-image core-image-minimal + +5. *Test the New Image:* For this example, you can run the new image + using QEMU to verify your changes: + + 1. *Boot the image*: Boot the modified image in the QEMU emulator + using this command: + :: + + $ runqemu qemux86 + + 2. *Verify the changes*: Log into the machine using ``root`` with no + password and then use the following shell command to scroll + through the console's boot output. + :: + + # dmesg | less + + You should see + the results of your ``printk`` statements as part of the output + when you scroll down the console window. + +6. *Stage and commit your changes*: Within your eSDK terminal, change + your working directory to where you modified the ``calibrate.c`` file + and use these Git commands to stage and commit your changes: + :: + + $ cd ~/poky_sdk/workspace/sources/linux-yocto + $ git status + $ git add init/calibrate.c + $ git commit -m "calibrate: Add printk example" + +7. *Export the Patches and Create an Append File:* To export your + commits as patches and create a ``.bbappend`` file, use the following + command in the terminal used to work with the extensible SDK. This + example uses the previously established layer named ``meta-mylayer``. + + .. note:: + + See Step 3 of the " + Getting Ready to Develop Using devtool + " section for information on setting up this layer. + + $ devtool finish linux-yocto ~/meta-mylayer + + Once the command + finishes, the patches and the ``.bbappend`` file are located in the + ``~/meta-mylayer/recipes-kernel/linux`` directory. + +8. *Build the Image With Your Modified Kernel:* You can now build an + image that includes your kernel patches. Execute the following + command from your + :term:`Build Directory` in the terminal + set up to run BitBake: + :: + + $ cd ~/poky/build + $ bitbake core-image-minimal + +Using Traditional Kernel Development to Patch the Kernel +======================================================== + +The steps in this procedure show you how you can patch the kernel using +traditional kernel development (i.e. 
not using ``devtool`` and the +extensible SDK as described in the +":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" +section). + +.. note:: + + Before attempting this procedure, be sure you have performed the + steps to get ready for updating the kernel as described in the " + Getting Ready for Traditional Kernel Development + " section. + +Patching the kernel involves changing or adding configurations to an +existing kernel, changing or adding recipes to the kernel that are +needed to support specific hardware features, or even altering the +source code itself. + +The example in this section creates a simple patch by adding some QEMU +emulator console output at boot time through ``printk`` statements in +the kernel's ``calibrate.c`` source code file. Applying the patch and +booting the modified image causes the added messages to appear on the +emulator's console. The example is a continuation of the setup procedure +found in the "`Getting Ready for Traditional Kernel +Development <#getting-ready-for-traditional-kernel-development>`__" +Section. + +1. *Edit the Source Files* Prior to this step, you should have used Git + to create a local copy of the repository for your kernel. Assuming + you created the repository as directed in the "`Getting Ready for + Traditional Kernel + Development <#getting-ready-for-traditional-kernel-development>`__" + section, use the following commands to edit the ``calibrate.c`` file: + + 1. *Change the working directory*: You need to locate the source + files in the local copy of the kernel Git repository: Change to + where the kernel source code is before making your edits to the + ``calibrate.c`` file: + :: + + $ cd ~/linux-yocto-4.12/init + + 2. *Edit the source file*: Edit the ``calibrate.c`` file to have the + following changes: + :: + + void calibrate_delay(void) + { + unsigned long lpj; + static bool printed; + int this_cpu = smp_processor_id(); + + printk("*************************************\n"); + printk("* *\n"); + printk("* HELLO YOCTO KERNEL *\n"); + printk("* *\n"); + printk("*************************************\n"); + + if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { + . + . + . + +2. *Stage and Commit Your Changes:* Use standard Git commands to stage + and commit the changes you just made: + :: + + $ git add calibrate.c + $ git commit -m "calibrate.c - Added some printk statements" + + If you do not + stage and commit your changes, the OpenEmbedded Build System will not + pick up the changes. + +3. *Update Your local.conf File to Point to Your Source Files:* In + addition to your ``local.conf`` file specifying to use + "kernel-modules" and the "qemux86" machine, it must also point to the + updated kernel source files. Add + :term:`SRC_URI` and + :term:`SRCREV` statements similar + to the following to your ``local.conf``: + :: + + $ cd ~/poky/build/conf + + Add the following to the ``local.conf``: + :: + + SRC_URI_pn-linux-yocto = "git:///path-to/linux-yocto-4.12;protocol=file;name=machine;branch=standard/base; \ + git:///path-to/yocto-kernel-cache;protocol=file;type=kmeta;name=meta;branch=yocto-4.12;destsuffix=${KMETA}" + SRCREV_meta_qemux86 = "${AUTOREV}" + SRCREV_machine_qemux86 = "${AUTOREV}" + + .. note:: + + Be sure to replace + path-to + with the pathname to your local Git repositories. Also, you must + be sure to specify the correct branch and machine types. For this + example, the branch is + standard/base + and the machine is "qemux86". + +4. 
*Build the Image:* With the source modified, your changes staged and
+   committed, and the ``local.conf`` file pointing to the kernel files,
+   you can now use BitBake to build the image:
+   ::
+
+      $ cd ~/poky/build
+      $ bitbake core-image-minimal
+
+5. *Boot the image*: Boot the modified image in the QEMU emulator using
+   this command. When prompted to log in to the QEMU console, use "root"
+   with no password:
+   ::
+
+      $ cd ~/poky/build
+      $ runqemu qemux86
+
+6. *Look for Your Changes:* As QEMU booted, you might have seen your
+   changes rapidly scroll by. If not, use the following command to see
+   your changes:
+   ::
+
+      # dmesg | less
+
+   You should see the results of your
+   ``printk`` statements as part of the output when you scroll down the
+   console window.
+
+7. *Generate the Patch File:* Once you are sure that your patch works
+   correctly, you can generate a ``*.patch`` file in the kernel source
+   repository:
+   ::
+
+      $ cd ~/linux-yocto-4.12/init
+      $ git format-patch -1
+      0001-calibrate.c-Added-some-printk-statements.patch
+
+8. *Move the Patch File to Your Layer:* In order for subsequent builds
+   to pick up patches, you need to move the patch file you created in
+   the previous step to your layer ``meta-mylayer``. For this example,
+   the layer created earlier is located in your home directory as
+   ``meta-mylayer``. When the layer was created using the
+   ``bitbake-layers create-layer`` command, no additional hierarchy was
+   created to support patches. Before moving the patch file, you need to
+   add additional structure to your layer using the following commands:
+   ::
+
+      $ cd ~/meta-mylayer
+      $ mkdir recipes-kernel
+      $ mkdir recipes-kernel/linux
+      $ mkdir recipes-kernel/linux/linux-yocto
+
+   Once you have created this
+   hierarchy in your layer, you can move the patch file using the
+   following command:
+   ::
+
+      $ mv ~/linux-yocto-4.12/init/0001-calibrate.c-Added-some-printk-statements.patch ~/meta-mylayer/recipes-kernel/linux/linux-yocto
+
+9. *Create the Append File:* Finally, you need to create the
+   ``linux-yocto_4.12.bbappend`` file and insert statements that allow
+   the OpenEmbedded build system to find the patch. The append file
+   needs to be in your layer's ``recipes-kernel/linux`` directory and it
+   must be named ``linux-yocto_4.12.bbappend`` and have the following
+   contents:
+   ::
+
+      FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+      SRC_URI_append = " file://0001-calibrate.c-Added-some-printk-statements.patch"
+
+   The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements
+   enable the OpenEmbedded build system to find the patch file.
+
+   For more information on append files and patches, see the "`Creating
+   the Append File <#creating-the-append-file>`__" and "`Applying
+   Patches <#applying-patches>`__" sections. You can also see the
+   ":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`"
+   section in the Yocto Project Development Tasks Manual.
+
+   .. note::
+
+      To build
+      core-image-minimal
+      again and see the effects of your patch, you can essentially
+      eliminate the temporary source files saved in
+      poky/build/tmp/work/...
+      and residual effects of the build by entering the following
+      sequence of commands:
+      ::
+
+         $ cd ~/poky/build
+         $ bitbake -c cleanall linux-yocto
+         $ bitbake core-image-minimal -c cleanall
+         $ bitbake core-image-minimal
+         $ runqemu qemux86
+
+
+Configuring the Kernel
+======================
+
+Configuring the Yocto Project kernel consists of making sure the
+``.config`` file has all the right information in it for the image you
+are building.
You can use the ``menuconfig`` tool and configuration +fragments to make sure your ``.config`` file is just how you need it. +You can also save known configurations in a ``defconfig`` file that the +build system can use for kernel configuration. + +This section describes how to use ``menuconfig``, create and use +configuration fragments, and how to interactively modify your +``.config`` file to create the leanest kernel configuration file +possible. + +For more information on kernel configuration, see the "`Changing the +Configuration <#changing-the-configuration>`__" section. + +Using  ``menuconfig`` +--------------------- + +The easiest way to define kernel configurations is to set them through +the ``menuconfig`` tool. This tool provides an interactive method with +which to set kernel configurations. For general information on +``menuconfig``, see http://en.wikipedia.org/wiki/Menuconfig. + +To use the ``menuconfig`` tool in the Yocto Project development +environment, you must do the following: + +- Because you launch ``menuconfig`` using BitBake, you must be sure to + set up your environment by running the + :ref:`structure-core-script` script found in + the :term:`Build Directory`. + +- You must be sure of the state of your build's configuration in the + :term:`Source Directory`. + +- Your build host must have the following two packages installed: + :: + + libncurses5-dev + libtinfo-dev + +The following commands initialize the BitBake environment, run the +:ref:`ref-tasks-kernel_configme` +task, and launch ``menuconfig``. These commands assume the Source +Directory's top-level folder is ``~/poky``: +:: + + $ cd poky + $ source oe-init-build-env + $ bitbake linux-yocto -c kernel_configme -f + $ bitbake linux-yocto -c menuconfig + +Once ``menuconfig`` comes up, its standard +interface allows you to interactively examine and configure all the +kernel configuration parameters. After making your changes, simply exit +the tool and save your changes to create an updated version of the +``.config`` configuration file. + +.. note:: + + You can use the entire + .config + file as the + defconfig + file. For information on + defconfig + files, see the " + Changing the Configuration + ", " + Using an In-Tree + defconfig + File + , and " + Creating a + defconfig + File + " sections. + +Consider an example that configures the "CONFIG_SMP" setting for the +``linux-yocto-4.12`` kernel. + +.. note:: + + The OpenEmbedded build system recognizes this kernel as + linux-yocto + through Metadata (e.g. + PREFERRED_VERSION + \_linux-yocto ?= "12.4%" + ). + +Once ``menuconfig`` launches, use the interface to navigate through the +selections to find the configuration settings in which you are +interested. For this example, you deselect "CONFIG_SMP" by clearing the +"Symmetric Multi-Processing Support" option. Using the interface, you +can find the option under "Processor Type and Features". To deselect +"CONFIG_SMP", use the arrow keys to highlight "Symmetric +Multi-Processing Support" and enter "N" to clear the asterisk. When you +are finished, exit out and save the change. + +Saving the selections updates the ``.config`` configuration file. This +is the file that the OpenEmbedded build system uses to configure the +kernel during the build. You can find and examine this file in the Build +Directory in ``tmp/work/``. The actual ``.config`` is located in the +area where the specific kernel is built. 
For example, if you were +building a Linux Yocto kernel based on the ``linux-yocto-4.12`` kernel +and you were building a QEMU image targeted for ``x86`` architecture, +the ``.config`` file would be: +:: + + poky/build/tmp/work/qemux86-poky-linux/linux-yocto/4.12.12+gitAUTOINC+eda4d18... + ...967-r0/linux-qemux86-standard-build/.config + +.. note:: + + The previous example directory is artificially split and many of the + characters in the actual filename are omitted in order to make it + more readable. Also, depending on the kernel you are using, the exact + pathname might differ. + +Within the ``.config`` file, you can see the kernel settings. For +example, the following entry shows that symmetric multi-processor +support is not set: +:: + + # CONFIG_SMP is not set + +A good method to isolate changed configurations is to use a combination +of the ``menuconfig`` tool and simple shell commands. Before changing +configurations with ``menuconfig``, copy the existing ``.config`` and +rename it to something else, use ``menuconfig`` to make as many changes +as you want and save them, then compare the renamed configuration file +against the newly created file. You can use the resulting differences as +your base to create configuration fragments to permanently save in your +kernel layer. + +.. note:: + + Be sure to make a copy of the + .config + file and do not just rename it. The build system needs an existing + .config + file from which to work. + +Creating a  ``defconfig`` File +------------------------------ + +A ``defconfig`` file in the context of the Yocto Project is often a +``.config`` file that is copied from a build or a ``defconfig`` taken +from the kernel tree and moved into recipe space. You can use a +``defconfig`` file to retain a known set of kernel configurations from +which the OpenEmbedded build system can draw to create the final +``.config`` file. + +.. note:: + + Out-of-the-box, the Yocto Project never ships a + defconfig + or + .config + file. The OpenEmbedded build system creates the final + .config + file used to configure the kernel. + +To create a ``defconfig``, start with a complete, working Linux kernel +``.config`` file. Copy that file to the appropriate +``${``\ :term:`PN`\ ``}`` directory in +your layer's ``recipes-kernel/linux`` directory, and rename the copied +file to "defconfig" (e.g. +``~/meta-mylayer/recipes-kernel/linux/linux-yocto/defconfig``). Then, +add the following lines to the linux-yocto ``.bbappend`` file in your +layer: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + SRC_URI += "file://defconfig" + +The :term:`SRC_URI` tells the build system how to search for the file, while the +:term:`FILESEXTRAPATHS` extends the :term:`FILESPATH` +variable (search directories) to include the ``${PN}`` directory you +created to hold the configuration changes. + +.. note:: + + The build system applies the configurations from the + defconfig + file before applying any subsequent configuration fragments. The + final kernel configuration is a combination of the configurations in + the + defconfig + file and any configuration fragments you provide. You need to realize + that if you have any configuration fragments, the build system + applies these on top of and after applying the existing defconfig + file configurations. + +For more information on configuring the kernel, see the "`Changing the +Configuration <#changing-the-configuration>`__" section. + +.. 
_creating-config-fragments: + +Creating Configuration Fragments +-------------------------------- + +Configuration fragments are simply kernel options that appear in a file +placed where the OpenEmbedded build system can find and apply them. The +build system applies configuration fragments after applying +configurations from a ``defconfig`` file. Thus, the final kernel +configuration is a combination of the configurations in the +``defconfig`` file and then any configuration fragments you provide. The +build system applies fragments on top of and after applying the existing +defconfig file configurations. + +Syntactically, the configuration statement is identical to what would +appear in the ``.config`` file, which is in the :term:`Build Directory`. + +.. note:: + + For more information about where the + .config + file is located, see the example in the + ":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``" + section. + +It is simple to create a configuration fragment. One method is to use +shell commands. For example, issuing the following from the shell +creates a configuration fragment file named ``my_smp.cfg`` that enables +multi-processor support within the kernel: +:: + + $ echo "CONFIG_SMP=y" >> my_smp.cfg + +.. note:: + + All configuration fragment files must use the + .cfg + extension in order for the OpenEmbedded build system to recognize + them as a configuration fragment. + +Another method is to create a configuration fragment using the +differences between two configuration files: one previously created and +saved, and one freshly created using the ``menuconfig`` tool. + +To create a configuration fragment using this method, follow these +steps: + +1. *Complete a Build Through Kernel Configuration:* Complete a build at + least through the kernel configuration task as follows: + :: + + $ bitbake linux-yocto -c kernel_configme -f + + This step ensures that you create a + ``.config`` file from a known state. Because situations exist where + your build state might become unknown, it is best to run this task + prior to starting ``menuconfig``. + +2. *Launch menuconfig:* Run the ``menuconfig`` command: + :: + + $ bitbake linux-yocto -c menuconfig + +3. *Create the Configuration Fragment:* Run the ``diffconfig`` command + to prepare a configuration fragment. The resulting file + ``fragment.cfg`` is placed in the + ``${``\ :term:`WORKDIR`\ ``}`` + directory: + :: + + $ bitbake linux-yocto -c diffconfig + +The ``diffconfig`` command creates a file that is a list of Linux kernel +``CONFIG_`` assignments. See the "`Changing the +Configuration <#changing-the-configuration>`__" section for additional +information on how to use the output as a configuration fragment. + +.. note:: + + You can also use this method to create configuration fragments for a + BSP. See the " + BSP Descriptions + " section for more information. + +Where do you put your configuration fragment files? You can place these +files in an area pointed to by +:term:`SRC_URI` as directed by your +``bblayers.conf`` file, which is located in your layer. The OpenEmbedded +build system picks up the configuration and adds it to the kernel's +configuration. For example, suppose you had a set of configuration +options in a file called ``myconfig.cfg``. 
If you put that file inside a +directory named ``linux-yocto`` that resides in the same directory as +the kernel's append file within your layer and then add the following +statements to the kernel's append file, those configuration options will +be picked up and applied when the kernel is built: +:: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + SRC_URI += "file://myconfig.cfg" + +As mentioned earlier, you can group related configurations into multiple +files and name them all in the ``SRC_URI`` statement as well. For +example, you could group separate configurations specifically for +Ethernet and graphics into their own files and add those by using a +``SRC_URI`` statement like the following in your append file: +:: + + SRC_URI += "file://myconfig.cfg \ + file://eth.cfg \ + file://gfx.cfg" + +Validating Configuration +------------------------ + +You can use the +:ref:`ref-tasks-kernel_configcheck` +task to provide configuration validation: +:: + + $ bitbake linux-yocto -c kernel_configcheck -f + +Running this task produces warnings for when a +requested configuration does not appear in the final ``.config`` file or +when you override a policy configuration in a hardware configuration +fragment. + +In order to run this task, you must have an existing ``.config`` file. +See the ":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``" section for +information on how to create a configuration file. + +Following is sample output from the ``do_kernel_configcheck`` task: +:: + + Loading cache: 100% |########################################################| Time: 0:00:00 + Loaded 1275 entries from dependency cache. + NOTE: Resolving any missing task queue dependencies + + Build Configuration: + . + . + . + + NOTE: Executing SetScene Tasks + NOTE: Executing RunQueue Tasks + WARNING: linux-yocto-4.12.12+gitAUTOINC+eda4d18ce4_16de014967-r0 do_kernel_configcheck: + [kernel config]: specified values did not make it into the kernel's final configuration: + + ---------- CONFIG_X86_TSC ----------------- + Config: CONFIG_X86_TSC + From: /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/bsp/common-pc/common-pc-cpu.cfg + Requested value: CONFIG_X86_TSC=y + Actual value: + + + ---------- CONFIG_X86_BIGSMP ----------------- + Config: CONFIG_X86_BIGSMP + From: /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/cfg/smp.cfg + /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/defconfig + Requested value: # CONFIG_X86_BIGSMP is not set + Actual value: + + + ---------- CONFIG_NR_CPUS ----------------- + Config: CONFIG_NR_CPUS + From: /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/cfg/smp.cfg + /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/bsp/common-pc/common-pc.cfg + /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/defconfig + Requested value: CONFIG_NR_CPUS=8 + Actual value: CONFIG_NR_CPUS=1 + + + ---------- CONFIG_SCHED_SMT ----------------- + Config: CONFIG_SCHED_SMT + From: /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/cfg/smp.cfg + /home/scottrif/poky/build/tmp/work-shared/qemux86/kernel-source/.kernel-meta/configs/standard/defconfig + Requested value: CONFIG_SCHED_SMT=y + Actual value: + + + + NOTE: Tasks Summary: Attempted 288 tasks of which 285 didn't need to be rerun and all 
succeeded. + + Summary: There were 3 WARNING messages shown. + +.. note:: + + The previous output example has artificial line breaks to make it + more readable. + +The output describes the various problems that you can encounter along +with where to find the offending configuration items. You can use the +information in the logs to adjust your configuration files and then +repeat the +:ref:`ref-tasks-kernel_configme` +and +:ref:`ref-tasks-kernel_configcheck` +tasks until they produce no warnings. + +For more information on how to use the ``menuconfig`` tool, see the +:ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\`` section. + +Fine-Tuning the Kernel Configuration File +----------------------------------------- + +You can make sure the ``.config`` file is as lean or efficient as +possible by reading the output of the kernel configuration fragment +audit, noting any issues, making changes to correct the issues, and then +repeating. + +As part of the kernel build process, the ``do_kernel_configcheck`` task +runs. This task validates the kernel configuration by checking the final +``.config`` file against the input files. During the check, the task +produces warning messages for the following issues: + +- Requested options that did not make the final ``.config`` file. + +- Configuration items that appear twice in the same configuration + fragment. + +- Configuration items tagged as "required" that were overridden. + +- A board overrides a non-board specific option. + +- Listed options not valid for the kernel being processed. In other + words, the option does not appear anywhere. + +.. note:: + + The + do_kernel_configcheck + task can also optionally report if an option is overridden during + processing. + +For each output warning, a message points to the file that contains a +list of the options and a pointer to the configuration fragment that +defines them. Collectively, the files are the key to streamlining the +configuration. + +To streamline the configuration, do the following: + +1. *Use a Working Configuration:* Start with a full configuration that + you know works. Be sure the configuration builds and boots + successfully. Use this configuration file as your baseline. + +2. *Run Configure and Check Tasks:* Separately run the + ``do_kernel_configme`` and ``do_kernel_configcheck`` tasks: + :: + + $ bitbake linux-yocto -c kernel_configme -f + $ bitbake linux-yocto -c kernel_configcheck -f + +3. *Process the Results:* Take the resulting list of files from the + ``do_kernel_configcheck`` task warnings and do the following: + + - Drop values that are redefined in the fragment but do not change + the final ``.config`` file. + + - Analyze and potentially drop values from the ``.config`` file that + override required configurations. + + - Analyze and potentially remove non-board specific options. + + - Remove repeated and invalid options. + +4. *Re-Run Configure and Check Tasks:* After you have worked through the + output of the kernel configuration audit, you can re-run the + ``do_kernel_configme`` and ``do_kernel_configcheck`` tasks to see the + results of your changes. If you have more issues, you can deal with + them as described in the previous step. + +Iteratively working through steps two through four eventually yields a +minimal, streamlined configuration file. Once you have the best +``.config``, you can build the Linux Yocto kernel. + +Expanding Variables +=================== + +Sometimes it is helpful to determine what a variable expands to during a +build. 
You can examine the values of variables by looking at the
+output of the ``bitbake -e`` command. The output is long and is more
+easily managed in a text file, which allows for easy searches:
+::
+
+   $ bitbake -e virtual/kernel > some_text_file
+
+Within the text file, you can see
+exactly how each variable is expanded and used by the OpenEmbedded build
+system.
+
+Working with a "Dirty" Kernel Version String
+============================================
+
+If you build a kernel image and the version string has a "+" or a
+"-dirty" at the end, uncommitted modifications exist in the kernel's
+source directory. Follow these steps to clean up the version string:
+
+1. *Discover the Uncommitted Changes:* Go to the kernel's locally cloned
+   Git repository (source directory) and use the following Git command
+   to list the files that have been changed, added, or removed:
+   ::
+
+      $ git status
+
+2. *Commit the Changes:* You should commit those changes to the kernel
+   source tree regardless of whether or not you will save, export, or
+   use the changes:
+   ::
+
+      $ git add
+      $ git commit -s -a -m "getting rid of -dirty"
+
+3. *Rebuild the Kernel Image:* Once you commit the changes, rebuild the
+   kernel.
+
+   Depending on your particular kernel development workflow, the
+   commands you use to rebuild the kernel might differ. For information
+   on building the kernel image when using ``devtool``, see the
+   ":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
+   section. For
+   information on building the kernel image when using BitBake, see the
+   "`Using Traditional Kernel Development to Patch the
+   Kernel <#using-traditional-kernel-development-to-patch-the-kernel>`__"
+   section.
+
+Working With Your Own Sources
+=============================
+
+If you cannot work with one of the Linux kernel versions supported by
+existing linux-yocto recipes, you can still make use of the Yocto
+Project Linux kernel tooling by working with your own sources. When you
+use your own sources, you will not be able to leverage the existing
+kernel :term:`Metadata` and stabilization
+work of the linux-yocto sources. However, you will be able to manage
+your own Metadata in the same format as the linux-yocto sources.
+Maintaining format compatibility facilitates converging with linux-yocto
+on a future, mutually-supported kernel version.
+
+To help you use your own sources, the Yocto Project provides a
+linux-yocto custom recipe (``linux-yocto-custom.bb``) that uses
+``kernel.org`` sources and the Yocto Project Linux kernel tools for
+managing kernel Metadata. You can find this recipe in the ``poky`` Git
+repository of the Yocto Project :yocto_git:`Source Repository <>`
+at:
+::
+
+   poky/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb
+
+Here are some basic steps you can use to work with your own sources:
+
+1. *Create a Copy of the Kernel Recipe:* Copy the
+   ``linux-yocto-custom.bb`` recipe to your layer and give it a
+   meaningful name. The name should include the version of the Yocto
+   Linux kernel you are using (e.g. ``linux-yocto-myproject_4.12.bb``,
+   where "4.12" is the base version of the Linux kernel with which you
+   would be working).
+
+2. *Create a Directory for Your Patches:* In the same directory inside
+   your layer, create a matching directory to store your patches and
+   configuration files (e.g. ``linux-yocto-myproject``).
+
+3. *Ensure You Have Configurations:* Make sure you have either a
+   ``defconfig`` file or configuration fragment files in your layer.
+ When you use the ``linux-yocto-custom.bb`` recipe, you must specify a + configuration. If you do not have a ``defconfig`` file, you can run + the following: + :: + + $ make defconfig + + After running the command, copy the + resulting ``.config`` file to the ``files`` directory in your layer + as "defconfig" and then add it to the + :term:`SRC_URI` variable in the + recipe. + + Running the ``make defconfig`` command results in the default + configuration for your architecture as defined by your kernel. + However, no guarantee exists that this configuration is valid for + your use case, or that your board will even boot. This is + particularly true for non-x86 architectures. + + To use non-x86 ``defconfig`` files, you need to be more specific and + find one that matches your board (i.e. for arm, you look in + ``arch/arm/configs`` and use the one that is the best starting point + for your board). + +4. *Edit the Recipe:* Edit the following variables in your recipe as + appropriate for your project: + + - :term:`SRC_URI`: The + ``SRC_URI`` should specify a Git repository that uses one of the + supported Git fetcher protocols (i.e. ``file``, ``git``, ``http``, + and so forth). The ``SRC_URI`` variable should also specify either + a ``defconfig`` file or some configuration fragment files. The + skeleton recipe provides an example ``SRC_URI`` as a syntax + reference. + + - :term:`LINUX_VERSION`: + The Linux kernel version you are using (e.g. "4.12"). + + - :term:`LINUX_VERSION_EXTENSION`: + The Linux kernel ``CONFIG_LOCALVERSION`` that is compiled into the + resulting kernel and visible through the ``uname`` command. + + - :term:`SRCREV`: The commit ID + from which you want to build. + + - :term:`PR`: Treat this variable the + same as you would in any other recipe. Increment the variable to + indicate to the OpenEmbedded build system that the recipe has + changed. + + - :term:`PV`: The default ``PV`` + assignment is typically adequate. It combines the + ``LINUX_VERSION`` with the Source Control Manager (SCM) revision + as derived from the :term:`SRCPV` + variable. The combined results are a string with the following + form: + 3.19.11+git1+68a635bf8dfb64b02263c1ac80c948647cc76d5f_1+218bd8d2022b9852c60d32f0d770931e3cf343e2 + While lengthy, the extra verbosity in ``PV`` helps ensure you are + using the exact sources from which you intend to build. + + - :term:`COMPATIBLE_MACHINE`: + A list of the machines supported by your new recipe. This variable + in the example recipe is set by default to a regular expression + that matches only the empty string, "(^$)". This default setting + triggers an explicit build failure. You must change it to match a + list of the machines that your new recipe supports. For example, + to support the ``qemux86`` and ``qemux86-64`` machines, use the + following form: COMPATIBLE_MACHINE = "qemux86|qemux86-64" + +5. *Customize Your Recipe as Needed:* Provide further customizations to + your recipe as needed just as you would customize an existing + linux-yocto recipe. See the "`Modifying an Existing + Recipe <#modifying-an-existing-recipe>`__" section for information. + +Working with Out-of-Tree Modules +================================ + +This section describes steps to build out-of-tree modules on your target +and describes how to incorporate out-of-tree modules in the build. 
+
+Building Out-of-Tree Modules on the Target
+------------------------------------------
+
+While the traditional Yocto Project development model would be to
+include kernel modules as part of the normal build process, you might
+find it useful to build modules on the target. This could be the case if
+your target system is capable and powerful enough to handle the
+necessary compilation. Before deciding to build on your target, however,
+you should consider the benefits of using a proper cross-development
+environment from your build host.
+
+If you want to be able to build out-of-tree modules on the target, there
+are some steps you need to take on the target that is running your SDK
+image. Briefly, the ``kernel-dev`` package is installed by default on
+all ``*.sdk`` images and the ``kernel-devsrc`` package is installed on
+many of the ``*.sdk`` images. However, you need to create some scripts
+prior to attempting to build the out-of-tree modules on the target that
+is running that image.
+
+Prior to attempting to build the out-of-tree modules, you need to be on
+the target as root and you need to change to the ``/usr/src/kernel``
+directory. Next, ``make`` the scripts:
+::
+
+   # cd /usr/src/kernel
+   # make scripts
+
+Because all SDK image recipes include ``dev-pkgs``, the
+``kernel-dev`` packages will be installed as part of the SDK image and
+the ``kernel-devsrc`` packages will be installed as part of applicable
+SDK images. The SDK uses the scripts when building out-of-tree modules.
+Once you have switched to that directory and created the scripts, you
+should be able to build your out-of-tree modules on the target.
+
+Incorporating Out-of-Tree Modules
+---------------------------------
+
+While it is always preferable to work with sources integrated into the
+Linux kernel sources, if you need an external kernel module, the
+``hello-mod.bb`` recipe is available as a template from which you can
+create your own out-of-tree Linux kernel module recipe.
+
+This template recipe is located in the ``poky`` Git repository of the
+Yocto Project :yocto_git:`Source Repository <>` at:
+::
+
+   poky/meta-skeleton/recipes-kernel/hello-mod/hello-mod_0.1.bb
+
+To get started, copy this recipe to your layer and give it a meaningful
+name (e.g. ``mymodule_1.0.bb``). In the same directory, create a new
+directory named ``files`` where you can store any source files, patches,
+or other files necessary for building the module that do not come with
+the sources. Finally, update the recipe as needed for the module.
+Typically, you will need to set the following variables:
+
+- :term:`DESCRIPTION`
+
+- :term:`LICENSE`
+
+- :term:`SRC_URI`
+
+- :term:`PV`
+
+Depending on the build system used by the module sources, you might need
+to make some adjustments. For example, a typical module ``Makefile``
+looks much like the one provided with the ``hello-mod`` template:
+::
+
+   obj-m := hello.o
+
+   SRC := $(shell pwd)
+
+   all:
+       $(MAKE) -C $(KERNEL_SRC) M=$(SRC)
+
+   modules_install:
+       $(MAKE) -C $(KERNEL_SRC) M=$(SRC) modules_install
+   ...
+
+The important point to note here is the :term:`KERNEL_SRC` variable. The
+:ref:`module ` class sets this variable and the
+:term:`KERNEL_PATH` variable to
+``${STAGING_KERNEL_DIR}`` with the necessary Linux kernel build
+information to build modules.
If your module ``Makefile`` uses a +different variable, you might want to override the +:ref:`ref-tasks-compile` step, or +create a patch to the ``Makefile`` to work with the more typical +``KERNEL_SRC`` or ``KERNEL_PATH`` variables. + +After you have prepared your recipe, you will likely want to include the +module in your images. To do this, see the documentation for the +following variables in the Yocto Project Reference Manual and set one of +them appropriately for your machine configuration file: + +- :term:`MACHINE_ESSENTIAL_EXTRA_RDEPENDS` + +- :term:`MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS` + +- :term:`MACHINE_EXTRA_RDEPENDS` + +- :term:`MACHINE_EXTRA_RRECOMMENDS` + +Modules are often not required for boot and can be excluded from certain +build configurations. The following allows for the most flexibility: +:: + + MACHINE_EXTRA_RRECOMMENDS += "kernel-module-mymodule" + +The value is +derived by appending the module filename without the ``.ko`` extension +to the string "kernel-module-". + +Because the variable is +:term:`RRECOMMENDS` and not a +:term:`RDEPENDS` variable, the build +will not fail if this module is not available to include in the image. + +Inspecting Changes and Commits +============================== + +A common question when working with a kernel is: "What changes have been +applied to this tree?" Rather than using "grep" across directories to +see what has changed, you can use Git to inspect or search the kernel +tree. Using Git is an efficient way to see what has changed in the tree. + +What Changed in a Kernel? +------------------------- + +Following are a few examples that show how to use Git commands to +examine changes. These examples are by no means the only way to see +changes. + +.. note:: + + In the following examples, unless you provide a commit range, + kernel.org + history is blended with Yocto Project kernel changes. You can form + ranges by using branch names from the kernel tree as the upper and + lower commit markers with the Git commands. You can see the branch + names through the web interface to the Yocto Project source + repositories at + . + +To see a full range of the changes, use the ``git whatchanged`` command +and specify a commit range for the branch (commit\ ``..``\ commit). + +Here is an example that looks at what has changed in the ``emenlow`` +branch of the ``linux-yocto-3.19`` kernel. The lower commit range is the +commit associated with the ``standard/base`` branch, while the upper +commit range is the commit associated with the ``standard/emenlow`` +branch. +:: + + $ git whatchanged origin/standard/base..origin/standard/emenlow + +To see short, one line summaries of changes use the ``git log`` command: +:: + + $ git log --oneline origin/standard/base..origin/standard/emenlow + +Use this command to see code differences for the changes: +:: + + $ git diff origin/standard/base..origin/standard/emenlow + +Use this command to see the commit log messages and the text +differences: +:: + + $ git show origin/standard/base..origin/standard/emenlow + +Use this command to create individual patches for each change. Here is +an example that that creates patch files for each commit and places them +in your ``Documents`` directory: +:: + + $ git format-patch -o $HOME/Documents origin/standard/base..origin/standard/emenlow + +Showing a Particular Feature or Branch Change +--------------------------------------------- + +Tags in the Yocto Project kernel tree divide changes for significant +features or branches. 
The ``git show`` tag command shows changes based +on a tag. Here is an example that shows ``systemtap`` changes: +:: + + $ git show systemtap + +You can use the ``git branch --contains`` tag command to +show the branches that contain a particular feature. This command shows +the branches that contain the ``systemtap`` feature: +:: + + $ git branch --contains systemtap + +Adding Recipe-Space Kernel Features +=================================== + +You can add kernel features in the +`recipe-space <#recipe-space-metadata>`__ by using the +:term:`KERNEL_FEATURES` +variable and by specifying the feature's ``.scc`` file path in the +:term:`SRC_URI` statement. When you +add features using this method, the OpenEmbedded build system checks to +be sure the features are present. If the features are not present, the +build stops. Kernel features are the last elements processed for +configuring and patching the kernel. Therefore, adding features in this +manner is a way to enforce specific features are present and enabled +without needing to do a full audit of any other layer's additions to the +``SRC_URI`` statement. + +You add a kernel feature by providing the feature as part of the +``KERNEL_FEATURES`` variable and by providing the path to the feature's +``.scc`` file, which is relative to the root of the kernel Metadata. The +OpenEmbedded build system searches all forms of kernel Metadata on the +``SRC_URI`` statement regardless of whether the Metadata is in the +"kernel-cache", system kernel Metadata, or a recipe-space Metadata (i.e. +part of the kernel recipe). See the "`Kernel Metadata +Location <#kernel-metadata-location>`__" section for additional +information. + +When you specify the feature's ``.scc`` file on the ``SRC_URI`` +statement, the OpenEmbedded build system adds the directory of that +``.scc`` file along with all its subdirectories to the kernel feature +search path. Because subdirectories are searched, you can reference a +single ``.scc`` file in the ``SRC_URI`` statement to reference multiple +kernel features. + +Consider the following example that adds the "test.scc" feature to the +build. + +1. *Create the Feature File:* Create a ``.scc`` file and locate it just + as you would any other patch file, ``.cfg`` file, or fetcher item you + specify in the ``SRC_URI`` statement. + + .. note:: + + - You must add the directory of the ``.scc`` file to the + fetcher's search path in the same manner as you would add a + ``.patch`` file. + + - You can create additional ``.scc`` files beneath the directory + that contains the file you are adding. All subdirectories are + searched during the build as potential feature directories. + + Continuing with the example, suppose the "test.scc" feature you are + adding has a ``test.scc`` file in the following directory: + :: + + my_recipe + | + +-linux-yocto + | + +-test.cfg + +-test.scc + + In this example, the + ``linux-yocto`` directory has both the feature ``test.scc`` file and + a similarly named configuration fragment file ``test.cfg``. + +2. *Add the Feature File to SRC_URI:* Add the ``.scc`` file to the + recipe's ``SRC_URI`` statement: + :: + + SRC_URI_append = " file://test.scc" + + The leading space before the path is important as the path is + appended to the existing path. + +3. 
*Specify the Feature as a Kernel Feature:* Use the + ``KERNEL_FEATURES`` statement to specify the feature as a kernel + feature: + :: + + KERNEL_FEATURES_append = " test.scc" + + The OpenEmbedded build + system processes the kernel feature when it builds the kernel. + + .. note:: + + If other features are contained below "test.scc", then their + directories are relative to the directory containing the + test.scc + file. diff --git a/poky/documentation/kernel-dev/kernel-dev-concepts-appx.rst b/poky/documentation/kernel-dev/kernel-dev-concepts-appx.rst new file mode 100644 index 000000000..04cb1172b --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-concepts-appx.rst @@ -0,0 +1,426 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************ +Advanced Kernel Concepts +************************ + +.. _kernel-big-picture: + +Yocto Project Kernel Development and Maintenance +================================================ + +Kernels available through the Yocto Project (Yocto Linux kernels), like +other kernels, are based off the Linux kernel releases from +http://www.kernel.org. At the beginning of a major Linux kernel +development cycle, the Yocto Project team chooses a Linux kernel based +on factors such as release timing, the anticipated release timing of +final upstream ``kernel.org`` versions, and Yocto Project feature +requirements. Typically, the Linux kernel chosen is in the final stages +of development by the Linux community. In other words, the Linux kernel +is in the release candidate or "rc" phase and has yet to reach final +release. But, by being in the final stages of external development, the +team knows that the ``kernel.org`` final release will clearly be within +the early stages of the Yocto Project development window. + +This balance allows the Yocto Project team to deliver the most +up-to-date Yocto Linux kernel possible, while still ensuring that the +team has a stable official release for the baseline Linux kernel +version. + +As implied earlier, the ultimate source for Yocto Linux kernels are +released kernels from ``kernel.org``. In addition to a foundational +kernel from ``kernel.org``, the available Yocto Linux kernels contain a +mix of important new mainline developments, non-mainline developments +(when no alternative exists), Board Support Package (BSP) developments, +and custom features. These additions result in a commercially released +Yocto Project Linux kernel that caters to specific embedded designer +needs for targeted hardware. + +You can find a web interface to the Yocto Linux kernels in the +:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories` +at :yocto_git:`/`. If you look at the interface, you will see to +the left a grouping of Git repositories titled "Yocto Linux Kernel". +Within this group, you will find several Linux Yocto kernels developed +and included with Yocto Project releases: + +- *linux-yocto-4.1:* The stable Yocto Project kernel to use with + the Yocto Project Release 2.0. This kernel is based on the Linux 4.1 + released kernel. + +- *linux-yocto-4.4:* The stable Yocto Project kernel to use with + the Yocto Project Release 2.1. This kernel is based on the Linux 4.4 + released kernel. + +- *linux-yocto-4.6:* A temporary kernel that is not tied to any + Yocto Project release. + +- *linux-yocto-4.8:* The stable yocto Project kernel to use with + the Yocto Project Release 2.2. + +- *linux-yocto-4.9:* The stable Yocto Project kernel to use with + the Yocto Project Release 2.3. 
This kernel is based on the Linux 4.9
+   released kernel.
+
+- *linux-yocto-4.10:* The default stable Yocto Project kernel to
+   use with the Yocto Project Release 2.3. This kernel is based on the
+   Linux 4.10 released kernel.
+
+- *linux-yocto-4.12:* The default stable Yocto Project kernel to
+   use with the Yocto Project Release 2.4. This kernel is based on the
+   Linux 4.12 released kernel.
+
+- *yocto-kernel-cache:* The ``yocto-kernel-cache`` repository contains
+   patches and configurations for the linux-yocto kernel tree. This
+   repository is useful when working on the linux-yocto kernel. For more
+   information on this "Advanced Kernel Metadata", see the
+   ":doc:`kernel-dev-advanced`" Chapter.
+
+- *linux-yocto-dev:* A development kernel based on the latest
+   upstream release candidate available.
+
+.. note::
+
+   Long Term Support Initiative (LTSI) for Yocto Linux kernels is as
+   follows:
+
+   - For Yocto Project releases 1.7, 1.8, and 2.0, the LTSI kernel is
+     ``linux-yocto-3.14``.
+
+   - For Yocto Project releases 2.1, 2.2, and 2.3, the LTSI kernel is
+     ``linux-yocto-4.1``.
+
+   - For Yocto Project release 2.4, the LTSI kernel is
+     ``linux-yocto-4.9``.
+
+   - ``linux-yocto-4.4`` is an LTS kernel.
+
+Once a Yocto Linux kernel is officially released, the Yocto Project team
+goes into their next development cycle, or upward revision (uprev)
+cycle, while still continuing maintenance on the released kernel. It is
+important to note that the most sustainable and stable way to include
+feature development upstream is through a kernel uprev process.
+Back-porting hundreds of individual fixes and minor features from
+various kernel versions is not sustainable and can easily compromise
+quality.
+
+During the uprev cycle, the Yocto Project team uses an ongoing analysis
+of Linux kernel development, BSP support, and release timing to select
+the best possible ``kernel.org`` Linux kernel version on which to base
+subsequent Yocto Linux kernel development. The team continually monitors
+Linux community kernel development to look for significant features of
+interest. The team does consider back-porting large features if they
+have a significant advantage. User or community demand can also trigger
+a back-port or creation of new functionality in the Yocto Project
+baseline kernel during the uprev cycle.
+
+Generally speaking, every new Linux kernel both adds features and
+introduces new bugs. These consequences are the basic properties of
+upstream Linux kernel development and are managed by the Yocto Project
+team's Yocto Linux kernel development strategy. It is the Yocto Project
+team's policy to not back-port minor features to the released Yocto
+Linux kernel. They only consider back-porting significant technological
+jumps, and that is done after a complete gap analysis. The reason
+for this policy is that back-porting any small to medium sized change
+from an evolving Linux kernel can easily create mismatches,
+incompatibilities, and very subtle errors.
+
+The policies described in this section result in both a stable and a
+cutting edge Yocto Linux kernel that mixes forward ports of existing
+Linux kernel features and significant and critical new functionality.
+Forward porting Linux kernel functionality into the Yocto Linux kernels
+available through the Yocto Project can be thought of as a "micro
+uprev." The many "micro uprevs" produce a Yocto Linux kernel version
+with a mix of important new mainline, non-mainline, BSP developments and
+feature integrations.
This Yocto Linux kernel gives insight into new +features and allows focused amounts of testing to be done on the kernel, +which prevents surprises when selecting the next major uprev. The +quality of these cutting edge Yocto Linux kernels is evolving and the +kernels are used in leading edge feature and BSP development. + +Yocto Linux Kernel Architecture and Branching Strategies +======================================================== + +As mentioned earlier, a key goal of the Yocto Project is to present the +developer with a kernel that has a clear and continuous history that is +visible to the user. The architecture and mechanisms, in particular the +branching strategies, used achieve that goal in a manner similar to +upstream Linux kernel development in ``kernel.org``. + +You can think of a Yocto Linux kernel as consisting of a baseline Linux +kernel with added features logically structured on top of the baseline. +The features are tagged and organized by way of a branching strategy +implemented by the Yocto Project team using the Source Code Manager +(SCM) Git. + +.. note:: + + - Git is the obvious SCM for meeting the Yocto Linux kernel + organizational and structural goals described in this section. Not + only is Git the SCM for Linux kernel development in ``kernel.org`` + but, Git continues to grow in popularity and supports many + different work flows, front-ends and management techniques. + + - You can find documentation on Git at + http://git-scm.com/documentation. You can also get an + introduction to Git as it applies to the Yocto Project in the + ":ref:`overview-manual/overview-manual-development-environment:git`" section in the Yocto Project + Overview and Concepts Manual. The latter reference provides an + overview of Git and presents a minimal set of Git commands that + allows you to be functional using Git. You can use as much, or as + little, of what Git has to offer to accomplish what you need for + your project. You do not have to be a "Git Expert" in order to use + it with the Yocto Project. + +Using Git's tagging and branching features, the Yocto Project team +creates kernel branches at points where functionality is no longer +shared and thus, needs to be isolated. For example, board-specific +incompatibilities would require different functionality and would +require a branch to separate the features. Likewise, for specific kernel +features, the same branching strategy is used. + +This "tree-like" architecture results in a structure that has features +organized to be specific for particular functionality, single kernel +types, or a subset of kernel types. Thus, the user has the ability to +see the added features and the commits that make up those features. In +addition to being able to see added features, the user can also view the +history of what made up the baseline Linux kernel. + +Another consequence of this strategy results in not having to store the +same feature twice internally in the tree. Rather, the kernel team +stores the unique differences required to apply the feature onto the +kernel type in question. + +.. note:: + + The Yocto Project team strives to place features in the tree such + that features can be shared by all boards and kernel types where + possible. However, during development cycles or when large features + are merged, the team cannot always follow this practice. In those + cases, the team uses isolated branches to merge features. + +BSP-specific code additions are handled in a similar manner to +kernel-specific additions. 
Some BSPs only make sense given certain +kernel types. So, for these types, the team creates branches off the end +of that kernel type for all of the BSPs that are supported on that +kernel type. From the perspective of the tools that create the BSP +branch, the BSP is really no different than a feature. Consequently, the +same branching strategy applies to BSPs as it does to kernel features. +So again, rather than store the BSP twice, the team only stores the +unique differences for the BSP across the supported multiple kernels. + +While this strategy can result in a tree with a significant number of +branches, it is important to realize that from the developer's point of +view, there is a linear path that travels from the baseline +``kernel.org``, through a select group of features and ends with their +BSP-specific commits. In other words, the divisions of the kernel are +transparent and are not relevant to the developer on a day-to-day basis. +From the developer's perspective, this path is the "master" branch in +Git terms. The developer does not need to be aware of the existence of +any other branches at all. Of course, value exists in the having these +branches in the tree, should a person decide to explore them. For +example, a comparison between two BSPs at either the commit level or at +the line-by-line code ``diff`` level is now a trivial operation. + +The following illustration shows the conceptual Yocto Linux kernel. + +.. image:: figures/kernel-architecture-overview.png + :align: center + +In the illustration, the "Kernel.org Branch Point" marks the specific +spot (or Linux kernel release) from which the Yocto Linux kernel is +created. From this point forward in the tree, features and differences +are organized and tagged. + +The "Yocto Project Baseline Kernel" contains functionality that is +common to every kernel type and BSP that is organized further along in +the tree. Placing these common features in the tree this way means +features do not have to be duplicated along individual branches of the +tree structure. + +From the "Yocto Project Baseline Kernel", branch points represent +specific functionality for individual Board Support Packages (BSPs) as +well as real-time kernels. The illustration represents this through +three BSP-specific branches and a real-time kernel branch. Each branch +represents some unique functionality for the BSP or for a real-time +Yocto Linux kernel. + +In this example structure, the "Real-time (rt) Kernel" branch has common +features for all real-time Yocto Linux kernels and contains more +branches for individual BSP-specific real-time kernels. The illustration +shows three branches as an example. Each branch points the way to +specific, unique features for a respective real-time kernel as they +apply to a given BSP. + +The resulting tree structure presents a clear path of markers (or +branches) to the developer that, for all practical purposes, is the +Yocto Linux kernel needed for any given set of requirements. + +.. note:: + + Keep in mind the figure does not take into account all the supported + Yocto Linux kernels, but rather shows a single generic kernel just + for conceptual purposes. Also keep in mind that this structure + represents the Yocto Project + Source Repositories + that are either pulled from during the build or established on the + host development system prior to the build by either cloning a + particular kernel's Git repository or by downloading and unpacking a + tarball. 
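+
+To make the comparison point above concrete, here is a minimal sketch of
+how such a comparison might look from a clone of a linux-yocto
+repository. The branch names and the subdirectory are only examples
+taken from the trees discussed in this manual; substitute the kernel
+type and BSP branches that actually exist in your checkout:
+::
+
+   $ cd linux-yocto-4.12
+   $ git log --oneline origin/standard/base..origin/standard/beagleboard
+   $ git diff origin/standard/base..origin/standard/beagleboard -- arch/arm/
+
+The first command lists the commits unique to the BSP branch, while the
+second restricts the line-by-line differences to a subdirectory of
+interest.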
+ +Working with the kernel as a structured tree follows recognized +community best practices. In particular, the kernel as shipped with the +product, should be considered an "upstream source" and viewed as a +series of historical and documented modifications (commits). These +modifications represent the development and stabilization done by the +Yocto Project kernel development team. + +Because commits only change at significant release points in the product +life cycle, developers can work on a branch created from the last +relevant commit in the shipped Yocto Project Linux kernel. As mentioned +previously, the structure is transparent to the developer because the +kernel tree is left in this state after cloning and building the kernel. + +Kernel Build File Hierarchy +=========================== + +Upstream storage of all the available kernel source code is one thing, +while representing and using the code on your host development system is +another. Conceptually, you can think of the kernel source repositories +as all the source files necessary for all the supported Yocto Linux +kernels. As a developer, you are just interested in the source files for +the kernel on which you are working. And, furthermore, you need them +available on your host system. + +Kernel source code is available on your host system several different +ways: + +- *Files Accessed While using devtool:* ``devtool``, which is + available with the Yocto Project, is the preferred method by which to + modify the kernel. See the ":ref:`kernel-dev/kernel-dev-intro:kernel modification workflow`" section. + +- *Cloned Repository:* If you are working in the kernel all the time, + you probably would want to set up your own local Git repository of + the Yocto Linux kernel tree. For information on how to clone a Yocto + Linux kernel Git repository, see the + ":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`" + section. + +- *Temporary Source Files from a Build:* If you just need to make some + patches to the kernel using a traditional BitBake workflow (i.e. not + using the ``devtool``), you can access temporary kernel source files + that were extracted and used during a kernel build. + +The temporary kernel source files resulting from a build using BitBake +have a particular hierarchy. When you build the kernel on your +development system, all files needed for the build are taken from the +source repositories pointed to by the +:term:`SRC_URI` variable and gathered +in a temporary work area where they are subsequently used to create the +unique kernel. Thus, in a sense, the process constructs a local source +tree specific to your kernel from which to generate the new kernel +image. + +The following figure shows the temporary file structure created on your +host system when you build the kernel using Bitbake. This +:term:`Build Directory` contains all the +source files used during the build. + +.. image:: figures/kernel-overview-2-generic.png + :align: center + +Again, for additional information on the Yocto Project kernel's +architecture and its branching strategy, see the +":ref:`kernel-dev/kernel-dev-concepts-appx:yocto linux kernel architecture and branching strategies`" +section. You can also reference the +":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" +and +":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`" +sections for detailed example that modifies the kernel. 
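+
+If you want to see where these temporary kernel source and build files
+live on your own build host, one approach (not the only one) is to query
+the BitBake environment for the relevant variables. ``S``, ``B``, and
+``STAGING_KERNEL_DIR`` are standard OpenEmbedded variables; the paths
+printed depend entirely on your local configuration:
+::
+
+   $ bitbake -e virtual/kernel | grep -E "^(S|B|STAGING_KERNEL_DIR)="
+
+The output shows the unpacked kernel source directory, the build
+directory used for generated objects, and the shared kernel source
+location that other recipes, such as out-of-tree modules, build against.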
+ +Determining Hardware and Non-Hardware Features for the Kernel Configuration Audit Phase +======================================================================================= + +This section describes part of the kernel configuration audit phase that +most developers can ignore. For general information on kernel +configuration including ``menuconfig``, ``defconfig`` files, and +configuration fragments, see the +":ref:`kernel-dev/kernel-dev-common:configuring the kernel`" section. + +During this part of the audit phase, the contents of the final +``.config`` file are compared against the fragments specified by the +system. These fragments can be system fragments, distro fragments, or +user-specified configuration elements. Regardless of their origin, the +OpenEmbedded build system warns the user if a specific option is not +included in the final kernel configuration. + +By default, in order to not overwhelm the user with configuration +warnings, the system only reports missing "hardware" options as they +could result in a boot failure or indicate that important hardware is +not available. + +To determine whether or not a given option is "hardware" or +"non-hardware", the kernel Metadata in ``yocto-kernel-cache`` contains +files that classify individual or groups of options as either hardware +or non-hardware. To better show this, consider a situation where the +``yocto-kernel-cache`` contains the following files: +:: + + yocto-kernel-cache/features/drm-psb/hardware.cfg + yocto-kernel-cache/features/kgdb/hardware.cfg + yocto-kernel-cache/ktypes/base/hardware.cfg + yocto-kernel-cache/bsp/mti-malta32/hardware.cfg + yocto-kernel-cache/bsp/qemu-ppc32/hardware.cfg + yocto-kernel-cache/bsp/qemuarma9/hardware.cfg + yocto-kernel-cache/bsp/mti-malta64/hardware.cfg + yocto-kernel-cache/bsp/arm-versatile-926ejs/hardware.cfg + yocto-kernel-cache/bsp/common-pc/hardware.cfg + yocto-kernel-cache/bsp/common-pc-64/hardware.cfg + yocto-kernel-cache/features/rfkill/non-hardware.cfg + yocto-kernel-cache/ktypes/base/non-hardware.cfg + yocto-kernel-cache/features/aufs/non-hardware.kcf + yocto-kernel-cache/features/ocf/non-hardware.kcf + yocto-kernel-cache/ktypes/base/non-hardware.kcf + yocto-kernel-cache/ktypes/base/hardware.kcf + yocto-kernel-cache/bsp/qemu-ppc32/hardware.kcf + +The following list +provides explanations for the various files: + +- ``hardware.kcf``: Specifies a list of kernel Kconfig files that + contain hardware options only. + +- ``non-hardware.kcf``: Specifies a list of kernel Kconfig files that + contain non-hardware options only. + +- ``hardware.cfg``: Specifies a list of kernel ``CONFIG_`` options that + are hardware, regardless of whether or not they are within a Kconfig + file specified by a hardware or non-hardware Kconfig file (i.e. + ``hardware.kcf`` or ``non-hardware.kcf``). + +- ``non-hardware.cfg``: Specifies a list of kernel ``CONFIG_`` options + that are not hardware, regardless of whether or not they are within a + Kconfig file specified by a hardware or non-hardware Kconfig file + (i.e. ``hardware.kcf`` or ``non-hardware.kcf``). 
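+
+A ``.kcf`` file is simply a list of kernel Kconfig file paths, one per
+line, relative to the root of the kernel source tree. As a hedged sketch
+only, a ``hardware.kcf`` file classifying a few illustrative Kconfig
+files as hardware might look like the following; the exact paths you
+list depend on your kernel version and hardware:
+::
+
+   arch/x86/Kconfig
+   drivers/net/ethernet/Kconfig
+   drivers/usb/Kconfig
+   sound/Kconfig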
+ +Here is a specific example using the +``kernel-cache/bsp/mti-malta32/hardware.cfg``: +:: + + CONFIG_SERIAL_8250 + CONFIG_SERIAL_8250_CONSOLE + CONFIG_SERIAL_8250_NR_UARTS + CONFIG_SERIAL_8250_PCI + CONFIG_SERIAL_CORE + CONFIG_SERIAL_CORE_CONSOLE + CONFIG_VGA_ARB + +The kernel configuration audit automatically detects +these files (hence the names must be exactly the ones discussed here), +and uses them as inputs when generating warnings about the final +``.config`` file. + +A user-specified kernel Metadata repository, or recipe space feature, +can use these same files to classify options that are found within its +``.cfg`` files as hardware or non-hardware, to prevent the OpenEmbedded +build system from producing an error or warning when an option is not in +the final ``.config`` file. diff --git a/poky/documentation/kernel-dev/kernel-dev-concepts-appx.xml b/poky/documentation/kernel-dev/kernel-dev-concepts-appx.xml index 0f2df2a62..bf0c525ca 100644 --- a/poky/documentation/kernel-dev/kernel-dev-concepts-appx.xml +++ b/poky/documentation/kernel-dev/kernel-dev-concepts-appx.xml @@ -192,7 +192,7 @@ Forward porting Linux kernel functionality into the Yocto Linux kernels available through the Yocto Project can be thought of as a "micro uprev." - The many “micro uprevs” produce a Yocto Linux kernel version with + The many "micro uprevs" produce a Yocto Linux kernel version with a mix of important new mainline, non-mainline, BSP developments and feature integrations. This Yocto Linux kernel gives insight into new features and diff --git a/poky/documentation/kernel-dev/kernel-dev-faq.rst b/poky/documentation/kernel-dev/kernel-dev-faq.rst new file mode 100644 index 000000000..b5e6a84eb --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-faq.rst @@ -0,0 +1,81 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************** +Kernel Development FAQ +********************** + +.. _kernel-dev-faq-section: + +Common Questions and Solutions +============================== + +The following lists some solutions for common questions. + +How do I use my own Linux kernel ``.config`` file? +-------------------------------------------------- + +Refer to the +":ref:`kernel-dev/kernel-dev-common:changing the configuration`" +section for information. + +How do I create configuration fragments? +---------------------------------------- + +A: Refer to the +":ref:`kernel-dev/kernel-dev-common:creating configuration fragments`" +section for information. + +How do I use my own Linux kernel sources? +----------------------------------------- + +Refer to the +":ref:`kernel-dev/kernel-dev-common:working with your own sources`" +section for information. + +How do I install/not-install the kernel image on the rootfs? +------------------------------------------------------------ + +The kernel image (e.g. ``vmlinuz``) is provided by the +``kernel-image`` package. Image recipes depend on ``kernel-base``. To +specify whether or not the kernel image is installed in the generated +root filesystem, override ``RDEPENDS_kernel-base`` to include or not +include "kernel-image". See the +":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`" +section in the +Yocto Project Development Tasks Manual for information on how to use an +append file to override metadata. + +How do I install a specific kernel module? +------------------------------------------ + +Linux kernel modules are packaged individually. 
To ensure a +specific kernel module is included in an image, include it in the +appropriate machine +:term:`RRECOMMENDS` variable. +These other variables are useful for installing specific modules: +:term:`MACHINE_ESSENTIAL_EXTRA_RDEPENDS` +:term:`MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS` +:term:`MACHINE_EXTRA_RDEPENDS` +:term:`MACHINE_EXTRA_RRECOMMENDS` +For example, set the following in the ``qemux86.conf`` file to include +the ``ab123`` kernel modules with images built for the ``qemux86`` +machine: +:: + + MACHINE_EXTRA_RRECOMMENDS += "kernel-module-ab123" + +For more +information, see the "`Incorporating Out-of-Tree +Modules <#incorporating-out-of-tree-modules>`__" section. + +How do I change the Linux kernel command line? +---------------------------------------------- + +The Linux kernel command line is +typically specified in the machine config using the ``APPEND`` variable. +For example, you can add some helpful debug information doing the +following: +:: + + APPEND += "printk.time=y initcall_debug debug" + diff --git a/poky/documentation/kernel-dev/kernel-dev-intro.rst b/poky/documentation/kernel-dev/kernel-dev-intro.rst new file mode 100644 index 000000000..21d43d5e8 --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-intro.rst @@ -0,0 +1,183 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Introduction +************ + +.. _kernel-dev-overview: + +Overview +======== + +Regardless of how you intend to make use of the Yocto Project, chances +are you will work with the Linux kernel. This manual describes how to +set up your build host to support kernel development, introduces the +kernel development process, provides background information on the Yocto +Linux kernel :term:`Metadata`, describes +common tasks you can perform using the kernel tools, shows you how to +use the kernel Metadata needed to work with the kernel inside the Yocto +Project, and provides insight into how the Yocto Project team develops +and maintains Yocto Linux kernel Git repositories and Metadata. + +Each Yocto Project release has a set of Yocto Linux kernel recipes, +whose Git repositories you can view in the Yocto +:yocto_git:`Source Repositories <>` under the "Yocto Linux Kernel" +heading. New recipes for the release track the latest Linux kernel +upstream developments from http://www.kernel.org> and introduce +newly-supported platforms. Previous recipes in the release are refreshed +and supported for at least one additional Yocto Project release. As they +align, these previous releases are updated to include the latest from +the Long Term Support Initiative (LTSI) project. You can learn more +about Yocto Linux kernels and LTSI in the ":ref:`Yocto Project Kernel +Development and Maintenance `" section. + +Also included is a Yocto Linux kernel development recipe +(``linux-yocto-dev.bb``) should you want to work with the very latest in +upstream Yocto Linux kernel development and kernel Metadata development. + +.. note:: + + For more on Yocto Linux kernels, see the " + Yocto Project Kernel Development and Maintenance + section. + +The Yocto Project also provides a powerful set of kernel tools for +managing Yocto Linux kernel sources and configuration data. You can use +these tools to make a single configuration change, apply multiple +patches, or work with your own kernel sources. + +In particular, the kernel tools allow you to generate configuration +fragments that specify only what you must, and nothing more. 
+Configuration fragments only need to contain the highest level visible +``CONFIG`` options as presented by the Yocto Linux kernel ``menuconfig`` +system. Contrast this against a complete Yocto Linux kernel ``.config`` +file, which includes all the automatically selected ``CONFIG`` options. +This efficiency reduces your maintenance effort and allows you to +further separate your configuration in ways that make sense for your +project. A common split separates policy and hardware. For example, all +your kernels might support the ``proc`` and ``sys`` filesystems, but +only specific boards require sound, USB, or specific drivers. Specifying +these configurations individually allows you to aggregate them together +as needed, but maintains them in only one place. Similar logic applies +to separating source changes. + +If you do not maintain your own kernel sources and need to make only +minimal changes to the sources, the released recipes provide a vetted +base upon which to layer your changes. Doing so allows you to benefit +from the continual kernel integration and testing performed during +development of the Yocto Project. + +If, instead, you have a very specific Linux kernel source tree and are +unable to align with one of the official Yocto Linux kernel recipes, an +alternative exists by which you can use the Yocto Project Linux kernel +tools with your own kernel sources. + +The remainder of this manual provides instructions for completing +specific Linux kernel development tasks. These instructions assume you +are comfortable working with +`BitBake `__ recipes and basic +open-source development tools. Understanding these concepts will +facilitate the process of working with the kernel recipes. If you find +you need some additional background, please be sure to review and +understand the following documentation: + +- :doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document. + +- :doc:`../overview-manual/overview-manual`. + +- :ref:`devtool + workflow ` + as described in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + +- The ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" + section in the Yocto Project Development Tasks Manual. + +- The "`Kernel Modification + Workflow <#kernel-modification-workflow>`__" section. + +Kernel Modification Workflow +============================ + +Kernel modification involves changing the Yocto Project kernel, which +could involve changing configuration options as well as adding new +kernel recipes. Configuration changes can be added in the form of +configuration fragments, while recipe modification comes through the +kernel's ``recipes-kernel`` area in a kernel layer you create. + +This section presents a high-level overview of the Yocto Project kernel +modification workflow. The illustration and accompanying list provide +general information and references for further information. + +.. image:: figures/kernel-dev-flow.png + :align: center + +1. *Set up Your Host Development System to Support Development Using the + Yocto Project*: See the ":doc:`../dev-manual/dev-manual-start`" section in + the Yocto Project Development Tasks Manual for options on how to get + a build host ready to use the Yocto Project. + +2. *Set Up Your Host Development System for Kernel Development:* It is + recommended that you use ``devtool`` and an extensible SDK for kernel + development. Alternatively, you can use traditional kernel + development methods with the Yocto Project. 
Either way, there are + steps you need to take to get the development environment ready. + + Using ``devtool`` and the eSDK requires that you have a clean build + of the image and that you are set up with the appropriate eSDK. For + more information, see the + ":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``" + section. + + Using traditional kernel development requires that you have the + kernel source available in an isolated local Git repository. For more + information, see the + ":ref:`kernel-dev/kernel-dev-common:getting ready for traditional kernel development`" + section. + +3. *Make Changes to the Kernel Source Code if applicable:* Modifying the + kernel does not always mean directly changing source files. However, + if you have to do this, you make the changes to the files in the + eSDK's Build Directory if you are using ``devtool``. For more + information, see the + ":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`" + section. + + If you are using traditional kernel development, you edit the source + files in the kernel's local Git repository. For more information, see the + ":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`" + section. + +4. *Make Kernel Configuration Changes if Applicable:* If your situation + calls for changing the kernel's configuration, you can use + :ref:`menuconfig `, + which allows you to + interactively develop and test the configuration changes you are + making to the kernel. Saving changes you make with ``menuconfig`` + updates the kernel's ``.config`` file. + + .. note:: + + Try to resist the temptation to directly edit an existing + .config + file, which is found in the Build Directory among the source code + used for the build. Doing so, can produce unexpected results when + the OpenEmbedded build system regenerates the configuration file. + + Once you are satisfied with the configuration changes made using + ``menuconfig`` and you have saved them, you can directly compare the + resulting ``.config`` file against an existing original and gather + those changes into a `configuration fragment + file <#creating-config-fragments>`__ to be referenced from within the + kernel's ``.bbappend`` file. + + Additionally, if you are working in a BSP layer and need to modify + the BSP's kernel's configuration, you can use ``menuconfig``. + +5. *Rebuild the Kernel Image With Your Changes:* Rebuilding the kernel + image applies your changes. Depending on your target hardware, you + can verify your changes on actual hardware or perhaps QEMU. + +The remainder of this developer's guide covers common tasks typically +used during kernel development, advanced Metadata usage, and Yocto Linux +kernel maintenance concepts. diff --git a/poky/documentation/kernel-dev/kernel-dev-maint-appx.rst b/poky/documentation/kernel-dev/kernel-dev-maint-appx.rst new file mode 100644 index 000000000..5514dac87 --- /dev/null +++ b/poky/documentation/kernel-dev/kernel-dev-maint-appx.rst @@ -0,0 +1,239 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************** +Kernel Maintenance +****************** + +Tree Construction +================= + +This section describes construction of the Yocto Project kernel source +repositories as accomplished by the Yocto Project team to create Yocto +Linux kernel repositories. These kernel repositories are found under the +heading "Yocto Linux Kernel" at :yocto_git:`/` and +are shipped as part of a Yocto Project release. 
The team creates these
+repositories by compiling and executing the set of feature descriptions
+for every BSP and feature in the product. Those feature descriptions
+list all necessary patches, configurations, branches, tags, and feature
+divisions found in a Yocto Linux kernel. Thus, the Yocto Project Linux
+kernel repository (or tree) and accompanying Metadata in the
+``yocto-kernel-cache`` are built.
+
+The existence of these repositories allows you to access and clone a
+particular Yocto Project Linux kernel repository and use it to build
+images based on their configurations and features.
+
+You can find the files used to describe all the valid features and BSPs
+in the Yocto Project Linux kernel in any clone of the Yocto Project
+Linux kernel source repository and ``yocto-kernel-cache`` Git trees. For
+example, the following commands clone the Yocto Project baseline Linux
+kernel that branches off ``linux.org`` version 4.12 and the
+``yocto-kernel-cache``, which contains stores of kernel Metadata:
+::
+
+   $ git clone git://git.yoctoproject.org/linux-yocto-4.12
+   $ git clone git://git.yoctoproject.org/linux-kernel-cache
+
+For more information on
+how to set up a local Git repository of the Yocto Project Linux kernel
+files, see the
+":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`"
+section.
+
+Once you have cloned the kernel Git repository and the cache of Metadata
+on your local machine, you can discover the branches that are available
+in the repository using the following Git command:
+::
+
+   $ git branch -a
+
+Checking out a branch allows you to work with a particular Yocto Linux
+kernel. For example, the following commands check out the
+"standard/beagleboard" branch of the Yocto Linux kernel repository and
+the "yocto-4.12" branch of the ``yocto-kernel-cache`` repository:
+::
+
+   $ cd ~/linux-yocto-4.12
+   $ git checkout -b my-kernel-4.12 remotes/origin/standard/beagleboard
+   $ cd ~/linux-kernel-cache
+   $ git checkout -b my-4.12-metadata remotes/origin/yocto-4.12
+
+.. note::
+
+   Branches in the ``yocto-kernel-cache`` repository correspond to Yocto
+   Linux kernel versions (e.g. "yocto-4.12", "yocto-4.10", "yocto-4.9",
+   and so forth).
+
+Once you have checked out and switched to appropriate branches, you can
+see a snapshot of all the kernel source files used to build that
+particular Yocto Linux kernel for a particular board.
+
+To see the features and configurations for a particular Yocto Linux
+kernel, you need to examine the ``yocto-kernel-cache`` Git repository.
+As mentioned, branches in the ``yocto-kernel-cache`` repository
+correspond to Yocto Linux kernel versions (e.g. ``yocto-4.12``).
+Branches contain descriptions in the form of ``.scc`` and ``.cfg``
+files.
+
+You should realize, however, that browsing your local
+``yocto-kernel-cache`` repository for feature descriptions and patches
+is not an effective way to determine what is in a particular kernel
+branch. Instead, you should use Git directly to discover the changes in
+a branch. Using Git is an efficient and flexible way to inspect changes
+to the kernel.
+
+.. note::
+
+   Ground up reconstruction of the complete kernel tree is an action
+   only taken by the Yocto Project team during an active development
+   cycle. When you create a clone of the kernel Git repository, you are
+   simply making it efficiently available for building and development.
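+
+As a brief illustration of inspecting the Metadata with Git rather than
+browsing files, and assuming the checkouts shown above, commands along
+the following lines show the most recent Metadata changes and the
+feature descriptions that a particular BSP directory provides (the
+``beaglebone`` directory is just an example):
+::
+
+   $ cd ~/linux-kernel-cache
+   $ git log --oneline -5
+   $ find bsp/beaglebone -name "*.scc" -o -name "*.cfg"
+
+Neither command changes the tree; they simply help you correlate a cache
+branch with the features it describes before you start a build.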
+ +The following steps describe what happens when the Yocto Project Team +constructs the Yocto Project kernel source Git repository (or tree) +found at :yocto_git:`/` given the introduction of a new +top-level kernel feature or BSP. The following actions effectively +provide the Metadata and create the tree that includes the new feature, +patch, or BSP: + +1. *Pass Feature to the OpenEmbedded Build System:* A top-level kernel + feature is passed to the kernel build subsystem. Normally, this + feature is a BSP for a particular kernel type. + +2. *Locate Feature:* The file that describes the top-level feature is + located by searching these system directories: + + - The in-tree kernel-cache directories, which are located in the + :yocto_git:`yocto-kernel-cache ` + repository organized under the "Yocto Linux Kernel" heading in the + :yocto_git:`Yocto Project Source Repositories <>`. + + - Areas pointed to by ``SRC_URI`` statements found in kernel recipes + + For a typical build, the target of the search is a feature + description in an ``.scc`` file whose name follows this format (e.g. + ``beaglebone-standard.scc`` and ``beaglebone-preempt-rt.scc``): + :: + + bsp_root_name-kernel_type.scc + +3. *Expand Feature:* Once located, the feature description is either + expanded into a simple script of actions, or into an existing + equivalent script that is already part of the shipped kernel. + +4. *Append Extra Features:* Extra features are appended to the top-level + feature description. These features can come from the + :term:`KERNEL_FEATURES` + variable in recipes. + +5. *Locate, Expand, and Append Each Feature:* Each extra feature is + located, expanded and appended to the script as described in step + three. + +6. *Execute the Script:* The script is executed to produce files + ``.scc`` and ``.cfg`` files in appropriate directories of the + ``yocto-kernel-cache`` repository. These files are descriptions of + all the branches, tags, patches and configurations that need to be + applied to the base Git repository to completely create the source + (build) branch for the new BSP or feature. + +7. *Clone Base Repository:* The base repository is cloned, and the + actions listed in the ``yocto-kernel-cache`` directories are applied + to the tree. + +8. *Perform Cleanup:* The Git repositories are left with the desired + branches checked out and any required branching, patching and tagging + has been performed. + +The kernel tree and cache are ready for developer consumption to be +locally cloned, configured, and built into a Yocto Project kernel +specific to some target hardware. + +.. note:: + + - The generated ``yocto-kernel-cache`` repository adds to the kernel + as shipped with the Yocto Project release. Any add-ons and + configuration data are applied to the end of an existing branch. + The full repository generation that is found in the official Yocto + Project kernel repositories at :yocto_git:`/` is the + combination of all supported boards and configurations. + + - The technique the Yocto Project team uses is flexible and allows + for seamless blending of an immutable history with additional + patches specific to a deployment. Any additions to the kernel + become an integrated part of the branches. + + - The full kernel tree that you see on :yocto_git:`/` is + generated through repeating the above steps for all valid BSPs. + The end result is a branched, clean history tree that makes up the + kernel for a given release. 
You can see the script (``kgit-scc``)
+      responsible for this in the
+      :yocto_git:`yocto-kernel-tools `
+      repository.
+
+   -  The steps used to construct the full kernel tree are the same
+      steps that BitBake uses when it builds a kernel image.
+
+Build Strategy
+==============
+
+Once you have cloned a Yocto Linux kernel repository and the cache
+repository (``yocto-kernel-cache``) onto your development system, you
+can consider the compilation phase of kernel development, which is
+building a kernel image. Some prerequisites exist that are validated by
+the build process before compilation starts:
+
+-  The :term:`SRC_URI` points to the
+   kernel Git repository.
+
+-  A BSP build branch with Metadata exists in the ``yocto-kernel-cache``
+   repository. The branch is based on the Yocto Linux kernel version and
+   has configurations and features grouped under the
+   ``yocto-kernel-cache/bsp`` directory. For example, features and
+   configurations for the BeagleBone Board assuming a
+   ``linux-yocto_4.12`` kernel reside in the following area of the
+   ``yocto-kernel-cache`` repository:
+   ::
+
+      yocto-kernel-cache/bsp/beaglebone
+
+   .. note::
+
+      In the previous example, the "yocto-4.12" branch is checked out in
+      the
+      yocto-kernel-cache
+      repository.
+
+The OpenEmbedded build system makes sure these conditions exist before
+attempting compilation. Other means, however, do exist, such as
+bootstrapping a BSP.
+
+Before building a kernel, the build process verifies the tree and
+configures the kernel by processing all of the configuration "fragments"
+specified by feature descriptions in the ``.scc`` files. As the features
+are compiled, associated kernel configuration fragments are noted and
+recorded in the series of directories in their compilation order. The
+fragments are migrated, pre-processed, and passed to the Linux Kernel
+Configuration subsystem (``lkc``) as raw input in the form of a
+``.config`` file. The ``lkc`` uses its own internal dependency
+constraints to do the final processing of that information and generates
+the final ``.config`` file that is used during compilation.
+
+Using the board's architecture and other relevant values from the
+board's template, kernel compilation is started and a kernel image is
+produced.
+
+Another thing you notice once you configure a kernel is that the
+build process generates a build tree that is separate from your kernel's
+local Git source repository tree. This build tree has a name that uses
+the following form, where ``${MACHINE}`` is the metadata name of the
+machine (BSP) and "kernel_type" is one of the Yocto Project supported
+kernel types (e.g. "standard"):
+::
+
+   linux-${MACHINE}-kernel_type-build
+
+The existing support in the ``kernel.org`` tree achieves this default
+functionality.
+
+This behavior means that all the generated files for a particular
+machine or BSP are now in the build tree directory. The files include
+the final ``.config`` file, all the ``.o`` files, the ``.a`` files, and
+so forth. Since each machine or BSP has its own separate
+:term:`Build Directory` in its own separate
+branch of the Git repository, you can easily switch between different
+builds.
diff --git a/poky/documentation/kernel-dev/kernel-dev.rst b/poky/documentation/kernel-dev/kernel-dev.rst
new file mode 100644
index 000000000..332e089b0
--- /dev/null
+++ b/poky/documentation/kernel-dev/kernel-dev.rst
@@ -0,0 +1,21 @@
+.. 
SPDX-License-Identifier: CC-BY-2.0-UK + +============================================= +Yocto Project Linux Kernel Development Manual +============================================= + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + kernel-dev-intro + kernel-dev-common + kernel-dev-advanced + kernel-dev-concepts-appx + kernel-dev-maint-appx + kernel-dev-faq + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/overview-manual/history.rst b/poky/documentation/overview-manual/history.rst new file mode 100644 index 000000000..0273d28b9 --- /dev/null +++ b/poky/documentation/overview-manual/history.rst @@ -0,0 +1,28 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 2.5 + - May 2018 + - The initial document released with the Yocto Project 2.5 Release + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. diff --git a/poky/documentation/overview-manual/overview-manual-concepts.rst b/poky/documentation/overview-manual/overview-manual-concepts.rst new file mode 100644 index 000000000..3d8dc7afd --- /dev/null +++ b/poky/documentation/overview-manual/overview-manual-concepts.rst @@ -0,0 +1,2185 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************** +Yocto Project Concepts +********************** + +This chapter provides explanations for Yocto Project concepts that go +beyond the surface of "how-to" information and reference (or look-up) +material. Concepts such as components, the :term:`OpenEmbedded Build System` +workflow, +cross-development toolchains, shared state cache, and so forth are +explained. + +Yocto Project Components +======================== + +The :term:`BitBake` task executor +together with various types of configuration files form the +:term:`OpenEmbedded-Core (OE-Core)`. This section +overviews these components by describing their use and how they +interact. + +BitBake handles the parsing and execution of the data files. The data +itself is of various types: + +- *Recipes:* Provides details about particular pieces of software. + +- *Class Data:* Abstracts common build information (e.g. how to build a + Linux kernel). + +- *Configuration Data:* Defines machine-specific settings, policy + decisions, and so forth. Configuration data acts as the glue to bind + everything together. + +BitBake knows how to combine multiple data sources together and refers +to each data source as a layer. For information on layers, see the +":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section of the Yocto Project Development Tasks Manual. + +Following are some brief details on these core components. For +additional information on how these components interact during a build, +see the +":ref:`overview-manual/overview-manual-concepts:openembedded build system concepts`" +section. + +.. _usingpoky-components-bitbake: + +BitBake +------- + +BitBake is the tool at the heart of the :term:`OpenEmbedded Build System` +and is responsible +for parsing the :term:`Metadata`, generating +a list of tasks from it, and then executing those tasks. + +This section briefly introduces BitBake. 
If you want more information on +BitBake, see the :doc:`BitBake User Manual `. + +To see a list of the options BitBake supports, use either of the +following commands: +:: + + $ bitbake -h + $ bitbake --help + +The most common usage for BitBake is ``bitbake recipename``, where +``recipename`` is the name of the recipe you want to build (referred +to as the "target"). The target often equates to the first part of a +recipe's filename (e.g. "foo" for a recipe named ``foo_1.3.0-r0.bb``). +So, to process the ``matchbox-desktop_1.2.3.bb`` recipe file, you might +type the following: +:: + + $ bitbake matchbox-desktop + +Several different +versions of ``matchbox-desktop`` might exist. BitBake chooses the one +selected by the distribution configuration. You can get more details +about how BitBake chooses between different target versions and +providers in the +":ref:`Preferences `" section +of the BitBake User Manual. + +BitBake also tries to execute any dependent tasks first. So for example, +before building ``matchbox-desktop``, BitBake would build a cross +compiler and ``glibc`` if they had not already been built. + +A useful BitBake option to consider is the ``-k`` or ``--continue`` +option. This option instructs BitBake to try and continue processing the +job as long as possible even after encountering an error. When an error +occurs, the target that failed and those that depend on it cannot be +remade. However, when you use this option other dependencies can still +be processed. + +.. _overview-components-recipes: + +Recipes +------- + +Files that have the ``.bb`` suffix are "recipes" files. In general, a +recipe contains information about a single piece of software. This +information includes the location from which to download the unaltered +source, any source patches to be applied to that source (if needed), +which special configuration options to apply, how to compile the source +files, and how to package the compiled output. + +The term "package" is sometimes used to refer to recipes. However, since +the word "package" is used for the packaged output from the OpenEmbedded +build system (i.e. ``.ipk`` or ``.deb`` files), this document avoids +using the term "package" when referring to recipes. + +.. _overview-components-classes: + +Classes +------- + +Class files (``.bbclass``) contain information that is useful to share +between recipes files. An example is the +:ref:`autotools ` class, +which contains common settings for any application that Autotools uses. +The ":ref:`ref-manual/ref-classes:Classes`" chapter in the +Yocto Project Reference Manual provides details about classes and how to +use them. + +.. _overview-components-configurations: + +Configurations +-------------- + +The configuration files (``.conf``) define various configuration +variables that govern the OpenEmbedded build process. These files fall +into several areas that define machine configuration options, +distribution configuration options, compiler tuning options, general +common configuration options, and user configuration options in +``conf/local.conf``, which is found in the :term:`Build Directory`. + + +.. _overview-layers: + +Layers +====== + +Layers are repositories that contain related metadata (i.e. sets of +instructions) that tell the OpenEmbedded build system how to build a +target. Yocto Project's `layer model <#the-yocto-project-layer-model>`__ +facilitates collaboration, sharing, customization, and reuse within the +Yocto Project development environment. 
Layers logically separate +information for your project. For example, you can use a layer to hold +all the configurations for a particular piece of hardware. Isolating +hardware-specific configurations allows you to share other metadata by +using a different layer where that metadata might be common across +several pieces of hardware. + +Many layers exist that work in the Yocto Project development +environment. The `Yocto Project Curated Layer +Index `__ +and `OpenEmbedded Layer +Index `__ +both contain layers from which you can use or leverage. + +By convention, layers in the Yocto Project follow a specific form. +Conforming to a known structure allows BitBake to make assumptions +during builds on where to find types of metadata. You can find +procedures and learn about tools (i.e. ``bitbake-layers``) for creating +layers suitable for the Yocto Project in the +":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section of the Yocto Project Development Tasks Manual. + +.. _openembedded-build-system-build-concepts: + +OpenEmbedded Build System Concepts +================================== + +This section takes a more detailed look inside the build process used by +the :term:`OpenEmbedded Build System`, +which is the build +system specific to the Yocto Project. At the heart of the build system +is BitBake, the task executor. + +The following diagram represents the high-level workflow of a build. The +remainder of this section expands on the fundamental input, output, +process, and metadata logical blocks that make up the workflow. + +.. image:: figures/YP-flow-diagram.png + :align: center + +In general, the build's workflow consists of several functional areas: + +- *User Configuration:* metadata you can use to control the build + process. + +- *Metadata Layers:* Various layers that provide software, machine, and + distro metadata. + +- *Source Files:* Upstream releases, local projects, and SCMs. + +- *Build System:* Processes under the control of + :term:`BitBake`. This block expands + on how BitBake fetches source, applies patches, completes + compilation, analyzes output for package generation, creates and + tests packages, generates images, and generates cross-development + tools. + +- *Package Feeds:* Directories containing output packages (RPM, DEB or + IPK), which are subsequently used in the construction of an image or + Software Development Kit (SDK), produced by the build system. These + feeds can also be copied and shared using a web server or other means + to facilitate extending or updating existing images on devices at + runtime if runtime package management is enabled. + +- *Images:* Images produced by the workflow. + +- *Application Development SDK:* Cross-development tools that are + produced along with an image or separately with BitBake. + +User Configuration +------------------ + +User configuration helps define the build. Through user configuration, +you can tell BitBake the target architecture for which you are building +the image, where to store downloaded source, and other build properties. + +The following figure shows an expanded representation of the "User +Configuration" box of the `general workflow +figure <#general-workflow-figure>`__: + +.. image:: figures/user-configuration.png + :align: center + +BitBake needs some basic configuration files in order to complete a +build. These files are ``*.conf`` files. The minimally necessary ones +reside as example files in the ``build/conf`` directory of the +:term:`Source Directory`. 
For simplicity, +this section refers to the Source Directory as the "Poky Directory." + +When you clone the :term:`Poky` Git repository +or you download and unpack a Yocto Project release, you can set up the +Source Directory to be named anything you want. For this discussion, the +cloned repository uses the default name ``poky``. + +.. note:: + + The Poky repository is primarily an aggregation of existing + repositories. It is not a canonical upstream source. + +The ``meta-poky`` layer inside Poky contains a ``conf`` directory that +has example configuration files. These example files are used as a basis +for creating actual configuration files when you source +:ref:`structure-core-script`, which is the +build environment script. + +Sourcing the build environment script creates a +:term:`Build Directory` if one does not +already exist. BitBake uses the Build Directory for all its work during +builds. The Build Directory has a ``conf`` directory that contains +default versions of your ``local.conf`` and ``bblayers.conf`` +configuration files. These default configuration files are created only +if versions do not already exist in the Build Directory at the time you +source the build environment setup script. + +Because the Poky repository is fundamentally an aggregation of existing +repositories, some users might be familiar with running the +:ref:`structure-core-script` script in the context of separate +:term:`OpenEmbedded-Core (OE-Core)` and BitBake +repositories rather than a single Poky repository. This discussion +assumes the script is executed from within a cloned or unpacked version +of Poky. + +Depending on where the script is sourced, different sub-scripts are +called to set up the Build Directory (Yocto or OpenEmbedded). +Specifically, the script ``scripts/oe-setup-builddir`` inside the poky +directory sets up the Build Directory and seeds the directory (if +necessary) with configuration files appropriate for the Yocto Project +development environment. + +.. note:: + + The + scripts/oe-setup-builddir + script uses the + ``$TEMPLATECONF`` + variable to determine which sample configuration files to locate. + +The ``local.conf`` file provides many basic variables that define a +build environment. Here is a list of a few. To see the default +configurations in a ``local.conf`` file created by the build environment +script, see the +:yocto_git:`local.conf.sample ` +in the ``meta-poky`` layer: + +- *Target Machine Selection:* Controlled by the + :term:`MACHINE` variable. + +- *Download Directory:* Controlled by the + :term:`DL_DIR` variable. + +- *Shared State Directory:* Controlled by the + :term:`SSTATE_DIR` variable. + +- *Build Output:* Controlled by the + :term:`TMPDIR` variable. + +- *Distribution Policy:* Controlled by the + :term:`DISTRO` variable. + +- *Packaging Format:* Controlled by the + :term:`PACKAGE_CLASSES` + variable. + +- *SDK Target Architecture:* Controlled by the + :term:`SDKMACHINE` variable. + +- *Extra Image Packages:* Controlled by the + :term:`EXTRA_IMAGE_FEATURES` + variable. + +.. note:: + + Configurations set in the + conf/local.conf + file can also be set in the + conf/site.conf + and + conf/auto.conf + configuration files. + +The ``bblayers.conf`` file tells BitBake what layers you want considered +during the build. By default, the layers listed in this file include +layers minimally needed by the build system. However, you must manually +add any custom layers you have created. 
You can find more information on +working with the ``bblayers.conf`` file in the +":ref:`dev-manual/dev-manual-common-tasks:enabling your layer`" +section in the Yocto Project Development Tasks Manual. + +The files ``site.conf`` and ``auto.conf`` are not created by the +environment initialization script. If you want the ``site.conf`` file, +you need to create that yourself. The ``auto.conf`` file is typically +created by an autobuilder: + +- *site.conf:* You can use the ``conf/site.conf`` configuration + file to configure multiple build directories. For example, suppose + you had several build environments and they shared some common + features. You can set these default build properties here. A good + example is perhaps the packaging format to use through the + :term:`PACKAGE_CLASSES` + variable. + + One useful scenario for using the ``conf/site.conf`` file is to + extend your :term:`BBPATH` variable + to include the path to a ``conf/site.conf``. Then, when BitBake looks + for Metadata using ``BBPATH``, it finds the ``conf/site.conf`` file + and applies your common configurations found in the file. To override + configurations in a particular build directory, alter the similar + configurations within that build directory's ``conf/local.conf`` + file. + +- *auto.conf:* The file is usually created and written to by an + autobuilder. The settings put into the file are typically the same as + you would find in the ``conf/local.conf`` or the ``conf/site.conf`` + files. + +You can edit all configuration files to further define any particular +build environment. This process is represented by the "User +Configuration Edits" box in the figure. + +When you launch your build with the ``bitbake target`` command, BitBake +sorts out the configurations to ultimately define your build +environment. It is important to understand that the +:term:`OpenEmbedded Build System` reads the +configuration files in a specific order: ``site.conf``, ``auto.conf``, +and ``local.conf``. And, the build system applies the normal assignment +statement rules as described in the +":doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata`" chapter +of the BitBake User Manual. Because the files are parsed in a specific +order, variable assignments for the same variable could be affected. For +example, if the ``auto.conf`` file and the ``local.conf`` set variable1 +to different values, because the build system parses ``local.conf`` +after ``auto.conf``, variable1 is assigned the value from the +``local.conf`` file. + +Metadata, Machine Configuration, and Policy Configuration +--------------------------------------------------------- + +The previous section described the user configurations that define +BitBake's global behavior. This section takes a closer look at the +layers the build system uses to further control the build. These layers +provide Metadata for the software, machine, and policies. + +In general, three types of layer input exists. You can see them below +the "User Configuration" box in the `general workflow +figure <#general-workflow-figure>`__: + +- *Metadata (.bb + Patches):* Software layers containing + user-supplied recipe files, patches, and append files. A good example + of a software layer might be the + `meta-qt5 layer `__ from + the `OpenEmbedded Layer + Index `__. + This layer is for version 5.0 of the popular + `Qt `__ cross-platform application + development framework for desktop, embedded and mobile. + +- *Machine BSP Configuration:* Board Support Package (BSP) layers (i.e. 
+ "BSP Layer" in the following figure) providing machine-specific + configurations. This type of information is specific to a particular + target architecture. A good example of a BSP layer from the `Poky + Reference Distribution <#gs-reference-distribution-poky>`__ is the + :yocto_git:`meta-yocto-bsp ` + layer. + +- *Policy Configuration:* Distribution Layers (i.e. "Distro Layer" in + the following figure) providing top-level or general policies for the + images or SDKs being built for a particular distribution. For + example, in the Poky Reference Distribution the distro layer is the + :yocto_git:`meta-poky ` + layer. Within the distro layer is a ``conf/distro`` directory that + contains distro configuration files (e.g. + :yocto_git:`poky.conf ` + that contain many policy configurations for the Poky distribution. + +The following figure shows an expanded representation of these three +layers from the `general workflow figure <#general-workflow-figure>`__: + +.. image:: figures/layer-input.png + :align: center + +In general, all layers have a similar structure. They all contain a +licensing file (e.g. ``COPYING.MIT``) if the layer is to be distributed, +a ``README`` file as good practice and especially if the layer is to be +distributed, a configuration directory, and recipe directories. You can +learn about the general structure for layers used with the Yocto Project +in the +":ref:`dev-manual/dev-manual-common-tasks:creating your own layer`" +section in the +Yocto Project Development Tasks Manual. For a general discussion on +layers and the many layers from which you can draw, see the +"`Layers <#overview-layers>`__" and "`The Yocto Project Layer +Model <#the-yocto-project-layer-model>`__" sections both earlier in this +manual. + +If you explored the previous links, you discovered some areas where many +layers that work with the Yocto Project exist. The `Source +Repositories `__ also shows layers +categorized under "Yocto Metadata Layers." + +.. note:: + + Layers exist in the Yocto Project Source Repositories that cannot be + found in the OpenEmbedded Layer Index. These layers are either + deprecated or experimental in nature. + +BitBake uses the ``conf/bblayers.conf`` file, which is part of the user +configuration, to find what layers it should be using as part of the +build. + +Distro Layer +~~~~~~~~~~~~ + +The distribution layer provides policy configurations for your +distribution. Best practices dictate that you isolate these types of +configurations into their own layer. Settings you provide in +``conf/distro/distro.conf`` override similar settings that BitBake finds +in your ``conf/local.conf`` file in the Build Directory. + +The following list provides some explanation and references for what you +typically find in the distribution layer: + +- *classes:* Class files (``.bbclass``) hold common functionality that + can be shared among recipes in the distribution. When your recipes + inherit a class, they take on the settings and functions for that + class. You can read more about class files in the + ":ref:`ref-manual/ref-classes:Classes`" chapter of the Yocto + Reference Manual. + +- *conf:* This area holds configuration files for the layer + (``conf/layer.conf``), the distribution + (``conf/distro/distro.conf``), and any distribution-wide include + files. + +- *recipes-*:* Recipes and append files that affect common + functionality across the distribution. 
This area could include + recipes and append files to add distribution-specific configuration, + initialization scripts, custom image recipes, and so forth. Examples + of ``recipes-*`` directories are ``recipes-core`` and + ``recipes-extra``. Hierarchy and contents within a ``recipes-*`` + directory can vary. Generally, these directories contain recipe files + (``*.bb``), recipe append files (``*.bbappend``), directories that + are distro-specific for configuration files, and so forth. + +BSP Layer +~~~~~~~~~ + +The BSP Layer provides machine configurations that target specific +hardware. Everything in this layer is specific to the machine for which +you are building the image or the SDK. A common structure or form is +defined for BSP layers. You can learn more about this structure in the +:doc:`../bsp-guide/bsp-guide`. + +.. note:: + + In order for a BSP layer to be considered compliant with the Yocto + Project, it must meet some structural requirements. + +The BSP Layer's configuration directory contains configuration files for +the machine (``conf/machine/machine.conf``) and, of course, the layer +(``conf/layer.conf``). + +The remainder of the layer is dedicated to specific recipes by function: +``recipes-bsp``, ``recipes-core``, ``recipes-graphics``, +``recipes-kernel``, and so forth. Metadata can exist for multiple +formfactors, graphics support systems, and so forth. + +.. note:: + + While the figure shows several + recipes-\* + directories, not all these directories appear in all BSP layers. + +Software Layer +~~~~~~~~~~~~~~ + +The software layer provides the Metadata for additional software +packages used during the build. This layer does not include Metadata +that is specific to the distribution or the machine, which are found in +their respective layers. + +This layer contains any recipes, append files, and patches, that your +project needs. + +.. _sources-dev-environment: + +Sources +------- + +In order for the OpenEmbedded build system to create an image or any +target, it must be able to access source files. The `general workflow +figure <#general-workflow-figure>`__ represents source files using the +"Upstream Project Releases", "Local Projects", and "SCMs (optional)" +boxes. The figure represents mirrors, which also play a role in locating +source files, with the "Source Materials" box. + +The method by which source files are ultimately organized is a function +of the project. For example, for released software, projects tend to use +tarballs or other archived files that can capture the state of a release +guaranteeing that it is statically represented. On the other hand, for a +project that is more dynamic or experimental in nature, a project might +keep source files in a repository controlled by a Source Control Manager +(SCM) such as Git. Pulling source from a repository allows you to +control the point in the repository (the revision) from which you want +to build software. Finally, a combination of the two might exist, which +would give the consumer a choice when deciding where to get source +files. + +BitBake uses the :term:`SRC_URI` +variable to point to source files regardless of their location. Each +recipe must have a ``SRC_URI`` variable that points to the source. + +Another area that plays a significant role in where source files come +from is pointed to by the +:term:`DL_DIR` variable. This area is +a cache that can hold previously downloaded source. 
You can also +instruct the OpenEmbedded build system to create tarballs from Git +repositories, which is not the default behavior, and store them in the +``DL_DIR`` by using the +:term:`BB_GENERATE_MIRROR_TARBALLS` +variable. + +Judicious use of a ``DL_DIR`` directory can save the build system a trip +across the Internet when looking for files. A good method for using a +download directory is to have ``DL_DIR`` point to an area outside of +your Build Directory. Doing so allows you to safely delete the Build +Directory if needed without fear of removing any downloaded source file. + +The remainder of this section provides a deeper look into the source +files and the mirrors. Here is a more detailed look at the source file +area of the `general workflow figure <#general-workflow-figure>`__: + +.. image:: figures/source-input.png + :align: center + +Upstream Project Releases +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Upstream project releases exist anywhere in the form of an archived file +(e.g. tarball or zip file). These files correspond to individual +recipes. For example, the figure uses specific releases each for +BusyBox, Qt, and Dbus. An archive file can be for any released product +that can be built using a recipe. + +Local Projects +~~~~~~~~~~~~~~ + +Local projects are custom bits of software the user provides. These bits +reside somewhere local to a project - perhaps a directory into which the +user checks in items (e.g. a local directory containing a development +source tree used by the group). + +The canonical method through which to include a local project is to use +the :ref:`externalsrc ` +class to include that local project. You use either the ``local.conf`` +or a recipe's append file to override or set the recipe to point to the +local directory on your disk to pull in the whole source tree. + +.. _scms: + +Source Control Managers (Optional) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Another place from which the build system can get source files is with +:ref:`fetchers ` employing various Source +Control Managers (SCMs) such as Git or Subversion. In such cases, a +repository is cloned or checked out. The +:ref:`ref-tasks-fetch` task inside +BitBake uses the :term:`SRC_URI` +variable and the argument's prefix to determine the correct fetcher +module. + +.. note:: + + For information on how to have the OpenEmbedded build system generate + tarballs for Git repositories and place them in the + DL_DIR + directory, see the :term:`BB_GENERATE_MIRROR_TARBALLS` + variable in the Yocto Project Reference Manual. + +When fetching a repository, BitBake uses the +:term:`SRCREV` variable to determine +the specific revision from which to build. + +Source Mirror(s) +~~~~~~~~~~~~~~~~ + +Two kinds of mirrors exist: pre-mirrors and regular mirrors. The +:term:`PREMIRRORS` and +:term:`MIRRORS` variables point to +these, respectively. BitBake checks pre-mirrors before looking upstream +for any source files. Pre-mirrors are appropriate when you have a shared +directory that is not a directory defined by the +:term:`DL_DIR` variable. A Pre-mirror +typically points to a shared directory that is local to your +organization. + +Regular mirrors can be any site across the Internet that is used as an +alternative location for source code should the primary site not be +functioning for some reason or another. + +.. 
_package-feeds-dev-environment: + +Package Feeds +------------- + +When the OpenEmbedded build system generates an image or an SDK, it gets +the packages from a package feed area located in the +:term:`Build Directory`. The `general +workflow figure <#general-workflow-figure>`__ shows this package feeds +area in the upper-right corner. + +This section looks a little closer into the package feeds area used by +the build system. Here is a more detailed look at the area: + +.. image:: figures/package-feeds.png + :align: center + +Package feeds are an intermediary step in the build process. The +OpenEmbedded build system provides classes to generate different package +types, and you specify which classes to enable through the +:term:`PACKAGE_CLASSES` +variable. Before placing the packages into package feeds, the build +process validates them with generated output quality assurance checks +through the :ref:`insane ` +class. + +The package feed area resides in the Build Directory. The directory the +build system uses to temporarily store packages is determined by a +combination of variables and the particular package manager in use. See +the "Package Feeds" box in the illustration and note the information to +the right of that area. In particular, the following defines where +package files are kept: + +- :term:`DEPLOY_DIR`: Defined as + ``tmp/deploy`` in the Build Directory. + +- ``DEPLOY_DIR_*``: Depending on the package manager used, the package + type sub-folder. Given RPM, IPK, or DEB packaging and tarball + creation, the + :term:`DEPLOY_DIR_RPM`, + :term:`DEPLOY_DIR_IPK`, + :term:`DEPLOY_DIR_DEB`, or + :term:`DEPLOY_DIR_TAR`, + variables are used, respectively. + +- :term:`PACKAGE_ARCH`: Defines + architecture-specific sub-folders. For example, packages could exist + for the i586 or qemux86 architectures. + +BitBake uses the +:ref:`do_package_write_* ` +tasks to generate packages and place them into the package holding area +(e.g. ``do_package_write_ipk`` for IPK packages). See the +":ref:`ref-tasks-package_write_deb`", +":ref:`ref-tasks-package_write_ipk`", +":ref:`ref-tasks-package_write_rpm`", +and +":ref:`ref-tasks-package_write_tar`" +sections in the Yocto Project Reference Manual for additional +information. As an example, consider a scenario where an IPK packaging +manager is being used and package architecture support for both i586 and +qemux86 exist. Packages for the i586 architecture are placed in +``build/tmp/deploy/ipk/i586``, while packages for the qemux86 +architecture are placed in ``build/tmp/deploy/ipk/qemux86``. + +.. _bitbake-dev-environment: + +BitBake Tool +------------ + +The OpenEmbedded build system uses +:term:`BitBake` to produce images and +Software Development Kits (SDKs). You can see from the `general workflow +figure <#general-workflow-figure>`__, the BitBake area consists of +several functional areas. This section takes a closer look at each of +those areas. + +.. note:: + + Separate documentation exists for the BitBake tool. See the + BitBake User Manual + for reference material on BitBake. + +.. _source-fetching-dev-environment: + +Source Fetching +~~~~~~~~~~~~~~~ + +The first stages of building a recipe are to fetch and unpack the source +code: + +.. image:: figures/source-fetching.png + :align: center + +The :ref:`ref-tasks-fetch` and +:ref:`ref-tasks-unpack` tasks fetch +the source files and unpack them into the +:term:`Build Directory`. + +.. note:: + + For every local file (e.g. 
+ file:// + ) that is part of a recipe's + SRC_URI + statement, the OpenEmbedded build system takes a checksum of the file + for the recipe and inserts the checksum into the signature for the + do_fetch + task. If any local file has been modified, the + do_fetch + task and all tasks that depend on it are re-executed. + +By default, everything is accomplished in the Build Directory, which has +a defined structure. For additional general information on the Build +Directory, see the ":ref:`structure-core-build`" section in +the Yocto Project Reference Manual. + +Each recipe has an area in the Build Directory where the unpacked source +code resides. The :term:`S` variable points +to this area for a recipe's unpacked source code. The name of that +directory for any given recipe is defined from several different +variables. The preceding figure and the following list describe the +Build Directory's hierarchy: + +- :term:`TMPDIR`: The base directory + where the OpenEmbedded build system performs all its work during the + build. The default base directory is the ``tmp`` directory. + +- :term:`PACKAGE_ARCH`: The + architecture of the built package or packages. Depending on the + eventual destination of the package or packages (i.e. machine + architecture, :term:`Build Host`, SDK, or + specific machine), ``PACKAGE_ARCH`` varies. See the variable's + description for details. + +- :term:`TARGET_OS`: The operating + system of the target device. A typical value would be "linux" (e.g. + "qemux86-poky-linux"). + +- :term:`PN`: The name of the recipe used + to build the package. This variable can have multiple meanings. + However, when used in the context of input files, ``PN`` represents + the name of the recipe. + +- :term:`WORKDIR`: The location + where the OpenEmbedded build system builds a recipe (i.e. does the + work to create the package). + + - :term:`PV`: The version of the + recipe used to build the package. + + - :term:`PR`: The revision of the + recipe used to build the package. + +- :term:`S`: Contains the unpacked source + files for a given recipe. + + - :term:`BPN`: The name of the recipe + used to build the package. The ``BPN`` variable is a version of + the ``PN`` variable but with common prefixes and suffixes removed. + + - :term:`PV`: The version of the + recipe used to build the package. + +.. note:: + + In the previous figure, notice that two sample hierarchies exist: one + based on package architecture (i.e. + PACKAGE_ARCH + ) and one based on a machine (i.e. + MACHINE + ). The underlying structures are identical. The differentiator being + what the OpenEmbedded build system is using as a build target (e.g. + general architecture, a build host, an SDK, or a specific machine). + +.. _patching-dev-environment: + +Patching +~~~~~~~~ + +Once source code is fetched and unpacked, BitBake locates patch files +and applies them to the source files: + +.. image:: figures/patching.png + :align: center + +The :ref:`ref-tasks-patch` task uses a +recipe's :term:`SRC_URI` statements +and the :term:`FILESPATH` variable +to locate applicable patch files. + +Default processing for patch files assumes the files have either +``*.patch`` or ``*.diff`` file types. You can use ``SRC_URI`` parameters +to change the way the build system recognizes patch files. See the +:ref:`ref-tasks-patch` task for more +information. + +BitBake finds and applies multiple patches for a single recipe in the +order in which it locates the patches. 
The ``FILESPATH`` variable +defines the default set of directories that the build system uses to +search for patch files. Once found, patches are applied to the recipe's +source files, which are located in the +:term:`S` directory. + +For more information on how the source directories are created, see the +"`Source Fetching <#source-fetching-dev-environment>`__" section. For +more information on how to create patches and how the build system +processes patches, see the +":ref:`dev-manual/dev-manual-common-tasks:patching code`" +section in the +Yocto Project Development Tasks Manual. You can also see the +":ref:`sdk-manual/sdk-extensible:use \`\`devtool modify\`\` to modify the source of an existing component`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (SDK) manual and the +":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`" +section in the Yocto Project Linux Kernel Development Manual. + +.. _configuration-compilation-and-staging-dev-environment: + +Configuration, Compilation, and Staging +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +After source code is patched, BitBake executes tasks that configure and +compile the source code. Once compilation occurs, the files are copied +to a holding area (staged) in preparation for packaging: + +.. image:: figures/configuration-compile-autoreconf.png + :align: center + +This step in the build process consists of the following tasks: + +- :ref:`ref-tasks-prepare_recipe_sysroot`: + This task sets up the two sysroots in + ``${``\ :term:`WORKDIR`\ ``}`` + (i.e. ``recipe-sysroot`` and ``recipe-sysroot-native``) so that + during the packaging phase the sysroots can contain the contents of + the + :ref:`ref-tasks-populate_sysroot` + tasks of the recipes on which the recipe containing the tasks + depends. A sysroot exists for both the target and for the native + binaries, which run on the host system. + +- *do_configure*: This task configures the source by enabling and + disabling any build-time and configuration options for the software + being built. Configurations can come from the recipe itself as well + as from an inherited class. Additionally, the software itself might + configure itself depending on the target for which it is being built. + + The configurations handled by the + :ref:`ref-tasks-configure` task + are specific to configurations for the source code being built by the + recipe. + + If you are using the + :ref:`autotools ` class, + you can add additional configuration options by using the + :term:`EXTRA_OECONF` or + :term:`PACKAGECONFIG_CONFARGS` + variables. For information on how this variable works within that + class, see the + :ref:`autotools ` class + :yocto_git:`here `. + +- *do_compile*: Once a configuration task has been satisfied, + BitBake compiles the source using the + :ref:`ref-tasks-compile` task. + Compilation occurs in the directory pointed to by the + :term:`B` variable. Realize that the + ``B`` directory is, by default, the same as the + :term:`S` directory. + +- *do_install*: After compilation completes, BitBake executes the + :ref:`ref-tasks-install` task. + This task copies files from the ``B`` directory and places them in a + holding area pointed to by the :term:`D` + variable. Packaging occurs later using files from this holding + directory. + +.. 
_package-splitting-dev-environment: + +Package Splitting +~~~~~~~~~~~~~~~~~ + +After source code is configured, compiled, and staged, the build system +analyzes the results and splits the output into packages: + +.. image:: figures/analysis-for-package-splitting.png + :align: center + +The :ref:`ref-tasks-package` and +:ref:`ref-tasks-packagedata` +tasks combine to analyze the files found in the +:term:`D` directory and split them into +subsets based on available packages and files. Analysis involves the +following as well as other items: splitting out debugging symbols, +looking at shared library dependencies between packages, and looking at +package relationships. + +The ``do_packagedata`` task creates package metadata based on the +analysis such that the build system can generate the final packages. The +:ref:`ref-tasks-populate_sysroot` +task stages (copies) a subset of the files installed by the +:ref:`ref-tasks-install` task into +the appropriate sysroot. Working, staged, and intermediate results of +the analysis and package splitting process use several areas: + +- :term:`PKGD`: The destination + directory (i.e. ``package``) for packages before they are split into + individual packages. + +- :term:`PKGDESTWORK`: A + temporary work area (i.e. ``pkgdata``) used by the ``do_package`` + task to save package metadata. + +- :term:`PKGDEST`: The parent + directory (i.e. ``packages-split``) for packages after they have been + split. + +- :term:`PKGDATA_DIR`: A shared, + global-state directory that holds packaging metadata generated during + the packaging process. The packaging process copies metadata from + ``PKGDESTWORK`` to the ``PKGDATA_DIR`` area where it becomes globally + available. + +- :term:`STAGING_DIR_HOST`: + The path for the sysroot for the system on which a component is built + to run (i.e. ``recipe-sysroot``). + +- :term:`STAGING_DIR_NATIVE`: + The path for the sysroot used when building components for the build + host (i.e. ``recipe-sysroot-native``). + +- :term:`STAGING_DIR_TARGET`: + The path for the sysroot used when a component that is built to + execute on a system and it generates code for yet another machine + (e.g. cross-canadian recipes). + +The :term:`FILES` variable defines the +files that go into each package in +:term:`PACKAGES`. If you want +details on how this is accomplished, you can look at +:yocto_git:`package.bbclass `. + +Depending on the type of packages being created (RPM, DEB, or IPK), the +:ref:`do_package_write_* ` +task creates the actual packages and places them in the Package Feed +area, which is ``${TMPDIR}/deploy``. You can see the "`Package +Feeds <#package-feeds-dev-environment>`__" section for more detail on +that part of the build process. + +.. note:: + + Support for creating feeds directly from the + deploy/\* + directories does not exist. Creating such feeds usually requires some + kind of feed maintenance mechanism that would upload the new packages + into an official package feed (e.g. the Ångström distribution). This + functionality is highly distribution-specific and thus is not + provided out of the box. + +.. _image-generation-dev-environment: + +Image Generation +~~~~~~~~~~~~~~~~ + +Once packages are split and stored in the Package Feeds area, the build +system uses BitBake to generate the root filesystem image: + +.. image:: figures/image-generation.png + :align: center + +The image generation process consists of several stages and depends on +several tasks and variables. 
The +:ref:`ref-tasks-rootfs` task creates +the root filesystem (file and directory structure) for an image. This +task uses several key variables to help create the list of packages to +actually install: + +- :term:`IMAGE_INSTALL`: Lists + out the base set of packages from which to install from the Package + Feeds area. + +- :term:`PACKAGE_EXCLUDE`: + Specifies packages that should not be installed into the image. + +- :term:`IMAGE_FEATURES`: + Specifies features to include in the image. Most of these features + map to additional packages for installation. + +- :term:`PACKAGE_CLASSES`: + Specifies the package backend (e.g. RPM, DEB, or IPK) to use and + consequently helps determine where to locate packages within the + Package Feeds area. + +- :term:`IMAGE_LINGUAS`: + Determines the language(s) for which additional language support + packages are installed. + +- :term:`PACKAGE_INSTALL`: + The final list of packages passed to the package manager for + installation into the image. + +With :term:`IMAGE_ROOTFS` +pointing to the location of the filesystem under construction and the +``PACKAGE_INSTALL`` variable providing the final list of packages to +install, the root file system is created. + +Package installation is under control of the package manager (e.g. +dnf/rpm, opkg, or apt/dpkg) regardless of whether or not package +management is enabled for the target. At the end of the process, if +package management is not enabled for the target, the package manager's +data files are deleted from the root filesystem. As part of the final +stage of package installation, post installation scripts that are part +of the packages are run. Any scripts that fail to run on the build host +are run on the target when the target system is first booted. If you are +using a +:ref:`read-only root filesystem `, +all the post installation scripts must succeed on the build host during +the package installation phase since the root filesystem on the target +is read-only. + +The final stages of the ``do_rootfs`` task handle post processing. Post +processing includes creation of a manifest file and optimizations. + +The manifest file (``.manifest``) resides in the same directory as the +root filesystem image. This file lists out, line-by-line, the installed +packages. The manifest file is useful for the +:ref:`testimage ` class, +for example, to determine whether or not to run specific tests. See the +:term:`IMAGE_MANIFEST` +variable for additional information. + +Optimizing processes that are run across the image include ``mklibs``, +``prelink``, and any other post-processing commands as defined by the +:term:`ROOTFS_POSTPROCESS_COMMAND` +variable. The ``mklibs`` process optimizes the size of the libraries, +while the ``prelink`` process optimizes the dynamic linking of shared +libraries to reduce start up time of executables. + +After the root filesystem is built, processing begins on the image +through the :ref:`ref-tasks-image` +task. The build system runs any pre-processing commands as defined by +the +:term:`IMAGE_PREPROCESS_COMMAND` +variable. This variable specifies a list of functions to call before the +build system creates the final image output files. + +The build system dynamically creates ``do_image_*`` tasks as needed, +based on the image types specified in the +:term:`IMAGE_FSTYPES` variable. +The process turns everything into an image file or a set of image files +and can compress the root filesystem image to reduce the overall size of +the image. 
The formats used for the root filesystem depend on the +``IMAGE_FSTYPES`` variable. Compression depends on whether the formats +support compression. + +As an example, a dynamically created task when creating a particular +image type would take the following form: +:: + + do_image_type + +So, if the type +as specified by the ``IMAGE_FSTYPES`` were ``ext4``, the dynamically +generated task would be as follows: +:: + + do_image_ext4 + +The final task involved in image creation is the +:ref:`do_image_complete ` +task. This task completes the image by applying any image post +processing as defined through the +:term:`IMAGE_POSTPROCESS_COMMAND` +variable. The variable specifies a list of functions to call once the +build system has created the final image output files. + +.. note:: + + The entire image generation process is run under + Pseudo. Running under Pseudo ensures that the files in the root filesystem + have correct ownership. + +.. _sdk-generation-dev-environment: + +SDK Generation +~~~~~~~~~~~~~~ + +The OpenEmbedded build system uses BitBake to generate the Software +Development Kit (SDK) installer scripts for both the standard SDK and +the extensible SDK (eSDK): + +.. image:: figures/sdk-generation.png + :align: center + +.. note:: + + For more information on the cross-development toolchain generation, + see the ":ref:`overview-manual/overview-manual-concepts:cross-development toolchain generation`" + section. For information on advantages gained when building a + cross-development toolchain using the do_populate_sdk task, see the + ":ref:`sdk-manual/sdk-appendix-obtain:building an sdk installer`" section in + the Yocto Project Application Development and the Extensible Software + Development Kit (eSDK) manual. + +Like image generation, the SDK script process consists of several stages +and depends on many variables. The +:ref:`ref-tasks-populate_sdk` +and +:ref:`ref-tasks-populate_sdk_ext` +tasks use these key variables to help create the list of packages to +actually install. For information on the variables listed in the figure, +see the "`Application Development SDK <#sdk-dev-environment>`__" +section. + +The ``do_populate_sdk`` task helps create the standard SDK and handles +two parts: a target part and a host part. The target part is the part +built for the target hardware and includes libraries and headers. The +host part is the part of the SDK that runs on the +:term:`SDKMACHINE`. + +The ``do_populate_sdk_ext`` task helps create the extensible SDK and +handles host and target parts differently than its counter part does for +the standard SDK. For the extensible SDK, the task encapsulates the +build system, which includes everything needed (host and target) for the +SDK. + +Regardless of the type of SDK being constructed, the tasks perform some +cleanup after which a cross-development environment setup script and any +needed configuration files are created. The final output is the +Cross-development toolchain installation script (``.sh`` file), which +includes the environment setup script. + +Stamp Files and the Rerunning of Tasks +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For each task that completes successfully, BitBake writes a stamp file +into the :term:`STAMPS_DIR` +directory. The beginning of the stamp file's filename is determined by +the :term:`STAMP` variable, and the end +of the name consists of the task's name and current `input +checksum <#overview-checksums>`__. + +.. 
note:: + + This naming scheme assumes that + BB_SIGNATURE_HANDLER + is "OEBasicHash", which is almost always the case in current + OpenEmbedded. + +To determine if a task needs to be rerun, BitBake checks if a stamp file +with a matching input checksum exists for the task. If such a stamp file +exists, the task's output is assumed to exist and still be valid. If the +file does not exist, the task is rerun. + +.. note:: + + The stamp mechanism is more general than the shared state (sstate) + cache mechanism described in the "`Setscene Tasks and Shared + State <#setscene-tasks-and-shared-state>`__" section. BitBake avoids + rerunning any task that has a valid stamp file, not just tasks that + can be accelerated through the sstate cache. + + However, you should realize that stamp files only serve as a marker + that some work has been done and that these files do not record task + output. The actual task output would usually be somewhere in + :term:`TMPDIR` (e.g. in some + recipe's :term:`WORKDIR`.) What + the sstate cache mechanism adds is a way to cache task output that + can then be shared between build machines. + +Since ``STAMPS_DIR`` is usually a subdirectory of ``TMPDIR``, removing +``TMPDIR`` will also remove ``STAMPS_DIR``, which means tasks will +properly be rerun to repopulate ``TMPDIR``. + +If you want some task to always be considered "out of date", you can +mark it with the :ref:`nostamp ` +varflag. If some other task depends on such a task, then that task will +also always be considered out of date, which might not be what you want. + +For details on how to view information about a task's signature, see the +":ref:`dev-manual/dev-manual-common-tasks:viewing task variable dependencies`" +section in the Yocto Project Development Tasks Manual. + +Setscene Tasks and Shared State +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The description of tasks so far assumes that BitBake needs to build +everything and no available prebuilt objects exist. BitBake does support +skipping tasks if prebuilt objects are available. These objects are +usually made available in the form of a shared state (sstate) cache. + +.. note:: + + For information on variables affecting sstate, see the + :term:`SSTATE_DIR` + and + :term:`SSTATE_MIRRORS` + variables. + +The idea of a setscene task (i.e ``do_``\ taskname\ ``_setscene``) is a +version of the task where instead of building something, BitBake can +skip to the end result and simply place a set of files into specific +locations as needed. In some cases, it makes sense to have a setscene +task variant (e.g. generating package files in the +:ref:`do_package_write_* ` +task). In other cases, it does not make sense (e.g. a +:ref:`ref-tasks-patch` task or a +:ref:`ref-tasks-unpack` task) since +the work involved would be equal to or greater than the underlying task. + +In the build system, the common tasks that have setscene variants are +:ref:`ref-tasks-package`, +``do_package_write_*``, +:ref:`ref-tasks-deploy`, +:ref:`ref-tasks-packagedata`, and +:ref:`ref-tasks-populate_sysroot`. +Notice that these tasks represent most of the tasks whose output is an +end result. + +The build system has knowledge of the relationship between these tasks +and other preceding tasks. For example, if BitBake runs +``do_populate_sysroot_setscene`` for something, it does not make sense +to run any of the ``do_fetch``, ``do_unpack``, ``do_patch``, +``do_configure``, ``do_compile``, and ``do_install`` tasks. However, if +``do_package`` needs to be run, BitBake needs to run those other tasks. 
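+
+A quick way to watch setscene tasks do this in practice is to build a
+small recipe, delete the Build Directory's ``tmp`` area (which removes
+the stamps and task output but not the sstate cache), and then build
+again. The commands below are only a sketch; they assume they are run
+from an initialized Build Directory with the default ``sstate-cache``
+location and with ``m4`` available as a target:
+::
+
+   $ bitbake m4
+   # Removing TMPDIR also removes STAMPS_DIR; sstate-cache/ is untouched
+   $ rm -rf tmp
+   # The second build restores most output through *_setscene tasks
+   # (e.g. do_populate_sysroot_setscene) instead of rerunning the full tasks
+   $ bitbake m4
+
+Passing the ``--no-setscene`` option to BitBake on the second run forces
+the full tasks to execute again, which makes the difference easy to see.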
+ +It becomes more complicated if everything can come from an sstate cache +because some objects are simply not required at all. For example, you do +not need a compiler or native tools, such as quilt, if nothing exists to +compile or patch. If the ``do_package_write_*`` packages are available +from sstate, BitBake does not need the ``do_package`` task data. + +To handle all these complexities, BitBake runs in two phases. The first +is the "setscene" stage. During this stage, BitBake first checks the +sstate cache for any targets it is planning to build. BitBake does a +fast check to see if the object exists rather than a complete download. +If nothing exists, the second phase, which is the setscene stage, +completes and the main build proceeds. + +If objects are found in the sstate cache, the build system works +backwards from the end targets specified by the user. For example, if an +image is being built, the build system first looks for the packages +needed for that image and the tools needed to construct an image. If +those are available, the compiler is not needed. Thus, the compiler is +not even downloaded. If something was found to be unavailable, or the +download or setscene task fails, the build system then tries to install +dependencies, such as the compiler, from the cache. + +The availability of objects in the sstate cache is handled by the +function specified by the +:term:`bitbake:BB_HASHCHECK_FUNCTION` +variable and returns a list of available objects. The function specified +by the +:term:`bitbake:BB_SETSCENE_DEPVALID` +variable is the function that determines whether a given dependency +needs to be followed, and whether for any given relationship the +function needs to be passed. The function returns a True or False value. + +.. _images-dev-environment: + +Images +------ + +The images produced by the build system are compressed forms of the root +filesystem and are ready to boot on a target device. You can see from +the `general workflow figure <#general-workflow-figure>`__ that BitBake +output, in part, consists of images. This section takes a closer look at +this output: + +.. image:: figures/images.png + :align: center + +.. note:: + + For a list of example images that the Yocto Project provides, see the + ":doc:`../ref-manual/ref-images`" chapter in the Yocto Project Reference + Manual. + +The build process writes images out to the :term:`Build Directory` +inside the +``tmp/deploy/images/machine/`` folder as shown in the figure. This +folder contains any files expected to be loaded on the target device. +The :term:`DEPLOY_DIR` variable +points to the ``deploy`` directory, while the +:term:`DEPLOY_DIR_IMAGE` +variable points to the appropriate directory containing images for the +current configuration. + +- kernel-image: A kernel binary file. The + :term:`KERNEL_IMAGETYPE` + variable determines the naming scheme for the kernel image file. + Depending on this variable, the file could begin with a variety of + naming strings. The ``deploy/images/``\ machine directory can contain + multiple image files for the machine. + +- root-filesystem-image: Root filesystems for the target device (e.g. + ``*.ext3`` or ``*.bz2`` files). The + :term:`IMAGE_FSTYPES` + variable determines the root filesystem image type. The + ``deploy/images/``\ machine directory can contain multiple root + filesystems for the machine. + +- kernel-modules: Tarballs that contain all the modules built for the + kernel. 
Kernel module tarballs exist for legacy purposes and can be + suppressed by setting the + :term:`MODULE_TARBALL_DEPLOY` + variable to "0". The ``deploy/images/``\ machine directory can + contain multiple kernel module tarballs for the machine. + +- bootloaders: If applicable to the target machine, bootloaders + supporting the image. The ``deploy/images/``\ machine directory can + contain multiple bootloaders for the machine. + +- symlinks: The ``deploy/images/``\ machine folder contains a symbolic + link that points to the most recently built file for each machine. + These links might be useful for external scripts that need to obtain + the latest version of each file. + +.. _sdk-dev-environment: + +Application Development SDK +--------------------------- + +In the `general workflow figure <#general-workflow-figure>`__, the +output labeled "Application Development SDK" represents an SDK. The SDK +generation process differs depending on whether you build an extensible +SDK (e.g. ``bitbake -c populate_sdk_ext`` imagename) or a standard SDK +(e.g. ``bitbake -c populate_sdk`` imagename). This section takes a +closer look at this output: + +.. image:: figures/sdk.png + :align: center + +The specific form of this output is a set of files that includes a +self-extracting SDK installer (``*.sh``), host and target manifest +files, and files used for SDK testing. When the SDK installer file is +run, it installs the SDK. The SDK consists of a cross-development +toolchain, a set of libraries and headers, and an SDK environment setup +script. Running this installer essentially sets up your +cross-development environment. You can think of the cross-toolchain as +the "host" part because it runs on the SDK machine. You can think of the +libraries and headers as the "target" part because they are built for +the target hardware. The environment setup script is added so that you +can initialize the environment before using the tools. + +.. note:: + + - The Yocto Project supports several methods by which you can set up + this cross-development environment. These methods include + downloading pre-built SDK installers or building and installing + your own SDK installer. + + - For background information on cross-development toolchains in the + Yocto Project development environment, see the "`Cross-Development + Toolchain Generation <#cross-development-toolchain-generation>`__" + section. + + - For information on setting up a cross-development environment, see + the :doc:`../sdk-manual/sdk-manual` manual. + +All the output files for an SDK are written to the ``deploy/sdk`` folder +inside the :term:`Build Directory` as +shown in the previous figure. Depending on the type of SDK, several +variables exist that help configure these files. The following list +shows the variables associated with an extensible SDK: + +- :term:`DEPLOY_DIR`: Points to + the ``deploy`` directory. + +- :term:`SDK_EXT_TYPE`: + Controls whether or not shared state artifacts are copied into the + extensible SDK. By default, all required shared state artifacts are + copied into the SDK. + +- :term:`SDK_INCLUDE_PKGDATA`: + Specifies whether or not packagedata is included in the extensible + SDK for all recipes in the "world" target. + +- :term:`SDK_INCLUDE_TOOLCHAIN`: + Specifies whether or not the toolchain is included when building the + extensible SDK. + +- :term:`SDK_LOCAL_CONF_WHITELIST`: + A list of variables allowed through from the build system + configuration into the extensible SDK configuration. 
+
+- :term:`SDK_LOCAL_CONF_BLACKLIST`:
+ A list of variables not allowed through from the build system
+ configuration into the extensible SDK configuration.
+
+- :term:`SDK_INHERIT_BLACKLIST`:
+ A list of classes to remove from the
+ :term:`INHERIT` value globally
+ within the extensible SDK configuration.
+
+This next list shows the variables associated with a standard SDK:
+
+- :term:`DEPLOY_DIR`: Points to
+ the ``deploy`` directory.
+
+- :term:`SDKMACHINE`: Specifies
+ the architecture of the machine on which the cross-development tools
+ are run to create packages for the target hardware.
+
+- :term:`SDKIMAGE_FEATURES`:
+ Lists the features to include in the "target" part of the SDK.
+
+- :term:`TOOLCHAIN_HOST_TASK`:
+ Lists packages that make up the host part of the SDK (i.e. the part
+ that runs on the ``SDKMACHINE``). When you use
+ ``bitbake -c populate_sdk imagename`` to create the SDK, a set of
+ default packages apply. This variable allows you to add more
+ packages.
+
+- :term:`TOOLCHAIN_TARGET_TASK`:
+ Lists packages that make up the target part of the SDK (i.e. the part
+ built for the target hardware).
+
+- :term:`SDKPATH`: Defines the
+ default SDK installation path offered by the installation script.
+
+- :term:`SDK_HOST_MANIFEST`:
+ Lists all the installed packages that make up the host part of the
+ SDK. This variable also plays a minor role for extensible SDK
+ development. However, it is mainly used for the standard SDK.
+
+- :term:`SDK_TARGET_MANIFEST`:
+ Lists all the installed packages that make up the target part of the
+ SDK. This variable also plays a minor role for extensible SDK
+ development. However, it is mainly used for the standard SDK.
+
+Cross-Development Toolchain Generation
+======================================
+
+The Yocto Project does most of the work for you when it comes to
+creating :ref:`sdk-manual/sdk-intro:the cross-development toolchain`. This
+section provides some technical background on how cross-development
+toolchains are created and used. For more information on toolchains, you
+can also see the :doc:`../sdk-manual/sdk-manual` manual.
+
+In the Yocto Project development environment, cross-development
+toolchains are used to build images and applications that run on the
+target hardware. With just a few commands, the OpenEmbedded build system
+creates these necessary toolchains for you.
+
+The following figure shows a high-level build environment regarding
+toolchain construction and use.
+
+.. image:: figures/cross-development-toolchains.png
+ :align: center
+
+Most of the work occurs on the Build Host. This is the machine used to
+build images and generally work within the Yocto Project
+environment. When you run
+:term:`BitBake` to create an image, the
+OpenEmbedded build system uses the host ``gcc`` compiler to bootstrap a
+cross-compiler named ``gcc-cross``. The ``gcc-cross`` compiler is what
+BitBake uses to compile source files when creating the target image. You
+can think of ``gcc-cross`` simply as an automatically generated
+cross-compiler that is used internally within BitBake only.
+
+.. note::
+
+ The extensible SDK does not use
+ gcc-cross-canadian
+ since this SDK ships a copy of the OpenEmbedded build system and the
+ sysroot within it contains
+ gcc-cross
+ .
+
+The chain of events that occurs when ``gcc-cross`` is bootstrapped is as
+follows:
+::
+
+ gcc -> binutils-cross -> gcc-cross-initial -> linux-libc-headers -> glibc-initial -> glibc -> gcc-cross -> gcc-runtime
+
+- ``gcc``: The build host's GNU Compiler Collection (GCC).
+
+- ``binutils-cross``: The bare minimum binary utilities needed in order
+ to run the ``gcc-cross-initial`` phase of the bootstrap operation.
+
+- ``gcc-cross-initial``: An early stage of the bootstrap process for
+ creating the cross-compiler. This stage builds enough of the
+ ``gcc-cross``, the C library, and other pieces needed to finish
+ building the final cross-compiler in later stages. This tool is a
+ "native" package (i.e. it is designed to run on the build host).
+
+- ``linux-libc-headers``: Headers needed for the cross-compiler.
+
+- ``glibc-initial``: An initial version of the Embedded GNU C Library
+ (GLIBC) needed to bootstrap ``glibc``.
+
+- ``glibc``: The GNU C Library.
+
+- ``gcc-cross``: The final stage of the bootstrap process for the
+ cross-compiler. This stage results in the actual cross-compiler that
+ BitBake uses when it builds an image for a targeted device.
+
+ .. note::
+
+ If you are replacing this cross compiler toolchain with a custom
+ version, you must replace
+ gcc-cross
+ .
+
+ This tool is also a "native" package (i.e. it is designed to run on
+ the build host).
+
+- ``gcc-runtime``: Runtime libraries resulting from the toolchain
+ bootstrapping process. This tool produces a binary that consists of
+ the runtime libraries needed for the targeted device.
+
+You can use the OpenEmbedded build system to build an installer for the
+relocatable SDK used to develop applications. When you run the
+installer, it installs the toolchain, which contains the development
+tools (e.g., ``gcc-cross-canadian``, ``binutils-cross-canadian``, and
+other ``nativesdk-*`` tools), which are tools native to the SDK (i.e.
+native to :term:`SDK_ARCH`) that you
+need to cross-compile and test your software. The figure shows the
+commands you use to easily build out this toolchain. This
+cross-development toolchain is built to execute on the
+:term:`SDKMACHINE`, which might or
+might not be the same machine as the Build Host.
+
+.. note::
+
+ If your target architecture is supported by the Yocto Project, you
+ can take advantage of pre-built images that ship with the Yocto
+ Project and already contain cross-development toolchain installers.
+
+Here is the bootstrap process for the relocatable toolchain:
+::
+
+ gcc -> binutils-crosssdk -> gcc-crosssdk-initial -> linux-libc-headers -> glibc-initial -> nativesdk-glibc -> gcc-crosssdk -> gcc-cross-canadian
+
+- ``gcc``: The build host's GNU Compiler Collection (GCC).
+
+- ``binutils-crosssdk``: The bare minimum binary utilities needed in
+ order to run the ``gcc-crosssdk-initial`` phase of the bootstrap
+ operation.
+
+- ``gcc-crosssdk-initial``: An early stage of the bootstrap process for
+ creating the cross-compiler. This stage builds enough of the
+ ``gcc-crosssdk`` and supporting pieces so that the final stage of the
+ bootstrap process can produce the finished cross-compiler. This tool
+ is a "native" binary that runs on the build host.
+
+- ``linux-libc-headers``: Headers needed for the cross-compiler.
+
+- ``glibc-initial``: An initial version of the Embedded GLIBC needed to
+ bootstrap ``nativesdk-glibc``.
+
+- ``nativesdk-glibc``: The Embedded GLIBC needed to bootstrap the
+ ``gcc-crosssdk``.
+
+- ``gcc-crosssdk``: The final stage of the bootstrap process for the
+ relocatable cross-compiler. The ``gcc-crosssdk`` is a transitory
+ compiler and never leaves the build host. Its purpose is to help in
+ the bootstrap process to create the eventual ``gcc-cross-canadian``
+ compiler, which is relocatable. This tool is also a "native" package
+ (i.e. it is designed to run on the build host).
+
+- ``gcc-cross-canadian``: The final relocatable cross-compiler. When
+ run on the :term:`SDKMACHINE`,
+ this tool produces executable code that runs on the target device.
+ Only one cross-canadian compiler is produced per architecture since
+ they can be targeted at different processor optimizations using
+ configurations passed to the compiler through the compile commands.
+ This circumvents the need for multiple compilers and thus reduces the
+ size of the toolchains.
+
+.. note::
+
+ For information on advantages gained when building a
+ cross-development toolchain installer, see the
+ ":ref:`sdk-manual/sdk-appendix-obtain:building an sdk installer`" appendix
+ in the Yocto Project Application Development and the
+ Extensible Software Development Kit (eSDK) manual.
+
+Shared State Cache
+==================
+
+By design, the OpenEmbedded build system builds everything from scratch
+unless :term:`BitBake` can determine
+that parts do not need to be rebuilt. Fundamentally, building from
+scratch is attractive as it means all parts are built fresh and no
+possibility of stale data exists that can cause problems. When
+developers hit problems, they typically default back to building from
+scratch so they have a known state from the start.
+
+Building an image from scratch is both an advantage and a disadvantage
+to the process. As mentioned in the previous paragraph, building from
+scratch ensures that everything is current and starts from a known
+state. However, building from scratch also takes much longer as it
+generally means rebuilding things that do not necessarily need to be
+rebuilt.
+
+The Yocto Project implements shared state code that supports incremental
+builds. The implementation of the shared state code answers the
+following questions that were fundamental roadblocks within the
+OpenEmbedded incremental build support system:
+
+- What pieces of the system have changed and what pieces have not
+ changed?
+
+- How are changed pieces of software removed and replaced?
+
+- How are pre-built components that do not need to be rebuilt from
+ scratch used when they are available?
+
+For the first question, the build system detects changes in the "inputs"
+to a given task by creating a checksum (or signature) of the task's
+inputs. If the checksum changes, the system assumes the inputs have
+changed and the task needs to be rerun. For the second question, the
+shared state (sstate) code tracks which tasks add which output to the
+build process. This means the output from a given task can be removed,
+upgraded or otherwise manipulated. The third question is partly
+addressed by the solution for the second question assuming the build
+system can fetch the sstate objects from remote locations and install
+them if they are deemed to be valid.
+
+.. note::
+
+ - The build system does not maintain
+ :term:`PR` information as part of
+ the shared state packages. Consequently, considerations exist that
+ affect maintaining shared state feeds.
For information on how the + build system works with packages and can track incrementing ``PR`` + information, see the ":ref:`dev-manual/dev-manual-common-tasks:automatically incrementing a package version number`" + section in the Yocto Project Development Tasks Manual. + + - The code in the build system that supports incremental builds is + not simple code. For techniques that help you work around issues + related to shared state code, see the + ":ref:`dev-manual/dev-manual-common-tasks:viewing metadata used to create the input signature of a shared state task`" + and + ":ref:`dev-manual/dev-manual-common-tasks:invalidating shared state to force a task to run`" + sections both in the Yocto Project Development Tasks Manual. + +The rest of this section goes into detail about the overall incremental +build architecture, the checksums (signatures), and shared state. + +.. _concepts-overall-architecture: + +Overall Architecture +-------------------- + +When determining what parts of the system need to be built, BitBake +works on a per-task basis rather than a per-recipe basis. You might +wonder why using a per-task basis is preferred over a per-recipe basis. +To help explain, consider having the IPK packaging backend enabled and +then switching to DEB. In this case, the +:ref:`ref-tasks-install` and +:ref:`ref-tasks-package` task outputs +are still valid. However, with a per-recipe approach, the build would +not include the ``.deb`` files. Consequently, you would have to +invalidate the whole build and rerun it. Rerunning everything is not the +best solution. Also, in this case, the core must be "taught" much about +specific tasks. This methodology does not scale well and does not allow +users to easily add new tasks in layers or as external recipes without +touching the packaged-staging core. + +.. _overview-checksums: + +Checksums (Signatures) +---------------------- + +The shared state code uses a checksum, which is a unique signature of a +task's inputs, to determine if a task needs to be run again. Because it +is a change in a task's inputs that triggers a rerun, the process needs +to detect all the inputs to a given task. For shell tasks, this turns +out to be fairly easy because the build process generates a "run" shell +script for each task and it is possible to create a checksum that gives +you a good idea of when the task's data changes. + +To complicate the problem, there are things that should not be included +in the checksum. First, there is the actual specific build path of a +given task - the :term:`WORKDIR`. It +does not matter if the work directory changes because it should not +affect the output for target packages. Also, the build process has the +objective of making native or cross packages relocatable. + +.. note:: + + Both native and cross packages run on the + build host. However, cross packages generate output for the target + architecture. + +The checksum therefore needs to exclude ``WORKDIR``. The simplistic +approach for excluding the work directory is to set ``WORKDIR`` to some +fixed value and create the checksum for the "run" script. + +Another problem results from the "run" scripts containing functions that +might or might not get called. The incremental build solution contains +code that figures out dependencies between shell functions. This code is +used to prune the "run" scripts down to the minimum set, thereby +alleviating this problem and making the "run" scripts much more readable +as a bonus. + +So far, solutions for shell scripts exist. 
What about Python tasks? The
+same approach applies even though these tasks are more difficult. The
+process needs to figure out what variables a Python function accesses
+and what functions it calls. Again, the incremental build solution
+contains code that first figures out the variable and function
+dependencies, and then creates a checksum for the data used as the input
+to the task.
+
+Like the ``WORKDIR`` case, situations exist where dependencies should be
+ignored. For these situations, you can instruct the build process to
+ignore a dependency by using a line like the following:
+::
+
+ PACKAGE_ARCHS[vardepsexclude] = "MACHINE"
+
+This example ensures that the :term:`PACKAGE_ARCHS` variable
+does not depend on the value of :term:`MACHINE`, even if it does
+reference it.
+
+Equally, there are cases where you need to add dependencies that BitBake is
+not able to find. You can accomplish this by using a line like the
+following:
+::
+
+ PACKAGE_ARCHS[vardeps] = "MACHINE"
+
+This example explicitly
+adds the ``MACHINE`` variable as a dependency for ``PACKAGE_ARCHS``.
+
+As an example, consider a case with in-line Python where BitBake is not
+able to figure out dependencies. When running in debug mode (i.e. using
+``-DDD``), BitBake produces output when it discovers something for which
+it cannot figure out dependencies. The Yocto Project team has currently
+not managed to cover those dependencies in detail and is aware of the
+need to fix this situation.
+
+Thus far, this section has limited discussion to the direct inputs into
+a task. Information based on direct inputs is referred to as the
+"basehash" in the code. However, the question of a task's indirect
+inputs still exists - items already built and present in the
+:term:`Build Directory`. The checksum (or
+signature) for a particular task needs to add the hashes of all the
+tasks on which the particular task depends. Choosing which dependencies
+to add is a policy decision. However, the effect is to generate a master
+checksum that combines the basehash and the hashes of the task's
+dependencies.
+
+At the code level, a variety of ways exist by which both the basehash
+and the dependent task hashes can be influenced. Within the BitBake
+configuration file, you can give BitBake some extra information to help
+it construct the basehash. The following statement effectively results
+in a list of global variable dependency excludes (i.e. variables never
+included in any checksum):
+::
+
+ BB_HASHBASE_WHITELIST ?= "TMPDIR FILE PATH PWD BB_TASKHASH BBPATH DL_DIR \\
+ SSTATE_DIR THISDIR FILESEXTRAPATHS FILE_DIRNAME HOME LOGNAME SHELL TERM \\
+ USER FILESPATH STAGING_DIR_HOST STAGING_DIR_TARGET COREBASE PRSERV_HOST \\
+ PRSERV_DUMPDIR PRSERV_DUMPFILE PRSERV_LOCKDOWN PARALLEL_MAKE \\
+ CCACHE_DIR EXTERNAL_TOOLCHAIN CCACHE CCACHE_DISABLE LICENSE_PATH SDKPKGSUFFIX"
+
+The
+previous example excludes
+:term:`WORKDIR` since that variable
+is actually constructed as a path within
+:term:`TMPDIR`, which is on the
+whitelist.
+
+The rules for deciding which hashes of dependent tasks to include
+through dependency chains are more complex and are generally
+accomplished with a Python function. The code in
+``meta/lib/oe/sstatesig.py`` shows two examples of this and also
+illustrates how you can insert your own policy into the system if so
+desired. This file defines the two basic signature generators
+:term:`OpenEmbedded-Core (OE-Core)` uses: "OEBasic" and
+"OEBasicHash". By default, a dummy "noop" signature handler is enabled
+in BitBake.
This means that behavior is unchanged from previous +versions. OE-Core uses the "OEBasicHash" signature handler by default +through this setting in the ``bitbake.conf`` file: +:: + + BB_SIGNATURE_HANDLER ?= "OEBasicHash" + +The "OEBasicHash" ``BB_SIGNATURE_HANDLER`` is the same +as the "OEBasic" version but adds the task hash to the `stamp +files <#stamp-files-and-the-rerunning-of-tasks>`__. This results in any +metadata change that changes the task hash, automatically causing the +task to be run again. This removes the need to bump +:term:`PR` values, and changes to metadata +automatically ripple across the build. + +It is also worth noting that the end result of these signature +generators is to make some dependency and hash information available to +the build. This information includes: + +- ``BB_BASEHASH_task-``\ taskname: The base hashes for each task in the + recipe. + +- ``BB_BASEHASH_``\ filename\ ``:``\ taskname: The base hashes for each + dependent task. + +- ``BBHASHDEPS_``\ filename\ ``:``\ taskname: The task dependencies for + each task. + +- ``BB_TASKHASH``: The hash of the currently running task. + +Shared State +------------ + +Checksums and dependencies, as discussed in the previous section, solve +half the problem of supporting a shared state. The other half of the +problem is being able to use checksum information during the build and +being able to reuse or rebuild specific components. + +The :ref:`sstate ` class is a +relatively generic implementation of how to "capture" a snapshot of a +given task. The idea is that the build process does not care about the +source of a task's output. Output could be freshly built or it could be +downloaded and unpacked from somewhere. In other words, the build +process does not need to worry about its origin. + +Two types of output exist. One type is just about creating a directory +in :term:`WORKDIR`. A good example is +the output of either +:ref:`ref-tasks-install` or +:ref:`ref-tasks-package`. The other +type of output occurs when a set of data is merged into a shared +directory tree such as the sysroot. + +The Yocto Project team has tried to keep the details of the +implementation hidden in ``sstate`` class. From a user's perspective, +adding shared state wrapping to a task is as simple as this +:ref:`ref-tasks-deploy` example taken +from the :ref:`deploy ` class: +:: + + DEPLOYDIR = "${WORKDIR}/deploy-${PN}" + SSTATETASKS += "do_deploy" + do_deploy[sstate-inputdirs] = "${DEPLOYDIR}" + do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}" + + python do_deploy_setscene () { + sstate_setscene(d) + } + addtask do_deploy_setscene + do_deploy[dirs] = "${DEPLOYDIR} ${B}" + do_deploy[stamp-extra-info] = "${MACHINE_ARCH}" + +The following list explains the previous example: + +- Adding "do_deploy" to ``SSTATETASKS`` adds some required + sstate-related processing, which is implemented in the + :ref:`sstate ` class, to + before and after the + :ref:`ref-tasks-deploy` task. + +- The ``do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"`` declares that + ``do_deploy`` places its output in ``${DEPLOYDIR}`` when run normally + (i.e. when not using the sstate cache). This output becomes the input + to the shared state cache. + +- The ``do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"`` line + causes the contents of the shared state cache to be copied to + ``${DEPLOY_DIR_IMAGE}``. + + .. 
note::
+
+ If ``do_deploy`` is not already in the shared state cache or if its input
+ checksum (signature) has changed from when the output was cached, the task
+ runs to populate the shared state cache, after which the contents of the
+ shared state cache are copied to ${:term:`DEPLOY_DIR_IMAGE`}. If
+ ``do_deploy`` is in the shared state cache and its signature indicates
+ that the cached output is still valid (i.e. if no relevant task inputs
+ have changed), then the contents of the shared state cache are copied
+ directly to ${``DEPLOY_DIR_IMAGE``} by the ``do_deploy_setscene`` task
+ instead, skipping the ``do_deploy`` task.
+
+- The following task definition is glue logic needed to make the
+ previous settings effective:
+ ::
+
+ python do_deploy_setscene () {
+ sstate_setscene(d)
+ }
+ addtask do_deploy_setscene
+
+ ``sstate_setscene()`` takes the flags above as input and accelerates the ``do_deploy`` task
+ through the shared state cache if possible. If the task was
+ accelerated, ``sstate_setscene()`` returns True. Otherwise, it
+ returns False, and the normal ``do_deploy`` task runs. For more
+ information, see the ":ref:`setscene `"
+ section in the BitBake User Manual.
+
+- The ``do_deploy[dirs] = "${DEPLOYDIR} ${B}"`` line creates
+ ``${DEPLOYDIR}`` and ``${B}`` before the ``do_deploy`` task runs, and
+ also sets the current working directory of ``do_deploy`` to ``${B}``.
+ For more information, see the ":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:variable flags`"
+ section in the BitBake
+ User Manual.
+
+ .. note::
+
+ In cases where ``sstate-inputdirs`` and ``sstate-outputdirs`` would be
+ the same, you can use ``sstate-plaindirs``. For example, to preserve the
+ ${:term:`PKGD`} and ${:term:`PKGDEST`} output from the ``do_package``
+ task, use the following:
+ ::
+
+ do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST}"
+
+
+- The ``do_deploy[stamp-extra-info] = "${MACHINE_ARCH}"`` line appends
+ extra metadata to the `stamp
+ file <#stamp-files-and-the-rerunning-of-tasks>`__. In this case, the
+ metadata makes the task specific to a machine's architecture. See
+ the ":ref:`bitbake:ref-bitbake-tasklist`"
+ section in the BitBake User Manual for more information on the
+ ``stamp-extra-info`` flag.
+
+- ``sstate-inputdirs`` and ``sstate-outputdirs`` can also be used with
+ multiple directories. For example, the following declares
+ ``PKGDESTWORK`` and ``SHLIBSWORKDIR`` as shared state input directories,
+ which populate the shared state cache, and ``PKGDATA_DIR`` and
+ ``SHLIBSDIR`` as the corresponding shared state output directories:
+ ::
+
+ do_package[sstate-inputdirs] = "${PKGDESTWORK} ${SHLIBSWORKDIR}"
+ do_package[sstate-outputdirs] = "${PKGDATA_DIR} ${SHLIBSDIR}"
+
+- These methods also include the ability to take a lockfile when
+ manipulating shared state directory structures, for cases where file
+ additions or removals are sensitive:
+ ::
+
+ do_package[sstate-lockfile] = "${PACKAGELOCK}"
+
+Behind the scenes, the shared state code works by looking in
+:term:`SSTATE_DIR` and
+:term:`SSTATE_MIRRORS` for
+shared state files. Here is an example:
+::
+
+ SSTATE_MIRRORS ?= "\
+ file://.\* http://someserver.tld/share/sstate/PATH;downloadfilename=PATH \n \
+ file://.\* file:///some/local/dir/sstate/PATH"
+
+.. note::
+
+ The shared state directory (``SSTATE_DIR``) is organized into two-character
+ subdirectories, where the subdirectory names are based on the first two
+ characters of the hash.
+ If the shared state directory structure for a mirror has the same structure + as ``SSTATE_DIR``, you must specify "PATH" as part of the URI to enable the build + system to map to the appropriate subdirectory. + +The shared state package validity can be detected just by looking at the +filename since the filename contains the task checksum (or signature) as +described earlier in this section. If a valid shared state package is +found, the build process downloads it and uses it to accelerate the +task. + +The build processes use the ``*_setscene`` tasks for the task +acceleration phase. BitBake goes through this phase before the main +execution code and tries to accelerate any tasks for which it can find +shared state packages. If a shared state package for a task is +available, the shared state package is used. This means the task and any +tasks on which it is dependent are not executed. + +As a real world example, the aim is when building an IPK-based image, +only the +:ref:`ref-tasks-package_write_ipk` +tasks would have their shared state packages fetched and extracted. +Since the sysroot is not used, it would never get extracted. This is +another reason why a task-based approach is preferred over a +recipe-based approach, which would have to install the output from every +task. + +Automatically Added Runtime Dependencies +======================================== + +The OpenEmbedded build system automatically adds common types of runtime +dependencies between packages, which means that you do not need to +explicitly declare the packages using +:term:`RDEPENDS`. Three automatic +mechanisms exist (``shlibdeps``, ``pcdeps``, and ``depchains``) that +handle shared libraries, package configuration (pkg-config) modules, and +``-dev`` and ``-dbg`` packages, respectively. For other types of runtime +dependencies, you must manually declare the dependencies. + +- ``shlibdeps``: During the + :ref:`ref-tasks-package` task of + each recipe, all shared libraries installed by the recipe are + located. For each shared library, the package that contains the + shared library is registered as providing the shared library. More + specifically, the package is registered as providing the + `soname `__ of the library. The + resulting shared-library-to-package mapping is saved globally in + :term:`PKGDATA_DIR` by the + :ref:`ref-tasks-packagedata` + task. + + Simultaneously, all executables and shared libraries installed by the + recipe are inspected to see what shared libraries they link against. + For each shared library dependency that is found, ``PKGDATA_DIR`` is + queried to see if some package (likely from a different recipe) + contains the shared library. If such a package is found, a runtime + dependency is added from the package that depends on the shared + library to the package that contains the library. + + The automatically added runtime dependency also includes a version + restriction. This version restriction specifies that at least the + current version of the package that provides the shared library must + be used, as if "package (>= version)" had been added to ``RDEPENDS``. + This forces an upgrade of the package containing the shared library + when installing the package that depends on the library, if needed. + + If you want to avoid a package being registered as providing a + particular shared library (e.g. because the library is for internal + use only), then add the library to + :term:`PRIVATE_LIBS` inside + the package's recipe. 
+ +- ``pcdeps``: During the ``do_package`` task of each recipe, all + pkg-config modules (``*.pc`` files) installed by the recipe are + located. For each module, the package that contains the module is + registered as providing the module. The resulting module-to-package + mapping is saved globally in ``PKGDATA_DIR`` by the + ``do_packagedata`` task. + + Simultaneously, all pkg-config modules installed by the recipe are + inspected to see what other pkg-config modules they depend on. A + module is seen as depending on another module if it contains a + "Requires:" line that specifies the other module. For each module + dependency, ``PKGDATA_DIR`` is queried to see if some package + contains the module. If such a package is found, a runtime dependency + is added from the package that depends on the module to the package + that contains the module. + + .. note:: + + The + pcdeps + mechanism most often infers dependencies between + -dev + packages. + +- ``depchains``: If a package ``foo`` depends on a package ``bar``, + then ``foo-dev`` and ``foo-dbg`` are also made to depend on + ``bar-dev`` and ``bar-dbg``, respectively. Taking the ``-dev`` + packages as an example, the ``bar-dev`` package might provide headers + and shared library symlinks needed by ``foo-dev``, which shows the + need for a dependency between the packages. + + The dependencies added by ``depchains`` are in the form of + :term:`RRECOMMENDS`. + + .. note:: + + By default, ``foo-dev`` also has an ``RDEPENDS``-style dependency on + ``foo``, because the default value of ``RDEPENDS_${PN}-dev`` (set in + bitbake.conf) includes "${PN}". + + To ensure that the dependency chain is never broken, ``-dev`` and + ``-dbg`` packages are always generated by default, even if the + packages turn out to be empty. See the + :term:`ALLOW_EMPTY` variable + for more information. + +The ``do_package`` task depends on the ``do_packagedata`` task of each +recipe in :term:`DEPENDS` through use +of a ``[``\ :ref:`deptask `\ ``]`` +declaration, which guarantees that the required +shared-library/module-to-package mapping information will be available +when needed as long as ``DEPENDS`` has been correctly set. + +Fakeroot and Pseudo +=================== + +Some tasks are easier to implement when allowed to perform certain +operations that are normally reserved for the root user (e.g. +:ref:`ref-tasks-install`, +:ref:`do_package_write* `, +:ref:`ref-tasks-rootfs`, and +:ref:`do_image* `). For example, +the ``do_install`` task benefits from being able to set the UID and GID +of installed files to arbitrary values. + +One approach to allowing tasks to perform root-only operations would be +to require :term:`BitBake` to run as +root. However, this method is cumbersome and has security issues. The +approach that is actually used is to run tasks that benefit from root +privileges in a "fake" root environment. Within this environment, the +task and its child processes believe that they are running as the root +user, and see an internally consistent view of the filesystem. As long +as generating the final output (e.g. a package or an image) does not +require root privileges, the fact that some earlier steps ran in a fake +root environment does not cause problems. + +The capability to run tasks in a fake root environment is known as +"`fakeroot `__", which is derived from +the BitBake keyword/variable flag that requests a fake root environment +for a task. + +In the :term:`OpenEmbedded Build System`, +the program that +implements fakeroot is known as +`Pseudo `__. 
Pseudo +overrides system calls by using the environment variable ``LD_PRELOAD``, +which results in the illusion of running as root. To keep track of +"fake" file ownership and permissions resulting from operations that +require root permissions, Pseudo uses an SQLite 3 database. This +database is stored in +``${``\ :term:`WORKDIR`\ ``}/pseudo/files.db`` +for individual recipes. Storing the database in a file as opposed to in +memory gives persistence between tasks and builds, which is not +accomplished using fakeroot. + +.. note:: + + If you add your own task that manipulates the same files or + directories as a fakeroot task, then that task also needs to run + under fakeroot. Otherwise, the task cannot run root-only operations, + and cannot see the fake file ownership and permissions set by the + other task. You need to also add a dependency on + virtual/fakeroot-native:do_populate_sysroot + , giving the following: + :: + + fakeroot do_mytask () { + ... + } + do_mytask[depends] += "virtual/fakeroot-native:do_populate_sysroot" + + +For more information, see the +:term:`FAKEROOT* ` variables in the +BitBake User Manual. You can also reference the "`Why Not +Fakeroot? `__" +article for background information on Fakeroot and Pseudo. diff --git a/poky/documentation/overview-manual/overview-manual-development-environment.rst b/poky/documentation/overview-manual/overview-manual-development-environment.rst new file mode 100644 index 000000000..3b5147d73 --- /dev/null +++ b/poky/documentation/overview-manual/overview-manual-development-environment.rst @@ -0,0 +1,672 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************************************** +The Yocto Project Development Environment +***************************************** + +This chapter takes a look at the Yocto Project development environment. +The chapter provides Yocto Project Development environment concepts that +help you understand how work is accomplished in an open source +environment, which is very different as compared to work accomplished in +a closed, proprietary environment. + +Specifically, this chapter addresses open source philosophy, source +repositories, workflows, Git, and licensing. + +Open Source Philosophy +====================== + +Open source philosophy is characterized by software development directed +by peer production and collaboration through an active community of +developers. Contrast this to the more standard centralized development +models used by commercial software companies where a finite set of +developers produces a product for sale using a defined set of procedures +that ultimately result in an end product whose architecture and source +material are closed to the public. + +Open source projects conceptually have differing concurrent agendas, +approaches, and production. These facets of the development process can +come from anyone in the public (community) who has a stake in the +software project. The open source environment contains new copyright, +licensing, domain, and consumer issues that differ from the more +traditional development environment. In an open source environment, the +end product, source material, and documentation are all available to the +public at no cost. + +A benchmark example of an open source project is the Linux kernel, which +was initially conceived and created by Finnish computer science student +Linus Torvalds in 1991. Conversely, a good example of a non-open source +project is the Windows family of operating systems developed by +Microsoft Corporation. 
+
+Wikipedia has a good historical description of the Open Source
+Philosophy `here `__. You can
+also find helpful information on how to participate in the Linux
+Community
+`here `__.
+
+.. _gs-the-development-host:
+
+The Development Host
+====================
+
+A development host or :term:`Build Host` is key to
+using the Yocto Project. Because the goal of the Yocto Project is to
+develop images or applications that run on embedded hardware,
+development of those images and applications generally takes place on a
+system not intended to run the software - the development host.
+
+You need to set up a development host in order to use it with the Yocto
+Project. Most find that it is best to have a native Linux machine
+function as the development host. However, it is possible to use a
+system that does not run Linux as its operating system as your
+development host. When you have a Mac or Windows-based system, you can
+set it up as the development host by using
+`CROPS `__, which leverages
+`Docker Containers `__. Once you take the steps
+to set up a CROPS machine, you effectively have access to a shell
+environment that is similar to what you see when using a Linux-based
+development host. For the steps needed to set up a system using CROPS,
+see the
+":ref:`dev-manual/dev-manual-start:setting up to use cross platforms (crops)`"
+section in
+the Yocto Project Development Tasks Manual.
+
+If your development host is going to be a system that runs a Linux
+distribution, steps still exist that you must take to prepare the system
+for use with the Yocto Project. You need to be sure that the Linux
+distribution on the system is one that supports the Yocto Project. You
+also need to be sure that the correct set of host packages are installed
+that allow development using the Yocto Project. For the steps needed to
+set up a development host that runs Linux, see the
+":ref:`dev-manual/dev-manual-start:setting up a native linux host`"
+section in the Yocto Project Development Tasks Manual.
+
+Once your development host is set up to use the Yocto Project, several
+methods exist for you to do work in the Yocto Project environment:
+
+- *Command Lines, BitBake, and Shells:* Traditional development in the
+ Yocto Project involves using the :term:`OpenEmbedded Build System`,
+ which uses
+ BitBake, in a command-line environment from a shell on your
+ development host. You can accomplish this from a host that is a
+ native Linux machine or from a host that has been set up with CROPS.
+ Either way, you create, modify, and build images and applications all
+ within a shell-based environment using components and tools available
+ through your Linux distribution and the Yocto Project.
+
+ For a general flow of the build procedures, see the
+ ":ref:`dev-manual/dev-manual-common-tasks:building a simple image`"
+ section in the Yocto Project Development Tasks Manual.
+
+- *Board Support Package (BSP) Development:* Development of BSPs
+ involves using the Yocto Project to create and test layers that allow
+ easy development of images and applications targeted for specific
+ hardware. To develop BSPs, you need to take some additional steps
+ beyond what was described in setting up a development host.
+
+ The :doc:`../bsp-guide/bsp-guide` provides BSP-related development
+ information. For specifics on development host preparation, see the
+ ":ref:`bsp-guide/bsp:preparing your build host to work with bsp layers`"
+ section in the Yocto Project Board Support Package (BSP) Developer's
+ Guide.
+ +- *Kernel Development:* If you are going to be developing kernels using + the Yocto Project you likely will be using ``devtool``. A workflow + using ``devtool`` makes kernel development quicker by reducing + iteration cycle times. + + The :doc:`../kernel-dev/kernel-dev` provides kernel-related + development information. For specifics on development host + preparation, see the + ":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`" + section in the Yocto Project Linux Kernel Development Manual. + +- *Using Toaster:* The other Yocto Project development method that + involves an interface that effectively puts the Yocto Project into + the background is Toaster. Toaster provides an interface to the + OpenEmbedded build system. The interface enables you to configure and + run your builds. Information about builds is collected and stored in + a database. You can use Toaster to configure and start builds on + multiple remote build servers. + + For steps that show you how to set up your development host to use + Toaster and on how to use Toaster in general, see the + :doc:`../toaster-manual/toaster-manual`. + +.. _yocto-project-repositories: + +Yocto Project Source Repositories +================================= + +The Yocto Project team maintains complete source repositories for all +Yocto Project files at :yocto_git:`/`. This web-based source +code browser is organized into categories by function such as IDE +Plugins, Matchbox, Poky, Yocto Linux Kernel, and so forth. From the +interface, you can click on any particular item in the "Name" column and +see the URL at the bottom of the page that you need to clone a Git +repository for that particular item. Having a local Git repository of +the :term:`Source Directory`, which +is usually named "poky", allows you to make changes, contribute to the +history, and ultimately enhance the Yocto Project's tools, Board Support +Packages, and so forth. + +For any supported release of Yocto Project, you can also go to the +:yocto_home:`Yocto Project Website <>` and select the "DOWNLOADS" +item from the "SOFTWARE" menu and get a released tarball of the ``poky`` +repository, any supported BSP tarball, or Yocto Project tools. Unpacking +these tarballs gives you a snapshot of the released files. + +.. note:: + + - The recommended method for setting up the Yocto Project + :term:`Source Directory` and the files + for supported BSPs (e.g., ``meta-intel``) is to use `Git <#git>`__ + to create a local copy of the upstream repositories. + + - Be sure to always work in matching branches for both the selected + BSP repository and the Source Directory (i.e. ``poky``) + repository. For example, if you have checked out the "master" + branch of ``poky`` and you are going to use ``meta-intel``, be + sure to checkout the "master" branch of ``meta-intel``. + +In summary, here is where you can get the project files needed for +development: + +- :yocto_git:`Source Repositories: <>` This area contains IDE + Plugins, Matchbox, Poky, Poky Support, Tools, Yocto Linux Kernel, and + Yocto Metadata Layers. You can create local copies of Git + repositories for each of these areas. + + .. image:: figures/source-repos.png + :align: center + + For steps on how to view and access these upstream Git repositories, + see the ":ref:`dev-manual/dev-manual-start:accessing source repositories`" + Section in the Yocto Project Development Tasks Manual. 
+ +- :yocto_dl:`Index of /releases: ` This is an index + of releases such as Poky, Pseudo, installers for cross-development + toolchains, miscellaneous support and all released versions of Yocto + Project in the form of images or tarballs. Downloading and extracting + these files does not produce a local copy of the Git repository but + rather a snapshot of a particular release or image. + + .. image:: figures/index-downloads.png + :align: center + + For steps on how to view and access these files, see the + ":ref:`dev-manual/dev-manual-start:accessing index of releases`" + section in the Yocto Project Development Tasks Manual. + +- *"DOWNLOADS" page for the* :yocto_home:`Yocto Project Website <>` *:* + + The Yocto Project website includes a "DOWNLOADS" page accessible + through the "SOFTWARE" menu that allows you to download any Yocto + Project release, tool, and Board Support Package (BSP) in tarball + form. The tarballs are similar to those found in the + :yocto_dl:`Index of /releases: ` area. + + .. image:: figures/yp-download.png + :align: center + + For steps on how to use the "DOWNLOADS" page, see the + ":ref:`dev-manual/dev-manual-start:using the downloads page`" + section in the Yocto Project Development Tasks Manual. + +.. _gs-git-workflows-and-the-yocto-project: + +Git Workflows and the Yocto Project +=================================== + +Developing using the Yocto Project likely requires the use of +`Git <#git>`__. Git is a free, open source distributed version control +system used as part of many collaborative design environments. This +section provides workflow concepts using the Yocto Project and Git. In +particular, the information covers basic practices that describe roles +and actions in a collaborative development environment. + +.. note:: + + If you are familiar with this type of development environment, you + might not want to read this section. + +The Yocto Project files are maintained using Git in "branches" whose Git +histories track every change and whose structures provide branches for +all diverging functionality. Although there is no need to use Git, many +open source projects do so. + +For the Yocto Project, a key individual called the "maintainer" is +responsible for the integrity of the "master" branch of a given Git +repository. The "master" branch is the "upstream" repository from which +final or most recent builds of a project occur. The maintainer is +responsible for accepting changes from other developers and for +organizing the underlying branch structure to reflect release strategies +and so forth. + +.. note:: + + For information on finding out who is responsible for (maintains) a + particular area of code in the Yocto Project, see the + ":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" + section of the Yocto Project Development Tasks Manual. + +The Yocto Project ``poky`` Git repository also has an upstream +contribution Git repository named ``poky-contrib``. You can see all the +branches in this repository using the web interface of the +:yocto_git:`Source Repositories <>` organized within the "Poky Support" +area. These branches hold changes (commits) to the project that have +been submitted or committed by the Yocto Project development team and by +community members who contribute to the project. The maintainer +determines if the changes are qualified to be moved from the "contrib" +branches into the "master" branch of the Git repository. 
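+
+To make this flow more concrete, the following sketch shows the kind of
+commands a contributor might run to clone the upstream repository,
+develop a change on a local branch, and push that branch out for review.
+The branch name and the "contrib" remote are placeholders only; the
+actual contribution repositories and branch naming conventions are
+covered in the Yocto Project Development Tasks Manual:
+::
+
+ $ git clone git://git.yoctoproject.org/poky
+ $ cd poky
+ $ git checkout -b my-feature origin/master
+ # ... edit files, test the change, then stage and commit it ...
+ $ git add <changed files>
+ $ git commit
+ $ git push <contrib-remote> my-feature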
+ +Developers (including contributing community members) create and +maintain cloned repositories of upstream branches. The cloned +repositories are local to their development platforms and are used to +develop changes. When a developer is satisfied with a particular feature +or change, they "push" the change to the appropriate "contrib" +repository. + +Developers are responsible for keeping their local repository up-to-date +with whatever upstream branch they are working against. They are also +responsible for straightening out any conflicts that might arise within +files that are being worked on simultaneously by more than one person. +All this work is done locally on the development host before anything is +pushed to a "contrib" area and examined at the maintainer's level. + +A somewhat formal method exists by which developers commit changes and +push them into the "contrib" area and subsequently request that the +maintainer include them into an upstream branch. This process is called +"submitting a patch" or "submitting a change." For information on +submitting patches and changes, see the +":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" +section in the Yocto Project Development Tasks Manual. + +In summary, a single point of entry exists for changes into a "master" +or development branch of the Git repository, which is controlled by the +project's maintainer. And, a set of developers exist who independently +develop, test, and submit changes to "contrib" areas for the maintainer +to examine. The maintainer then chooses which changes are going to +become a permanent part of the project. + +.. image:: figures/git-workflow.png + :align: center + +While each development environment is unique, there are some best +practices or methods that help development run smoothly. The following +list describes some of these practices. For more information about Git +workflows, see the workflow topics in the `Git Community +Book `__. + +- *Make Small Changes:* It is best to keep the changes you commit small + as compared to bundling many disparate changes into a single commit. + This practice not only keeps things manageable but also allows the + maintainer to more easily include or refuse changes. + +- *Make Complete Changes:* It is also good practice to leave the + repository in a state that allows you to still successfully build + your project. In other words, do not commit half of a feature, then + add the other half as a separate, later commit. Each commit should + take you from one buildable project state to another buildable state. + +- *Use Branches Liberally:* It is very easy to create, use, and delete + local branches in your working Git repository on the development + host. You can name these branches anything you like. It is helpful to + give them names associated with the particular feature or change on + which you are working. Once you are done with a feature or change and + have merged it into your local master branch, simply discard the + temporary branch. + +- *Merge Changes:* The ``git merge`` command allows you to take the + changes from one branch and fold them into another branch. This + process is especially helpful when more than a single developer might + be working on different parts of the same feature. Merging changes + also automatically identifies any collisions or "conflicts" that + might happen as a result of the same lines of code being altered by + two different developers. 
+ +- *Manage Branches:* Because branches are easy to use, you should use a + system where branches indicate varying levels of code readiness. For + example, you can have a "work" branch to develop in, a "test" branch + where the code or change is tested, a "stage" branch where changes + are ready to be committed, and so forth. As your project develops, + you can merge code across the branches to reflect ever-increasing + stable states of the development. + +- *Use Push and Pull:* The push-pull workflow is based on the concept + of developers "pushing" local commits to a remote repository, which + is usually a contribution repository. This workflow is also based on + developers "pulling" known states of the project down into their + local development repositories. The workflow easily allows you to + pull changes submitted by other developers from the upstream + repository into your work area ensuring that you have the most recent + software on which to develop. The Yocto Project has two scripts named + ``create-pull-request`` and ``send-pull-request`` that ship with the + release to facilitate this workflow. You can find these scripts in + the ``scripts`` folder of the + :term:`Source Directory`. For information + on how to use these scripts, see the + ":ref:`dev-manual/dev-manual-common-tasks:using scripts to push a change upstream and request a pull`" + section in the Yocto Project Development Tasks Manual. + +- *Patch Workflow:* This workflow allows you to notify the maintainer + through an email that you have a change (or patch) you would like + considered for the "master" branch of the Git repository. To send + this type of change, you format the patch and then send the email + using the Git commands ``git format-patch`` and ``git send-email``. + For information on how to use these scripts, see the + ":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`" + section in the Yocto Project Development Tasks Manual. + +Git +=== + +The Yocto Project makes extensive use of Git, which is a free, open +source distributed version control system. Git supports distributed +development, non-linear development, and can handle large projects. It +is best that you have some fundamental understanding of how Git tracks +projects and how to work with Git if you are going to use the Yocto +Project for development. This section provides a quick overview of how +Git works and provides you with a summary of some essential Git +commands. + +.. note:: + + - For more information on Git, see + http://git-scm.com/documentation. + + - If you need to download Git, it is recommended that you add Git to + your system through your distribution's "software store" (e.g. for + Ubuntu, use the Ubuntu Software feature). For the Git download + page, see http://git-scm.com/download. + + - For information beyond the introductory nature in this section, + see the ":ref:`dev-manual/dev-manual-start:locating yocto project source files`" + section in the Yocto Project Development Tasks Manual. + +Repositories, Tags, and Branches +-------------------------------- + +As mentioned briefly in the previous section and also in the "`Git +Workflows and the Yocto +Project <#gs-git-workflows-and-the-yocto-project>`__" section, the Yocto +Project maintains source repositories at :yocto_git:`/`. If you +look at this web-interface of the repositories, each item is a separate +Git repository. + +Git repositories use branching techniques that track content change (not +files) within a project (e.g. 
a new feature or updated documentation).
+Creating a tree-like structure based on project divergence allows for
+excellent historical information over the life of a project. This
+methodology also allows for an environment from which you can do lots of
+local experimentation on projects as you develop changes or new
+features.
+
+A Git repository represents all development efforts for a given project.
+For example, the Git repository ``poky`` contains all changes and
+developments for that repository over the course of its entire life.
+That means that all changes that make up all releases are captured. The
+repository maintains a complete history of changes.
+
+You can create a local copy of any repository by "cloning" it with the
+``git clone`` command. When you clone a Git repository, you end up with
+an identical copy of the repository on your development system. Once you
+have a local copy of a repository, you can take steps to develop
+locally. For examples on how to clone Git repositories, see the
+":ref:`dev-manual/dev-manual-start:locating yocto project source files`"
+section in the Yocto Project Development Tasks Manual.
+
+It is important to understand that Git tracks content change and not
+files. Git uses "branches" to organize different development efforts.
+For example, the ``poky`` repository has several branches that include
+the current "&DISTRO_NAME_NO_CAP;" branch, the "master" branch, and many
+branches for past Yocto Project releases. You can see all the branches
+by going to https://git.yoctoproject.org/cgit.cgi/poky/ and clicking on the
+``[...]`` link beneath the "Branch" heading.
+
+Each of these branches represents a specific area of development. The
+"master" branch represents the current or most recent development. All
+other branches represent offshoots of the "master" branch.
+
+When you create a local copy of a Git repository, the copy has the same
+set of branches as the original. This means you can use Git to create a
+local working area (also called a branch) that tracks a specific
+development branch from the upstream source Git repository. In other
+words, you can define your local Git environment to work on any
+development branch in the repository. To help illustrate, consider the
+following example Git commands:
+::
+
+ $ cd ~
+ $ git clone git://git.yoctoproject.org/poky
+ $ cd poky
+ $ git checkout -b &DISTRO_NAME_NO_CAP; origin/&DISTRO_NAME_NO_CAP;
+
+In the previous example,
+after moving to the home directory, the ``git clone`` command creates a
+local copy of the upstream ``poky`` Git repository. By default, Git
+checks out the "master" branch for your work. After changing the working
+directory to the new local repository (i.e. ``poky``), the
+``git checkout`` command creates and checks out a local branch named
+"&DISTRO_NAME_NO_CAP;", which tracks the upstream
+"origin/&DISTRO_NAME_NO_CAP;" branch. Changes you make while in this
+branch would ultimately affect the upstream "&DISTRO_NAME_NO_CAP;" branch
+of the ``poky`` repository.
+
+It is important to understand that when you create and checkout a local
+working branch based on a branch name, your local environment matches
+the "tip" of that particular development branch at the time you created
+your local branch, which could be different from the files in the
+"master" branch of the upstream repository. In other words, creating and
+checking out a local branch based on the "&DISTRO_NAME_NO_CAP;" branch
+name is not the same as checking out the "master" branch in the
+repository.
Keep reading to see how you create a local snapshot of a
+Yocto Project Release.
+
+Git uses "tags" to mark specific changes in a repository branch
+structure. Typically, a tag is used to mark a special point such as the
+final change (or commit) before a project is released. You can see the
+tags used with the ``poky`` Git repository by going to
+https://git.yoctoproject.org/cgit.cgi/poky/ and clicking on the ``[...]`` link
+beneath the "Tag" heading.
+
+Some key tags for the ``poky`` repository are ``jethro-14.0.3``,
+``morty-16.0.1``, ``pyro-17.0.0``, and
+``&DISTRO_NAME_NO_CAP;-&POKYVERSION;``. These tags represent Yocto Project
+releases.
+
+When you create a local copy of the Git repository, you also have access
+to all the tags in the upstream repository. Similar to branches, you can
+create and checkout a local working Git branch based on a tag name. When
+you do this, you get a snapshot of the Git repository that reflects the
+state of the files when the change associated with that tag was made.
+The most common use is to checkout a working branch that matches a
+specific Yocto Project release. Here is an example:
+::
+
+ $ cd ~
+ $ git clone git://git.yoctoproject.org/poky
+ $ cd poky
+ $ git fetch --tags
+ $ git checkout tags/rocko-18.0.0 -b my_rocko-18.0.0
+
+In this example, the name
+of the top-level directory of your local Yocto Project repository is
+``poky``. After moving to the ``poky`` directory, the ``git fetch``
+command makes all the upstream tags available locally in your
+repository. Finally, the ``git checkout`` command creates and checks out
+a branch named "my_rocko-18.0.0" that is based on the upstream branch
+whose "HEAD" matches the commit in the repository associated with the
+"rocko-18.0.0" tag. The files in your repository now exactly match that
+particular Yocto Project release as it is tagged in the upstream Git
+repository. It is important to understand that when you create and
+checkout a local working branch based on a tag, your environment matches
+a specific point in time and not the entire development branch (i.e.
+from the "tip" of the branch backwards).
+
+Basic Commands
+--------------
+
+Git has an extensive set of commands that let you manage changes and
+perform collaboration over the life of a project. Conveniently though,
+you can manage with a small set of basic operations and workflows once
+you understand the basic philosophy behind Git. You do not have to be an
+expert in Git to be functional. A good place to look for instruction on
+a minimal set of Git commands is
+`here `__.
+
+The following list of Git commands briefly describes some basic Git
+operations as a way to get started. As with any set of commands, this
+list (in most cases) simply shows the base command and omits the many
+arguments it supports. See the Git documentation for complete
+descriptions and strategies on how to use these commands:
+
+- *git init:* Initializes an empty Git repository. You cannot use
+ Git commands unless you have a ``.git`` repository.
+
+- *git clone:* Creates a local clone of a Git repository that is on
+ equal footing with a fellow developer's Git repository or an upstream
+ repository.
+
+- *git add:* Locally stages updated file contents to the index that
+ Git uses to track changes. You must stage all files that have changed
+ before you can commit them.
+
+- *git commit:* Creates a local "commit" that documents the changes
+ you made. Only changes that have been staged can be committed.
+
+ Commits are used for historical purposes, for determining if a
+ maintainer of a project will allow the change, and for ultimately
+ pushing the change from your local Git repository into the project's
+ upstream repository.
+
+- *git status:* Reports any modified files that possibly need to be
+ staged and gives you a status of where you stand regarding local
+ commits as compared to the upstream repository.
+
+- *git checkout branch-name:* Changes your local working branch and
+ in this form assumes the local branch already exists. This command is
+ analogous to "cd".
+
+- *git checkout -b working-branch upstream-branch:* Creates and
+ checks out a working branch on your local machine. The local branch
+ tracks the upstream branch. You can use your local branch to isolate
+ your work. It is a good idea to use local branches when adding
+ specific features or changes. Using isolated branches facilitates
+ easy removal of changes if they do not work out.
+
+- *git branch:* Displays the existing local branches associated
+ with your local repository. The branch that you have currently
+ checked out is noted with an asterisk character.
+
+- *git branch -D branch-name:* Deletes an existing local branch.
+ You need to be in a local branch other than the one you are deleting
+ in order to delete branch-name.
+
+- *git pull --rebase:* Retrieves information from an upstream Git
+ repository and places it in your local Git repository. You use this
+ command to make sure you are synchronized with the repository from
+ which you are basing changes (e.g. the "master" branch). The
+ "--rebase" option ensures that any local commits you have in your
+ branch are preserved at the top of your local branch.
+
+- *git push repo-name local-branch:upstream-branch:* Sends
+ all your committed local changes to the upstream Git repository that
+ your local repository is tracking (e.g. a contribution repository).
+ The maintainer of the project draws from these repositories to merge
+ changes (commits) into the appropriate branch of the project's upstream
+ repository.
+
+- *git merge:* Combines or adds changes from one local branch of
+ your repository with another branch. When you create a local Git
+ repository, the default branch is named "master". A typical workflow
+ is to create a temporary branch that is based off "master" that you
+ would use for isolated work. You would make your changes in that
+ isolated branch, stage and commit them locally, switch to the
+ "master" branch, and then use the ``git merge`` command to apply the
+ changes from your isolated branch into the currently checked out
+ branch (e.g. "master"). After the merge is complete and if you are
+ done with working in that isolated branch, you can safely delete the
+ isolated branch.
+
+- *git cherry-pick commits:* Chooses and applies specific commits from
+ one branch into another branch. There are times when you might not be
+ able to merge all the changes in one branch with another but need to
+ pick out certain ones.
+
+- *gitk:* Provides a GUI view of the branches and changes in your
+ local Git repository. This command is a good way to graphically see
+ where things have diverged in your local repository.
+
+ .. note::
+
+ You need to install the
+ gitk
+ package on your development system to use this command.
+
+- *git log:* Reports a history of your commits to the repository.
+ This report lists all commits regardless of whether you have pushed
+ them upstream or not.
+ +- *git diff:* Displays line-by-line differences between a local + working file and the same file as understood by Git. This command is + useful to see what you have changed in any given file. + +Licensing +========= + +Because open source projects are open to the public, they have different +licensing structures in place. License evolution for both Open Source +and Free Software has an interesting history. If you are interested in +this history, you can find basic information here: + +- `Open source license + history `__ + +- `Free software license + history `__ + +In general, the Yocto Project is broadly licensed under the +Massachusetts Institute of Technology (MIT) License. MIT licensing +permits the reuse of software within proprietary software as long as the +license is distributed with that software. MIT is also compatible with +the GNU General Public License (GPL). Patches to the Yocto Project +follow the upstream licensing scheme. You can find information on the +MIT license +`here `__. You can +find information on the GNU GPL +`here `__. + +When you build an image using the Yocto Project, the build process uses +a known list of licenses to ensure compliance. You can find this list in +the :term:`Source Directory` at +``meta/files/common-licenses``. Once the build completes, the list of +all licenses found and used during that build are kept in the +:term:`Build Directory` at +``tmp/deploy/licenses``. + +If a module requires a license that is not in the base list, the build +process generates a warning during the build. These tools make it easier +for a developer to be certain of the licenses with which their shipped +products must comply. However, even with these tools it is still up to +the developer to resolve potential licensing issues. + +The base list of licenses used by the build process is a combination of +the Software Package Data Exchange (SPDX) list and the Open Source +Initiative (OSI) projects. `SPDX Group `__ is a working +group of the Linux Foundation that maintains a specification for a +standard format for communicating the components, licenses, and +copyrights associated with a software package. +`OSI `__ is a corporation dedicated to the Open +Source Definition and the effort for reviewing and approving licenses +that conform to the Open Source Definition (OSD). + +You can find a list of the combined SPDX and OSI licenses that the Yocto +Project uses in the ``meta/files/common-licenses`` directory in your +:term:`Source Directory`. + +For information that can help you maintain compliance with various open +source licensing during the lifecycle of a product created using the +Yocto Project, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`" +section in the Yocto Project Development Tasks Manual. diff --git a/poky/documentation/overview-manual/overview-manual-development-environment.xml b/poky/documentation/overview-manual/overview-manual-development-environment.xml index 8415d1dd7..08ad07131 100644 --- a/poky/documentation/overview-manual/overview-manual-development-environment.xml +++ b/poky/documentation/overview-manual/overview-manual-development-environment.xml @@ -327,7 +327,7 @@ For the Yocto Project, a key individual called the "maintainer" is responsible for the integrity of the "master" branch of a given Git repository. 
- The "master" branch is the “upstream” repository from which final or + The "master" branch is the "upstream" repository from which final or most recent builds of a project occur. The maintainer is responsible for accepting changes from other developers and for organizing the underlying branch structure to @@ -372,7 +372,7 @@ might arise within files that are being worked on simultaneously by more than one person. All this work is done locally on the development host before - anything is pushed to a "contrib" area and examined at the maintainer’s + anything is pushed to a "contrib" area and examined at the maintainer's level. @@ -380,7 +380,7 @@ A somewhat formal method exists by which developers commit changes and push them into the "contrib" area and subsequently request that the maintainer include them into an upstream branch. - This process is called “submitting a patch” or "submitting a change." + This process is called "submitting a patch" or "submitting a change." For information on submitting patches and changes, see the "Submitting a Change to the Yocto Project" section in the Yocto Project Development Tasks Manual. @@ -389,7 +389,7 @@ In summary, a single point of entry exists for changes into a "master" or development branch of the - Git repository, which is controlled by the project’s maintainer. + Git repository, which is controlled by the project's maintainer. And, a set of developers exist who independently develop, test, and submit changes to "contrib" areas for the maintainer to examine. The maintainer then chooses which changes are going to become a @@ -734,7 +734,7 @@ git clone: Creates a local clone of a Git repository that is on - equal footing with a fellow developer’s Git repository + equal footing with a fellow developer's Git repository or an upstream repository. @@ -752,7 +752,7 @@ Commits are used for historical purposes, for determining if a maintainer of a project will allow the change, and for ultimately pushing the change from your local - Git repository into the project’s upstream repository. + Git repository into the project's upstream repository. git status: diff --git a/poky/documentation/overview-manual/overview-manual-intro.rst b/poky/documentation/overview-manual/overview-manual-intro.rst new file mode 100644 index 000000000..3f206fd54 --- /dev/null +++ b/poky/documentation/overview-manual/overview-manual-intro.rst @@ -0,0 +1,74 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************************************** +The Yocto Project Overview and Concepts Manual +********************************************** + +.. _overview-manual-welcome: + +Welcome +======= + +Welcome to the Yocto Project Overview and Concepts Manual! This manual +introduces the Yocto Project by providing concepts, software overviews, +best-known-methods (BKMs), and any other high-level introductory +information suitable for a new Yocto Project user. + +The following list describes what you can get from this manual: + +- `Introducing the Yocto Project <#overview-yp>`__\ *:* This chapter + provides an introduction to the Yocto Project. You will learn about + features and challenges of the Yocto Project, the layer model, + components and tools, development methods, the + :term:`Poky` reference distribution, the + OpenEmbedded build system workflow, and some basic Yocto terms. + +- `The Yocto Project Development + Environment <#overview-development-environment>`__\ *:* This chapter + helps you get started understanding the Yocto Project development + environment. 
You will learn about open source, development hosts, + Yocto Project source repositories, workflows using Git and the Yocto + Project, a Git primer, and information about licensing. + +- :doc:`overview-manual-concepts` *:* This + chapter presents various concepts regarding the Yocto Project. You + can find conceptual information about components, development, + cross-toolchains, and so forth. + +This manual does not give you the following: + +- *Step-by-step Instructions for Development Tasks:* Instructional + procedures reside in other manuals within the Yocto Project + documentation set. For example, the :doc:`../dev-manual/dev-manual` + provides examples on how to perform + various development tasks. As another example, the + :doc:`../sdk-manual/sdk-manual` manual contains detailed + instructions on how to install an SDK, which is used to develop + applications for target hardware. + +- *Reference Material:* This type of material resides in an appropriate + reference manual. For example, system variables are documented in the + :doc:`../ref-manual/ref-manual`. As another + example, the :doc:`../bsp-guide/bsp-guide` contains reference information on + BSPs. + +- *Detailed Public Information Not Specific to the Yocto Project:* For + example, exhaustive information on how to use the Source Control + Manager Git is better covered with Internet searches and official Git + Documentation than through the Yocto Project documentation. + +.. _overview-manual-other-information: + +Other Information +================= + +Because this manual presents information for many different topics, +supplemental information is recommended for full comprehension. For +additional introductory information on the Yocto Project, see the +:yocto_home:`Yocto Project Website <>`. If you want to build an image +with no knowledge of Yocto Project as a way of quickly testing it out, +see the :doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document. +For a comprehensive list of links and other documentation, see the +":ref:`Links and Related +Documentation `" +section in the Yocto Project Reference Manual. diff --git a/poky/documentation/overview-manual/overview-manual-yp-intro.rst b/poky/documentation/overview-manual/overview-manual-yp-intro.rst new file mode 100644 index 000000000..265fbda7f --- /dev/null +++ b/poky/documentation/overview-manual/overview-manual-yp-intro.rst @@ -0,0 +1,941 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************************** +Introducing the Yocto Project +***************************** + +What is the Yocto Project? +========================== + +The Yocto Project is an open source collaboration project that helps +developers create custom Linux-based systems that are designed for +embedded products regardless of the product's hardware architecture. +Yocto Project provides a flexible toolset and a development environment +that allows embedded device developers across the world to collaborate +through shared technologies, software stacks, configurations, and best +practices used to create these tailored Linux images. + +Thousands of developers worldwide have discovered that Yocto Project +provides advantages in both systems and applications development, +archival and management benefits, and customizations used for speed, +footprint, and memory utilization. The project is a standard when it +comes to delivering embedded software stacks. 
The project allows +software customizations and build interchange for multiple hardware +platforms as well as software stacks that can be maintained and scaled. + +.. image:: figures/key-dev-elements.png + :align: center + +For further introductory information on the Yocto Project, you might be +interested in this +`article `__ +by Drew Moseley and in this short introductory +`video `__. + +The remainder of this section overviews advantages and challenges tied +to the Yocto Project. + +.. _gs-features: + +Features +-------- + +The following list describes features and advantages of the Yocto +Project: + +- *Widely Adopted Across the Industry:* Semiconductor, operating + system, software, and service vendors exist whose products and + services adopt and support the Yocto Project. For a look at the Yocto + Project community and the companies involved with the Yocto Project, + see the "COMMUNITY" and "ECOSYSTEM" tabs on the + :yocto_home:`Yocto Project <>` home page. + +- *Architecture Agnostic:* Yocto Project supports Intel, ARM, MIPS, + AMD, PPC and other architectures. Most ODMs, OSVs, and chip vendors + create and supply BSPs that support their hardware. If you have + custom silicon, you can create a BSP that supports that architecture. + + Aside from lots of architecture support, the Yocto Project fully + supports a wide range of device emulation through the Quick EMUlator + (QEMU). + +- *Images and Code Transfer Easily:* Yocto Project output can easily + move between architectures without moving to new development + environments. Additionally, if you have used the Yocto Project to + create an image or application and you find yourself not able to + support it, commercial Linux vendors such as Wind River, Mentor + Graphics, Timesys, and ENEA could take it and provide ongoing + support. These vendors have offerings that are built using the Yocto + Project. + +- *Flexibility:* Corporations use the Yocto Project many different + ways. One example is to create an internal Linux distribution as a + code base the corporation can use across multiple product groups. + Through customization and layering, a project group can leverage the + base Linux distribution to create a distribution that works for their + product needs. + +- *Ideal for Constrained Embedded and IoT devices:* Unlike a full Linux + distribution, you can use the Yocto Project to create exactly what + you need for embedded devices. You only add the feature support or + packages that you absolutely need for the device. For devices that + have display hardware, you can use available system components such + as X11, GTK+, Qt, Clutter, and SDL (among others) to create a rich + user experience. For devices that do not have a display or where you + want to use alternative UI frameworks, you can choose to not install + these components. + +- *Comprehensive Toolchain Capabilities:* Toolchains for supported + architectures satisfy most use cases. However, if your hardware + supports features that are not part of a standard toolchain, you can + easily customize that toolchain through specification of + platform-specific tuning parameters. And, should you need to use a + third-party toolchain, mechanisms built into the Yocto Project allow + for that. + +- *Mechanism Rules Over Policy:* Focusing on mechanism rather than + policy ensures that you are free to set policies based on the needs + of your design instead of adopting decisions enforced by some system + software provider. 
+ +- *Uses a Layer Model:* The Yocto Project `layer + infrastructure <#the-yocto-project-layer-model>`__ groups related + functionality into separate bundles. You can incrementally add these + grouped functionalities to your project as needed. Using layers to + isolate and group functionality reduces project complexity and + redundancy, allows you to easily extend the system, make + customizations, and keep functionality organized. + +- *Supports Partial Builds:* You can build and rebuild individual + packages as needed. Yocto Project accomplishes this through its + `shared-state cache <#shared-state-cache>`__ (sstate) scheme. Being + able to build and debug components individually eases project + development. + +- *Releases According to a Strict Schedule:* Major releases occur on a + :doc:`six-month cycle <../ref-manual/ref-release-process>` + predictably in October and April. The most recent two releases + support point releases to address common vulnerabilities and + exposures. This predictability is crucial for projects based on the + Yocto Project and allows development teams to plan activities. + +- *Rich Ecosystem of Individuals and Organizations:* For open source + projects, the value of community is very important. Support forums, + expertise, and active developers who continue to push the Yocto + Project forward are readily available. + +- *Binary Reproducibility:* The Yocto Project allows you to be very + specific about dependencies and achieves very high percentages of + binary reproducibility (e.g. 99.8% for ``core-image-minimal``). When + distributions are not specific about which packages are pulled in and + in what order to support dependencies, other build systems can + arbitrarily include packages. + +- *License Manifest:* The Yocto Project provides a :ref:`license + manifest ` + for review by people who need to track the use of open source + licenses (e.g. legal teams). + +.. _gs-challenges: + +Challenges +---------- + +The following list presents challenges you might encounter when +developing using the Yocto Project: + +- *Steep Learning Curve:* The Yocto Project has a steep learning curve + and has many different ways to accomplish similar tasks. It can be + difficult to choose how to proceed when varying methods exist by + which to accomplish a given task. + +- *Understanding What Changes You Need to Make For Your Design Requires + Some Research:* Beyond the simple tutorial stage, understanding what + changes need to be made for your particular design can require a + significant amount of research and investigation. For information + that helps you transition from trying out the Yocto Project to using + it for your project, see the ":ref:`what-i-wish-id-known:what i wish i'd known about yocto project`" and + ":ref:`transitioning-to-a-custom-environment:transitioning to a custom environment for systems development`" + documents on the Yocto Project website. + +- *Project Workflow Could Be Confusing:* The `Yocto Project + workflow <#overview-development-environment>`__ could be confusing if + you are used to traditional desktop and server software development. + In a desktop development environment, mechanisms exist to easily pull + and install new packages, which are typically pre-compiled binaries + from servers accessible over the Internet. Using the Yocto Project, + you must modify your configuration and rebuild to add additional + packages. 
+ +- *Working in a Cross-Build Environment Can Feel Unfamiliar:* When + developing code to run on a target, compilation, execution, and + testing done on the actual target can be faster than running a + BitBake build on a development host and then deploying binaries to + the target for test. While the Yocto Project does support development + tools on the target, the additional step of integrating your changes + back into the Yocto Project build environment would be required. + Yocto Project supports an intermediate approach that involves making + changes on the development system within the BitBake environment and + then deploying only the updated packages to the target. + + The Yocto Project :term:`OpenEmbedded Build System` + produces packages + in standard formats (i.e. RPM, DEB, IPK, and TAR). You can deploy + these packages into the running system on the target by using + utilities on the target such as ``rpm`` or ``ipk``. + +- *Initial Build Times Can be Significant:* Long initial build times + are unfortunately unavoidable due to the large number of packages + initially built from scratch for a fully functioning Linux system. + Once that initial build is completed, however, the shared-state + (sstate) cache mechanism Yocto Project uses keeps the system from + rebuilding packages that have not been "touched" since the last + build. The sstate mechanism significantly reduces times for + successive builds. + +The Yocto Project Layer Model +============================= + +The Yocto Project's "Layer Model" is a development model for embedded +and IoT Linux creation that distinguishes the Yocto Project from other +simple build systems. The Layer Model simultaneously supports +collaboration and customization. Layers are repositories that contain +related sets of instructions that tell the :term:`OpenEmbedded Build System` +what to do. You can +collaborate, share, and reuse layers. + +Layers can contain changes to previous instructions or settings at any +time. This powerful override capability is what allows you to customize +previously supplied collaborative or community layers to suit your +product requirements. + +You use different layers to logically separate information in your +build. As an example, you could have BSP, GUI, distro configuration, +middleware, or application layers. Putting your entire build into one +layer limits and complicates future customization and reuse. Isolating +information into layers, on the other hand, helps simplify future +customizations and reuse. You might find it tempting to keep everything +in one layer when working on a single project. However, the more modular +your Metadata, the easier it is to cope with future changes. + +.. note:: + + - Use Board Support Package (BSP) layers from silicon vendors when + possible. + + - Familiarize yourself with the `Yocto Project curated layer + index `__ + or the `OpenEmbedded layer + index `__. + The latter contains more layers but they are less universally + validated. + + - Layers support the inclusion of technologies, hardware components, + and software components. The :ref:`Yocto Project + Compatible ` + designation provides a minimum level of standardization that + contributes to a strong ecosystem. "YP Compatible" is applied to + appropriate products and software components such as BSPs, other + OE-compatible layers, and related open-source projects, allowing + the producer to use Yocto Project badges and branding assets. 
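+
+To make the layer model a little more concrete, here is one possible
+command sequence for inspecting and extending a build configuration. It
+assumes you have already initialized a build environment with
+``oe-init-build-env`` and that a hypothetical layer named
+``meta-mylayer`` has been cloned next to your Build Directory;
+substitute whatever layers you actually use:
+::
+
+   $ bitbake-layers show-layers
+   $ bitbake-layers add-layer ../meta-mylayer
+
+The ``add-layer`` sub-command simply records the new layer in your
+``conf/bblayers.conf`` file, so the build system folds that layer's
+recipes and configuration into subsequent builds.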
+ +To illustrate how layers are used to keep things modular, consider +machine customizations. These types of customizations typically reside +in a special layer, rather than a general layer, called a BSP Layer. +Furthermore, the machine customizations should be isolated from recipes +and Metadata that support a new GUI environment, for example. This +situation gives you a couple of layers: one for the machine +configurations, and one for the GUI environment. It is important to +understand, however, that the BSP layer can still make machine-specific +additions to recipes within the GUI environment layer without polluting +the GUI layer itself with those machine-specific changes. You can +accomplish this through a recipe that is a BitBake append +(``.bbappend``) file, which is described later in this section. + +.. note:: + + For general information on BSP layer structure, see the + :doc:`../bsp-guide/bsp-guide` + . + +The :term:`Source Directory` +contains both general layers and BSP layers right out of the box. You +can easily identify layers that ship with a Yocto Project release in the +Source Directory by their names. Layers typically have names that begin +with the string ``meta-``. + +.. note:: + + It is not a requirement that a layer name begin with the prefix + meta- + , but it is a commonly accepted standard in the Yocto Project + community. + +For example, if you were to examine the `tree +view `__ of the +``poky`` repository, you will see several layers: ``meta``, +``meta-skeleton``, ``meta-selftest``, ``meta-poky``, and +``meta-yocto-bsp``. Each of these repositories represents a distinct +layer. + +For procedures on how to create layers, see the +":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section in the Yocto Project Development Tasks Manual. + +Components and Tools +==================== + +The Yocto Project employs a collection of components and tools used by +the project itself, by project developers, and by those using the Yocto +Project. These components and tools are open source projects and +metadata that are separate from the reference distribution +(:term:`Poky`) and the +:term:`OpenEmbedded Build System`. Most of the +components and tools are downloaded separately. + +This section provides brief overviews of the components and tools +associated with the Yocto Project. + +.. _gs-development-tools: + +Development Tools +----------------- + +The following list consists of tools that help you develop images and +applications using the Yocto Project: + +- *CROPS:* `CROPS `__ is an + open source, cross-platform development framework that leverages + `Docker Containers `__. CROPS provides an + easily managed, extensible environment that allows you to build + binaries for a variety of architectures on Windows, Linux and Mac OS + X hosts. + +- *devtool:* This command-line tool is available as part of the + extensible SDK (eSDK) and is its cornerstone. You can use ``devtool`` + to help build, test, and package software within the eSDK. You can + use the tool to optionally integrate what you build into an image + built by the OpenEmbedded build system. + + The ``devtool`` command employs a number of sub-commands that allow + you to add, modify, and upgrade recipes. As with the OpenEmbedded + build system, "recipes" represent software packages within + ``devtool``. When you use ``devtool add``, a recipe is automatically + created. 
When you use ``devtool modify``, the specified existing + recipe is used in order to determine where to get the source code and + how to patch it. In both cases, an environment is set up so that when + you build the recipe a source tree that is under your control is used + in order to allow you to make changes to the source as desired. By + default, both new recipes and the source go into a "workspace" + directory under the eSDK. The ``devtool upgrade`` command updates an + existing recipe so that you can build it for an updated set of source + files. + + You can read about the ``devtool`` workflow in the Yocto Project + Application Development and Extensible Software Development Kit + (eSDK) Manual in the + ":ref:`sdk-manual/sdk-extensible:using \`\`devtool\`\` in your sdk workflow`" + section. + +- *Extensible Software Development Kit (eSDK):* The eSDK provides a + cross-development toolchain and libraries tailored to the contents of + a specific image. The eSDK makes it easy to add new applications and + libraries to an image, modify the source for an existing component, + test changes on the target hardware, and integrate into the rest of + the OpenEmbedded build system. The eSDK gives you a toolchain + experience supplemented with the powerful set of ``devtool`` commands + tailored for the Yocto Project environment. + + For information on the eSDK, see the :doc:`../sdk-manual/sdk-manual` Manual. + +- *Toaster:* Toaster is a web interface to the Yocto Project + OpenEmbedded build system. Toaster allows you to configure, run, and + view information about builds. For information on Toaster, see the + :doc:`../toaster-manual/toaster-manual`. + +.. _gs-production-tools: + +Production Tools +---------------- + +The following list consists of tools that help production related +activities using the Yocto Project: + +- *Auto Upgrade Helper:* This utility when used in conjunction with the + :term:`OpenEmbedded Build System` + (BitBake and + OE-Core) automatically generates upgrades for recipes that are based + on new versions of the recipes published upstream. See + :ref:`dev-manual/dev-manual-common-tasks:using the auto upgrade helper (auh)` + for how to set it up. + +- *Recipe Reporting System:* The Recipe Reporting System tracks recipe + versions available for Yocto Project. The main purpose of the system + is to help you manage the recipes you maintain and to offer a dynamic + overview of the project. The Recipe Reporting System is built on top + of the `OpenEmbedded Layer + Index `__, which + is a website that indexes OpenEmbedded-Core layers. + +- *Patchwork:* `Patchwork `__ + is a fork of a project originally started by + `OzLabs `__. The project is a web-based tracking + system designed to streamline the process of bringing contributions + into a project. The Yocto Project uses Patchwork as an organizational + tool to handle patches, which number in the thousands for every + release. + +- *AutoBuilder:* AutoBuilder is a project that automates build tests + and quality assurance (QA). By using the public AutoBuilder, anyone + can determine the status of the current "master" branch of Poky. + + .. note:: + + AutoBuilder is based on buildbot. + + A goal of the Yocto Project is to lead the open source industry with + a project that automates testing and QA procedures. 
In doing so, the + project encourages a development community that publishes QA and test + plans, publicly demonstrates QA and test plans, and encourages + development of tools that automate and test and QA procedures for the + benefit of the development community. + + You can learn more about the AutoBuilder used by the Yocto Project + Autobuilder :doc:`here <../test-manual/test-manual-understand-autobuilder>`. + +- *Cross-Prelink:* Prelinking is the process of pre-computing the load + addresses and link tables generated by the dynamic linker as compared + to doing this at runtime. Doing this ahead of time results in + performance improvements when the application is launched and reduced + memory usage for libraries shared by many applications. + + Historically, cross-prelink is a variant of prelink, which was + conceived by `Jakub + Jelínek `__ a number of + years ago. Both prelink and cross-prelink are maintained in the same + repository albeit on separate branches. By providing an emulated + runtime dynamic linker (i.e. ``glibc``-derived ``ld.so`` emulation), + the cross-prelink project extends the prelink software's ability to + prelink a sysroot environment. Additionally, the cross-prelink + software enables the ability to work in sysroot style environments. + + The dynamic linker determines standard load address calculations + based on a variety of factors such as mapping addresses, library + usage, and library function conflicts. The prelink tool uses this + information, from the dynamic linker, to determine unique load + addresses for executable and linkable format (ELF) binaries that are + shared libraries and dynamically linked. The prelink tool modifies + these ELF binaries with the pre-computed information. The result is + faster loading and often lower memory consumption because more of the + library code can be re-used from shared Copy-On-Write (COW) pages. + + The original upstream prelink project only supports running prelink + on the end target device due to the reliance on the target device's + dynamic linker. This restriction causes issues when developing a + cross-compiled system. The cross-prelink adds a synthesized dynamic + loader that runs on the host, thus permitting cross-prelinking + without ever having to run on a read-write target filesystem. + +- *Pseudo:* Pseudo is the Yocto Project implementation of + `fakeroot `__, which is used to run + commands in an environment that seemingly has root privileges. + + During a build, it can be necessary to perform operations that + require system administrator privileges. For example, file ownership + or permissions might need definition. Pseudo is a tool that you can + either use directly or through the environment variable + ``LD_PRELOAD``. Either method allows these operations to succeed as + if system administrator privileges exist even when they do not. + + You can read more about Pseudo in the "`Fakeroot and + Pseudo <#fakeroot-and-pseudo>`__" section. + +.. _gs-openembedded-build-system: + +Open-Embedded Build System Components +------------------------------------- + +The following list consists of components associated with the +:term:`OpenEmbedded Build System`: + +- *BitBake:* BitBake is a core component of the Yocto Project and is + used by the OpenEmbedded build system to build images. While BitBake + is key to the build system, BitBake is maintained separately from the + Yocto Project. 
+ + BitBake is a generic task execution engine that allows shell and + Python tasks to be run efficiently and in parallel while working + within complex inter-task dependency constraints. In short, BitBake + is a build engine that works through recipes written in a specific + format in order to perform sets of tasks. + + You can learn more about BitBake in the :doc:`BitBake User + Manual `. + +- *OpenEmbedded-Core:* OpenEmbedded-Core (OE-Core) is a common layer of + metadata (i.e. recipes, classes, and associated files) used by + OpenEmbedded-derived systems, which includes the Yocto Project. The + Yocto Project and the OpenEmbedded Project both maintain the + OpenEmbedded-Core. You can find the OE-Core metadata in the Yocto + Project :yocto_git:`Source Repositories `. + + Historically, the Yocto Project integrated the OE-Core metadata + throughout the Yocto Project source repository reference system + (Poky). After Yocto Project Version 1.0, the Yocto Project and + OpenEmbedded agreed to work together and share a common core set of + metadata (OE-Core), which contained much of the functionality + previously found in Poky. This collaboration achieved a long-standing + OpenEmbedded objective for having a more tightly controlled and + quality-assured core. The results also fit well with the Yocto + Project objective of achieving a smaller number of fully featured + tools as compared to many different ones. + + Sharing a core set of metadata results in Poky as an integration + layer on top of OE-Core. You can see that in this + `figure <#yp-key-dev-elements>`__. The Yocto Project combines various + components such as BitBake, OE-Core, script "glue", and documentation + for its build system. + +.. _gs-reference-distribution-poky: + +Reference Distribution (Poky) +----------------------------- + +Poky is the Yocto Project reference distribution. It contains the +:term:`OpenEmbedded Build System` +(BitBake and OE-Core) as well as a set of metadata to get you started +building your own distribution. See the +`figure <#what-is-the-yocto-project>`__ in "What is the Yocto Project?" +section for an illustration that shows Poky and its relationship with +other parts of the Yocto Project. + +To use the Yocto Project tools and components, you can download +(``clone``) Poky and use it to bootstrap your own distribution. + +.. note:: + + Poky does not contain binary files. It is a working example of how to + build your own custom Linux distribution from source. + +You can read more about Poky in the "`Reference Embedded Distribution +(Poky) <#reference-embedded-distribution>`__" section. + +.. _gs-packages-for-finished-targets: + +Packages for Finished Targets +----------------------------- + +The following lists components associated with packages for finished +targets: + +- *Matchbox:* Matchbox is an Open Source, base environment for the X + Window System running on non-desktop, embedded platforms such as + handhelds, set-top boxes, kiosks, and anything else for which screen + space, input mechanisms, or system resources are limited. + + Matchbox consists of a number of interchangeable and optional + applications that you can tailor to a specific, non-desktop platform + to enhance usability in constrained environments. + + You can find the Matchbox source in the Yocto Project + :yocto_git:`Source Repositories <>`. + +- *Opkg:* Open PacKaGe management (opkg) is a lightweight package + management system based on the itsy package (ipkg) management system. 
+
+ Opkg is written in C and resembles Advanced Package Tool (APT) and
+ Debian Package (dpkg) in operation.
+
+ Opkg is intended for use on embedded Linux devices and is used in
+ this capacity in the
+ `OpenEmbedded `__ and
+ `OpenWrt `__ projects, as well as the Yocto
+ Project.
+
+ .. note::
+
+ As best it can, opkg maintains backwards compatibility with ipkg
+ and conforms to a subset of Debian's policy manual regarding
+ control files.
+
+ You can find the opkg source in the Yocto Project
+ :yocto_git:`Source Repositories <>`.
+
+.. _gs-archived-components:
+
+Archived Components
+-------------------
+
+The Build Appliance is a virtual machine image that enables you to build
+and boot a custom embedded Linux image with the Yocto Project using a
+non-Linux development system.
+
+Historically, the Build Appliance was the second of three methods by
+which you could use the Yocto Project on a system that was not native to
+Linux.
+
+1. *Hob:* Hob, which is now deprecated and is no longer available since
+ the 2.1 release of the Yocto Project, provided a rudimentary,
+ GUI-based interface to the Yocto Project. Toaster has fully replaced
+ Hob.
+
+2. *Build Appliance:* Post Hob, the Build Appliance became available. It
+ was never recommended that you use the Build Appliance as a
+ day-to-day production development environment with the Yocto Project.
+ Build Appliance was useful as a way to try out development in the
+ Yocto Project environment.
+
+3. *CROPS:* The final and best solution available now for developing
+ using the Yocto Project on a system not native to Linux is with
+ `CROPS <#gs-crops-overview>`__.
+
+.. _gs-development-methods:
+
+Development Methods
+===================
+
+The Yocto Project development environment usually involves a
+:term:`Build Host` and target
+hardware. You use the Build Host to build images and develop
+applications, while you use the target hardware to test deployed
+software.
+
+This section provides an introduction to the choices or development
+methods you have when setting up your Build Host. Depending on your
+particular workflow preference and the type of operating system your
+Build Host runs, several choices exist that allow you to use the Yocto
+Project.
+
+.. note::
+
+ For additional detail about the Yocto Project development
+ environment, see the ":doc:`overview-manual-development-environment`"
+ chapter.
+
+- *Native Linux Host:* By far the best option for a Build Host. A
+ system running Linux as its native operating system allows you to
+ develop software by directly using the
+ :term:`BitBake` tool. You can
+ accomplish all aspects of development from a familiar shell of a
+ supported Linux distribution.
+
+ For information on how to set up a Build Host on a system running
+ Linux as its native operating system, see the
+ ":ref:`dev-manual/dev-manual-start:setting up a native linux host`"
+ section in the Yocto Project Development Tasks Manual.
+
+- *CROss PlatformS (CROPS):* Typically, you use
+ `CROPS `__, which leverages
+ `Docker Containers `__, to set up a Build
+ Host that is not running Linux (e.g. Microsoft Windows or macOS).
+
+ .. note::
+
+ You can, however, use CROPS on a Linux-based system.
+
+ CROPS is an open source, cross-platform development framework that
+ provides an easily managed, extensible environment for building
+ binaries targeted for a variety of architectures on Windows, macOS,
+ or Linux hosts.
Once the Build Host is set up using CROPS, you can
+ prepare a shell environment to mimic that of a shell being used on a
+ system natively running Linux.
+
+ For information on how to set up a Build Host with CROPS, see the
+ ":ref:`dev-manual/dev-manual-start:setting up to use cross platforms (crops)`"
+ section in the Yocto Project Development Tasks Manual.
+
+- *Windows Subsystem For Linux (WSLv2):* You may use Windows Subsystem
+ For Linux v2 to set up a build host using Windows 10.
+
+ .. note::
+
+ The Yocto Project is not compatible with WSLv1. It is compatible
+ with, but not officially supported or validated on, WSLv2. If you
+ still decide to use WSL, please upgrade to WSLv2.
+
+ The Windows Subsystem For Linux allows Windows 10 to run a real Linux
+ kernel inside of a lightweight utility virtual machine (VM) using
+ virtualization technology.
+
+ For information on how to set up a Build Host with WSLv2, see the
+ ":ref:`dev-manual/dev-manual-start:setting up to use windows subsystem for linux (wslv2)`"
+ section in the Yocto Project Development Tasks Manual.
+
+- *Toaster:* Regardless of what your Build Host is running, you can use
+ Toaster to develop software using the Yocto Project. Toaster is a web
+ interface to the Yocto Project's :term:`OpenEmbedded Build System`.
+ The interface
+ enables you to configure and run your builds. Information about
+ builds is collected and stored in a database. You can use Toaster to
+ configure and start builds on multiple remote build servers.
+
+ For information about and how to use Toaster, see the
+ :doc:`../toaster-manual/toaster-manual`.
+
+.. _reference-embedded-distribution:
+
+Reference Embedded Distribution (Poky)
+======================================
+
+"Poky", which is pronounced *Pock*-ee, is the name of the Yocto
+Project's reference distribution or Reference OS Kit. Poky contains the
+:term:`OpenEmbedded Build System`
+(:term:`BitBake` and
+:term:`OpenEmbedded-Core (OE-Core)`) as well as a set
+of :term:`Metadata` to get you started
+building your own distro. In other words, Poky is a base specification
+of the functionality needed for a typical embedded system as well as the
+components from the Yocto Project that allow you to build a distribution
+into a usable binary image.
+
+Poky is a combined repository of BitBake, OpenEmbedded-Core (which is
+found in ``meta``), ``meta-poky``, ``meta-yocto-bsp``, and documentation
+provided all together and known to work well together. You can view
+these items that make up the Poky repository in the
+:yocto_git:`Source Repositories `.
+
+.. note::
+
+ If you are interested in all the contents of the
+ poky
+ Git repository, see the ":ref:`ref-manual/ref-structure:top-level core components`"
+ section in the Yocto Project Reference Manual.
+
+The following figure illustrates what generally comprises Poky:
+
+.. image:: figures/poky-reference-distribution.png
+ :align: center
+
+- BitBake is a task executor and scheduler that is the heart of the
+ OpenEmbedded build system.
+
+- ``meta-poky``, which is Poky-specific metadata.
+
+- ``meta-yocto-bsp``, which are Yocto Project-specific Board Support
+ Packages (BSPs).
+
+- OpenEmbedded-Core (OE-Core) metadata, which includes shared
+ configurations, global variable definitions, shared classes,
+ packaging, and recipes. Classes define the encapsulation and
+ inheritance of build logic. Recipes are the logical units of software
+ and images to be built.
+ +- Documentation, which contains the Yocto Project source files used to + make the set of user manuals. + +.. note:: + + While Poky is a "complete" distribution specification and is tested + and put through QA, you cannot use it as a product "out of the box" + in its current form. + +To use the Yocto Project tools, you can use Git to clone (download) the +Poky repository then use your local copy of the reference distribution +to bootstrap your own distribution. + +.. note:: + + Poky does not contain binary files. It is a working example of how to + build your own custom Linux distribution from source. + +Poky has a regular, well established, six-month release cycle under its +own version. Major releases occur at the same time major releases (point +releases) occur for the Yocto Project, which are typically in the Spring +and Fall. For more information on the Yocto Project release schedule and +cadence, see the ":doc:`../ref-manual/ref-release-process`" chapter in the +Yocto Project Reference Manual. + +Much has been said about Poky being a "default configuration". A default +configuration provides a starting image footprint. You can use Poky out +of the box to create an image ranging from a shell-accessible minimal +image all the way up to a Linux Standard Base-compliant image that uses +a GNOME Mobile and Embedded (GMAE) based reference user interface called +Sato. + +One of the most powerful properties of Poky is that every aspect of a +build is controlled by the metadata. You can use metadata to augment +these base image types by adding metadata +`layers <#the-yocto-project-layer-model>`__ that extend functionality. +These layers can provide, for example, an additional software stack for +an image type, add a board support package (BSP) for additional +hardware, or even create a new image type. + +Metadata is loosely grouped into configuration files or package recipes. +A recipe is a collection of non-executable metadata used by BitBake to +set variables or define additional build-time tasks. A recipe contains +fields such as the recipe description, the recipe version, the license +of the package and the upstream source repository. A recipe might also +indicate that the build process uses autotools, make, distutils or any +other build process, in which case the basic functionality can be +defined by the classes it inherits from the OE-Core layer's class +definitions in ``./meta/classes``. Within a recipe you can also define +additional tasks as well as task prerequisites. Recipe syntax through +BitBake also supports both ``_prepend`` and ``_append`` operators as a +method of extending task functionality. These operators inject code into +the beginning or end of a task. For information on these BitBake +operators, see the +":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:appending and prepending (override style syntax)`" +section in the BitBake User's Manual. + +.. _openembedded-build-system-workflow: + +The OpenEmbedded Build System Workflow +====================================== + +The :term:`OpenEmbedded Build System` uses a "workflow" to +accomplish image and SDK generation. The following figure overviews that +workflow: + +.. image:: figures/YP-flow-diagram.png + :align: center + +Following is a brief summary of the "workflow": + +1. Developers specify architecture, policies, patches and configuration + details. + +2. The build system fetches and downloads the source code from the + specified location. 
The build system supports standard methods such
+ as tarballs or source code repository systems such as Git.
+
+3. Once source code is downloaded, the build system extracts the sources
+ into a local work area where patches are applied and common steps for
+ configuring and compiling the software are run.
+
+4. The build system then installs the software into a temporary staging
+ area where the binary package format you select (DEB, RPM, or IPK) is
+ used to roll up the software.
+
+5. Different QA and sanity checks run throughout the entire build process.
+
+6. After the binaries are created, the build system generates a binary
+ package feed that is used to create the final root file system image.
+
+7. The build system generates the file system image and a customized
+ Extensible SDK (eSDK) for application development in parallel.
+
+For a very detailed look at this workflow, see the "`OpenEmbedded Build
+System Concepts <#openembedded-build-system-build-concepts>`__" section.
+
+Some Basic Terms
+================
+
+It helps to understand some fundamental terms when learning the
+Yocto Project. Although a list of terms exists in the ":doc:`Yocto Project
+Terms <../ref-manual/ref-terms>`" section of the Yocto Project
+Reference Manual, this section provides the definitions of some terms
+helpful for getting started:
+
+- *Configuration Files:* Files that hold global definitions of
+ variables, user-defined variables, and hardware configuration
+ information. These files tell the :term:`OpenEmbedded Build System`
+ what to build and
+ what to put into the image to support a particular platform.
+
+- *Extensible Software Development Kit (eSDK):* A custom SDK for
+ application developers. This eSDK allows developers to incorporate
+ their library and programming changes back into the image to make
+ their code available to other application developers. For information
+ on the eSDK, see the :doc:`../sdk-manual/sdk-manual` manual.
+
+- *Layer:* A collection of related recipes. Layers allow you to
+ consolidate related metadata to customize your build. Layers also
+ isolate information used when building for multiple architectures.
+ Layers are hierarchical in their ability to override previous
+ specifications. You can include any number of available layers from
+ the Yocto Project and customize the build by adding your layers after
+ them. You can search the Layer Index for layers used within the Yocto
+ Project.
+
+ For more detailed information on layers, see the
+ ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
+ section in the Yocto Project Development Tasks Manual. For a
+ discussion specifically on BSP Layers, see the
+ ":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto
+ Project Board Support Packages (BSP) Developer's Guide.
+
+- *Metadata:* A key element of the Yocto Project is the Metadata that
+ is used to construct a Linux distribution and is contained in the
+ files that the OpenEmbedded build system parses when building an
+ image. In general, Metadata includes recipes, configuration files,
+ and other information that refers to the build instructions
+ themselves, as well as the data used to control what things get built
+ and the effects of the build. Metadata also includes commands and
+ data used to indicate what versions of software are used, from where
+ they are obtained, and changes or additions to the software itself
+ (patches or auxiliary files) that are used to fix bugs or customize
+ the software for use in a particular situation.
OpenEmbedded-Core is + an important set of validated metadata. + +- *OpenEmbedded Build System:* The terms "BitBake" and "build system" + are sometimes used for the OpenEmbedded Build System. + + BitBake is a task scheduler and execution engine that parses + instructions (i.e. recipes) and configuration data. After a parsing + phase, BitBake creates a dependency tree to order the compilation, + schedules the compilation of the included code, and finally executes + the building of the specified custom Linux image (distribution). + BitBake is similar to the ``make`` tool. + + During a build process, the build system tracks dependencies and + performs a native or cross-compilation of the package. As a first + step in a cross-build setup, the framework attempts to create a + cross-compiler toolchain (i.e. Extensible SDK) suited for the target + platform. + +- *OpenEmbedded-Core (OE-Core):* OE-Core is metadata comprised of + foundation recipes, classes, and associated files that are meant to + be common among many different OpenEmbedded-derived systems, + including the Yocto Project. OE-Core is a curated subset of an + original repository developed by the OpenEmbedded community that has + been pared down into a smaller, core set of continuously validated + recipes. The result is a tightly controlled and quality-assured core + set of recipes. + + You can see the Metadata in the ``meta`` directory of the Yocto + Project `Source + Repositories `__. + +- *Packages:* In the context of the Yocto Project, this term refers to + a recipe's packaged output produced by BitBake (i.e. a "baked + recipe"). A package is generally the compiled binaries produced from + the recipe's sources. You "bake" something by running it through + BitBake. + + It is worth noting that the term "package" can, in general, have + subtle meanings. For example, the packages referred to in the + ":ref:`ref-manual/ref-system-requirements:required packages for the build host`" + section in the Yocto Project Reference Manual are compiled binaries + that, when installed, add functionality to your Linux distribution. + + Another point worth noting is that historically within the Yocto + Project, recipes were referred to as packages - thus, the existence + of several BitBake variables that are seemingly mis-named, (e.g. + :term:`PR`, + :term:`PV`, and + :term:`PE`). + +- *Poky:* Poky is a reference embedded distribution and a reference + test configuration. Poky provides the following: + + - A base-level functional distro used to illustrate how to customize + a distribution. + + - A means by which to test the Yocto Project components (i.e. Poky + is used to validate the Yocto Project). + + - A vehicle through which you can download the Yocto Project. + + Poky is not a product level distro. Rather, it is a good starting + point for customization. + + .. note:: + + Poky is an integration layer on top of OE-Core. + +- *Recipe:* The most common form of metadata. A recipe contains a list + of settings and tasks (i.e. instructions) for building packages that + are then used to build the binary image. A recipe describes where you + get source code and which patches to apply. Recipes describe + dependencies for libraries or for other recipes as well as + configuration and compilation options. Related recipes are + consolidated into a layer. 
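+
+One quick way to get a feel for several of these terms is to browse a
+local Poky checkout from a shell. The following is only a sketch and
+assumes a standard Poky clone in the current directory: ::
+
+   $ ls meta/classes            # shared class definitions (*.bbclass)
+   $ ls meta/conf               # global configuration files
+   $ find meta -name "*.bb"     # individual recipes in the OE-Core layer
+   $ ls -d meta*                # the metadata layers shipped with Poky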
diff --git a/poky/documentation/overview-manual/overview-manual-yp-intro.xml b/poky/documentation/overview-manual/overview-manual-yp-intro.xml index 2097ed36e..a2a1f494b 100644 --- a/poky/documentation/overview-manual/overview-manual-yp-intro.xml +++ b/poky/documentation/overview-manual/overview-manual-yp-intro.xml @@ -459,7 +459,7 @@ The devtool command employs a number of sub-commands that allow you to add, modify, and upgrade recipes. - As with the OpenEmbedded build system, “recipes” + As with the OpenEmbedded build system, "recipes" represent software packages within devtool. When you use devtool add, a recipe @@ -472,7 +472,7 @@ control is used in order to allow you to make changes to the source as desired. By default, both new recipes and the source go into - a “workspace” directory under the eSDK. + a "workspace" directory under the eSDK. The devtool upgrade command updates an existing recipe so that you can build it for an updated set of source files. @@ -598,7 +598,7 @@ By providing an emulated runtime dynamic linker (i.e. glibc-derived ld.so emulation), the - cross-prelink project extends the prelink software’s + cross-prelink project extends the prelink software's ability to prelink a sysroot environment. Additionally, the cross-prelink software enables the ability to work in sysroot style environments. @@ -620,7 +620,7 @@ The original upstream prelink project only supports running prelink on the end target device - due to the reliance on the target device’s dynamic + due to the reliance on the target device's dynamic linker. This restriction causes issues when developing a cross-compiled system. @@ -713,7 +713,7 @@ You can see that in this figure. The Yocto Project combines various components such as - BitBake, OE-Core, script “glue”, and documentation + BitBake, OE-Core, script "glue", and documentation for its build system. @@ -791,7 +791,7 @@ As best it can, opkg maintains backwards compatibility with ipkg and conforms to a subset - of Debian’s policy manual regarding control files. + of Debian's policy manual regarding control files. diff --git a/poky/documentation/overview-manual/overview-manual.rst b/poky/documentation/overview-manual/overview-manual.rst new file mode 100644 index 000000000..80ce9aae7 --- /dev/null +++ b/poky/documentation/overview-manual/overview-manual.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +========================================== +Yocto Project Overview and Concepts Manual +========================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + overview-manual-intro + overview-manual-yp-intro + overview-manual-development-environment + overview-manual-concepts + history + +.. 
include:: /boilerplate.rst diff --git a/poky/documentation/poky.yaml b/poky/documentation/poky.yaml new file mode 100644 index 000000000..7d544b41a --- /dev/null +++ b/poky/documentation/poky.yaml @@ -0,0 +1,89 @@ +DISTRO : "3.1" +DISTRO_COMPRESSED : "31" +DISTRO_NAME_NO_CAP : "dunfell" +DISTRO_NAME : "Dunfell" +DISTRO_NAME_NO_CAP_MINUS_ONE : "zeus" +DISTRO_NAME_MINUS_ONE : "Zeus" +YOCTO_DOC_VERSION : "3.1" +YOCTO_DOC_VERSION_MINUS_ONE : "3.0.2" +DISTRO_REL_TAG : "yocto-3.1" +METAINTELVERSION : "12.0" +REL_MONTH_YEAR : "April 2020" +META_INTEL_REL_TAG : "&METAINTELVERSION;-&DISTRO_NAME_NO_CAP;-&YOCTO_DOC_VERSION;" +POKYVERSION : "23.0.0" +POKYVERSION_COMPRESSED : "2300" +YOCTO_POKY : "poky-&DISTRO_NAME_NO_CAP;-&POKYVERSION;" +COPYRIGHT_YEAR : "2010-2020" +ORGNAME : "The Yocto Project" +ORGEMAIL : "docs@lists.yoctoproject.org" +YOCTO_DL_URL : "http://downloads.yoctoproject.org" +YOCTO_HOME_URL : "http://www.yoctoproject.org" +YOCTO_LISTS_URL : "http://lists.yoctoproject.org" +YOCTO_BUGZILLA_URL : "http://bugzilla.yoctoproject.org" +YOCTO_WIKI_URL : "https://wiki.yoctoproject.org" +YOCTO_AB_URL : "http://autobuilder.yoctoproject.org" +YOCTO_GIT_URL : "http://git.yoctoproject.org" +YOCTO_ADTREPO_URL : "http://adtrepo.yoctoproject.org" +OE_HOME_URL : "http://www.openembedded.org" +OE_LISTS_URL : "http://lists.openembedded.org/mailman" +OE_DOCS_URL : "http://docs.openembedded.org" +OH_HOME_URL : "http://o-hand.com" +BITBAKE_HOME_URL : "http://developer.berlios.de/projects/bitbake/" +YOCTO_DOCS_URL : "&YOCTO_HOME_URL;/docs" +YOCTO_SOURCES_URL : "&YOCTO_HOME_URL;/sources/" +YOCTO_AB_PORT_URL : "https://autobuilder.yocto.io/" +YOCTO_AB_NIGHTLY_URL : "&YOCTO_AB_PORT_URL;/pub/nightly/" +YOCTO_POKY_URL : "&YOCTO_DL_URL;/releases/poky/" +YOCTO_RELEASE_DL_URL : "&YOCTO_DL_URL;/releases/yocto/yocto-&DISTRO;" +YOCTO_TOOLCHAIN_DL_URL : "&YOCTO_RELEASE_DL_URL;/toolchain/" +YOCTO_ADTINSTALLER_DL_URL : "&YOCTO_RELEASE_DL_URL;/adt-installer" +YOCTO_POKY_DL_URL : "&YOCTO_RELEASE_DL_URL;/&YOCTO_POKY;.tar.bz2" +YOCTO_MACHINES_DL_URL : "&YOCTO_RELEASE_DL_URL;/machines" +YOCTO_QEMU_DL_URL : "&YOCTO_MACHINES_DL_URL;/qemu" +YOCTO_PYTHON-i686_DL_URL : "&YOCTO_DL_URL;/releases/miscsupport/python-nativesdk-standalone-i686.tar.bz2" +YOCTO_PYTHON-x86_64_DL_URL : "&YOCTO_DL_URL;/releases/miscsupport/python-nativesdk-standalone-x86_64.tar.bz2" +YOCTO_DOCS_QS_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/yocto-project-qs/yocto-project-qs.html" +YOCTO_DOCS_ADT_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/adt-manual/adt-manual.html" +YOCTO_DOCS_REF_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/ref-manual/ref-manual.html" +YOCTO_DOCS_BSP_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/bsp-guide/bsp-guide.html" +YOCTO_DOCS_DEV_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/dev-manual/dev-manual.html" +YOCTO_DOCS_KERNEL_DEV_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/kernel-dev/kernel-dev.html" +YOCTO_DOCS_PROF_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/profile-manual/profile-manual.html" +YOCTO_DOCS_MM_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/mega-manual/mega-manual.html" +YOCTO_DOCS_BB_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/bitbake-user-manual/bitbake-user-manual.html" +YOCTO_DOCS_TOAST_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/toaster-manual/toaster-manual.html" +YOCTO_DOCS_SDK_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/sdk-manual/sdk-manual.html" +YOCTO_DOCS_OM_URL : "&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/overview-manual/overview-manual.html" +YOCTO_DOCS_BRIEF_URL : 
"&YOCTO_DOCS_URL;/&YOCTO_DOC_VERSION;/brief-yoctoprojectqs/brief-yoctoprojectqs.html" +YOCTO_ADTPATH_DIR : "/opt/poky/&DISTRO;" +YOCTO_POKY_TARBALL : "&YOCTO_POKY;.tar.bz2" +OE_INIT_PATH : "&YOCTO_POKY;/oe-init-build-env" +OE_INIT_FILE : "oe-init-build-env" +UBUNTU_HOST_PACKAGES_ESSENTIAL : "gawk wget git-core diffstat unzip texinfo gcc-multilib \ + build-essential chrpath socat cpio python3 python3-pip python3-pexpect \ + xz-utils debianutils iputils-ping python3-git python3-jinja2 libegl1-mesa libsdl1.2-dev \ + pylint3 xterm python3-subunit mesa-common-dev" +FEDORA_HOST_PACKAGES_ESSENTIAL : "gawk make wget tar bzip2 gzip python3 unzip perl patch \ + diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath \ + ccache perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue perl-bignum socat \ + python3-pexpect findutils which file cpio python python3-pip xz python3-GitPython \ + python3-jinja2 SDL-devel xterm rpcgen mesa-libGL-devel" +OPENSUSE_HOST_PACKAGES_ESSENTIAL : "python gcc gcc-c++ git chrpath make wget python-xml \ + diffstat makeinfo python-curses patch socat python3 python3-curses tar python3-pip \ + python3-pexpect xz which python3-Jinja2 Mesa-libEGL1 libSDL-devel xterm rpcgen Mesa-dri-devel + $ sudo pip3 install GitPython" +CENTOS7_HOST_PACKAGES_ESSENTIAL : "-y epel-release + $ sudo yum makecache + $ sudo yum install gawk make wget tar bzip2 gzip python3 unzip perl patch \ + diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath socat \ + perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue python36-pip xz \ + which SDL-devel xterm mesa-libGL-devel + $ sudo pip3 install GitPython jinja2" +CENTOS8_HOST_PACKAGES_ESSENTIAL : "-y epel-release + $ sudo dnf config-manager --set-enabled PowerTools + $ sudo dnf makecache + $ sudo dnf install gawk make wget tar bzip2 gzip python3 unzip perl patch \ + diffutils diffstat git cpp gcc gcc-c++ glibc-devel texinfo chrpath ccache \ + socat perl-Data-Dumper perl-Text-ParseWords perl-Thread-Queue python3-pip \ + python3-GitPython python3-jinja2 python3-pexpect xz which SDL-devel xterm \ + rpcgen mesa-libGL-devel" diff --git a/poky/documentation/profile-manual/history.rst b/poky/documentation/profile-manual/history.rst new file mode 100644 index 000000000..3ffb7eacb --- /dev/null +++ b/poky/documentation/profile-manual/history.rst @@ -0,0 +1,58 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 1.4 + - April 2013 + - The initial document released with the Yocto Project 1.4 Release + * - 1.5 + - October 2013 + - Released with the Yocto Project 1.5 Release. + * - 1.6 + - April 2014 + - Released with the Yocto Project 1.6 Release. + * - 1.7 + - October 2014 + - Released with the Yocto Project 1.7 Release. + * - 1.8 + - April 2015 + - Released with the Yocto Project 1.8 Release. + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. 
+ * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. diff --git a/poky/documentation/profile-manual/profile-manual-arch.rst b/poky/documentation/profile-manual/profile-manual-arch.rst new file mode 100644 index 000000000..9e1e400e4 --- /dev/null +++ b/poky/documentation/profile-manual/profile-manual-arch.rst @@ -0,0 +1,29 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************************************************* +Overall Architecture of the Linux Tracing and Profiling Tools +************************************************************* + +Architecture of the Tracing and Profiling Tools +=============================================== + +It may seem surprising to see a section covering an 'overall +architecture' for what seems to be a random collection of tracing tools +that together make up the Linux tracing and profiling space. The fact +is, however, that in recent years this seemingly disparate set of tools +has started to converge on a 'core' set of underlying mechanisms: + +- static tracepoints +- dynamic tracepoints + + - kprobes + - uprobes + +- the perf_events subsystem +- debugfs + +.. admonition:: Tying it Together + + Rather than enumerating here how each tool makes use of these common + mechanisms, textboxes like this will make note of the specific usages + in each tool as they come up in the course of the text. diff --git a/poky/documentation/profile-manual/profile-manual-examples.rst b/poky/documentation/profile-manual/profile-manual-examples.rst new file mode 100644 index 000000000..32ccd37b8 --- /dev/null +++ b/poky/documentation/profile-manual/profile-manual-examples.rst @@ -0,0 +1,24 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************* +Real-World Examples +******************* + +| + +This chapter contains real-world examples. + +Slow Write Speed on Live Images +=============================== + +In one of our previous releases (denzil), users noticed that booting off +of a live image and writing to disk was noticeably slower. This included +the boot itself, especially the first one, since first boots tend to do +a significant amount of writing due to certain post-install scripts. + +The problem (and solution) was discovered by using the Yocto tracing +tools, in this case 'perf stat', 'perf script', 'perf record' and 'perf +report'. + +See all the unvarnished details of how this bug was diagnosed and solved +here: Yocto Bug #3049 diff --git a/poky/documentation/profile-manual/profile-manual-intro.rst b/poky/documentation/profile-manual/profile-manual-intro.rst new file mode 100644 index 000000000..994b1c508 --- /dev/null +++ b/poky/documentation/profile-manual/profile-manual-intro.rst @@ -0,0 +1,79 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************************************** +Yocto Project Profiling and Tracing Manual +****************************************** + +.. _profile-intro: + +Introduction +============ + +Yocto bundles a number of tracing and profiling tools - this 'HOWTO' +describes their basic usage and shows by example how to make use of them +to examine application and system behavior. + +The tools presented are for the most part completely open-ended and have +quite good and/or extensive documentation of their own which can be used +to solve just about any problem you might come across in Linux. 
Each +section that describes a particular tool has links to that tool's +documentation and website. + +The purpose of this 'HOWTO' is to present a set of common and generally +useful tracing and profiling idioms along with their application (as +appropriate) to each tool, in the context of a general-purpose +'drill-down' methodology that can be applied to solving a large number +(90%?) of problems. For help with more advanced usages and problems, +please see the documentation and/or websites listed for each tool. + +The final section of this 'HOWTO' is a collection of real-world examples +which we'll be continually adding to as we solve more problems using the +tools - feel free to add your own examples to the list! + +.. _profile-manual-general-setup: + +General Setup +============= + +Most of the tools are available only in 'sdk' images or in images built +after adding 'tools-profile' to your local.conf. So, in order to be able +to access all of the tools described here, please first build and boot +an 'sdk' image e.g. :: + + $ bitbake core-image-sato-sdk + +or alternatively by adding 'tools-profile' to the EXTRA_IMAGE_FEATURES line in +your local.conf: :: + + EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile" + +If you use the 'tools-profile' method, you don't need to build an sdk image - +the tracing and profiling tools will be included in non-sdk images as well e.g.: :: + + $ bitbake core-image-sato + +.. note:: + + By default, the Yocto build system strips symbols from the binaries + it packages, which makes it difficult to use some of the tools. + + You can prevent that by setting the + :term:`INHIBIT_PACKAGE_STRIP` + variable to "1" in your ``local.conf`` when you build the image: :: + + INHIBIT_PACKAGE_STRIP = "1" + + The above setting will noticeably increase the size of your image. + +If you've already built a stripped image, you can generate debug +packages (xxx-dbg) which you can manually install as needed. + +To generate debug info for packages, you can add dbg-pkgs to +EXTRA_IMAGE_FEATURES in local.conf. For example: :: + + EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile dbg-pkgs" + +Additionally, in order to generate the right type of debuginfo, we also need to +set :term:`PACKAGE_DEBUG_SPLIT_STYLE` in the ``local.conf`` file: :: + + PACKAGE_DEBUG_SPLIT_STYLE = 'debug-file-directory' diff --git a/poky/documentation/profile-manual/profile-manual-usage.rst b/poky/documentation/profile-manual/profile-manual-usage.rst new file mode 100644 index 000000000..32b04f6ff --- /dev/null +++ b/poky/documentation/profile-manual/profile-manual-usage.rst @@ -0,0 +1,2624 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK +.. highlight:: shell + +*************************************************************** +Basic Usage (with examples) for each of the Yocto Tracing Tools +*************************************************************** + +| + +This chapter presents basic usage examples for each of the tracing +tools. + +.. _profile-manual-perf: + +perf +==== + +The 'perf' tool is the profiling and tracing tool that comes bundled +with the Linux kernel. + +Don't let the fact that it's part of the kernel fool you into thinking +that it's only for tracing and profiling the kernel - you can indeed use +it to trace and profile just the kernel, but you can also use it to +profile specific applications separately (with or without kernel +context), and you can also use it to trace and profile the kernel and +all applications on the system simultaneously to gain a system-wide view +of what's going on. 
+ +In many ways, perf aims to be a superset of all the tracing and +profiling tools available in Linux today, including all the other tools +covered in this HOWTO. The past couple of years have seen perf subsume a +lot of the functionality of those other tools and, at the same time, +those other tools have removed large portions of their previous +functionality and replaced it with calls to the equivalent functionality +now implemented by the perf subsystem. Extrapolation suggests that at +some point those other tools will simply become completely redundant and +go away; until then, we'll cover those other tools in these pages and in +many cases show how the same things can be accomplished in perf and the +other tools when it seems useful to do so. + +The coverage below details some of the most common ways you'll likely +want to apply the tool; full documentation can be found either within +the tool itself or in the man pages at +`perf(1) `__. + +.. _perf-setup: + +Perf Setup +---------- + +For this section, we'll assume you've already performed the basic setup +outlined in the ":ref:`profile-manual/profile-manual-intro:General Setup`" section. + +In particular, you'll get the most mileage out of perf if you profile an +image built with the following in your ``local.conf`` file: :: + + INHIBIT_PACKAGE_STRIP = "1" + +perf runs on the target system for the most part. You can archive +profile data and copy it to the host for analysis, but for the rest of +this document we assume you've ssh'ed to the host and will be running +the perf commands on the target. + +.. _perf-basic-usage: + +Basic Perf Usage +---------------- + +The perf tool is pretty much self-documenting. To remind yourself of the +available commands, simply type 'perf', which will show you basic usage +along with the available perf subcommands: :: + + root@crownbay:~# perf + + usage: perf [--version] [--help] COMMAND [ARGS] + + The most commonly used perf commands are: + annotate Read perf.data (created by perf record) and display annotated code + archive Create archive with object files with build-ids found in perf.data file + bench General framework for benchmark suites + buildid-cache Manage build-id cache. + buildid-list List the buildids in a perf.data file + diff Read two perf.data files and display the differential profile + evlist List the event names in a perf.data file + inject Filter to augment the events stream with additional information + kmem Tool to trace/measure kernel memory(slab) properties + kvm Tool to trace/measure kvm guest os + list List all symbolic event types + lock Analyze lock events + probe Define new dynamic tracepoints + record Run a command and record its profile into perf.data + report Read perf.data (created by perf record) and display the profile + sched Tool to trace/measure scheduler properties (latencies) + script Read perf.data (created by perf record) and display trace output + stat Run a command and gather performance counter statistics + test Runs sanity tests. + timechart Tool to visualize total system behavior during a workload + top System profiling tool. + + See 'perf help COMMAND' for more information on a specific command. 
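+
+For example, to read the full built-in documentation for one of the
+subcommands listed above (here 'record', but any of them works the same
+way): ::
+
+   root@crownbay:~# perf help record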
+ + +Using perf to do Basic Profiling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As a simple test case, we'll profile the 'wget' of a fairly large file, +which is a minimally interesting case because it has both file and +network I/O aspects, and at least in the case of standard Yocto images, +it's implemented as part of busybox, so the methods we use to analyze it +can be used in a very similar way to the whole host of supported busybox +applets in Yocto. :: + + root@crownbay:~# rm linux-2.6.19.2.tar.bz2; \ + wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + +The quickest and easiest way to get some basic overall data about what's +going on for a particular workload is to profile it using 'perf stat'. +'perf stat' basically profiles using a few default counters and displays +the summed counts at the end of the run: :: + + root@crownbay:~# perf stat wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% |***************************************************| 41727k 0:00:00 ETA + + Performance counter stats for 'wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2': + + 4597.223902 task-clock # 0.077 CPUs utilized + 23568 context-switches # 0.005 M/sec + 68 CPU-migrations # 0.015 K/sec + 241 page-faults # 0.052 K/sec + 3045817293 cycles # 0.663 GHz + stalled-cycles-frontend + stalled-cycles-backend + 858909167 instructions # 0.28 insns per cycle + 165441165 branches # 35.987 M/sec + 19550329 branch-misses # 11.82% of all branches + + 59.836627620 seconds time elapsed + +Many times such a simple-minded test doesn't yield much of +interest, but sometimes it does (see Real-world Yocto bug (slow +loop-mounted write speed)). + +Also, note that 'perf stat' isn't restricted to a fixed set of counters +- basically any event listed in the output of 'perf list' can be tallied +by 'perf stat'. For example, suppose we wanted to see a summary of all +the events related to kernel memory allocation/freeing along with cache +hits and misses: :: + + root@crownbay:~# perf stat -e kmem:* -e cache-references -e cache-misses wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% |***************************************************| 41727k 0:00:00 ETA + + Performance counter stats for 'wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2': + + 5566 kmem:kmalloc + 125517 kmem:kmem_cache_alloc + 0 kmem:kmalloc_node + 0 kmem:kmem_cache_alloc_node + 34401 kmem:kfree + 69920 kmem:kmem_cache_free + 133 kmem:mm_page_free + 41 kmem:mm_page_free_batched + 11502 kmem:mm_page_alloc + 11375 kmem:mm_page_alloc_zone_locked + 0 kmem:mm_page_pcpu_drain + 0 kmem:mm_page_alloc_extfrag + 66848602 cache-references + 2917740 cache-misses # 4.365 % of all cache refs + + 44.831023415 seconds time elapsed + +So 'perf stat' gives us a nice easy +way to get a quick overview of what might be happening for a set of +events, but normally we'd need a little more detail in order to +understand what's going on in a way that we can act on in a useful way. + +To dive down into a next level of detail, we can use 'perf record'/'perf +report' which will collect profiling data and present it to use using an +interactive text-based UI (or simply as text if we specify --stdio to +'perf report'). 
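+
+If you would rather skip the interactive UI entirely (for example when
+capturing output over a serial console), the same report can be dumped
+as plain text using the --stdio option mentioned above: ::
+
+   root@crownbay:~# perf report --stdio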
+ +As our first attempt at profiling this workload, we'll simply run 'perf +record', handing it the workload we want to profile (everything after +'perf record' and any perf options we hand it - here none - will be +executed in a new shell). perf collects samples until the process exits +and records them in a file named 'perf.data' in the current working +directory. :: + + root@crownbay:~# perf record wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% |************************************************| 41727k 0:00:00 ETA + [ perf record: Woken up 1 times to write data ] + [ perf record: Captured and wrote 0.176 MB perf.data (~7700 samples) ] + +To see the results in a +'text-based UI' (tui), simply run 'perf report', which will read the +perf.data file in the current working directory and display the results +in an interactive UI: :: + + root@crownbay:~# perf report + +.. image:: figures/perf-wget-flat-stripped.png + :align: center + +The above screenshot displays a 'flat' profile, one entry for each +'bucket' corresponding to the functions that were profiled during the +profiling run, ordered from the most popular to the least (perf has +options to sort in various orders and keys as well as display entries +only above a certain threshold and so on - see the perf documentation +for details). Note that this includes both userspace functions (entries +containing a [.]) and kernel functions accounted to the process (entries +containing a [k]). (perf has command-line modifiers that can be used to +restrict the profiling to kernel or userspace, among others). + +Notice also that the above report shows an entry for 'busybox', which is +the executable that implements 'wget' in Yocto, but that instead of a +useful function name in that entry, it displays a not-so-friendly hex +value instead. The steps below will show how to fix that problem. + +Before we do that, however, let's try running a different profile, one +which shows something a little more interesting. The only difference +between the new profile and the previous one is that we'll add the -g +option, which will record not just the address of a sampled function, +but the entire callchain to the sampled function as well: :: + + root@crownbay:~# perf record -g wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% |************************************************| 41727k 0:00:00 ETA + [ perf record: Woken up 3 times to write data ] + [ perf record: Captured and wrote 0.652 MB perf.data (~28476 samples) ] + + + root@crownbay:~# perf report + +.. image:: figures/perf-wget-g-copy-to-user-expanded-stripped.png + :align: center + +Using the callgraph view, we can actually see not only which functions +took the most time, but we can also see a summary of how those functions +were called and learn something about how the program interacts with the +kernel in the process. + +Notice that each entry in the above screenshot now contains a '+' on the +left-hand side. This means that we can expand the entry and drill down +into the callchains that feed into that entry. Pressing 'enter' on any +one of them will expand the callchain (you can also press 'E' to expand +them all at the same time or 'C' to collapse them all). 
+ +In the screenshot above, we've toggled the ``__copy_to_user_ll()`` entry +and several subnodes all the way down. This lets us see which callchains +contributed to the profiled ``__copy_to_user_ll()`` function which +contributed 1.77% to the total profile. + +As a bit of background explanation for these callchains, think about +what happens at a high level when you run wget to get a file out on the +network. Basically what happens is that the data comes into the kernel +via the network connection (socket) and is passed to the userspace +program 'wget' (which is actually a part of busybox, but that's not +important for now), which takes the buffers the kernel passes to it and +writes it to a disk file to save it. + +The part of this process that we're looking at in the above call stacks +is the part where the kernel passes the data it's read from the socket +down to wget i.e. a copy-to-user. + +Notice also that here there's also a case where the hex value is +displayed in the callstack, here in the expanded ``sys_clock_gettime()`` +function. Later we'll see it resolve to a userspace function call in +busybox. + +.. image:: figures/perf-wget-g-copy-from-user-expanded-stripped.png + :align: center + +The above screenshot shows the other half of the journey for the data - +from the wget program's userspace buffers to disk. To get the buffers to +disk, the wget program issues a ``write(2)``, which does a ``copy-from-user`` to +the kernel, which then takes care via some circuitous path (probably +also present somewhere in the profile data), to get it safely to disk. + +Now that we've seen the basic layout of the profile data and the basics +of how to extract useful information out of it, let's get back to the +task at hand and see if we can get some basic idea about where the time +is spent in the program we're profiling, wget. Remember that wget is +actually implemented as an applet in busybox, so while the process name +is 'wget', the executable we're actually interested in is busybox. So +let's expand the first entry containing busybox: + +.. image:: figures/perf-wget-busybox-expanded-stripped.png + :align: center + +Again, before we expanded we saw that the function was labeled with a +hex value instead of a symbol as with most of the kernel entries. +Expanding the busybox entry doesn't make it any better. + +The problem is that perf can't find the symbol information for the +busybox binary, which is actually stripped out by the Yocto build +system. + +One way around that is to put the following in your ``local.conf`` file +when you build the image: :: + + INHIBIT_PACKAGE_STRIP = "1" + +However, we already have an image with the binaries stripped, so +what can we do to get perf to resolve the symbols? Basically we need to +install the debuginfo for the busybox package. + +To generate the debug info for the packages in the image, we can add +``dbg-pkgs`` to :term:`EXTRA_IMAGE_FEATURES` in ``local.conf``. For example: :: + + EXTRA_IMAGE_FEATURES = "debug-tweaks tools-profile dbg-pkgs" + +Additionally, in order to generate the type of debuginfo that perf +understands, we also need to set +:term:`PACKAGE_DEBUG_SPLIT_STYLE` +in the ``local.conf`` file: :: + + PACKAGE_DEBUG_SPLIT_STYLE = 'debug-file-directory' + +Once we've done that, we can install the +debuginfo for busybox. The debug packages once built can be found in +``build/tmp/deploy/rpm/*`` on the host system. Find the busybox-dbg-...rpm +file and copy it to the target. 
For example: :: + + [trz@empanada core2]$ scp /home/trz/yocto/crownbay-tracing-dbg/build/tmp/deploy/rpm/core2_32/busybox-dbg-1.20.2-r2.core2_32.rpm root@192.168.1.31: + busybox-dbg-1.20.2-r2.core2_32.rpm 100% 1826KB 1.8MB/s 00:01 + +Now install the debug rpm on the target: :: + + root@crownbay:~# rpm -i busybox-dbg-1.20.2-r2.core2_32.rpm + +Now that the debuginfo is installed, we see that the busybox entries now display +their functions symbolically: + +.. image:: figures/perf-wget-busybox-debuginfo.png + :align: center + +If we expand one of the entries and press 'enter' on a leaf node, we're +presented with a menu of actions we can take to get more information +related to that entry: + +.. image:: figures/perf-wget-busybox-dso-zoom-menu.png + :align: center + +One of these actions allows us to show a view that displays a +busybox-centric view of the profiled functions (in this case we've also +expanded all the nodes using the 'E' key): + +.. image:: figures/perf-wget-busybox-dso-zoom.png + :align: center + +Finally, we can see that now that the busybox debuginfo is installed, +the previously unresolved symbol in the ``sys_clock_gettime()`` entry +mentioned previously is now resolved, and shows that the +sys_clock_gettime system call that was the source of 6.75% of the +copy-to-user overhead was initiated by the ``handle_input()`` busybox +function: + +.. image:: figures/perf-wget-g-copy-to-user-expanded-debuginfo.png + :align: center + +At the lowest level of detail, we can dive down to the assembly level +and see which instructions caused the most overhead in a function. +Pressing 'enter' on the 'udhcpc_main' function, we're again presented +with a menu: + +.. image:: figures/perf-wget-busybox-annotate-menu.png + :align: center + +Selecting 'Annotate udhcpc_main', we get a detailed listing of +percentages by instruction for the udhcpc_main function. From the +display, we can see that over 50% of the time spent in this function is +taken up by a couple tests and the move of a constant (1) to a register: + +.. image:: figures/perf-wget-busybox-annotate-udhcpc.png + :align: center + +As a segue into tracing, let's try another profile using a different +counter, something other than the default 'cycles'. + +The tracing and profiling infrastructure in Linux has become unified in +a way that allows us to use the same tool with a completely different +set of counters, not just the standard hardware counters that +traditional tools have had to restrict themselves to (of course the +traditional tools can also make use of the expanded possibilities now +available to them, and in some cases have, as mentioned previously). 
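+
+Before moving on to the tracepoint events used in the rest of this
+section, note that the same -e switch accepts any of the other hardware
+or software events shown by 'perf list' below. For instance (just a
+sketch), you could sample on cache misses rather than cycles: ::
+
+   root@crownbay:~# perf record -g -e cache-misses wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2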
+ +We can get a list of the available events that can be used to profile a +workload via 'perf list': :: + + root@crownbay:~# perf list + + List of pre-defined events (to be used in -e): + cpu-cycles OR cycles [Hardware event] + stalled-cycles-frontend OR idle-cycles-frontend [Hardware event] + stalled-cycles-backend OR idle-cycles-backend [Hardware event] + instructions [Hardware event] + cache-references [Hardware event] + cache-misses [Hardware event] + branch-instructions OR branches [Hardware event] + branch-misses [Hardware event] + bus-cycles [Hardware event] + ref-cycles [Hardware event] + + cpu-clock [Software event] + task-clock [Software event] + page-faults OR faults [Software event] + minor-faults [Software event] + major-faults [Software event] + context-switches OR cs [Software event] + cpu-migrations OR migrations [Software event] + alignment-faults [Software event] + emulation-faults [Software event] + + L1-dcache-loads [Hardware cache event] + L1-dcache-load-misses [Hardware cache event] + L1-dcache-prefetch-misses [Hardware cache event] + L1-icache-loads [Hardware cache event] + L1-icache-load-misses [Hardware cache event] + . + . + . + rNNN [Raw hardware event descriptor] + cpu/t1=v1[,t2=v2,t3 ...]/modifier [Raw hardware event descriptor] + (see 'perf list --help' on how to encode it) + + mem:[:access] [Hardware breakpoint] + + sunrpc:rpc_call_status [Tracepoint event] + sunrpc:rpc_bind_status [Tracepoint event] + sunrpc:rpc_connect_status [Tracepoint event] + sunrpc:rpc_task_begin [Tracepoint event] + skb:kfree_skb [Tracepoint event] + skb:consume_skb [Tracepoint event] + skb:skb_copy_datagram_iovec [Tracepoint event] + net:net_dev_xmit [Tracepoint event] + net:net_dev_queue [Tracepoint event] + net:netif_receive_skb [Tracepoint event] + net:netif_rx [Tracepoint event] + napi:napi_poll [Tracepoint event] + sock:sock_rcvqueue_full [Tracepoint event] + sock:sock_exceed_buf_limit [Tracepoint event] + udp:udp_fail_queue_rcv_skb [Tracepoint event] + hda:hda_send_cmd [Tracepoint event] + hda:hda_get_response [Tracepoint event] + hda:hda_bus_reset [Tracepoint event] + scsi:scsi_dispatch_cmd_start [Tracepoint event] + scsi:scsi_dispatch_cmd_error [Tracepoint event] + scsi:scsi_eh_wakeup [Tracepoint event] + drm:drm_vblank_event [Tracepoint event] + drm:drm_vblank_event_queued [Tracepoint event] + drm:drm_vblank_event_delivered [Tracepoint event] + random:mix_pool_bytes [Tracepoint event] + random:mix_pool_bytes_nolock [Tracepoint event] + random:credit_entropy_bits [Tracepoint event] + gpio:gpio_direction [Tracepoint event] + gpio:gpio_value [Tracepoint event] + block:block_rq_abort [Tracepoint event] + block:block_rq_requeue [Tracepoint event] + block:block_rq_issue [Tracepoint event] + block:block_bio_bounce [Tracepoint event] + block:block_bio_complete [Tracepoint event] + block:block_bio_backmerge [Tracepoint event] + . + . + writeback:writeback_wake_thread [Tracepoint event] + writeback:writeback_wake_forker_thread [Tracepoint event] + writeback:writeback_bdi_register [Tracepoint event] + . + . 
+ writeback:writeback_single_inode_requeue [Tracepoint event] + writeback:writeback_single_inode [Tracepoint event] + kmem:kmalloc [Tracepoint event] + kmem:kmem_cache_alloc [Tracepoint event] + kmem:mm_page_alloc [Tracepoint event] + kmem:mm_page_alloc_zone_locked [Tracepoint event] + kmem:mm_page_pcpu_drain [Tracepoint event] + kmem:mm_page_alloc_extfrag [Tracepoint event] + vmscan:mm_vmscan_kswapd_sleep [Tracepoint event] + vmscan:mm_vmscan_kswapd_wake [Tracepoint event] + vmscan:mm_vmscan_wakeup_kswapd [Tracepoint event] + vmscan:mm_vmscan_direct_reclaim_begin [Tracepoint event] + . + . + module:module_get [Tracepoint event] + module:module_put [Tracepoint event] + module:module_request [Tracepoint event] + sched:sched_kthread_stop [Tracepoint event] + sched:sched_wakeup [Tracepoint event] + sched:sched_wakeup_new [Tracepoint event] + sched:sched_process_fork [Tracepoint event] + sched:sched_process_exec [Tracepoint event] + sched:sched_stat_runtime [Tracepoint event] + rcu:rcu_utilization [Tracepoint event] + workqueue:workqueue_queue_work [Tracepoint event] + workqueue:workqueue_execute_end [Tracepoint event] + signal:signal_generate [Tracepoint event] + signal:signal_deliver [Tracepoint event] + timer:timer_init [Tracepoint event] + timer:timer_start [Tracepoint event] + timer:hrtimer_cancel [Tracepoint event] + timer:itimer_state [Tracepoint event] + timer:itimer_expire [Tracepoint event] + irq:irq_handler_entry [Tracepoint event] + irq:irq_handler_exit [Tracepoint event] + irq:softirq_entry [Tracepoint event] + irq:softirq_exit [Tracepoint event] + irq:softirq_raise [Tracepoint event] + printk:console [Tracepoint event] + task:task_newtask [Tracepoint event] + task:task_rename [Tracepoint event] + syscalls:sys_enter_socketcall [Tracepoint event] + syscalls:sys_exit_socketcall [Tracepoint event] + . + . + . + syscalls:sys_enter_unshare [Tracepoint event] + syscalls:sys_exit_unshare [Tracepoint event] + raw_syscalls:sys_enter [Tracepoint event] + raw_syscalls:sys_exit [Tracepoint event] + +.. admonition:: Tying it Together + + These are exactly the same set of events defined by the trace event + subsystem and exposed by ftrace/tracecmd/kernelshark as files in + /sys/kernel/debug/tracing/events, by SystemTap as + kernel.trace("tracepoint_name") and (partially) accessed by LTTng. 
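+
+If you only want to see the tracepoints belonging to a particular
+subsystem, you can either browse the corresponding subdirectory under
+/sys/kernel/debug/tracing/events on the target, or hand 'perf list' a
+glob (a quick sketch, assuming debugfs is mounted in the usual
+location): ::
+
+   root@crownbay:~# ls /sys/kernel/debug/tracing/events
+   root@crownbay:~# perf list 'sched:*'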
+ +Only a subset of these would be of interest to us when looking at this +workload, so let's choose the most likely subsystems (identified by the +string before the colon in the Tracepoint events) and do a 'perf stat' +run using only those wildcarded subsystems: :: + + root@crownbay:~# perf stat -e skb:* -e net:* -e napi:* -e sched:* -e workqueue:* -e irq:* -e syscalls:* wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + Performance counter stats for 'wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2': + + 23323 skb:kfree_skb + 0 skb:consume_skb + 49897 skb:skb_copy_datagram_iovec + 6217 net:net_dev_xmit + 6217 net:net_dev_queue + 7962 net:netif_receive_skb + 2 net:netif_rx + 8340 napi:napi_poll + 0 sched:sched_kthread_stop + 0 sched:sched_kthread_stop_ret + 3749 sched:sched_wakeup + 0 sched:sched_wakeup_new + 0 sched:sched_switch + 29 sched:sched_migrate_task + 0 sched:sched_process_free + 1 sched:sched_process_exit + 0 sched:sched_wait_task + 0 sched:sched_process_wait + 0 sched:sched_process_fork + 1 sched:sched_process_exec + 0 sched:sched_stat_wait + 2106519415641 sched:sched_stat_sleep + 0 sched:sched_stat_iowait + 147453613 sched:sched_stat_blocked + 12903026955 sched:sched_stat_runtime + 0 sched:sched_pi_setprio + 3574 workqueue:workqueue_queue_work + 3574 workqueue:workqueue_activate_work + 0 workqueue:workqueue_execute_start + 0 workqueue:workqueue_execute_end + 16631 irq:irq_handler_entry + 16631 irq:irq_handler_exit + 28521 irq:softirq_entry + 28521 irq:softirq_exit + 28728 irq:softirq_raise + 1 syscalls:sys_enter_sendmmsg + 1 syscalls:sys_exit_sendmmsg + 0 syscalls:sys_enter_recvmmsg + 0 syscalls:sys_exit_recvmmsg + 14 syscalls:sys_enter_socketcall + 14 syscalls:sys_exit_socketcall + . + . + . + 16965 syscalls:sys_enter_read + 16965 syscalls:sys_exit_read + 12854 syscalls:sys_enter_write + 12854 syscalls:sys_exit_write + . + . + . + + 58.029710972 seconds time elapsed + + + +Let's pick one of these tracepoints +and tell perf to do a profile using it as the sampling event: :: + + root@crownbay:~# perf record -g -e sched:sched_wakeup wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + +.. image:: figures/sched-wakeup-profile.png + :align: center + +The screenshot above shows the results of running a profile using +sched:sched_switch tracepoint, which shows the relative costs of various +paths to sched_wakeup (note that sched_wakeup is the name of the +tracepoint - it's actually defined just inside ttwu_do_wakeup(), which +accounts for the function name actually displayed in the profile: + +.. code-block:: c + + /* + * Mark the task runnable and perform wakeup-preemption. + */ + static void + ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) + { + trace_sched_wakeup(p, true); + . + . + . + } + +A couple of the more interesting +callchains are expanded and displayed above, basically some network +receive paths that presumably end up waking up wget (busybox) when +network data is ready. + +Note that because tracepoints are normally used for tracing, the default +sampling period for tracepoints is 1 i.e. for tracepoints perf will +sample on every event occurrence (this can be changed using the -c +option). 
This is in contrast to hardware counters such as for example +the default 'cycles' hardware counter used for normal profiling, where +sampling periods are much higher (in the thousands) because profiling +should have as low an overhead as possible and sampling on every cycle +would be prohibitively expensive. + +Using perf to do Basic Tracing +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Profiling is a great tool for solving many problems or for getting a +high-level view of what's going on with a workload or across the system. +It is however by definition an approximation, as suggested by the most +prominent word associated with it, 'sampling'. On the one hand, it +allows a representative picture of what's going on in the system to be +cheaply taken, but on the other hand, that cheapness limits its utility +when that data suggests a need to 'dive down' more deeply to discover +what's really going on. In such cases, the only way to see what's really +going on is to be able to look at (or summarize more intelligently) the +individual steps that go into the higher-level behavior exposed by the +coarse-grained profiling data. + +As a concrete example, we can trace all the events we think might be +applicable to our workload: :: + + root@crownbay:~# perf record -g -e skb:* -e net:* -e napi:* -e sched:sched_switch -e sched:sched_wakeup -e irq:* + -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write + wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + +We can look at the raw trace output using 'perf script' with no +arguments: :: + + root@crownbay:~# perf script + + perf 1262 [000] 11624.857082: sys_exit_read: 0x0 + perf 1262 [000] 11624.857193: sched_wakeup: comm=migration/0 pid=6 prio=0 success=1 target_cpu=000 + wget 1262 [001] 11624.858021: softirq_raise: vec=1 [action=TIMER] + wget 1262 [001] 11624.858074: softirq_entry: vec=1 [action=TIMER] + wget 1262 [001] 11624.858081: softirq_exit: vec=1 [action=TIMER] + wget 1262 [001] 11624.858166: sys_enter_read: fd: 0x0003, buf: 0xbf82c940, count: 0x0200 + wget 1262 [001] 11624.858177: sys_exit_read: 0x200 + wget 1262 [001] 11624.858878: kfree_skb: skbaddr=0xeb248d80 protocol=0 location=0xc15a5308 + wget 1262 [001] 11624.858945: kfree_skb: skbaddr=0xeb248000 protocol=0 location=0xc15a5308 + wget 1262 [001] 11624.859020: softirq_raise: vec=1 [action=TIMER] + wget 1262 [001] 11624.859076: softirq_entry: vec=1 [action=TIMER] + wget 1262 [001] 11624.859083: softirq_exit: vec=1 [action=TIMER] + wget 1262 [001] 11624.859167: sys_enter_read: fd: 0x0003, buf: 0xb7720000, count: 0x0400 + wget 1262 [001] 11624.859192: sys_exit_read: 0x1d7 + wget 1262 [001] 11624.859228: sys_enter_read: fd: 0x0003, buf: 0xb7720000, count: 0x0400 + wget 1262 [001] 11624.859233: sys_exit_read: 0x0 + wget 1262 [001] 11624.859573: sys_enter_read: fd: 0x0003, buf: 0xbf82c580, count: 0x0200 + wget 1262 [001] 11624.859584: sys_exit_read: 0x200 + wget 1262 [001] 11624.859864: sys_enter_read: fd: 0x0003, buf: 0xb7720000, count: 0x0400 + wget 1262 [001] 11624.859888: sys_exit_read: 0x400 + wget 1262 [001] 11624.859935: sys_enter_read: fd: 0x0003, buf: 0xb7720000, count: 0x0400 + wget 1262 [001] 11624.859944: sys_exit_read: 0x400 + +This gives us a detailed timestamped sequence of events that occurred within the +workload with respect to those events. 
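+
+Because 'perf script' simply writes text to stdout, ordinary shell tools
+are often enough for quick ad-hoc questions about the trace before
+reaching for anything more elaborate. For example (a sketch), counting
+how many read syscalls the workload entered: ::
+
+   root@crownbay:~# perf script | grep sys_enter_read | wc -l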
+ +In many ways, profiling can be viewed as a subset of tracing - +theoretically, if you have a set of trace events that's sufficient to +capture all the important aspects of a workload, you can derive any of +the results or views that a profiling run can. + +Another aspect of traditional profiling is that while powerful in many +ways, it's limited by the granularity of the underlying data. Profiling +tools offer various ways of sorting and presenting the sample data, +which make it much more useful and amenable to user experimentation, but +in the end it can't be used in an open-ended way to extract data that +just isn't present as a consequence of the fact that conceptually, most +of it has been thrown away. + +Full-blown detailed tracing data does however offer the opportunity to +manipulate and present the information collected during a tracing run in +an infinite variety of ways. + +Another way to look at it is that there are only so many ways that the +'primitive' counters can be used on their own to generate interesting +output; to get anything more complicated than simple counts requires +some amount of additional logic, which is typically very specific to the +problem at hand. For example, if we wanted to make use of a 'counter' +that maps to the value of the time difference between when a process was +scheduled to run on a processor and the time it actually ran, we +wouldn't expect such a counter to exist on its own, but we could derive +one called say 'wakeup_latency' and use it to extract a useful view of +that metric from trace data. Likewise, we really can't figure out from +standard profiling tools how much data every process on the system reads +and writes, along with how many of those reads and writes fail +completely. If we have sufficient trace data, however, we could with the +right tools easily extract and present that information, but we'd need +something other than pre-canned profiling tools to do that. + +Luckily, there is a general-purpose way to handle such needs, called +'programming languages'. Making programming languages easily available +to apply to such problems given the specific format of data is called a +'programming language binding' for that data and language. Perf supports +two programming language bindings, one for Python and one for Perl. + +.. admonition:: Tying it Together + + Language bindings for manipulating and aggregating trace data are of + course not a new idea. One of the first projects to do this was IBM's + DProbes dpcc compiler, an ANSI C compiler which targeted a low-level + assembly language running on an in-kernel interpreter on the target + system. This is exactly analogous to what Sun's DTrace did, except + that DTrace invented its own language for the purpose. Systemtap, + heavily inspired by DTrace, also created its own one-off language, + but rather than running the product on an in-kernel interpreter, + created an elaborate compiler-based machinery to translate its + language into kernel modules written in C. + +Now that we have the trace data in perf.data, we can use 'perf script +-g' to generate a skeleton script with handlers for the read/write +entry/exit events we recorded: :: + + root@crownbay:~# perf script -g python + generated Python script: perf-script.py + +The skeleton script simply creates a python function for each event type in the +perf.data file. The body of each function simply prints the event name along +with its parameters. For example: + +.. 
code-block:: python + + def net__netif_rx(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + skbaddr, len, name): + print_header(event_name, common_cpu, common_secs, common_nsecs, + common_pid, common_comm) + + print "skbaddr=%u, len=%u, name=%s\n" % (skbaddr, len, name), + +We can run that script directly to print all of the events contained in the +perf.data file: :: + + root@crownbay:~# perf script -s perf-script.py + + in trace_begin + syscalls__sys_exit_read 0 11624.857082795 1262 perf nr=3, ret=0 + sched__sched_wakeup 0 11624.857193498 1262 perf comm=migration/0, pid=6, prio=0, success=1, target_cpu=0 + irq__softirq_raise 1 11624.858021635 1262 wget vec=TIMER + irq__softirq_entry 1 11624.858074075 1262 wget vec=TIMER + irq__softirq_exit 1 11624.858081389 1262 wget vec=TIMER + syscalls__sys_enter_read 1 11624.858166434 1262 wget nr=3, fd=3, buf=3213019456, count=512 + syscalls__sys_exit_read 1 11624.858177924 1262 wget nr=3, ret=512 + skb__kfree_skb 1 11624.858878188 1262 wget skbaddr=3945041280, location=3243922184, protocol=0 + skb__kfree_skb 1 11624.858945608 1262 wget skbaddr=3945037824, location=3243922184, protocol=0 + irq__softirq_raise 1 11624.859020942 1262 wget vec=TIMER + irq__softirq_entry 1 11624.859076935 1262 wget vec=TIMER + irq__softirq_exit 1 11624.859083469 1262 wget vec=TIMER + syscalls__sys_enter_read 1 11624.859167565 1262 wget nr=3, fd=3, buf=3077701632, count=1024 + syscalls__sys_exit_read 1 11624.859192533 1262 wget nr=3, ret=471 + syscalls__sys_enter_read 1 11624.859228072 1262 wget nr=3, fd=3, buf=3077701632, count=1024 + syscalls__sys_exit_read 1 11624.859233707 1262 wget nr=3, ret=0 + syscalls__sys_enter_read 1 11624.859573008 1262 wget nr=3, fd=3, buf=3213018496, count=512 + syscalls__sys_exit_read 1 11624.859584818 1262 wget nr=3, ret=512 + syscalls__sys_enter_read 1 11624.859864562 1262 wget nr=3, fd=3, buf=3077701632, count=1024 + syscalls__sys_exit_read 1 11624.859888770 1262 wget nr=3, ret=1024 + syscalls__sys_enter_read 1 11624.859935140 1262 wget nr=3, fd=3, buf=3077701632, count=1024 + syscalls__sys_exit_read 1 11624.859944032 1262 wget nr=3, ret=1024 + +That in itself isn't very useful; after all, we can accomplish pretty much the +same thing by simply running 'perf script' without arguments in the same +directory as the perf.data file. + +We can however replace the print statements in the generated function +bodies with whatever we want, and thereby make it infinitely more +useful. + +As a simple example, let's just replace the print statements in the +function bodies with a simple function that does nothing but increment a +per-event count. When the program is run against a perf.data file, each +time a particular event is encountered, a tally is incremented for that +event. For example: + +.. code-block:: python + + def net__netif_rx(event_name, context, common_cpu, + common_secs, common_nsecs, common_pid, common_comm, + skbaddr, len, name): + inc_counts(event_name) + +Each event handler function in the generated code +is modified to do this. For convenience, we define a common function +called inc_counts() that each handler calls; inc_counts() simply tallies +a count for each event using the 'counts' hash, which is a specialized +hash function that does Perl-like autovivification, a capability that's +extremely useful for kinds of multi-level aggregation commonly used in +processing traces (see perf's documentation on the Python language +binding for details): + +.. 
code-block:: python + + counts = autodict() + + def inc_counts(event_name): + try: + counts[event_name] += 1 + except TypeError: + counts[event_name] = 1 + +Finally, at the end of the trace processing run, we want to print the +result of all the per-event tallies. For that, we use the special +'trace_end()' function: + +.. code-block:: python + + def trace_end(): + for event_name, count in counts.iteritems(): + print "%-40s %10s\n" % (event_name, count) + +The end result is a summary of all the events recorded in the trace: :: + + skb__skb_copy_datagram_iovec 13148 + irq__softirq_entry 4796 + irq__irq_handler_exit 3805 + irq__softirq_exit 4795 + syscalls__sys_enter_write 8990 + net__net_dev_xmit 652 + skb__kfree_skb 4047 + sched__sched_wakeup 1155 + irq__irq_handler_entry 3804 + irq__softirq_raise 4799 + net__net_dev_queue 652 + syscalls__sys_enter_read 17599 + net__netif_receive_skb 1743 + syscalls__sys_exit_read 17598 + net__netif_rx 2 + napi__napi_poll 1877 + syscalls__sys_exit_write 8990 + +Note that this is +pretty much exactly the same information we get from 'perf stat', which +goes a little way to support the idea mentioned previously that given +the right kind of trace data, higher-level profiling-type summaries can +be derived from it. + +Documentation on using the `'perf script' python +binding `__. + +System-Wide Tracing and Profiling +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The examples so far have focused on tracing a particular program or +workload - in other words, every profiling run has specified the program +to profile in the command-line e.g. 'perf record wget ...'. + +It's also possible, and more interesting in many cases, to run a +system-wide profile or trace while running the workload in a separate +shell. + +To do system-wide profiling or tracing, you typically use the -a flag to +'perf record'. + +To demonstrate this, open up one window and start the profile using the +-a flag (press Ctrl-C to stop tracing): :: + + root@crownbay:~# perf record -g -a + ^C[ perf record: Woken up 6 times to write data ] + [ perf record: Captured and wrote 1.400 MB perf.data (~61172 samples) ] + +In another window, run the wget test: :: + + root@crownbay:~# wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2 + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA + +Here we see entries not only for our wget load, but for +other processes running on the system as well: + +.. image:: figures/perf-systemwide.png + :align: center + +In the snapshot above, we can see callchains that originate in libc, and +a callchain from Xorg that demonstrates that we're using a proprietary X +driver in userspace (notice the presence of 'PVR' and some other +unresolvable symbols in the expanded Xorg callchain). + +Note also that we have both kernel and userspace entries in the above +snapshot. We can also tell perf to focus on userspace but providing a +modifier, in this case 'u', to the 'cycles' hardware counter when we +record a profile: :: + + root@crownbay:~# perf record -g -a -e cycles:u + ^C[ perf record: Woken up 2 times to write data ] + [ perf record: Captured and wrote 0.376 MB perf.data (~16443 samples) ] + +.. image:: figures/perf-report-cycles-u.png + :align: center + +Notice in the screenshot above, we see only userspace entries ([.]) + +Finally, we can press 'enter' on a leaf node and select the 'Zoom into +DSO' menu item to show only entries associated with a specific DSO. 
In +the screenshot below, we've zoomed into the 'libc' DSO which shows all +the entries associated with the libc-xxx.so DSO. + +.. image:: figures/perf-systemwide-libc.png + :align: center + +We can also use the system-wide -a switch to do system-wide tracing. +Here we'll trace a couple of scheduler events: :: + + root@crownbay:~# perf record -a -e sched:sched_switch -e sched:sched_wakeup + ^C[ perf record: Woken up 38 times to write data ] + [ perf record: Captured and wrote 9.780 MB perf.data (~427299 samples) ] + +We can look at the raw output using 'perf script' with no arguments: :: + + root@crownbay:~# perf script + + perf 1383 [001] 6171.460045: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1383 [001] 6171.460066: sched_switch: prev_comm=perf prev_pid=1383 prev_prio=120 prev_state=R+ ==> next_comm=kworker/1:1 next_pid=21 next_prio=120 + kworker/1:1 21 [001] 6171.460093: sched_switch: prev_comm=kworker/1:1 prev_pid=21 prev_prio=120 prev_state=S ==> next_comm=perf next_pid=1383 next_prio=120 + swapper 0 [000] 6171.468063: sched_wakeup: comm=kworker/0:3 pid=1209 prio=120 success=1 target_cpu=000 + swapper 0 [000] 6171.468107: sched_switch: prev_comm=swapper/0 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=kworker/0:3 next_pid=1209 next_prio=120 + kworker/0:3 1209 [000] 6171.468143: sched_switch: prev_comm=kworker/0:3 prev_pid=1209 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120 + perf 1383 [001] 6171.470039: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1383 [001] 6171.470058: sched_switch: prev_comm=perf prev_pid=1383 prev_prio=120 prev_state=R+ ==> next_comm=kworker/1:1 next_pid=21 next_prio=120 + kworker/1:1 21 [001] 6171.470082: sched_switch: prev_comm=kworker/1:1 prev_pid=21 prev_prio=120 prev_state=S ==> next_comm=perf next_pid=1383 next_prio=120 + perf 1383 [001] 6171.480035: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + +.. _perf-filtering: + +Filtering +^^^^^^^^^ + +Notice that there are a lot of events that don't really have anything to +do with what we're interested in, namely events that schedule 'perf' +itself in and out or that wake perf up. 
We can get rid of those by using +the '--filter' option - for each event we specify using -e, we can add a +--filter after that to filter out trace events that contain fields with +specific values: :: + + root@crownbay:~# perf record -a -e sched:sched_switch --filter 'next_comm != perf && prev_comm != perf' -e sched:sched_wakeup --filter 'comm != perf' + ^C[ perf record: Woken up 38 times to write data ] + [ perf record: Captured and wrote 9.688 MB perf.data (~423279 samples) ] + + + root@crownbay:~# perf script + + swapper 0 [000] 7932.162180: sched_switch: prev_comm=swapper/0 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=kworker/0:3 next_pid=1209 next_prio=120 + kworker/0:3 1209 [000] 7932.162236: sched_switch: prev_comm=kworker/0:3 prev_pid=1209 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120 + perf 1407 [001] 7932.170048: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1407 [001] 7932.180044: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1407 [001] 7932.190038: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1407 [001] 7932.200044: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1407 [001] 7932.210044: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + perf 1407 [001] 7932.220044: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + swapper 0 [001] 7932.230111: sched_wakeup: comm=kworker/1:1 pid=21 prio=120 success=1 target_cpu=001 + swapper 0 [001] 7932.230146: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=kworker/1:1 next_pid=21 next_prio=120 + kworker/1:1 21 [001] 7932.230205: sched_switch: prev_comm=kworker/1:1 prev_pid=21 prev_prio=120 prev_state=S ==> next_comm=swapper/1 next_pid=0 next_prio=120 + swapper 0 [000] 7932.326109: sched_wakeup: comm=kworker/0:3 pid=1209 prio=120 success=1 target_cpu=000 + swapper 0 [000] 7932.326171: sched_switch: prev_comm=swapper/0 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=kworker/0:3 next_pid=1209 next_prio=120 + kworker/0:3 1209 [000] 7932.326214: sched_switch: prev_comm=kworker/0:3 prev_pid=1209 prev_prio=120 prev_state=S ==> next_comm=swapper/0 next_pid=0 next_prio=120 + +In this case, we've filtered out all events that have +'perf' in their 'comm' or 'comm_prev' or 'comm_next' fields. Notice that +there are still events recorded for perf, but notice that those events +don't have values of 'perf' for the filtered fields. To completely +filter out anything from perf will require a bit more work, but for the +purpose of demonstrating how to use filters, it's close enough. + +.. admonition:: Tying it Together + + These are exactly the same set of event filters defined by the trace + event subsystem. See the ftrace/tracecmd/kernelshark section for more + discussion about these event filters. + +.. admonition:: Tying it Together + + These event filters are implemented by a special-purpose + pseudo-interpreter in the kernel and are an integral and + indispensable part of the perf design as it relates to tracing. + kernel-based event filters provide a mechanism to precisely throttle + the event stream that appears in user space, where it makes sense to + provide bindings to real programming languages for postprocessing the + event stream. This architecture allows for the intelligent and + flexible partitioning of processing between the kernel and user + space. 
Contrast this with other tools such as SystemTap, which does + all of its processing in the kernel and as such requires a special + project-defined language in order to accommodate that design, or + LTTng, where everything is sent to userspace and as such requires a + super-efficient kernel-to-userspace transport mechanism in order to + function properly. While perf certainly can benefit from for instance + advances in the design of the transport, it doesn't fundamentally + depend on them. Basically, if you find that your perf tracing + application is causing buffer I/O overruns, it probably means that + you aren't taking enough advantage of the kernel filtering engine. + +Using Dynamic Tracepoints +~~~~~~~~~~~~~~~~~~~~~~~~~ + +perf isn't restricted to the fixed set of static tracepoints listed by +'perf list'. Users can also add their own 'dynamic' tracepoints anywhere +in the kernel. For instance, suppose we want to define our own +tracepoint on do_fork(). We can do that using the 'perf probe' perf +subcommand: :: + + root@crownbay:~# perf probe do_fork + Added new event: + probe:do_fork (on do_fork) + + You can now use it in all perf tools, such as: + + perf record -e probe:do_fork -aR sleep 1 + +Adding a new tracepoint via +'perf probe' results in an event with all the expected files and format +in /sys/kernel/debug/tracing/events, just the same as for static +tracepoints (as discussed in more detail in the trace events subsystem +section: :: + + root@crownbay:/sys/kernel/debug/tracing/events/probe/do_fork# ls -al + drwxr-xr-x 2 root root 0 Oct 28 11:42 . + drwxr-xr-x 3 root root 0 Oct 28 11:42 .. + -rw-r--r-- 1 root root 0 Oct 28 11:42 enable + -rw-r--r-- 1 root root 0 Oct 28 11:42 filter + -r--r--r-- 1 root root 0 Oct 28 11:42 format + -r--r--r-- 1 root root 0 Oct 28 11:42 id + + root@crownbay:/sys/kernel/debug/tracing/events/probe/do_fork# cat format + name: do_fork + ID: 944 + format: + field:unsigned short common_type; offset:0; size:2; signed:0; + field:unsigned char common_flags; offset:2; size:1; signed:0; + field:unsigned char common_preempt_count; offset:3; size:1; signed:0; + field:int common_pid; offset:4; size:4; signed:1; + field:int common_padding; offset:8; size:4; signed:1; + + field:unsigned long __probe_ip; offset:12; size:4; signed:0; + + print fmt: "(%lx)", REC->__probe_ip + +We can list all dynamic tracepoints currently in +existence: :: + + root@crownbay:~# perf probe -l + probe:do_fork (on do_fork) + probe:schedule (on schedule) + +Let's record system-wide ('sleep 30' is a +trick for recording system-wide but basically do nothing and then wake +up after 30 seconds): :: + + root@crownbay:~# perf record -g -a -e probe:do_fork sleep 30 + [ perf record: Woken up 1 times to write data ] + [ perf record: Captured and wrote 0.087 MB perf.data (~3812 samples) ] + +Using 'perf script' we can see each do_fork event that fired: :: + + root@crownbay:~# perf script + + # ======== + # captured on: Sun Oct 28 11:55:18 2012 + # hostname : crownbay + # os release : 3.4.11-yocto-standard + # perf version : 3.4.11 + # arch : i686 + # nrcpus online : 2 + # nrcpus avail : 2 + # cpudesc : Intel(R) Atom(TM) CPU E660 @ 1.30GHz + # cpuid : GenuineIntel,6,38,1 + # total memory : 1017184 kB + # cmdline : /usr/bin/perf record -g -a -e probe:do_fork sleep 30 + # event : name = probe:do_fork, type = 2, config = 0x3b0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern + = 0, id = { 5, 6 } + # HEADER_CPU_TOPOLOGY info available, use -I to display + # ======== + # + matchbox-deskto 1197 
[001] 34211.378318: do_fork: (c1028460) + matchbox-deskto 1295 [001] 34211.380388: do_fork: (c1028460) + pcmanfm 1296 [000] 34211.632350: do_fork: (c1028460) + pcmanfm 1296 [000] 34211.639917: do_fork: (c1028460) + matchbox-deskto 1197 [001] 34217.541603: do_fork: (c1028460) + matchbox-deskto 1299 [001] 34217.543584: do_fork: (c1028460) + gthumb 1300 [001] 34217.697451: do_fork: (c1028460) + gthumb 1300 [001] 34219.085734: do_fork: (c1028460) + gthumb 1300 [000] 34219.121351: do_fork: (c1028460) + gthumb 1300 [001] 34219.264551: do_fork: (c1028460) + pcmanfm 1296 [000] 34219.590380: do_fork: (c1028460) + matchbox-deskto 1197 [001] 34224.955965: do_fork: (c1028460) + matchbox-deskto 1306 [001] 34224.957972: do_fork: (c1028460) + matchbox-termin 1307 [000] 34225.038214: do_fork: (c1028460) + matchbox-termin 1307 [001] 34225.044218: do_fork: (c1028460) + matchbox-termin 1307 [000] 34225.046442: do_fork: (c1028460) + matchbox-deskto 1197 [001] 34237.112138: do_fork: (c1028460) + matchbox-deskto 1311 [001] 34237.114106: do_fork: (c1028460) + gaku 1312 [000] 34237.202388: do_fork: (c1028460) + +And using 'perf report' on the same file, we can see the +callgraphs from starting a few programs during those 30 seconds: + +.. image:: figures/perf-probe-do_fork-profile.png + :align: center + +.. admonition:: Tying it Together + + The trace events subsystem accommodate static and dynamic tracepoints + in exactly the same way - there's no difference as far as the + infrastructure is concerned. See the ftrace section for more details + on the trace event subsystem. + +.. admonition:: Tying it Together + + Dynamic tracepoints are implemented under the covers by kprobes and + uprobes. kprobes and uprobes are also used by and in fact are the + main focus of SystemTap. + +.. _perf-documentation: + +Perf Documentation +------------------ + +Online versions of the man pages for the commands discussed in this +section can be found here: + +- The `'perf stat' manpage `__. + +- The `'perf record' + manpage `__. + +- The `'perf report' + manpage `__. + +- The `'perf probe' manpage `__. + +- The `'perf script' + manpage `__. + +- Documentation on using the `'perf script' python + binding `__. + +- The top-level `perf(1) manpage `__. + +Normally, you should be able to invoke the man pages via perf itself +e.g. 'perf help' or 'perf help record'. + +However, by default Yocto doesn't install man pages, but perf invokes +the man pages for most help functionality. This is a bug and is being +addressed by a Yocto bug: `Bug 3388 - perf: enable man pages for basic +'help' +functionality `__. + +The man pages in text form, along with some other files, such as a set +of examples, can be found in the 'perf' directory of the kernel tree: :: + + tools/perf/Documentation + +There's also a nice perf tutorial on the perf +wiki that goes into more detail than we do here in certain areas: `Perf +Tutorial `__ + +.. _profile-manual-ftrace: + +ftrace +====== + +'ftrace' literally refers to the 'ftrace function tracer' but in reality +this encompasses a number of related tracers along with the +infrastructure that they all make use of. + +.. _ftrace-setup: + +ftrace Setup +------------ + +For this section, we'll assume you've already performed the basic setup +outlined in the ":ref:`profile-manual/profile-manual-intro:General Setup`" section. + +ftrace, trace-cmd, and kernelshark run on the target system, and are +ready to go out-of-the-box - no additional setup is necessary. 
For the +rest of this section we assume you've ssh'ed to the host and will be +running ftrace on the target. kernelshark is a GUI application and if +you use the '-X' option to ssh you can have the kernelshark GUI run on +the target but display remotely on the host if you want. + +Basic ftrace usage +------------------ + +'ftrace' essentially refers to everything included in the /tracing +directory of the mounted debugfs filesystem (Yocto follows the standard +convention and mounts it at /sys/kernel/debug). Here's a listing of all +the files found in /sys/kernel/debug/tracing on a Yocto system: :: + + root@sugarbay:/sys/kernel/debug/tracing# ls + README kprobe_events trace + available_events kprobe_profile trace_clock + available_filter_functions options trace_marker + available_tracers per_cpu trace_options + buffer_size_kb printk_formats trace_pipe + buffer_total_size_kb saved_cmdlines tracing_cpumask + current_tracer set_event tracing_enabled + dyn_ftrace_total_info set_ftrace_filter tracing_on + enabled_functions set_ftrace_notrace tracing_thresh + events set_ftrace_pid + free_buffer set_graph_function + +The files listed above are used for various purposes +- some relate directly to the tracers themselves, others are used to set +tracing options, and yet others actually contain the tracing output when +a tracer is in effect. Some of the functions can be guessed from their +names, others need explanation; in any case, we'll cover some of the +files we see here below but for an explanation of the others, please see +the ftrace documentation. + +We'll start by looking at some of the available built-in tracers. + +cat'ing the 'available_tracers' file lists the set of available tracers: :: + + root@sugarbay:/sys/kernel/debug/tracing# cat available_tracers + blk function_graph function nop + +The 'current_tracer' file contains the tracer currently in effect: :: + + root@sugarbay:/sys/kernel/debug/tracing# cat current_tracer + nop + +The above listing of current_tracer shows that the +'nop' tracer is in effect, which is just another way of saying that +there's actually no tracer currently in effect. + +echo'ing one of the available_tracers into current_tracer makes the +specified tracer the current tracer: :: + + root@sugarbay:/sys/kernel/debug/tracing# echo function > current_tracer + root@sugarbay:/sys/kernel/debug/tracing# cat current_tracer + function + +The above sets the current tracer to be the 'function tracer'. This tracer +traces every function call in the kernel and makes it available as the +contents of the 'trace' file. 
Reading the 'trace' file lists the +currently buffered function calls that have been traced by the function +tracer: :: + + root@sugarbay:/sys/kernel/debug/tracing# cat trace | less + + # tracer: function + # + # entries-in-buffer/entries-written: 310629/766471 #P:8 + # + # _-----=> irqs-off + # / _----=> need-resched + # | / _---=> hardirq/softirq + # || / _--=> preempt-depth + # ||| / delay + # TASK-PID CPU# |||| TIMESTAMP FUNCTION + # | | | |||| | | + -0 [004] d..1 470.867169: ktime_get_real <-intel_idle + -0 [004] d..1 470.867170: getnstimeofday <-ktime_get_real + -0 [004] d..1 470.867171: ns_to_timeval <-intel_idle + -0 [004] d..1 470.867171: ns_to_timespec <-ns_to_timeval + -0 [004] d..1 470.867172: smp_apic_timer_interrupt <-apic_timer_interrupt + -0 [004] d..1 470.867172: native_apic_mem_write <-smp_apic_timer_interrupt + -0 [004] d..1 470.867172: irq_enter <-smp_apic_timer_interrupt + -0 [004] d..1 470.867172: rcu_irq_enter <-irq_enter + -0 [004] d..1 470.867173: rcu_idle_exit_common.isra.33 <-rcu_irq_enter + -0 [004] d..1 470.867173: local_bh_disable <-irq_enter + -0 [004] d..1 470.867173: add_preempt_count <-local_bh_disable + -0 [004] d.s1 470.867174: tick_check_idle <-irq_enter + -0 [004] d.s1 470.867174: tick_check_oneshot_broadcast <-tick_check_idle + -0 [004] d.s1 470.867174: ktime_get <-tick_check_idle + -0 [004] d.s1 470.867174: tick_nohz_stop_idle <-tick_check_idle + -0 [004] d.s1 470.867175: update_ts_time_stats <-tick_nohz_stop_idle + -0 [004] d.s1 470.867175: nr_iowait_cpu <-update_ts_time_stats + -0 [004] d.s1 470.867175: tick_do_update_jiffies64 <-tick_check_idle + -0 [004] d.s1 470.867175: _raw_spin_lock <-tick_do_update_jiffies64 + -0 [004] d.s1 470.867176: add_preempt_count <-_raw_spin_lock + -0 [004] d.s2 470.867176: do_timer <-tick_do_update_jiffies64 + -0 [004] d.s2 470.867176: _raw_spin_lock <-do_timer + -0 [004] d.s2 470.867176: add_preempt_count <-_raw_spin_lock + -0 [004] d.s3 470.867177: ntp_tick_length <-do_timer + -0 [004] d.s3 470.867177: _raw_spin_lock_irqsave <-ntp_tick_length + . + . + . + +Each line in the trace above shows what was happening in the kernel on a given +cpu, to the level of detail of function calls. Each entry shows the function +called, followed by its caller (after the arrow). + +The function tracer gives you an extremely detailed idea of what the +kernel was doing at the point in time the trace was taken, and is a +great way to learn about how the kernel code works in a dynamic sense. + +.. admonition:: Tying it Together + + The ftrace function tracer is also available from within perf, as the + ftrace:function tracepoint. 
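+
+All of the 'echo' and 'cat' steps above can also be scripted. As a
+minimal sketch - assuming debugfs is mounted at /sys/kernel/debug as on
+a stock Yocto image, and that it's run as root on the target - the same
+switch-to-the-function-tracer-and-read-the-buffer sequence might look
+like this in Python:
+
+.. code-block:: python
+
+    import itertools
+    import os
+
+    TRACING = "/sys/kernel/debug/tracing"
+
+    def write_tracefs(name, value):
+        # same as 'echo <value> > /sys/kernel/debug/tracing/<name>'
+        with open(os.path.join(TRACING, name), "w") as f:
+            f.write(value + "\n")
+
+    def read_trace(max_lines=20):
+        # same as 'cat trace', limited to the first few buffered entries
+        with open(os.path.join(TRACING, "trace")) as f:
+            return list(itertools.islice(f, max_lines))
+
+    write_tracefs("current_tracer", "function")  # like the echo above
+    for line in read_trace():
+        print(line.rstrip())
+    write_tracefs("current_tracer", "nop")       # restore the default
+
+This does nothing that the two echo commands and 'cat trace' don't
+already do; it's only meant to show that the tracefs files are ordinary
+files that can be driven from any language.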
+ +It is a little more difficult to follow the call chains than it needs to +be - luckily there's a variant of the function tracer that displays the +callchains explicitly, called the 'function_graph' tracer: :: + + root@sugarbay:/sys/kernel/debug/tracing# echo function_graph > current_tracer + root@sugarbay:/sys/kernel/debug/tracing# cat trace | less + + tracer: function_graph + + CPU DURATION FUNCTION CALLS + | | | | | | | + 7) 0.046 us | pick_next_task_fair(); + 7) 0.043 us | pick_next_task_stop(); + 7) 0.042 us | pick_next_task_rt(); + 7) 0.032 us | pick_next_task_fair(); + 7) 0.030 us | pick_next_task_idle(); + 7) | _raw_spin_unlock_irq() { + 7) 0.033 us | sub_preempt_count(); + 7) 0.258 us | } + 7) 0.032 us | sub_preempt_count(); + 7) + 13.341 us | } /* __schedule */ + 7) 0.095 us | } /* sub_preempt_count */ + 7) | schedule() { + 7) | __schedule() { + 7) 0.060 us | add_preempt_count(); + 7) 0.044 us | rcu_note_context_switch(); + 7) | _raw_spin_lock_irq() { + 7) 0.033 us | add_preempt_count(); + 7) 0.247 us | } + 7) | idle_balance() { + 7) | _raw_spin_unlock() { + 7) 0.031 us | sub_preempt_count(); + 7) 0.246 us | } + 7) | update_shares() { + 7) 0.030 us | __rcu_read_lock(); + 7) 0.029 us | __rcu_read_unlock(); + 7) 0.484 us | } + 7) 0.030 us | __rcu_read_lock(); + 7) | load_balance() { + 7) | find_busiest_group() { + 7) 0.031 us | idle_cpu(); + 7) 0.029 us | idle_cpu(); + 7) 0.035 us | idle_cpu(); + 7) 0.906 us | } + 7) 1.141 us | } + 7) 0.022 us | msecs_to_jiffies(); + 7) | load_balance() { + 7) | find_busiest_group() { + 7) 0.031 us | idle_cpu(); + . + . + . + 4) 0.062 us | msecs_to_jiffies(); + 4) 0.062 us | __rcu_read_unlock(); + 4) | _raw_spin_lock() { + 4) 0.073 us | add_preempt_count(); + 4) 0.562 us | } + 4) + 17.452 us | } + 4) 0.108 us | put_prev_task_fair(); + 4) 0.102 us | pick_next_task_fair(); + 4) 0.084 us | pick_next_task_stop(); + 4) 0.075 us | pick_next_task_rt(); + 4) 0.062 us | pick_next_task_fair(); + 4) 0.066 us | pick_next_task_idle(); + ------------------------------------------ + 4) kworker-74 => -0 + ------------------------------------------ + + 4) | finish_task_switch() { + 4) | _raw_spin_unlock_irq() { + 4) 0.100 us | sub_preempt_count(); + 4) 0.582 us | } + 4) 1.105 us | } + 4) 0.088 us | sub_preempt_count(); + 4) ! 100.066 us | } + . + . + . 
+ 3) | sys_ioctl() { + 3) 0.083 us | fget_light(); + 3) | security_file_ioctl() { + 3) 0.066 us | cap_file_ioctl(); + 3) 0.562 us | } + 3) | do_vfs_ioctl() { + 3) | drm_ioctl() { + 3) 0.075 us | drm_ut_debug_printk(); + 3) | i915_gem_pwrite_ioctl() { + 3) | i915_mutex_lock_interruptible() { + 3) 0.070 us | mutex_lock_interruptible(); + 3) 0.570 us | } + 3) | drm_gem_object_lookup() { + 3) | _raw_spin_lock() { + 3) 0.080 us | add_preempt_count(); + 3) 0.620 us | } + 3) | _raw_spin_unlock() { + 3) 0.085 us | sub_preempt_count(); + 3) 0.562 us | } + 3) 2.149 us | } + 3) 0.133 us | i915_gem_object_pin(); + 3) | i915_gem_object_set_to_gtt_domain() { + 3) 0.065 us | i915_gem_object_flush_gpu_write_domain(); + 3) 0.065 us | i915_gem_object_wait_rendering(); + 3) 0.062 us | i915_gem_object_flush_cpu_write_domain(); + 3) 1.612 us | } + 3) | i915_gem_object_put_fence() { + 3) 0.097 us | i915_gem_object_flush_fence.constprop.36(); + 3) 0.645 us | } + 3) 0.070 us | add_preempt_count(); + 3) 0.070 us | sub_preempt_count(); + 3) 0.073 us | i915_gem_object_unpin(); + 3) 0.068 us | mutex_unlock(); + 3) 9.924 us | } + 3) + 11.236 us | } + 3) + 11.770 us | } + 3) + 13.784 us | } + 3) | sys_ioctl() { + +As you can see, the function_graph display is much easier +to follow. Also note that in addition to the function calls and +associated braces, other events such as scheduler events are displayed +in context. In fact, you can freely include any tracepoint available in +the trace events subsystem described in the next section by simply +enabling those events, and they'll appear in context in the function +graph display. Quite a powerful tool for understanding kernel dynamics. + +Also notice that there are various annotations on the left hand side of +the display. For example if the total time it took for a given function +to execute is above a certain threshold, an exclamation point or plus +sign appears on the left hand side. Please see the ftrace documentation +for details on all these fields. + +The 'trace events' Subsystem +---------------------------- + +One especially important directory contained within the +/sys/kernel/debug/tracing directory is the 'events' subdirectory, which +contains representations of every tracepoint in the system. Listing out +the contents of the 'events' subdirectory, we see mainly another set of +subdirectories: :: + + root@sugarbay:/sys/kernel/debug/tracing# cd events + root@sugarbay:/sys/kernel/debug/tracing/events# ls -al + drwxr-xr-x 38 root root 0 Nov 14 23:19 . + drwxr-xr-x 5 root root 0 Nov 14 23:19 .. 
+ drwxr-xr-x 19 root root 0 Nov 14 23:19 block + drwxr-xr-x 32 root root 0 Nov 14 23:19 btrfs + drwxr-xr-x 5 root root 0 Nov 14 23:19 drm + -rw-r--r-- 1 root root 0 Nov 14 23:19 enable + drwxr-xr-x 40 root root 0 Nov 14 23:19 ext3 + drwxr-xr-x 79 root root 0 Nov 14 23:19 ext4 + drwxr-xr-x 14 root root 0 Nov 14 23:19 ftrace + drwxr-xr-x 8 root root 0 Nov 14 23:19 hda + -r--r--r-- 1 root root 0 Nov 14 23:19 header_event + -r--r--r-- 1 root root 0 Nov 14 23:19 header_page + drwxr-xr-x 25 root root 0 Nov 14 23:19 i915 + drwxr-xr-x 7 root root 0 Nov 14 23:19 irq + drwxr-xr-x 12 root root 0 Nov 14 23:19 jbd + drwxr-xr-x 14 root root 0 Nov 14 23:19 jbd2 + drwxr-xr-x 14 root root 0 Nov 14 23:19 kmem + drwxr-xr-x 7 root root 0 Nov 14 23:19 module + drwxr-xr-x 3 root root 0 Nov 14 23:19 napi + drwxr-xr-x 6 root root 0 Nov 14 23:19 net + drwxr-xr-x 3 root root 0 Nov 14 23:19 oom + drwxr-xr-x 12 root root 0 Nov 14 23:19 power + drwxr-xr-x 3 root root 0 Nov 14 23:19 printk + drwxr-xr-x 8 root root 0 Nov 14 23:19 random + drwxr-xr-x 4 root root 0 Nov 14 23:19 raw_syscalls + drwxr-xr-x 3 root root 0 Nov 14 23:19 rcu + drwxr-xr-x 6 root root 0 Nov 14 23:19 rpm + drwxr-xr-x 20 root root 0 Nov 14 23:19 sched + drwxr-xr-x 7 root root 0 Nov 14 23:19 scsi + drwxr-xr-x 4 root root 0 Nov 14 23:19 signal + drwxr-xr-x 5 root root 0 Nov 14 23:19 skb + drwxr-xr-x 4 root root 0 Nov 14 23:19 sock + drwxr-xr-x 10 root root 0 Nov 14 23:19 sunrpc + drwxr-xr-x 538 root root 0 Nov 14 23:19 syscalls + drwxr-xr-x 4 root root 0 Nov 14 23:19 task + drwxr-xr-x 14 root root 0 Nov 14 23:19 timer + drwxr-xr-x 3 root root 0 Nov 14 23:19 udp + drwxr-xr-x 21 root root 0 Nov 14 23:19 vmscan + drwxr-xr-x 3 root root 0 Nov 14 23:19 vsyscall + drwxr-xr-x 6 root root 0 Nov 14 23:19 workqueue + drwxr-xr-x 26 root root 0 Nov 14 23:19 writeback + +Each one of these subdirectories +corresponds to a 'subsystem' and contains yet again more subdirectories, +each one of those finally corresponding to a tracepoint. For example, +here are the contents of the 'kmem' subsystem: :: + + root@sugarbay:/sys/kernel/debug/tracing/events# cd kmem + root@sugarbay:/sys/kernel/debug/tracing/events/kmem# ls -al + drwxr-xr-x 14 root root 0 Nov 14 23:19 . + drwxr-xr-x 38 root root 0 Nov 14 23:19 .. + -rw-r--r-- 1 root root 0 Nov 14 23:19 enable + -rw-r--r-- 1 root root 0 Nov 14 23:19 filter + drwxr-xr-x 2 root root 0 Nov 14 23:19 kfree + drwxr-xr-x 2 root root 0 Nov 14 23:19 kmalloc + drwxr-xr-x 2 root root 0 Nov 14 23:19 kmalloc_node + drwxr-xr-x 2 root root 0 Nov 14 23:19 kmem_cache_alloc + drwxr-xr-x 2 root root 0 Nov 14 23:19 kmem_cache_alloc_node + drwxr-xr-x 2 root root 0 Nov 14 23:19 kmem_cache_free + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_alloc + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_alloc_extfrag + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_alloc_zone_locked + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_free + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_free_batched + drwxr-xr-x 2 root root 0 Nov 14 23:19 mm_page_pcpu_drain + +Let's see what's inside the subdirectory for a +specific tracepoint, in this case the one for kmalloc: :: + + root@sugarbay:/sys/kernel/debug/tracing/events/kmem# cd kmalloc + root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# ls -al + drwxr-xr-x 2 root root 0 Nov 14 23:19 . + drwxr-xr-x 14 root root 0 Nov 14 23:19 .. 
+ -rw-r--r-- 1 root root 0 Nov 14 23:19 enable + -rw-r--r-- 1 root root 0 Nov 14 23:19 filter + -r--r--r-- 1 root root 0 Nov 14 23:19 format + -r--r--r-- 1 root root 0 Nov 14 23:19 id + +The 'format' file for the +tracepoint describes the event in memory, which is used by the various +tracing tools that now make use of these tracepoint to parse the event +and make sense of it, along with a 'print fmt' field that allows tools +like ftrace to display the event as text. Here's what the format of the +kmalloc event looks like: :: + + root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# cat format + name: kmalloc + ID: 313 + format: + field:unsigned short common_type; offset:0; size:2; signed:0; + field:unsigned char common_flags; offset:2; size:1; signed:0; + field:unsigned char common_preempt_count; offset:3; size:1; signed:0; + field:int common_pid; offset:4; size:4; signed:1; + field:int common_padding; offset:8; size:4; signed:1; + + field:unsigned long call_site; offset:16; size:8; signed:0; + field:const void * ptr; offset:24; size:8; signed:0; + field:size_t bytes_req; offset:32; size:8; signed:0; + field:size_t bytes_alloc; offset:40; size:8; signed:0; + field:gfp_t gfp_flags; offset:48; size:4; signed:0; + + print fmt: "call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", REC->call_site, REC->ptr, REC->bytes_req, REC->bytes_alloc, + (REC->gfp_flags) ? __print_flags(REC->gfp_flags, "|", {(unsigned long)(((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( + gfp_t)0x20000u) | (( gfp_t)0x02u) | (( gfp_t)0x08u)) | (( gfp_t)0x4000u) | (( gfp_t)0x10000u) | (( gfp_t)0x1000u) | (( gfp_t)0x200u) | (( + gfp_t)0x400000u)), "GFP_TRANSHUGE"}, {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x20000u) | (( + gfp_t)0x02u) | (( gfp_t)0x08u)), "GFP_HIGHUSER_MOVABLE"}, {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( + gfp_t)0x20000u) | (( gfp_t)0x02u)), "GFP_HIGHUSER"}, {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( + gfp_t)0x20000u)), "GFP_USER"}, {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u) | (( gfp_t)0x80000u)), GFP_TEMPORARY"}, + {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)), "GFP_KERNEL"}, {(unsigned long)((( gfp_t)0x10u) | (( gfp_t)0x40u)), + "GFP_NOFS"}, {(unsigned long)((( gfp_t)0x20u)), "GFP_ATOMIC"}, {(unsigned long)((( gfp_t)0x10u)), "GFP_NOIO"}, {(unsigned long)(( + gfp_t)0x20u), "GFP_HIGH"}, {(unsigned long)(( gfp_t)0x10u), "GFP_WAIT"}, {(unsigned long)(( gfp_t)0x40u), "GFP_IO"}, {(unsigned long)(( + gfp_t)0x100u), "GFP_COLD"}, {(unsigned long)(( gfp_t)0x200u), "GFP_NOWARN"}, {(unsigned long)(( gfp_t)0x400u), "GFP_REPEAT"}, {(unsigned + long)(( gfp_t)0x800u), "GFP_NOFAIL"}, {(unsigned long)(( gfp_t)0x1000u), "GFP_NORETRY"}, {(unsigned long)(( gfp_t)0x4000u), "GFP_COMP"}, + {(unsigned long)(( gfp_t)0x8000u), "GFP_ZERO"}, {(unsigned long)(( gfp_t)0x10000u), "GFP_NOMEMALLOC"}, {(unsigned long)(( gfp_t)0x20000u), + "GFP_HARDWALL"}, {(unsigned long)(( gfp_t)0x40000u), "GFP_THISNODE"}, {(unsigned long)(( gfp_t)0x80000u), "GFP_RECLAIMABLE"}, {(unsigned + long)(( gfp_t)0x08u), "GFP_MOVABLE"}, {(unsigned long)(( gfp_t)0), "GFP_NOTRACK"}, {(unsigned long)(( gfp_t)0x400000u), "GFP_NO_KSWAPD"}, + {(unsigned long)(( gfp_t)0x800000u), "GFP_OTHER_NODE"} ) : "GFP_NOWAIT" + +The 'enable' file +in the tracepoint directory is what allows the user (or tools such as +trace-cmd) to actually turn the tracepoint on and off. 
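+
+Turning events on and off this way requires nothing more exotic than
+reading and writing small plain-text files, which makes it easy to
+script. Purely as an illustrative sketch - again assuming debugfs is
+mounted at /sys/kernel/debug and root privileges on the target - here's
+how a small Python helper might flip an event's 'enable' file and pull
+the field names out of its 'format' file:
+
+.. code-block:: python
+
+    import os
+
+    EVENTS = "/sys/kernel/debug/tracing/events"
+
+    def set_event(subsystem, event, on=True):
+        # same as 'echo 1 > .../events/<subsystem>/<event>/enable'
+        with open(os.path.join(EVENTS, subsystem, event, "enable"), "w") as f:
+            f.write("1\n" if on else "0\n")
+
+    def event_fields(subsystem, event):
+        # pull the field names out of the event's 'format' file
+        fields = []
+        with open(os.path.join(EVENTS, subsystem, event, "format")) as f:
+            for line in f:
+                line = line.strip()
+                if line.startswith("field:"):
+                    decl = line[len("field:"):].split(";")[0]  # e.g. "size_t bytes_req"
+                    fields.append(decl.split()[-1])            # keep just the name
+        return fields
+
+    set_event("kmem", "kmalloc")            # turn the kmalloc tracepoint on
+    print(event_fields("kmem", "kmalloc"))  # common_type ... bytes_req, gfp_flags
+    set_event("kmem", "kmalloc", on=False)  # and back off again
+
+This is essentially all that the 'echo 1 > enable' command shown next
+does, just wrapped in a form a test script could reuse.
+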
When enabled, the +corresponding tracepoint will start appearing in the ftrace 'trace' file +described previously. For example, this turns on the kmalloc tracepoint: :: + + root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# echo 1 > enable + +At the moment, we're not interested in the function tracer or +some other tracer that might be in effect, so we first turn it off, but +if we do that, we still need to turn tracing on in order to see the +events in the output buffer: :: + + root@sugarbay:/sys/kernel/debug/tracing# echo nop > current_tracer + root@sugarbay:/sys/kernel/debug/tracing# echo 1 > tracing_on + +Now, if we look at the the 'trace' file, we see nothing +but the kmalloc events we just turned on: :: + + root@sugarbay:/sys/kernel/debug/tracing# cat trace | less + # tracer: nop + # + # entries-in-buffer/entries-written: 1897/1897 #P:8 + # + # _-----=> irqs-off + # / _----=> need-resched + # | / _---=> hardirq/softirq + # || / _--=> preempt-depth + # ||| / delay + # TASK-PID CPU# |||| TIMESTAMP FUNCTION + # | | | |||| | | + dropbear-1465 [000] ...1 18154.620753: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18154.621640: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + -0 [000] ..s3 18154.621656: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + matchbox-termin-1361 [001] ...1 18154.755472: kmalloc: call_site=ffffffff81614050 ptr=ffff88006d5f0e00 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_KERNEL|GFP_REPEAT + Xorg-1264 [002] ...1 18154.755581: kmalloc: call_site=ffffffff8141abe8 ptr=ffff8800734f4cc0 bytes_req=168 bytes_alloc=192 gfp_flags=GFP_KERNEL|GFP_NOWARN|GFP_NORETRY + Xorg-1264 [002] ...1 18154.755583: kmalloc: call_site=ffffffff814192a3 ptr=ffff88001f822520 bytes_req=24 bytes_alloc=32 gfp_flags=GFP_KERNEL|GFP_ZERO + Xorg-1264 [002] ...1 18154.755589: kmalloc: call_site=ffffffff81419edb ptr=ffff8800721a2f00 bytes_req=64 bytes_alloc=64 gfp_flags=GFP_KERNEL|GFP_ZERO + matchbox-termin-1361 [001] ...1 18155.354594: kmalloc: call_site=ffffffff81614050 ptr=ffff88006db35400 bytes_req=576 bytes_alloc=1024 gfp_flags=GFP_KERNEL|GFP_REPEAT + Xorg-1264 [002] ...1 18155.354703: kmalloc: call_site=ffffffff8141abe8 ptr=ffff8800734f4cc0 bytes_req=168 bytes_alloc=192 gfp_flags=GFP_KERNEL|GFP_NOWARN|GFP_NORETRY + Xorg-1264 [002] ...1 18155.354705: kmalloc: call_site=ffffffff814192a3 ptr=ffff88001f822520 bytes_req=24 bytes_alloc=32 gfp_flags=GFP_KERNEL|GFP_ZERO + Xorg-1264 [002] ...1 18155.354711: kmalloc: call_site=ffffffff81419edb ptr=ffff8800721a2f00 bytes_req=64 bytes_alloc=64 gfp_flags=GFP_KERNEL|GFP_ZERO + -0 [000] ..s3 18155.673319: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + dropbear-1465 [000] ...1 18155.673525: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18155.674821: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + -0 [000] ..s3 18155.793014: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + dropbear-1465 [000] ...1 18155.793219: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18155.794147: kmalloc: call_site=ffffffff81619b36 
ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + -0 [000] ..s3 18155.936705: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + dropbear-1465 [000] ...1 18155.936910: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18155.937869: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + matchbox-termin-1361 [001] ...1 18155.953667: kmalloc: call_site=ffffffff81614050 ptr=ffff88006d5f2000 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_KERNEL|GFP_REPEAT + Xorg-1264 [002] ...1 18155.953775: kmalloc: call_site=ffffffff8141abe8 ptr=ffff8800734f4cc0 bytes_req=168 bytes_alloc=192 gfp_flags=GFP_KERNEL|GFP_NOWARN|GFP_NORETRY + Xorg-1264 [002] ...1 18155.953777: kmalloc: call_site=ffffffff814192a3 ptr=ffff88001f822520 bytes_req=24 bytes_alloc=32 gfp_flags=GFP_KERNEL|GFP_ZERO + Xorg-1264 [002] ...1 18155.953783: kmalloc: call_site=ffffffff81419edb ptr=ffff8800721a2f00 bytes_req=64 bytes_alloc=64 gfp_flags=GFP_KERNEL|GFP_ZERO + -0 [000] ..s3 18156.176053: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + dropbear-1465 [000] ...1 18156.176257: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18156.177717: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + -0 [000] ..s3 18156.399229: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d555800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + dropbear-1465 [000] ...1 18156.399434: kmalloc: call_site=ffffffff816650d4 ptr=ffff8800729c3000 bytes_http://rostedt.homelinux.com/kernelshark/req=2048 bytes_alloc=2048 gfp_flags=GFP_KERNEL + -0 [000] ..s3 18156.400660: kmalloc: call_site=ffffffff81619b36 ptr=ffff88006d554800 bytes_req=512 bytes_alloc=512 gfp_flags=GFP_ATOMIC + matchbox-termin-1361 [001] ...1 18156.552800: kmalloc: call_site=ffffffff81614050 ptr=ffff88006db34800 bytes_req=576 bytes_alloc=1024 gfp_flags=GFP_KERNEL|GFP_REPEAT + +To again disable the kmalloc event, we need to send 0 to the enable file: :: + + root@sugarbay:/sys/kernel/debug/tracing/events/kmem/kmalloc# echo 0 > enable + +You can enable any number of events or complete subsystems (by +using the 'enable' file in the subsystem directory) and get an +arbitrarily fine-grained idea of what's going on in the system by +enabling as many of the appropriate tracepoints as applicable. + +A number of the tools described in this HOWTO do just that, including +trace-cmd and kernelshark in the next section. + +.. admonition:: Tying it Together + + These tracepoints and their representation are used not only by + ftrace, but by many of the other tools covered in this document and + they form a central point of integration for the various tracers + available in Linux. They form a central part of the instrumentation + for the following tools: perf, lttng, ftrace, blktrace and SystemTap + +.. admonition:: Tying it Together + + Eventually all the special-purpose tracers currently available in + /sys/kernel/debug/tracing will be removed and replaced with + equivalent tracers based on the 'trace events' subsystem. + +.. 
_trace-cmd-kernelshark: + +trace-cmd/kernelshark +--------------------- + +trace-cmd is essentially an extensive command-line 'wrapper' interface +that hides the details of all the individual files in +/sys/kernel/debug/tracing, allowing users to specify specific particular +events within the /sys/kernel/debug/tracing/events/ subdirectory and to +collect traces and avoid having to deal with those details directly. + +As yet another layer on top of that, kernelshark provides a GUI that +allows users to start and stop traces and specify sets of events using +an intuitive interface, and view the output as both trace events and as +a per-CPU graphical display. It directly uses 'trace-cmd' as the +plumbing that accomplishes all that underneath the covers (and actually +displays the trace-cmd command it uses, as we'll see). + +To start a trace using kernelshark, first start kernelshark: :: + + root@sugarbay:~# kernelshark + +Then bring up the 'Capture' dialog by +choosing from the kernelshark menu: :: + + Capture | Record + +That will display the following dialog, which allows you to choose one or more +events (or even one or more complete subsystems) to trace: + +.. image:: figures/kernelshark-choose-events.png + :align: center + +Note that these are exactly the same sets of events described in the +previous trace events subsystem section, and in fact is where trace-cmd +gets them for kernelshark. + +In the above screenshot, we've decided to explore the graphics subsystem +a bit and so have chosen to trace all the tracepoints contained within +the 'i915' and 'drm' subsystems. + +After doing that, we can start and stop the trace using the 'Run' and +'Stop' button on the lower right corner of the dialog (the same button +will turn into the 'Stop' button after the trace has started): + +.. image:: figures/kernelshark-output-display.png + :align: center + +Notice that the right-hand pane shows the exact trace-cmd command-line +that's used to run the trace, along with the results of the trace-cmd +run. + +Once the 'Stop' button is pressed, the graphical view magically fills up +with a colorful per-cpu display of the trace data, along with the +detailed event listing below that: + +.. image:: figures/kernelshark-i915-display.png + :align: center + +Here's another example, this time a display resulting from tracing 'all +events': + +.. image:: figures/kernelshark-all.png + :align: center + +The tool is pretty self-explanatory, but for more detailed information +on navigating through the data, see the `kernelshark +website `__. + +.. _ftrace-documentation: + +ftrace Documentation +-------------------- + +The documentation for ftrace can be found in the kernel Documentation +directory: :: + + Documentation/trace/ftrace.txt + +The documentation for the trace event subsystem can also be found in the kernel +Documentation directory: :: + + Documentation/trace/events.txt + +There is a nice series of articles on using ftrace and trace-cmd at LWN: + +- `Debugging the kernel using Ftrace - part + 1 `__ + +- `Debugging the kernel using Ftrace - part + 2 `__ + +- `Secrets of the Ftrace function + tracer `__ + +- `trace-cmd: A front-end for + Ftrace `__ + +There's more detailed documentation kernelshark usage here: +`KernelShark `__ + +An amusing yet useful README (a tracing mini-HOWTO) can be found in +``/sys/kernel/debug/tracing/README``. + +.. _profile-manual-systemtap: + +systemtap +========= + +SystemTap is a system-wide script-based tracing and profiling tool. 
+ +SystemTap scripts are C-like programs that are executed in the kernel to +gather/print/aggregate data extracted from the context they end up being +invoked under. + +For example, this probe from the `SystemTap +tutorial `__ simply prints a +line every time any process on the system open()s a file. For each line, +it prints the executable name of the program that opened the file, along +with its PID, and the name of the file it opened (or tried to open), +which it extracts from the open syscall's argstr. + +.. code-block:: none + + probe syscall.open + { + printf ("%s(%d) open (%s)\n", execname(), pid(), argstr) + } + + probe timer.ms(4000) # after 4 seconds + { + exit () + } + +Normally, to execute this +probe, you'd simply install systemtap on the system you want to probe, +and directly run the probe on that system e.g. assuming the name of the +file containing the above text is trace_open.stp: :: + + # stap trace_open.stp + +What systemtap does under the covers to run this probe is 1) parse and +convert the probe to an equivalent 'C' form, 2) compile the 'C' form +into a kernel module, 3) insert the module into the kernel, which arms +it, and 4) collect the data generated by the probe and display it to the +user. + +In order to accomplish steps 1 and 2, the 'stap' program needs access to +the kernel build system that produced the kernel that the probed system +is running. In the case of a typical embedded system (the 'target'), the +kernel build system unfortunately isn't typically part of the image +running on the target. It is normally available on the 'host' system +that produced the target image however; in such cases, steps 1 and 2 are +executed on the host system, and steps 3 and 4 are executed on the +target system, using only the systemtap 'runtime'. + +The systemtap support in Yocto assumes that only steps 3 and 4 are run +on the target; it is possible to do everything on the target, but this +section assumes only the typical embedded use-case. + +So basically what you need to do in order to run a systemtap script on +the target is to 1) on the host system, compile the probe into a kernel +module that makes sense to the target, 2) copy the module onto the +target system and 3) insert the module into the target kernel, which +arms it, and 4) collect the data generated by the probe and display it +to the user. + +.. _systemtap-setup: + +systemtap Setup +--------------- + +Those are a lot of steps and a lot of details, but fortunately Yocto +includes a script called 'crosstap' that will take care of those +details, allowing you to simply execute a systemtap script on the remote +target, with arguments if necessary. + +In order to do this from a remote host, however, you need to have access +to the build for the image you booted. The 'crosstap' script provides +details on how to do this if you run the script on the host without +having done a build: :: + + $ crosstap root@192.168.1.88 trace_open.stp + + Error: No target kernel build found. + Did you forget to create a local build of your image? + + 'crosstap' requires a local sdk build of the target system + (or a build that includes 'tools-profile') in order to build + kernel modules that can probe the target system. + + Practically speaking, that means you need to do the following: + - If you're running a pre-built image, download the release + and/or BSP tarballs used to build the image. + - If you're working from git sources, just clone the metadata + and BSP layers needed to build the image you'll be booting. 
+ - Make sure you're properly set up to build a new image (see + the BSP README and/or the widely available basic documentation + that discusses how to build images). + - Build an -sdk version of the image e.g.: + $ bitbake core-image-sato-sdk + OR + - Build a non-sdk image but include the profiling tools: + [ edit local.conf and add 'tools-profile' to the end of + the EXTRA_IMAGE_FEATURES variable ] + $ bitbake core-image-sato + + Once you've build the image on the host system, you're ready to + boot it (or the equivalent pre-built image) and use 'crosstap' + to probe it (you need to source the environment as usual first): + + $ source oe-init-build-env + $ cd ~/my/systemtap/scripts + $ crosstap root@192.168.1.xxx myscript.stp + +.. note:: + + SystemTap, which uses 'crosstap', assumes you can establish an ssh + connection to the remote target. Please refer to the crosstap wiki + page for details on verifying ssh connections at + . Also, the ability to ssh into the target system is not enabled by + default in \*-minimal images. + +So essentially what you need to +do is build an SDK image or image with 'tools-profile' as detailed in +the ":ref:`profile-manual/profile-manual-intro:General Setup`" section of this +manual, and boot the resulting target image. + +.. note:: + + If you have a build directory containing multiple machines, you need + to have the MACHINE you're connecting to selected in local.conf, and + the kernel in that machine's build directory must match the kernel on + the booted system exactly, or you'll get the above 'crosstap' message + when you try to invoke a script. + +Running a Script on a Target +---------------------------- + +Once you've done that, you should be able to run a systemtap script on +the target: :: + + $ cd /path/to/yocto + $ source oe-init-build-env + + ### Shell environment set up for builds. ### + + You can now run 'bitbake ' + + Common targets are: + core-image-minimal + core-image-sato + meta-toolchain + meta-ide-support + + You can also run generated qemu images with a command like 'runqemu qemux86-64' + +Once you've done that, you can cd to whatever +directory contains your scripts and use 'crosstap' to run the script: :: + + $ cd /path/to/my/systemap/script + $ crosstap root@192.168.7.2 trace_open.stp + +If you get an error connecting to the target e.g.: :: + + $ crosstap root@192.168.7.2 trace_open.stp + error establishing ssh connection on remote 'root@192.168.7.2' + +Try ssh'ing to the target and see what happens: :: + + $ ssh root@192.168.7.2 + +A lot of the time, connection +problems are due specifying a wrong IP address or having a 'host key +verification error'. + +If everything worked as planned, you should see something like this +(enter the password when prompted, or press enter if it's set up to use +no password): + +.. code-block:: none + + $ crosstap root@192.168.7.2 trace_open.stp + root@192.168.7.2's password: + matchbox-termin(1036) open ("/tmp/vte3FS2LW", O_RDWR|O_CREAT|O_EXCL|O_LARGEFILE, 0600) + matchbox-termin(1036) open ("/tmp/vteJMC7LW", O_RDWR|O_CREAT|O_EXCL|O_LARGEFILE, 0600) + +.. _systemtap-documentation: + +systemtap Documentation +----------------------- + +The SystemTap language reference can be found here: `SystemTap Language +Reference `__ + +Links to other SystemTap documents, tutorials, and examples can be found +here: `SystemTap documentation +page `__ + +.. 
_profile-manual-sysprof: + +Sysprof +======= + +Sysprof is a very easy to use system-wide profiler that consists of a +single window with three panes and a few buttons which allow you to +start, stop, and view the profile from one place. + +.. _sysprof-setup: + +Sysprof Setup +------------- + +For this section, we'll assume you've already performed the basic setup +outlined in the ":ref:`profile-manual/profile-manual-intro:General Setup`" section. + +Sysprof is a GUI-based application that runs on the target system. For +the rest of this document we assume you've ssh'ed to the host and will +be running Sysprof on the target (you can use the '-X' option to ssh and +have the Sysprof GUI run on the target but display remotely on the host +if you want). + +.. _sysprof-basic-usage: + +Basic Sysprof Usage +------------------- + +To start profiling the system, you simply press the 'Start' button. To +stop profiling and to start viewing the profile data in one easy step, +press the 'Profile' button. + +Once you've pressed the profile button, the three panes will fill up +with profiling data: + +.. image:: figures/sysprof-copy-to-user.png + :align: center + +The left pane shows a list of functions and processes. Selecting one of +those expands that function in the right pane, showing all its callees. +Note that this caller-oriented display is essentially the inverse of +perf's default callee-oriented callchain display. + +In the screenshot above, we're focusing on ``__copy_to_user_ll()`` and +looking up the callchain we can see that one of the callers of +``__copy_to_user_ll`` is sys_read() and the complete callpath between them. +Notice that this is essentially a portion of the same information we saw +in the perf display shown in the perf section of this page. + +.. image:: figures/sysprof-copy-from-user.png + :align: center + +Similarly, the above is a snapshot of the Sysprof display of a +copy-from-user callchain. + +Finally, looking at the third Sysprof pane in the lower left, we can see +a list of all the callers of a particular function selected in the top +left pane. In this case, the lower pane is showing all the callers of +``__mark_inode_dirty``: + +.. image:: figures/sysprof-callers.png + :align: center + +Double-clicking on one of those functions will in turn change the focus +to the selected function, and so on. + +.. admonition:: Tying it Together + + If you like sysprof's 'caller-oriented' display, you may be able to + approximate it in other tools as well. For example, 'perf report' has + the -g (--call-graph) option that you can experiment with; one of the + options is 'caller' for an inverted caller-based callgraph display. + +.. _sysprof-documentation: + +Sysprof Documentation +--------------------- + +There doesn't seem to be any documentation for Sysprof, but maybe that's +because it's pretty self-explanatory. The Sysprof website, however, is +here: `Sysprof, System-wide Performance Profiler for +Linux `__ + +LTTng (Linux Trace Toolkit, next generation) +============================================ + +.. _lttng-setup: + +LTTng Setup +----------- + +For this section, we'll assume you've already performed the basic setup +outlined in the ":ref:`profile-manual/profile-manual-intro:General Setup`" section. +LTTng is run on the target system by ssh'ing to it. 
+ +Collecting and Viewing Traces +----------------------------- + +Once you've applied the above commits and built and booted your image +(you need to build the core-image-sato-sdk image or use one of the other +methods described in the ":ref:`profile-manual/profile-manual-intro:General Setup`" section), you're ready to start +tracing. + +Collecting and viewing a trace on the target (inside a shell) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +First, from the host, ssh to the target: :: + + $ ssh -l root 192.168.1.47 + The authenticity of host '192.168.1.47 (192.168.1.47)' can't be established. + RSA key fingerprint is 23:bd:c8:b1:a8:71:52:00:ee:00:4f:64:9e:10:b9:7e. + Are you sure you want to continue connecting (yes/no)? yes + Warning: Permanently added '192.168.1.47' (RSA) to the list of known hosts. + root@192.168.1.47's password: + +Once on the target, use these steps to create a trace: :: + + root@crownbay:~# lttng create + Spawning a session daemon + Session auto-20121015-232120 created. + Traces will be written in /home/root/lttng-traces/auto-20121015-232120 + +Enable the events you want to trace (in this case all kernel events): :: + + root@crownbay:~# lttng enable-event --kernel --all + All kernel events are enabled in channel channel0 + +Start the trace: :: + + root@crownbay:~# lttng start + Tracing started for session auto-20121015-232120 + +And then stop the trace after awhile or after running a particular workload that +you want to trace: :: + + root@crownbay:~# lttng stop + Tracing stopped for session auto-20121015-232120 + +You can now view the trace in text form on the target: :: + + root@crownbay:~# lttng view + [23:21:56.989270399] (+?.?????????) sys_geteuid: { 1 }, { } + [23:21:56.989278081] (+0.000007682) exit_syscall: { 1 }, { ret = 0 } + [23:21:56.989286043] (+0.000007962) sys_pipe: { 1 }, { fildes = 0xB77B9E8C } + [23:21:56.989321802] (+0.000035759) exit_syscall: { 1 }, { ret = 0 } + [23:21:56.989329345] (+0.000007543) sys_mmap_pgoff: { 1 }, { addr = 0x0, len = 10485760, prot = 3, flags = 131362, fd = 4294967295, pgoff = 0 } + [23:21:56.989351694] (+0.000022349) exit_syscall: { 1 }, { ret = -1247805440 } + [23:21:56.989432989] (+0.000081295) sys_clone: { 1 }, { clone_flags = 0x411, newsp = 0xB5EFFFE4, parent_tid = 0xFFFFFFFF, child_tid = 0x0 } + [23:21:56.989477129] (+0.000044140) sched_stat_runtime: { 1 }, { comm = "lttng-consumerd", tid = 1193, runtime = 681660, vruntime = 43367983388 } + [23:21:56.989486697] (+0.000009568) sched_migrate_task: { 1 }, { comm = "lttng-consumerd", tid = 1193, prio = 20, orig_cpu = 1, dest_cpu = 1 } + [23:21:56.989508418] (+0.000021721) hrtimer_init: { 1 }, { hrtimer = 3970832076, clockid = 1, mode = 1 } + [23:21:56.989770462] (+0.000262044) hrtimer_cancel: { 1 }, { hrtimer = 3993865440 } + [23:21:56.989771580] (+0.000001118) hrtimer_cancel: { 0 }, { hrtimer = 3993812192 } + [23:21:56.989776957] (+0.000005377) hrtimer_expire_entry: { 1 }, { hrtimer = 3993865440, now = 79815980007057, function = 3238465232 } + [23:21:56.989778145] (+0.000001188) hrtimer_expire_entry: { 0 }, { hrtimer = 3993812192, now = 79815980008174, function = 3238465232 } + [23:21:56.989791695] (+0.000013550) softirq_raise: { 1 }, { vec = 1 } + [23:21:56.989795396] (+0.000003701) softirq_raise: { 0 }, { vec = 1 } + [23:21:56.989800635] (+0.000005239) softirq_raise: { 0 }, { vec = 9 } + [23:21:56.989807130] (+0.000006495) sched_stat_runtime: { 1 }, { comm = "lttng-consumerd", tid = 1193, runtime = 330710, vruntime = 43368314098 } + 
[23:21:56.989809993] (+0.000002863) sched_stat_runtime: { 0 }, { comm = "lttng-sessiond", tid = 1181, runtime = 1015313, vruntime = 36976733240 } + [23:21:56.989818514] (+0.000008521) hrtimer_expire_exit: { 0 }, { hrtimer = 3993812192 } + [23:21:56.989819631] (+0.000001117) hrtimer_expire_exit: { 1 }, { hrtimer = 3993865440 } + [23:21:56.989821866] (+0.000002235) hrtimer_start: { 0 }, { hrtimer = 3993812192, function = 3238465232, expires = 79815981000000, softexpires = 79815981000000 } + [23:21:56.989822984] (+0.000001118) hrtimer_start: { 1 }, { hrtimer = 3993865440, function = 3238465232, expires = 79815981000000, softexpires = 79815981000000 } + [23:21:56.989832762] (+0.000009778) softirq_entry: { 1 }, { vec = 1 } + [23:21:56.989833879] (+0.000001117) softirq_entry: { 0 }, { vec = 1 } + [23:21:56.989838069] (+0.000004190) timer_cancel: { 1 }, { timer = 3993871956 } + [23:21:56.989839187] (+0.000001118) timer_cancel: { 0 }, { timer = 3993818708 } + [23:21:56.989841492] (+0.000002305) timer_expire_entry: { 1 }, { timer = 3993871956, now = 79515980, function = 3238277552 } + [23:21:56.989842819] (+0.000001327) timer_expire_entry: { 0 }, { timer = 3993818708, now = 79515980, function = 3238277552 } + [23:21:56.989854831] (+0.000012012) sched_stat_runtime: { 1 }, { comm = "lttng-consumerd", tid = 1193, runtime = 49237, vruntime = 43368363335 } + [23:21:56.989855949] (+0.000001118) sched_stat_runtime: { 0 }, { comm = "lttng-sessiond", tid = 1181, runtime = 45121, vruntime = 36976778361 } + [23:21:56.989861257] (+0.000005308) sched_stat_sleep: { 1 }, { comm = "kworker/1:1", tid = 21, delay = 9451318 } + [23:21:56.989862374] (+0.000001117) sched_stat_sleep: { 0 }, { comm = "kworker/0:0", tid = 4, delay = 9958820 } + [23:21:56.989868241] (+0.000005867) sched_wakeup: { 0 }, { comm = "kworker/0:0", tid = 4, prio = 120, success = 1, target_cpu = 0 } + [23:21:56.989869358] (+0.000001117) sched_wakeup: { 1 }, { comm = "kworker/1:1", tid = 21, prio = 120, success = 1, target_cpu = 1 } + [23:21:56.989877460] (+0.000008102) timer_expire_exit: { 1 }, { timer = 3993871956 } + [23:21:56.989878577] (+0.000001117) timer_expire_exit: { 0 }, { timer = 3993818708 } + . + . + . + +You can now safely destroy the trace +session (note that this doesn't delete the trace - it's still there in +~/lttng-traces): :: + + root@crownbay:~# lttng destroy + Session auto-20121015-232120 destroyed at /home/root + +Note that the trace is saved in a directory of the same name as returned by +'lttng create', under the ~/lttng-traces directory (note that you can change this by +supplying your own name to 'lttng create'): :: + + root@crownbay:~# ls -al ~/lttng-traces + drwxrwx--- 3 root root 1024 Oct 15 23:21 . + drwxr-xr-x 5 root root 1024 Oct 15 23:57 .. + drwxrwx--- 3 root root 1024 Oct 15 23:21 auto-20121015-232120 + +Collecting and viewing a userspace trace on the target (inside a shell) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For LTTng userspace tracing, you need to have a properly instrumented +userspace program. For this example, we'll use the 'hello' test program +generated by the lttng-ust build. + +The 'hello' test program isn't installed on the rootfs by the lttng-ust +build, so we need to copy it over manually. 
First cd into the build +directory that contains the hello executable: :: + + $ cd build/tmp/work/core2_32-poky-linux/lttng-ust/2.0.5-r0/git/tests/hello/.libs + +Copy that over to the target machine: :: + + $ scp hello root@192.168.1.20: + +You now have the instrumented lttng 'hello world' test program on the +target, ready to test. + +First, from the host, ssh to the target: :: + + $ ssh -l root 192.168.1.47 + The authenticity of host '192.168.1.47 (192.168.1.47)' can't be established. + RSA key fingerprint is 23:bd:c8:b1:a8:71:52:00:ee:00:4f:64:9e:10:b9:7e. + Are you sure you want to continue connecting (yes/no)? yes + Warning: Permanently added '192.168.1.47' (RSA) to the list of known hosts. + root@192.168.1.47's password: + +Once on the target, use these steps to create a trace: :: + + root@crownbay:~# lttng create + Session auto-20190303-021943 created. + Traces will be written in /home/root/lttng-traces/auto-20190303-021943 + +Enable the events you want to trace (in this case all userspace events): :: + + root@crownbay:~# lttng enable-event --userspace --all + All UST events are enabled in channel channel0 + +Start the trace: :: + + root@crownbay:~# lttng start + Tracing started for session auto-20190303-021943 + +Run the instrumented hello world program: :: + + root@crownbay:~# ./hello + Hello, World! + Tracing... done. + +And then stop the trace after awhile or after running a particular workload +that you want to trace: :: + + root@crownbay:~# lttng stop + Tracing stopped for session auto-20190303-021943 + +You can now view the trace in text form on the target: :: + + root@crownbay:~# lttng view + [02:31:14.906146544] (+?.?????????) hello:1424 ust_tests_hello:tptest: { cpu_id = 1 }, { intfield = 0, intfield2 = 0x0, longfield = 0, netintfield = 0, netintfieldhex = 0x0, arrfield1 = [ [0] = 1, [1] = 2, [2] = 3 ], arrfield2 = "test", _seqfield1_length = 4, seqfield1 = [ [0] = 116, [1] = 101, [2] = 115, [3] = 116 ], _seqfield2_length = 4, seqfield2 = "test", stringfield = "test", floatfield = 2222, doublefield = 2, boolfield = 1 } + [02:31:14.906170360] (+0.000023816) hello:1424 ust_tests_hello:tptest: { cpu_id = 1 }, { intfield = 1, intfield2 = 0x1, longfield = 1, netintfield = 1, netintfieldhex = 0x1, arrfield1 = [ [0] = 1, [1] = 2, [2] = 3 ], arrfield2 = "test", _seqfield1_length = 4, seqfield1 = [ [0] = 116, [1] = 101, [2] = 115, [3] = 116 ], _seqfield2_length = 4, seqfield2 = "test", stringfield = "test", floatfield = 2222, doublefield = 2, boolfield = 1 } + [02:31:14.906183140] (+0.000012780) hello:1424 ust_tests_hello:tptest: { cpu_id = 1 }, { intfield = 2, intfield2 = 0x2, longfield = 2, netintfield = 2, netintfieldhex = 0x2, arrfield1 = [ [0] = 1, [1] = 2, [2] = 3 ], arrfield2 = "test", _seqfield1_length = 4, seqfield1 = [ [0] = 116, [1] = 101, [2] = 115, [3] = 116 ], _seqfield2_length = 4, seqfield2 = "test", stringfield = "test", floatfield = 2222, doublefield = 2, boolfield = 1 } + [02:31:14.906194385] (+0.000011245) hello:1424 ust_tests_hello:tptest: { cpu_id = 1 }, { intfield = 3, intfield2 = 0x3, longfield = 3, netintfield = 3, netintfieldhex = 0x3, arrfield1 = [ [0] = 1, [1] = 2, [2] = 3 ], arrfield2 = "test", _seqfield1_length = 4, seqfield1 = [ [0] = 116, [1] = 101, [2] = 115, [3] = 116 ], _seqfield2_length = 4, seqfield2 = "test", stringfield = "test", floatfield = 2222, doublefield = 2, boolfield = 1 } + . + . + . 
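+
+As an aside, you don't have to enable every userspace event as we did
+above; when you set up a session, you can instead enable a single
+tracepoint by giving its provider and event name. Here is a minimal
+sketch using the ``ust_tests_hello:tptest`` event that appears in the
+listing above: ::
+
+   root@crownbay:~# lttng enable-event --userspace ust_tests_hello:tptest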
+
+You can now safely destroy the trace session (note that this doesn't delete the
+trace - it's still there in ~/lttng-traces): ::
+
+   root@crownbay:~# lttng destroy
+   Session auto-20190303-021943 destroyed at /home/root
+
+.. _lttng-documentation:
+
+LTTng Documentation
+-------------------
+
+You can find the primary LTTng Documentation on the `LTTng
+Documentation `__ site. The documentation on
+this site is appropriate for intermediate to advanced software
+developers who are working in a Linux environment and are interested in
+efficient software tracing.
+
+For information on LTTng in general, visit the `LTTng
+Project `__ site. You can find a "Getting
+Started" link on this site that takes you to an LTTng Quick Start.
+
+.. _profile-manual-blktrace:
+
+blktrace
+========
+
+blktrace is a tool for tracing and reporting low-level disk I/O.
+blktrace provides the tracing half of the equation; its output can be
+piped into the blkparse program, which renders the data in a
+human-readable form and does some basic analysis.
+
+.. _blktrace-setup:
+
+blktrace Setup
+--------------
+
+For this section, we'll assume you've already performed the basic setup
+outlined in the ":ref:`profile-manual/profile-manual-intro:General Setup`"
+section.
+
+blktrace is an application that runs on the target system. You can run
+the entire blktrace and blkparse pipeline on the target, or you can run
+blktrace in 'listen' mode on the target and have blktrace and blkparse
+collect and analyze the data on the host (see the
+":ref:`profile-manual/profile-manual-usage:Using blktrace Remotely`" section
+below). For the rest of this section, we assume you've ssh'ed from the host
+to the target and will be running blktrace on the target.
+
+.. _blktrace-basic-usage:
+
+Basic blktrace Usage
+--------------------
+
+To record a trace, simply run the 'blktrace' command, giving it the name
+of the block device you want to trace activity on: ::
+
+   root@crownbay:~# blktrace /dev/sdc
+
+In another shell, execute a workload you want to trace: ::
+
+   root@crownbay:/media/sdc# rm linux-2.6.19.2.tar.bz2; wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2; sync
+   Connecting to downloads.yoctoproject.org (140.211.169.59:80)
+   linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA
+
+Press Ctrl-C in the blktrace shell to stop the trace. It
+will display how many events were logged, along with the per-cpu file
+sizes (blktrace records traces in per-cpu kernel buffers and simply
+dumps them to userspace for blkparse to merge and sort later): ::
+
+   ^C=== sdc ===
+   CPU 0: 7082 events, 332 KiB data
+   CPU 1: 1578 events, 74 KiB data
+   Total: 8660 events (dropped 0), 406 KiB data
+
+If you examine the files saved to disk, you see multiple files, one per CPU and
+with the device name as the first part of the filename: ::
+
+   root@crownbay:~# ls -al
+   drwxr-xr-x 6 root root 1024 Oct 27 22:39 .
+   drwxr-sr-x 4 root root 1024 Oct 26 18:24 ..
+ -rw-r--r-- 1 root root 339938 Oct 27 22:40 sdc.blktrace.0 + -rw-r--r-- 1 root root 75753 Oct 27 22:40 sdc.blktrace.1 + +To view the trace events, simply invoke 'blkparse' in the directory +containing the trace files, giving it the device name that forms the +first part of the filenames: :: + + root@crownbay:~# blkparse sdc + + 8,32 1 1 0.000000000 1225 Q WS 3417048 + 8 [jbd2/sdc-8] + 8,32 1 2 0.000025213 1225 G WS 3417048 + 8 [jbd2/sdc-8] + 8,32 1 3 0.000033384 1225 P N [jbd2/sdc-8] + 8,32 1 4 0.000043301 1225 I WS 3417048 + 8 [jbd2/sdc-8] + 8,32 1 0 0.000057270 0 m N cfq1225 insert_request + 8,32 1 0 0.000064813 0 m N cfq1225 add_to_rr + 8,32 1 5 0.000076336 1225 U N [jbd2/sdc-8] 1 + 8,32 1 0 0.000088559 0 m N cfq workload slice:150 + 8,32 1 0 0.000097359 0 m N cfq1225 set_active wl_prio:0 wl_type:1 + 8,32 1 0 0.000104063 0 m N cfq1225 Not idling. st->count:1 + 8,32 1 0 0.000112584 0 m N cfq1225 fifo= (null) + 8,32 1 0 0.000118730 0 m N cfq1225 dispatch_insert + 8,32 1 0 0.000127390 0 m N cfq1225 dispatched a request + 8,32 1 0 0.000133536 0 m N cfq1225 activate rq, drv=1 + 8,32 1 6 0.000136889 1225 D WS 3417048 + 8 [jbd2/sdc-8] + 8,32 1 7 0.000360381 1225 Q WS 3417056 + 8 [jbd2/sdc-8] + 8,32 1 8 0.000377422 1225 G WS 3417056 + 8 [jbd2/sdc-8] + 8,32 1 9 0.000388876 1225 P N [jbd2/sdc-8] + 8,32 1 10 0.000397886 1225 Q WS 3417064 + 8 [jbd2/sdc-8] + 8,32 1 11 0.000404800 1225 M WS 3417064 + 8 [jbd2/sdc-8] + 8,32 1 12 0.000412343 1225 Q WS 3417072 + 8 [jbd2/sdc-8] + 8,32 1 13 0.000416533 1225 M WS 3417072 + 8 [jbd2/sdc-8] + 8,32 1 14 0.000422121 1225 Q WS 3417080 + 8 [jbd2/sdc-8] + 8,32 1 15 0.000425194 1225 M WS 3417080 + 8 [jbd2/sdc-8] + 8,32 1 16 0.000431968 1225 Q WS 3417088 + 8 [jbd2/sdc-8] + 8,32 1 17 0.000435251 1225 M WS 3417088 + 8 [jbd2/sdc-8] + 8,32 1 18 0.000440279 1225 Q WS 3417096 + 8 [jbd2/sdc-8] + 8,32 1 19 0.000443911 1225 M WS 3417096 + 8 [jbd2/sdc-8] + 8,32 1 20 0.000450336 1225 Q WS 3417104 + 8 [jbd2/sdc-8] + 8,32 1 21 0.000454038 1225 M WS 3417104 + 8 [jbd2/sdc-8] + 8,32 1 22 0.000462070 1225 Q WS 3417112 + 8 [jbd2/sdc-8] + 8,32 1 23 0.000465422 1225 M WS 3417112 + 8 [jbd2/sdc-8] + 8,32 1 24 0.000474222 1225 I WS 3417056 + 64 [jbd2/sdc-8] + 8,32 1 0 0.000483022 0 m N cfq1225 insert_request + 8,32 1 25 0.000489727 1225 U N [jbd2/sdc-8] 1 + 8,32 1 0 0.000498457 0 m N cfq1225 Not idling. st->count:1 + 8,32 1 0 0.000503765 0 m N cfq1225 dispatch_insert + 8,32 1 0 0.000512914 0 m N cfq1225 dispatched a request + 8,32 1 0 0.000518851 0 m N cfq1225 activate rq, drv=2 + . + . + . 
+ 8,32 0 0 58.515006138 0 m N cfq3551 complete rqnoidle 1 + 8,32 0 2024 58.516603269 3 C WS 3156992 + 16 [0] + 8,32 0 0 58.516626736 0 m N cfq3551 complete rqnoidle 1 + 8,32 0 0 58.516634558 0 m N cfq3551 arm_idle: 8 group_idle: 0 + 8,32 0 0 58.516636933 0 m N cfq schedule dispatch + 8,32 1 0 58.516971613 0 m N cfq3551 slice expired t=0 + 8,32 1 0 58.516982089 0 m N cfq3551 sl_used=13 disp=6 charge=13 iops=0 sect=80 + 8,32 1 0 58.516985511 0 m N cfq3551 del_from_rr + 8,32 1 0 58.516990819 0 m N cfq3551 put_queue + + CPU0 (sdc): + Reads Queued: 0, 0KiB Writes Queued: 331, 26,284KiB + Read Dispatches: 0, 0KiB Write Dispatches: 485, 40,484KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 0, 0KiB Writes Completed: 511, 41,000KiB + Read Merges: 0, 0KiB Write Merges: 13, 160KiB + Read depth: 0 Write depth: 2 + IO unplugs: 23 Timer unplugs: 0 + CPU1 (sdc): + Reads Queued: 0, 0KiB Writes Queued: 249, 15,800KiB + Read Dispatches: 0, 0KiB Write Dispatches: 42, 1,600KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 0, 0KiB Writes Completed: 16, 1,084KiB + Read Merges: 0, 0KiB Write Merges: 40, 276KiB + Read depth: 0 Write depth: 2 + IO unplugs: 30 Timer unplugs: 1 + + Total (sdc): + Reads Queued: 0, 0KiB Writes Queued: 580, 42,084KiB + Read Dispatches: 0, 0KiB Write Dispatches: 527, 42,084KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 0, 0KiB Writes Completed: 527, 42,084KiB + Read Merges: 0, 0KiB Write Merges: 53, 436KiB + IO unplugs: 53 Timer unplugs: 1 + + Throughput (R/W): 0KiB/s / 719KiB/s + Events (sdc): 6,592 entries + Skips: 0 forward (0 - 0.0%) + Input file sdc.blktrace.0 added + Input file sdc.blktrace.1 added + +The report shows each event that was +found in the blktrace data, along with a summary of the overall block +I/O traffic during the run. You can look at the +`blkparse `__ manpage to learn the +meaning of each field displayed in the trace listing. + +.. _blktrace-live-mode: + +Live Mode +~~~~~~~~~ + +blktrace and blkparse are designed from the ground up to be able to +operate together in a 'pipe mode' where the stdout of blktrace can be +fed directly into the stdin of blkparse: :: + + root@crownbay:~# blktrace /dev/sdc -o - | blkparse -i - + +This enables long-lived tracing sessions +to run without writing anything to disk, and allows the user to look for +certain conditions in the trace data in 'real-time' by viewing the trace +output as it scrolls by on the screen or by passing it along to yet +another program in the pipeline such as grep which can be used to +identify and capture conditions of interest. + +There's actually another blktrace command that implements the above +pipeline as a single command, so the user doesn't have to bother typing +in the above command sequence: :: + + root@crownbay:~# btrace /dev/sdc + +Using blktrace Remotely +~~~~~~~~~~~~~~~~~~~~~~~ + +Because blktrace traces block I/O and at the same time normally writes +its trace data to a block device, and in general because it's not really +a great idea to make the device being traced the same as the device the +tracer writes to, blktrace provides a way to trace without perturbing +the traced device at all by providing native support for sending all +trace data over the network. + +To have blktrace operate in this mode, start blktrace on the target +system being traced with the -l option, along with the device to trace: :: + + root@crownbay:~# blktrace -l /dev/sdc + server: waiting for connections... 
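+
+Both ends of this connection exchange the trace data over a single TCP
+port. If that port (normally 8462) is not usable on your network, the
+listening side shown above and the connecting side described next both
+accept a port option. This is only a sketch based on the blktrace man
+page, so check the version on your systems: ::
+
+   root@crownbay:~# blktrace -l /dev/sdc -p 8462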
+ +On the host system, use the -h option to connect to the target system, +also passing it the device to trace: :: + + $ blktrace -d /dev/sdc -h 192.168.1.43 + blktrace: connecting to 192.168.1.43 + blktrace: connected! + +On the target system, you should see this: :: + + server: connection from 192.168.1.43 + +In another shell, execute a workload you want to trace. :: + + root@crownbay:/media/sdc# rm linux-2.6.19.2.tar.bz2; wget http://downloads.yoctoproject.org/mirror/sources/linux-2.6.19.2.tar.bz2; sync + Connecting to downloads.yoctoproject.org (140.211.169.59:80) + linux-2.6.19.2.tar.b 100% \|*******************************\| 41727k 0:00:00 ETA + +When it's done, do a Ctrl-C on the host system to stop the +trace: :: + + ^C=== sdc === + CPU 0: 7691 events, 361 KiB data + CPU 1: 4109 events, 193 KiB data + Total: 11800 events (dropped 0), 554 KiB data + +On the target system, you should also see a trace summary for the trace +just ended: :: + + server: end of run for 192.168.1.43:sdc + === sdc === + CPU 0: 7691 events, 361 KiB data + CPU 1: 4109 events, 193 KiB data + Total: 11800 events (dropped 0), 554 KiB data + +The blktrace instance on the host will +save the target output inside a hostname-timestamp directory: :: + + $ ls -al + drwxr-xr-x 10 root root 1024 Oct 28 02:40 . + drwxr-sr-x 4 root root 1024 Oct 26 18:24 .. + drwxr-xr-x 2 root root 1024 Oct 28 02:40 192.168.1.43-2012-10-28-02:40:56 + +cd into that directory to see the output files: :: + + $ ls -l + -rw-r--r-- 1 root root 369193 Oct 28 02:44 sdc.blktrace.0 + -rw-r--r-- 1 root root 197278 Oct 28 02:44 sdc.blktrace.1 + +And run blkparse on the host system using the device name: :: + + $ blkparse sdc + + 8,32 1 1 0.000000000 1263 Q RM 6016 + 8 [ls] + 8,32 1 0 0.000036038 0 m N cfq1263 alloced + 8,32 1 2 0.000039390 1263 G RM 6016 + 8 [ls] + 8,32 1 3 0.000049168 1263 I RM 6016 + 8 [ls] + 8,32 1 0 0.000056152 0 m N cfq1263 insert_request + 8,32 1 0 0.000061600 0 m N cfq1263 add_to_rr + 8,32 1 0 0.000075498 0 m N cfq workload slice:300 + . + . + . 
+ 8,32 0 0 177.266385696 0 m N cfq1267 arm_idle: 8 group_idle: 0 + 8,32 0 0 177.266388140 0 m N cfq schedule dispatch + 8,32 1 0 177.266679239 0 m N cfq1267 slice expired t=0 + 8,32 1 0 177.266689297 0 m N cfq1267 sl_used=9 disp=6 charge=9 iops=0 sect=56 + 8,32 1 0 177.266692649 0 m N cfq1267 del_from_rr + 8,32 1 0 177.266696560 0 m N cfq1267 put_queue + + CPU0 (sdc): + Reads Queued: 0, 0KiB Writes Queued: 270, 21,708KiB + Read Dispatches: 59, 2,628KiB Write Dispatches: 495, 39,964KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 90, 2,752KiB Writes Completed: 543, 41,596KiB + Read Merges: 0, 0KiB Write Merges: 9, 344KiB + Read depth: 2 Write depth: 2 + IO unplugs: 20 Timer unplugs: 1 + CPU1 (sdc): + Reads Queued: 688, 2,752KiB Writes Queued: 381, 20,652KiB + Read Dispatches: 31, 124KiB Write Dispatches: 59, 2,396KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 0, 0KiB Writes Completed: 11, 764KiB + Read Merges: 598, 2,392KiB Write Merges: 88, 448KiB + Read depth: 2 Write depth: 2 + IO unplugs: 52 Timer unplugs: 0 + + Total (sdc): + Reads Queued: 688, 2,752KiB Writes Queued: 651, 42,360KiB + Read Dispatches: 90, 2,752KiB Write Dispatches: 554, 42,360KiB + Reads Requeued: 0 Writes Requeued: 0 + Reads Completed: 90, 2,752KiB Writes Completed: 554, 42,360KiB + Read Merges: 598, 2,392KiB Write Merges: 97, 792KiB + IO unplugs: 72 Timer unplugs: 1 + + Throughput (R/W): 15KiB/s / 238KiB/s + Events (sdc): 9,301 entries + Skips: 0 forward (0 - 0.0%) + +You should see the trace events and summary just as you would have if you'd run +the same command on the target. + +Tracing Block I/O via 'ftrace' +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +It's also possible to trace block I/O using only +:ref:`profile-manual/profile-manual-usage:The 'trace events' Subsystem`, which +can be useful for casual tracing if you don't want to bother dealing with the +userspace tools. + +To enable tracing for a given device, use /sys/block/xxx/trace/enable, +where xxx is the device name. 
This for example enables tracing for +/dev/sdc: :: + + root@crownbay:/sys/kernel/debug/tracing# echo 1 > /sys/block/sdc/trace/enable + +Once you've selected the device(s) you want +to trace, selecting the 'blk' tracer will turn the blk tracer on: :: + + root@crownbay:/sys/kernel/debug/tracing# cat available_tracers + blk function_graph function nop + + root@crownbay:/sys/kernel/debug/tracing# echo blk > current_tracer + +Execute the workload you're interested in: :: + + root@crownbay:/sys/kernel/debug/tracing# cat /media/sdc/testfile.txt + +And look at the output (note here that we're using 'trace_pipe' instead of +trace to capture this trace - this allows us to wait around on the pipe +for data to appear): :: + + root@crownbay:/sys/kernel/debug/tracing# cat trace_pipe + cat-3587 [001] d..1 3023.276361: 8,32 Q R 1699848 + 8 [cat] + cat-3587 [001] d..1 3023.276410: 8,32 m N cfq3587 alloced + cat-3587 [001] d..1 3023.276415: 8,32 G R 1699848 + 8 [cat] + cat-3587 [001] d..1 3023.276424: 8,32 P N [cat] + cat-3587 [001] d..2 3023.276432: 8,32 I R 1699848 + 8 [cat] + cat-3587 [001] d..1 3023.276439: 8,32 m N cfq3587 insert_request + cat-3587 [001] d..1 3023.276445: 8,32 m N cfq3587 add_to_rr + cat-3587 [001] d..2 3023.276454: 8,32 U N [cat] 1 + cat-3587 [001] d..1 3023.276464: 8,32 m N cfq workload slice:150 + cat-3587 [001] d..1 3023.276471: 8,32 m N cfq3587 set_active wl_prio:0 wl_type:2 + cat-3587 [001] d..1 3023.276478: 8,32 m N cfq3587 fifo= (null) + cat-3587 [001] d..1 3023.276483: 8,32 m N cfq3587 dispatch_insert + cat-3587 [001] d..1 3023.276490: 8,32 m N cfq3587 dispatched a request + cat-3587 [001] d..1 3023.276497: 8,32 m N cfq3587 activate rq, drv=1 + cat-3587 [001] d..2 3023.276500: 8,32 D R 1699848 + 8 [cat] + +And this turns off tracing for the specified device: :: + + root@crownbay:/sys/kernel/debug/tracing# echo 0 > /sys/block/sdc/trace/enable + +.. _blktrace-documentation: + +blktrace Documentation +---------------------- + +Online versions of the man pages for the commands discussed in this +section can be found here: + +- http://linux.die.net/man/8/blktrace + +- http://linux.die.net/man/1/blkparse + +- http://linux.die.net/man/8/btrace + +The above manpages, along with manpages for the other blktrace utilities +(btt, blkiomon, etc) can be found in the /doc directory of the blktrace +tools git repo: :: + + $ git clone git://git.kernel.dk/blktrace.git diff --git a/poky/documentation/profile-manual/profile-manual.rst b/poky/documentation/profile-manual/profile-manual.rst new file mode 100644 index 000000000..2c8fcf3e6 --- /dev/null +++ b/poky/documentation/profile-manual/profile-manual.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +========================================== +Yocto Project Profiling and Tracing Manual +========================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + profile-manual-intro + profile-manual-arch + profile-manual-usage + profile-manual-examples + history + +.. 
include:: /boilerplate.rst diff --git a/poky/documentation/ref-manual/examples/hello-autotools/hello_2.10.bb b/poky/documentation/ref-manual/examples/hello-autotools/hello_2.10.bb new file mode 100644 index 000000000..aa2beb9a9 --- /dev/null +++ b/poky/documentation/ref-manual/examples/hello-autotools/hello_2.10.bb @@ -0,0 +1,9 @@ +DESCRIPTION = "GNU Helloworld application" +SECTION = "examples" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +SRC_URI = "${GNU_MIRROR}/hello/hello-${PV}.tar.gz" +SRC_URI[sha256sum] = "31e066137a962676e89f69d1b65382de95a7ef7d914b8cb956f41ea72e0f516b" + +inherit autotools-brokensep gettext diff --git a/poky/documentation/ref-manual/examples/hello-autotools/hello_2.3.bb b/poky/documentation/ref-manual/examples/hello-autotools/hello_2.3.bb deleted file mode 100644 index 5dfb0b30c..000000000 --- a/poky/documentation/ref-manual/examples/hello-autotools/hello_2.3.bb +++ /dev/null @@ -1,8 +0,0 @@ -DESCRIPTION = "GNU Helloworld application" -SECTION = "examples" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=adefda309052235aa5d1e99ce7557010" - -SRC_URI = "${GNU_MIRROR}/hello/hello-${PV}.tar.bz2" - -inherit autotools diff --git a/poky/documentation/ref-manual/examples/libxpm/libxpm_3.5.6.bb b/poky/documentation/ref-manual/examples/libxpm/libxpm_3.5.6.bb index b58d4d7bd..c0c898640 100644 --- a/poky/documentation/ref-manual/examples/libxpm/libxpm_3.5.6.bb +++ b/poky/documentation/ref-manual/examples/libxpm/libxpm_3.5.6.bb @@ -1,4 +1,4 @@ -require xorg-lib-common.inc +require recipes-graphics/xorg-lib/xorg-lib-common.inc DESCRIPTION = "X11 Pixmap library" LICENSE = "X-BSD" diff --git a/poky/documentation/ref-manual/faq.rst b/poky/documentation/ref-manual/faq.rst new file mode 100644 index 000000000..2d2aaad0a --- /dev/null +++ b/poky/documentation/ref-manual/faq.rst @@ -0,0 +1,451 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*** +FAQ +*** + +**Q:** How does Poky differ from `OpenEmbedded `__? + +**A:** The term ``Poky`` refers to the specific reference build +system that the Yocto Project provides. Poky is based on +:term:`OpenEmbedded-Core (OE-Core)` and :term:`BitBake`. Thus, the +generic term used here for the build system is the "OpenEmbedded build +system." Development in the Yocto Project using Poky is closely tied to +OpenEmbedded, with changes always being merged to OE-Core or BitBake +first before being pulled back into Poky. This practice benefits both +projects immediately. + +**Q:** My development system does not meet the required Git, tar, and +Python versions. In particular, I do not have Python 3.5.0 or greater. +Can I still use the Yocto Project? + +**A:** You can get the required tools on your host development system a +couple different ways (i.e. building a tarball or downloading a +tarball). See the "`Required Git, tar, Python and gcc +Versions <#required-git-tar-python-and-gcc-versions>`__" section for +steps on how to update your build tools. + +**Q:** How can you claim Poky / OpenEmbedded-Core is stable? + +**A:** There are three areas that help with stability; + +- The Yocto Project team keeps :term:`OpenEmbedded-Core (OE-Core)` small and + focused, containing around 830 recipes as opposed to the thousands + available in other OpenEmbedded community layers. Keeping it small + makes it easy to test and maintain. + +- The Yocto Project team runs manual and automated tests using a small, + fixed set of reference hardware as well as emulated targets. 
+
+- The Yocto Project uses an autobuilder, which provides continuous
+  build and integration tests.
+
+**Q:** How do I get support for my board added to the Yocto Project?
+
+**A:** Support for an additional board is added by creating a Board
+Support Package (BSP) layer for it. For more information on how to
+create a BSP layer, see the
+":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
+section in the Yocto Project Development Tasks Manual and the
+:doc:`../bsp-guide/bsp-guide`.
+
+Usually, if the board is not completely exotic, adding support in the
+Yocto Project is fairly straightforward.
+
+**Q:** Are there any products built using the OpenEmbedded build system?
+
+**A:** The software running on the `Vernier
+LabQuest `__ is built using the
+OpenEmbedded build system. See the `Vernier
+LabQuest `__ website
+for more information. There are a number of pre-production devices using
+the OpenEmbedded build system, and the Yocto Project team announces them
+as soon as they are released.
+
+**Q:** What does the OpenEmbedded build system produce as output?
+
+**A:** Because you can use the same set of recipes to create output of
+various formats, the output of an OpenEmbedded build depends on how you
+start it. Usually, the output is a flashable image ready for the target
+device.
+
+**Q:** How do I add my package to the Yocto Project?
+
+**A:** To add a package, you need to create a BitBake recipe. For
+information on how to create a BitBake recipe, see the
+":ref:`dev-manual/dev-manual-common-tasks:writing a new recipe`"
+section in the Yocto Project Development Tasks Manual.
+
+**Q:** Do I have to reflash my entire board with a new Yocto Project
+image when recompiling a package?
+
+**A:** The OpenEmbedded build system can build packages in various
+formats such as IPK for OPKG, Debian package (``.deb``), or RPM. You can
+then upgrade the packages using the package tools on the device, much
+like on a desktop distribution such as Ubuntu or Fedora. However,
+package management on the target is entirely optional.
+
+**Q:** I see the error
+'``chmod: XXXXX new permissions are r-xrwxrwx, not r-xr-xr-x``'. What is
+wrong?
+
+**A:** You are probably running the build on an NTFS filesystem. Use
+``ext2``, ``ext3``, or ``ext4`` instead.
+
+**Q:** I see lots of 404 responses for files when the OpenEmbedded build
+system is trying to download sources. Is something wrong?
+
+**A:** Nothing is wrong. The OpenEmbedded build system checks any
+configured source mirrors before downloading from the upstream sources.
+The build system does this searching for both source archives and
+pre-checked out versions of SCM-managed software. These checks help in
+large installations because they can reduce load on the SCM servers
+themselves. The addresses returning the 404 responses are typically the
+default mirrors configured into the build system. Consequently, if an
+upstream source disappears, the team can place sources on those mirrors
+so builds continue to work.
+
+**Q:** I have machine-specific data in a package for one machine only,
+but the package is being marked as machine-specific in all cases. How do
+I prevent this?
+
+**A:** Set ``SRC_URI_OVERRIDES_PACKAGE_ARCH`` = "0" in the ``.bb`` file,
+but make sure the package is manually marked as machine-specific for the
+case that needs it. The code that handles
+``SRC_URI_OVERRIDES_PACKAGE_ARCH`` is in the
+``meta/classes/base.bbclass`` file.
+
+**Q:** I'm behind a firewall and need to use a proxy server. How do I do
+that?
+ +**A:** Most source fetching by the OpenEmbedded build system is done by +``wget`` and you therefore need to specify the proxy settings in a +``.wgetrc`` file, which can be in your home directory if you are a +single user or can be in ``/usr/local/etc/wgetrc`` as a global user +file. + +Following is the applicable code for setting various proxy types in the +``.wgetrc`` file. By default, these settings are disabled with comments. +To use them, remove the comments: :: + + # You can set the default proxies for Wget to use for http, https, and ftp. + # They will override the value in the environment. + #https_proxy = http://proxy.yoyodyne.com:18023/ + #http_proxy = http://proxy.yoyodyne.com:18023/ + #ftp_proxy = http://proxy.yoyodyne.com:18023/ + + # If you do not want to use proxy at all, set this to off. + #use_proxy = on + +The Yocto Project also includes a +``meta-poky/conf/site.conf.sample`` file that shows how to configure CVS +and Git proxy servers if needed. For more information on setting up +various proxy types and configuring proxy servers, see the +":yocto_wiki:`Working Behind a Network Proxy `" +Wiki page. + +**Q:** What's the difference between target and target\ ``-native``? + +**A:** The ``*-native`` targets are designed to run on the system being +used for the build. These are usually tools that are needed to assist +the build in some way such as ``quilt-native``, which is used to apply +patches. The non-native version is the one that runs on the target +device. + +**Q:** I'm seeing random build failures. Help?! + +**A:** If the same build is failing in totally different and random +ways, the most likely explanation is: + +- The hardware you are running the build on has some problem. + +- You are running the build under virtualization, in which case the + virtualization probably has bugs. + +The OpenEmbedded build system processes a massive amount of data that +causes lots of network, disk and CPU activity and is sensitive to even +single-bit failures in any of these areas. True random failures have +always been traced back to hardware or virtualization issues. + +**Q:** When I try to build a native recipe, the build fails with +``iconv.h`` problems. + +**A:** If you get an error message that indicates GNU ``libiconv`` is +not in use but ``iconv.h`` has been included from ``libiconv``, you need +to check to see if you have a previously installed version of the header +file in ``/usr/local/include``. +:: + + #error GNU libiconv not in use but included iconv.h is from libiconv + +If you find a previously installed +file, you should either uninstall it or temporarily rename it and try +the build again. + +This issue is just a single manifestation of "system leakage" issues +caused when the OpenEmbedded build system finds and uses previously +installed files during a native build. This type of issue might not be +limited to ``iconv.h``. Be sure that leakage cannot occur from +``/usr/local/include`` and ``/opt`` locations. + +**Q:** What do we need to ship for license compliance? + +**A:** This is a difficult question and you need to consult your lawyer +for the answer for your specific case. It is worth bearing in mind that +for GPL compliance, there needs to be enough information shipped to +allow someone else to rebuild and produce the same end result you are +shipping. This means sharing the source code, any patches applied to it, +and also any configuration information about how that package was +configured and built. 
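+
+Much of that gathering can be automated by the build system itself. The
+following ``local.conf`` fragment is only a sketch of one possible
+approach; it assumes that the ``archiver`` class and the license
+deployment variables described in the manuals referenced below fit your
+compliance strategy: ::
+
+   # Archive the upstream sources and applied patches for each recipe.
+   INHERIT += "archiver"
+   ARCHIVER_MODE[src] = "patched"
+
+   # Deploy license texts with the image and as per-package license packages.
+   COPY_LIC_MANIFEST = "1"
+   COPY_LIC_DIRS = "1"
+   LICENSE_CREATE_PACKAGE = "1"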
+ +You can find more information on licensing in the +":ref:`overview-manual/overview-manual-development-environment:licensing`" +section in the Yocto +Project Overview and Concepts Manual and also in the +":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`" +section in the Yocto Project Development Tasks Manual. + +**Q:** How do I disable the cursor on my touchscreen device? + +**A:** You need to create a form factor file as described in the +":ref:`bsp-filelayout-misc-recipes`" section in +the Yocto Project Board Support Packages (BSP) Developer's Guide. Set +the ``HAVE_TOUCHSCREEN`` variable equal to one as follows: +:: + + HAVE_TOUCHSCREEN=1 + +**Q:** How do I make sure connected network interfaces are brought up by +default? + +**A:** The default interfaces file provided by the netbase recipe does +not automatically bring up network interfaces. Therefore, you will need +to add a BSP-specific netbase that includes an interfaces file. See the +":ref:`bsp-filelayout-misc-recipes`" section in +the Yocto Project Board Support Packages (BSP) Developer's Guide for +information on creating these types of miscellaneous recipe files. + +For example, add the following files to your layer: :: + + meta-MACHINE/recipes-bsp/netbase/netbase/MACHINE/interfaces + meta-MACHINE/recipes-bsp/netbase/netbase_5.0.bbappend + +**Q:** How do I create images with more free space? + +**A:** By default, the OpenEmbedded build system creates images that are +1.3 times the size of the populated root filesystem. To affect the image +size, you need to set various configurations: + +- *Image Size:* The OpenEmbedded build system uses the + :term:`IMAGE_ROOTFS_SIZE` variable to define + the size of the image in Kbytes. The build system determines the size + by taking into account the initial root filesystem size before any + modifications such as requested size for the image and any requested + additional free disk space to be added to the image. + +- *Overhead:* Use the + :term:`IMAGE_OVERHEAD_FACTOR` variable + to define the multiplier that the build system applies to the initial + image size, which is 1.3 by default. + +- *Additional Free Space:* Use the + :term:`IMAGE_ROOTFS_EXTRA_SPACE` + variable to add additional free space to the image. The build system + adds this space to the image after it determines its + ``IMAGE_ROOTFS_SIZE``. + +**Q:** Why don't you support directories with spaces in the pathnames? + +**A:** The Yocto Project team has tried to do this before but too many +of the tools the OpenEmbedded build system depends on, such as +``autoconf``, break when they find spaces in pathnames. Until that +situation changes, the team will not support spaces in pathnames. + +**Q:** How do I use an external toolchain? + +**A:** The toolchain configuration is very flexible and customizable. It +is primarily controlled with the ``TCMODE`` variable. This variable +controls which ``tcmode-*.inc`` file to include from the +``meta/conf/distro/include`` directory within the :term:`Source Directory`. + +The default value of ``TCMODE`` is "default", which tells the +OpenEmbedded build system to use its internally built toolchain (i.e. +``tcmode-default.inc``). However, other patterns are accepted. In +particular, "external-\*" refers to external toolchains. One example is +the Sourcery G++ Toolchain. The support for this toolchain resides in +the separate ``meta-sourcery`` layer at +http://github.com/MentorEmbedded/meta-sourcery/. 
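+
+As a purely illustrative sketch, selecting such an external toolchain
+layer from ``local.conf`` might look like the following. The mode name
+and any supporting variables are defined by the external toolchain layer
+itself (the values below are hypothetical), so consult that layer's
+documentation for the real ones: ::
+
+   # Hypothetical values; the external toolchain layer documents the real ones.
+   TCMODE = "external-sourcery"
+   EXTERNAL_TOOLCHAIN = "/opt/toolchains/sourcery-g++"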
+ +In addition to the toolchain configuration, you also need a +corresponding toolchain recipe file. This recipe file needs to package +up any pre-built objects in the toolchain such as ``libgcc``, +``libstdcc++``, any locales, and ``libc``. + +**Q:** How does the OpenEmbedded build system obtain source code and +will it work behind my firewall or proxy server? + +**A:** The way the build system obtains source code is highly +configurable. You can setup the build system to get source code in most +environments if HTTP transport is available. + +When the build system searches for source code, it first tries the local +download directory. If that location fails, Poky tries +:term:`PREMIRRORS`, the upstream source, and then +:term:`MIRRORS` in that order. + +Assuming your distribution is "poky", the OpenEmbedded build system uses +the Yocto Project source ``PREMIRRORS`` by default for SCM-based +sources, upstreams for normal tarballs, and then falls back to a number +of other mirrors including the Yocto Project source mirror if those +fail. + +As an example, you could add a specific server for the build system to +attempt before any others by adding something like the following to the +``local.conf`` configuration file: :: + + PREMIRRORS_prepend = "\ + git://.*/.* http://www.yoctoproject.org/sources/ \n \ + ftp://.*/.* http://www.yoctoproject.org/sources/ \n \ + http://.*/.* http://www.yoctoproject.org/sources/ \n \ + https://.*/.* http://www.yoctoproject.org/sources/ \n" + +These changes cause the build system to intercept Git, FTP, HTTP, and +HTTPS requests and direct them to the ``http://`` sources mirror. You +can use ``file://`` URLs to point to local directories or network shares +as well. + +Aside from the previous technique, these options also exist: +:: + + BB_NO_NETWORK = "1" + +This statement tells BitBake to issue an error +instead of trying to access the Internet. This technique is useful if +you want to ensure code builds only from local sources. + +Here is another technique: +:: + + BB_FETCH_PREMIRRORONLY = "1" + +This statement +limits the build system to pulling source from the ``PREMIRRORS`` only. +Again, this technique is useful for reproducing builds. + +Here is another technique: +:: + + BB_GENERATE_MIRROR_TARBALLS = "1" + +This +statement tells the build system to generate mirror tarballs. This +technique is useful if you want to create a mirror server. If not, +however, the technique can simply waste time during the build. + +Finally, consider an example where you are behind an HTTP-only firewall. +You could make the following changes to the ``local.conf`` configuration +file as long as the ``PREMIRRORS`` server is current: :: + + PREMIRRORS_prepend = "\ + ftp://.*/.* http://www.yoctoproject.org/sources/ \n \ + http://.*/.* http://www.yoctoproject.org/sources/ \n \ + https://.*/.* http://www.yoctoproject.org/sources/ \n" + BB_FETCH_PREMIRRORONLY = "1" + +These changes would cause the build system to successfully fetch source +over HTTP and any network accesses to anything other than the +``PREMIRRORS`` would fail. + +The build system also honors the standard shell environment variables +``http_proxy``, ``ftp_proxy``, ``https_proxy``, and ``all_proxy`` to +redirect requests through proxy servers. + +.. note:: + + You can find more information on the + ":yocto_wiki:`Working Behind a Network Proxy `" + Wiki page. + +**Q:** Can I get rid of build output so I can start over? + +**A:** Yes - you can easily do this. 
When you use BitBake to build an +image, all the build output goes into the directory created when you run +the build environment setup script (i.e. +````` <#structure-core-script>`__). By default, this :term:`Build Directory` +is named ``build`` but can be named +anything you want. + +Within the Build Directory, is the ``tmp`` directory. To remove all the +build output yet preserve any source code or downloaded files from +previous builds, simply remove the ``tmp`` directory. + +**Q:** Why do ``${bindir}`` and ``${libdir}`` have strange values for +``-native`` recipes? + +**A:** Executables and libraries might need to be used from a directory +other than the directory into which they were initially installed. +Complicating this situation is the fact that sometimes these executables +and libraries are compiled with the expectation of being run from that +initial installation target directory. If this is the case, moving them +causes problems. + +This scenario is a fundamental problem for package maintainers of +mainstream Linux distributions as well as for the OpenEmbedded build +system. As such, a well-established solution exists. Makefiles, +Autotools configuration scripts, and other build systems are expected to +respect environment variables such as ``bindir``, ``libdir``, and +``sysconfdir`` that indicate where executables, libraries, and data +reside when a program is actually run. They are also expected to respect +a ``DESTDIR`` environment variable, which is prepended to all the other +variables when the build system actually installs the files. It is +understood that the program does not actually run from within +``DESTDIR``. + +When the OpenEmbedded build system uses a recipe to build a +target-architecture program (i.e. one that is intended for inclusion on +the image being built), that program eventually runs from the root file +system of that image. Thus, the build system provides a value of +"/usr/bin" for ``bindir``, a value of "/usr/lib" for ``libdir``, and so +forth. + +Meanwhile, ``DESTDIR`` is a path within the :term:`Build Directory`. +However, when the recipe builds a +native program (i.e. one that is intended to run on the build machine), +that program is never installed directly to the build machine's root +file system. Consequently, the build system uses paths within the Build +Directory for ``DESTDIR``, ``bindir`` and related variables. To better +understand this, consider the following two paths where the first is +relatively normal and the second is not: :: + + /home/maxtothemax/poky-bootchart2/build/tmp/work/i586-poky-linux/zlib/ + 1.2.8-r0/sysroot-destdir/usr/bin + + /home/maxtothemax/poky-bootchart2/build/tmp/work/x86_64-linux/ + zlib-native/1.2.8-r0/sysroot-destdir/home/maxtothemax/poky-bootchart2/ + build/tmp/sysroots/x86_64-linux/usr/bin + +.. note:: + + Due to these lengthy examples, the paths are artificially broken + across lines for readability. + +Even if the paths look unusual, +they both are correct - the first for a target and the second for a +native recipe. These paths are a consequence of the ``DESTDIR`` +mechanism and while they appear strange, they are correct and in +practice very effective. + +**Q:** The files provided by my ``*-native`` recipe do not appear to be +available to other recipes. Files are missing from the native sysroot, +my recipe is installing to the wrong place, or I am getting permissions +errors during the do_install task in my recipe! What is wrong? 
+ +**A:** This situation results when a build system does not recognize the +environment variables supplied to it by :term:`BitBake`. The +incident that prompted this FAQ entry involved a Makefile that used an +environment variable named ``BINDIR`` instead of the more standard +variable ``bindir``. The makefile's hardcoded default value of +"/usr/bin" worked most of the time, but not for the recipe's ``-native`` +variant. For another example, permissions errors might be caused by a +Makefile that ignores ``DESTDIR`` or uses a different name for that +environment variable. Check the the build system to see if these kinds +of issues exist. diff --git a/poky/documentation/ref-manual/faq.xml b/poky/documentation/ref-manual/faq.xml index 98ae0a975..2f8fcf324 100644 --- a/poky/documentation/ref-manual/faq.xml +++ b/poky/documentation/ref-manual/faq.xml @@ -323,7 +323,7 @@ - What’s the difference between target and target-native? + What's the difference between target and target-native? diff --git a/poky/documentation/ref-manual/history.rst b/poky/documentation/ref-manual/history.rst new file mode 100644 index 000000000..e962d9297 --- /dev/null +++ b/poky/documentation/ref-manual/history.rst @@ -0,0 +1,74 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 0.9 + - November 2010 + - The initial document released with the Yocto Project 0.9 Release + * - 1.0 + - April 2011 + - Released with the Yocto Project 1.0 Release. + * - 1.1 + - October 2011 + - Released with the Yocto Project 1.1 Release. + * - 1.2 + - April 2012 + - Released with the Yocto Project 1.2 Release. + * - 1.3 + - October 2012 + - Released with the Yocto Project 1.3 Release. + * - 1.4 + - April 2013 + - Released with the Yocto Project 1.4 Release. + * - 1.5 + - October 2013 + - Released with the Yocto Project 1.5 Release. + * - 1.6 + - April 2014 + - Released with the Yocto Project 1.6 Release. + * - 1.7 + - October 2014 + - Released with the Yocto Project 1.7 Release. + * - 1.8 + - April 2015 + - Released with the Yocto Project 1.8 Release. + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. + diff --git a/poky/documentation/ref-manual/migration-1.3.rst b/poky/documentation/ref-manual/migration-1.3.rst new file mode 100644 index 000000000..ebbc23887 --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.3.rst @@ -0,0 +1,195 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +Moving to the Yocto Project 1.3 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.3 Release from the prior release. + +.. 
_1.3-local-configuration: + +Local Configuration +------------------- + +Differences include changes for +:term:`SSTATE_MIRRORS` and ``bblayers.conf``. + +.. _migration-1.3-sstate-mirrors: + +SSTATE_MIRRORS +~~~~~~~~~~~~~~ + +The shared state cache (sstate-cache), as pointed to by +:term:`SSTATE_DIR`, by default now has two-character +subdirectories to prevent issues arising from too many files in the same +directory. Also, native sstate-cache packages, which are built to run on +the host system, will go into a subdirectory named using the distro ID +string. If you copy the newly structured sstate-cache to a mirror +location (either local or remote) and then point to it in +:term:`SSTATE_MIRRORS`, you need to append "PATH" +to the end of the mirror URL so that the path used by BitBake before the +mirror substitution is appended to the path used to access the mirror. +Here is an example: :: + + SSTATE_MIRRORS = "file://.* http://someserver.tld/share/sstate/PATH" + +.. _migration-1.3-bblayers-conf: + +bblayers.conf +~~~~~~~~~~~~~ + +The ``meta-yocto`` layer consists of two parts that correspond to the +Poky reference distribution and the reference hardware Board Support +Packages (BSPs), respectively: ``meta-yocto`` and ``meta-yocto-bsp``. +When running BitBake for the first time after upgrading, your +``conf/bblayers.conf`` file will be updated to handle this change and +you will be asked to re-run or restart for the changes to take effect. + +.. _1.3-recipes: + +Recipes +------- + +Differences include changes for the following: + +.. _migration-1.3-python-function-whitespace: + +Python Function Whitespace +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +All Python functions must now use four spaces for indentation. +Previously, an inconsistent mix of spaces and tabs existed, which made +extending these functions using ``_append`` or ``_prepend`` complicated +given that Python treats whitespace as syntactically significant. If you +are defining or extending any Python functions (e.g. +``populate_packages``, ``do_unpack``, ``do_patch`` and so forth) in +custom recipes or classes, you need to ensure you are using consistent +four-space indentation. + +.. _migration-1.3-proto=-in-src-uri: + +proto= in SRC_URI +~~~~~~~~~~~~~~~~~ + +Any use of ``proto=`` in :term:`SRC_URI` needs to be +changed to ``protocol=``. In particular, this applies to the following +URIs: + +- ``svn://`` + +- ``bzr://`` + +- ``hg://`` + +- ``osc://`` + +Other URIs were already using ``protocol=``. This change improves +consistency. + +.. _migration-1.3-nativesdk: + +nativesdk +~~~~~~~~~ + +The suffix ``nativesdk`` is now implemented as a prefix, which +simplifies a lot of the packaging code for ``nativesdk`` recipes. All +custom ``nativesdk`` recipes, which are relocatable packages that are +native to :term:`SDK_ARCH`, and any references need to +be updated to use ``nativesdk-*`` instead of ``*-nativesdk``. + +.. _migration-1.3-task-recipes: + +Task Recipes +~~~~~~~~~~~~ + +"Task" recipes are now known as "Package groups" and have been renamed +from ``task-*.bb`` to ``packagegroup-*.bb``. Existing references to the +previous ``task-*`` names should work in most cases as there is an +automatic upgrade path for most packages. However, you should update +references in your own recipes and configurations as they could be +removed in future releases. 
You should also rename any custom ``task-*`` +recipes to ``packagegroup-*``, and change them to inherit +``packagegroup`` instead of ``task``, as well as taking the opportunity +to remove anything now handled by ``packagegroup.bbclass``, such as +providing ``-dev`` and ``-dbg`` packages, setting +:term:`LIC_FILES_CHKSUM`, and so forth. See the +":ref:`packagegroup.bbclass `" section for +further details. + +.. _migration-1.3-image-features: + +IMAGE_FEATURES +~~~~~~~~~~~~~~ + +Image recipes that previously included "apps-console-core" in +:term:`IMAGE_FEATURES` should now include "splash" +instead to enable the boot-up splash screen. Retaining +"apps-console-core" will still include the splash screen but generates a +warning. The "apps-x11-core" and "apps-x11-games" ``IMAGE_FEATURES`` +features have been removed. + +.. _migration-1.3-removed-recipes: + +Removed Recipes +~~~~~~~~~~~~~~~ + +The following recipes have been removed. For most of them, it is +unlikely that you would have any references to them in your own +:term:`Metadata`. However, you should check your metadata +against this list to be sure: + +- ``libx11-trim``: Replaced by ``libx11``, which has a negligible + size difference with modern Xorg. + +- ``xserver-xorg-lite``: Use ``xserver-xorg``, which has a negligible + size difference when DRI and GLX modules are not installed. + +- ``xserver-kdrive``: Effectively unmaintained for many years. + +- ``mesa-xlib``: No longer serves any purpose. + +- ``galago``: Replaced by telepathy. + +- ``gail``: Functionality was integrated into GTK+ 2.13. + +- ``eggdbus``: No longer needed. + +- ``gcc-*-intermediate``: The build has been restructured to avoid + the need for this step. + +- ``libgsmd``: Unmaintained for many years. Functionality now + provided by ``ofono`` instead. + +- *contacts, dates, tasks, eds-tools*: Largely unmaintained PIM + application suite. It has been moved to ``meta-gnome`` in + ``meta-openembedded``. + +In addition to the previously listed changes, the ``meta-demoapps`` +directory has also been removed because the recipes in it were not being +maintained and many had become obsolete or broken. Additionally, these +recipes were not parsed in the default configuration. Many of these +recipes are already provided in an updated and maintained form within +the OpenEmbedded community layers such as ``meta-oe`` and +``meta-gnome``. For the remainder, you can now find them in the +``meta-extras`` repository, which is in the +:yocto_git:`Source Repositories <>` at +http://git.yoctoproject.org/cgit/cgit.cgi/meta-extras/. + +.. _1.3-linux-kernel-naming: + +Linux Kernel Naming +------------------- + +The naming scheme for kernel output binaries has been changed to now +include :term:`PE` as part of the filename: +:: + + KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PE}-${PV}-${PR}-${MACHINE}-${DATETIME}" + +Because the ``PE`` variable is not set by default, these binary files +could result with names that include two dash characters. 
Here is an +example: :: + + bzImage--3.10.9+git0+cd502a8814_7144bcc4b8-r0-qemux86-64-20130830085431.bin + + diff --git a/poky/documentation/ref-manual/migration-1.4.rst b/poky/documentation/ref-manual/migration-1.4.rst new file mode 100644 index 000000000..a658bdff6 --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.4.rst @@ -0,0 +1,237 @@ +Moving to the Yocto Project 1.4 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.4 Release from the prior release. + +.. _migration-1.4-bitbake: + +BitBake +------- + +Differences include the following: + +- *Comment Continuation:* If a comment ends with a line continuation + (\) character, then the next line must also be a comment. Any + instance where this is not the case, now triggers a warning. You must + either remove the continuation character, or be sure the next line is + a comment. + +- *Package Name Overrides:* The runtime package specific variables + :term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, + :term:`RPROVIDES`, + :term:`RCONFLICTS`, + :term:`RREPLACES`, :term:`FILES`, + :term:`ALLOW_EMPTY`, and the pre, post, install, + and uninstall script functions ``pkg_preinst``, ``pkg_postinst``, + ``pkg_prerm``, and ``pkg_postrm`` should always have a package name + override. For example, use ``RDEPENDS_${PN}`` for the main package + instead of ``RDEPENDS``. BitBake uses more strict checks when it + parses recipes. + +.. _migration-1.4-build-behavior: + +Build Behavior +-------------- + +Differences include the following: + +- *Shared State Code:* The shared state code has been optimized to + avoid running unnecessary tasks. For example, the following no longer + populates the target sysroot since that is not necessary: + :: + + $ bitbake -c rootfs some-image + + Instead, the system just needs to extract the + output package contents, re-create the packages, and construct the + root filesystem. This change is unlikely to cause any problems unless + you have missing declared dependencies. + +- *Scanning Directory Names:* When scanning for files in + :term:`SRC_URI`, the build system now uses + :term:`FILESOVERRIDES` instead of + :term:`OVERRIDES` for the directory names. In + general, the values previously in ``OVERRIDES`` are now in + ``FILESOVERRIDES`` as well. However, if you relied upon an additional + value you previously added to ``OVERRIDES``, you might now need to + add it to ``FILESOVERRIDES`` unless you are already adding it through + the :term:`MACHINEOVERRIDES` or + :term:`DISTROOVERRIDES` variables, as + appropriate. For more related changes, see the + "`Variables <#migration-1.4-variables>`__" section. + +.. _migration-1.4-proxies-and-fetching-source: + +Proxies and Fetching Source +--------------------------- + +A new ``oe-git-proxy`` script has been added to replace previous methods +of handling proxies and fetching source from Git. See the +``meta-yocto/conf/site.conf.sample`` file for information on how to use +this script. + +.. _migration-1.4-custom-interfaces-file-netbase-change: + +Custom Interfaces File (netbase change) +--------------------------------------- + +If you have created your own custom ``etc/network/interfaces`` file by +creating an append file for the ``netbase`` recipe, you now need to +create an append file for the ``init-ifupdown`` recipe instead, which +you can find in the :term:`Source Directory` at +``meta/recipes-core/init-ifupdown``. 
For information on how to use +append files, see the +":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-1.4-remote-debugging: + +Remote Debugging +---------------- + +Support for remote debugging with the Eclipse IDE is now separated into +an image feature (``eclipse-debug``) that corresponds to the +``packagegroup-core-eclipse-debug`` package group. Previously, the +debugging feature was included through the ``tools-debug`` image +feature, which corresponds to the ``packagegroup-core-tools-debug`` +package group. + +.. _migration-1.4-variables: + +Variables +--------- + +The following variables have changed: + +- ``SANITY_TESTED_DISTROS``: This variable now uses a distribution + ID, which is composed of the host distributor ID followed by the + release. Previously, + :term:`SANITY_TESTED_DISTROS` was + composed of the description field. For example, "Ubuntu 12.10" + becomes "Ubuntu-12.10". You do not need to worry about this change if + you are not specifically setting this variable, or if you are + specifically setting it to "". + +- ``SRC_URI``: The ``${``\ :term:`PN`\ ``}``, + ``${``\ :term:`PF`\ ``}``, + ``${``\ :term:`P`\ ``}``, and ``FILE_DIRNAME`` directories + have been dropped from the default value of the + :term:`FILESPATH` variable, which is used as the + search path for finding files referred to in + :term:`SRC_URI`. If you have a recipe that relied upon + these directories, which would be unusual, then you will need to add + the appropriate paths within the recipe or, alternatively, rearrange + the files. The most common locations are still covered by ``${BP}``, + ``${BPN}``, and "files", which all remain in the default value of + :term:`FILESPATH`. + +.. _migration-target-package-management-with-rpm: + +Target Package Management with RPM +---------------------------------- + +If runtime package management is enabled and the RPM backend is +selected, Smart is now installed for package download, dependency +resolution, and upgrades instead of Zypper. For more information on how +to use Smart, run the following command on the target: +:: + + smart --help + +.. _migration-1.4-recipes-moved: + +Recipes Moved +------------- + +The following recipes were moved from their previous locations because +they are no longer used by anything in the OpenEmbedded-Core: + +- ``clutter-box2d``: Now resides in the ``meta-oe`` layer. + +- ``evolution-data-server``: Now resides in the ``meta-gnome`` layer. + +- ``gthumb``: Now resides in the ``meta-gnome`` layer. + +- ``gtkhtml2``: Now resides in the ``meta-oe`` layer. + +- ``gupnp``: Now resides in the ``meta-multimedia`` layer. + +- ``gypsy``: Now resides in the ``meta-oe`` layer. + +- ``libcanberra``: Now resides in the ``meta-gnome`` layer. + +- ``libgdata``: Now resides in the ``meta-gnome`` layer. + +- ``libmusicbrainz``: Now resides in the ``meta-multimedia`` layer. + +- ``metacity``: Now resides in the ``meta-gnome`` layer. + +- ``polkit``: Now resides in the ``meta-oe`` layer. + +- ``zeroconf``: Now resides in the ``meta-networking`` layer. + +.. _migration-1.4-removals-and-renames: + +Removals and Renames +-------------------- + +The following list shows what has been removed or renamed: + +- ``evieext``: Removed because it has been removed from ``xserver`` + since 2008. + +- *Gtk+ DirectFB:* Removed support because upstream Gtk+ no longer + supports it as of version 2.18. 
+ +- ``libxfontcache / xfontcacheproto``: Removed because they were + removed from the Xorg server in 2008. + +- ``libxp / libxprintapputil / libxprintutil / printproto``: Removed + because the XPrint server was removed from Xorg in 2008. + +- ``libxtrap / xtrapproto``: Removed because their functionality was + broken upstream. + +- *linux-yocto 3.0 kernel:* Removed with linux-yocto 3.8 kernel being + added. The linux-yocto 3.2 and linux-yocto 3.4 kernels remain as part + of the release. + +- ``lsbsetup``: Removed with functionality now provided by + ``lsbtest``. + +- ``matchbox-stroke``: Removed because it was never more than a + proof-of-concept. + +- ``matchbox-wm-2 / matchbox-theme-sato-2``: Removed because they are + not maintained. However, ``matchbox-wm`` and ``matchbox-theme-sato`` + are still provided. + +- ``mesa-dri``: Renamed to ``mesa``. + +- ``mesa-xlib``: Removed because it was no longer useful. + +- ``mutter``: Removed because nothing ever uses it and the recipe is + very old. + +- ``orinoco-conf``: Removed because it has become obsolete. + +- ``update-modules``: Removed because it is no longer used. The + kernel module ``postinstall`` and ``postrm`` scripts can now do the + same task without the use of this script. + +- ``web``: Removed because it is not maintained. Superseded by + ``web-webkit``. + +- ``xf86bigfontproto``: Removed because upstream it has been disabled + by default since 2007. Nothing uses ``xf86bigfontproto``. + +- ``xf86rushproto``: Removed because its dependency in ``xserver`` + was spurious and it was removed in 2005. + +- ``zypper / libzypp / sat-solver``: Removed and been functionally + replaced with Smart (``python-smartpm``) when RPM packaging is used + and package management is enabled on the target. + diff --git a/poky/documentation/ref-manual/migration-1.5.rst b/poky/documentation/ref-manual/migration-1.5.rst new file mode 100644 index 000000000..ce55199df --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.5.rst @@ -0,0 +1,355 @@ +Moving to the Yocto Project 1.5 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.5 Release from the prior release. + +.. _migration-1.5-host-dependency-changes: + +Host Dependency Changes +----------------------- + +The OpenEmbedded build system now has some additional requirements on +the host system: + +- Python 2.7.3+ + +- Tar 1.24+ + +- Git 1.7.8+ + +- Patched version of Make if you are using 3.82. Most distributions + that provide Make 3.82 use the patched version. + +If the Linux distribution you are using on your build host does not +provide packages for these, you can install and use the Buildtools +tarball, which provides an SDK-like environment containing them. + +For more information on this requirement, see the "`Required Git, tar, +Python and gcc Versions <#required-git-tar-python-and-gcc-versions>`__" +section. + +.. _migration-1.5-atom-pc-bsp: + +``atom-pc`` Board Support Package (BSP) +--------------------------------------- + +The ``atom-pc`` hardware reference BSP has been replaced by a +``genericx86`` BSP. This BSP is not necessarily guaranteed to work on +all x86 hardware, but it will run on a wider range of systems than the +``atom-pc`` did. + +.. note:: + + Additionally, a + genericx86-64 + BSP has been added for 64-bit Atom systems. + +.. _migration-1.5-bitbake: + +BitBake +------- + +The following changes have been made that relate to BitBake: + +- BitBake now supports a ``_remove`` operator. 
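As an illustration only (the variable and value here are hypothetical, not a recommendation), the operator strips the listed items from a variable's final value: ::

   DISTRO_FEATURES_remove = "x11"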
The addition of this + operator means you will have to rename any items in recipe space + (functions, variables) whose names currently contain ``_remove_`` or + end with ``_remove`` to avoid unexpected behavior. + +- BitBake's global method pool has been removed. This method is not + particularly useful and led to clashes between recipes containing + functions that had the same name. + +- The "none" server backend has been removed. The "process" server + backend has been serving well as the default for a long time now. + +- The ``bitbake-runtask`` script has been removed. + +- ``${``\ :term:`P`\ ``}`` and + ``${``\ :term:`PF`\ ``}`` are no longer added to + :term:`PROVIDES` by default in ``bitbake.conf``. + These version-specific ``PROVIDES`` items were seldom used. + Attempting to use them could result in two versions being built + simultaneously rather than just one version due to the way BitBake + resolves dependencies. + +.. _migration-1.5-qa-warnings: + +QA Warnings +----------- + +The following changes have been made to the package QA checks: + +- If you have customized :term:`ERROR_QA` or + :term:`WARN_QA` values in your configuration, check + that they contain all of the issues that you wish to be reported. + Previous Yocto Project versions contained a bug that meant that any + item not mentioned in ``ERROR_QA`` or ``WARN_QA`` would be treated as + a warning. Consequently, several important items were not already in + the default value of ``WARN_QA``. All of the possible QA checks are + now documented in the ":ref:`insane.bbclass `" + section. + +- An additional QA check has been added to check if + ``/usr/share/info/dir`` is being installed. Your recipe should delete + this file within :ref:`ref-tasks-install` if "make + install" is installing it. + +- If you are using the buildhistory class, the check for the package + version going backwards is now controlled using a standard QA check. + Thus, if you have customized your ``ERROR_QA`` or ``WARN_QA`` values + and still wish to have this check performed, you should add + "version-going-backwards" to your value for one or the other + variables depending on how you wish it to be handled. See the + documented QA checks in the + ":ref:`insane.bbclass `" section. + +.. _migration-1.5-directory-layout-changes: + +Directory Layout Changes +------------------------ + +The following directory changes exist: + +- Output SDK installer files are now named to include the image name + and tuning architecture through the :term:`SDK_NAME` + variable. + +- Images and related files are now installed into a directory that is + specific to the machine, instead of a parent directory containing + output files for multiple machines. The + :term:`DEPLOY_DIR_IMAGE` variable continues + to point to the directory containing images for the current + :term:`MACHINE` and should be used anywhere there is a + need to refer to this directory. The ``runqemu`` script now uses this + variable to find images and kernel binaries and will use BitBake to + determine the directory. Alternatively, you can set the + ``DEPLOY_DIR_IMAGE`` variable in the external environment. + +- When buildhistory is enabled, its output is now written under the + :term:`Build Directory` rather than + :term:`TMPDIR`. Doing so makes it easier to delete + ``TMPDIR`` and preserve the build history. Additionally, data for + produced SDKs is now split by :term:`IMAGE_NAME`. 
+ +- The ``pkgdata`` directory produced as part of the packaging process + has been collapsed into a single machine-specific directory. This + directory is located under ``sysroots`` and uses a machine-specific + name (i.e. ``tmp/sysroots/machine/pkgdata``). + +.. _migration-1.5-shortened-git-srcrev-values: + +Shortened Git ``SRCREV`` Values +------------------------------- + +BitBake will now shorten revisions from Git repositories from the normal +40 characters down to 10 characters within :term:`SRCPV` +for improved usability in path and file names. This change should be +safe within contexts where these revisions are used because the chances +of spatially close collisions is very low. Distant collisions are not a +major issue in the way the values are used. + +.. _migration-1.5-image-features: + +``IMAGE_FEATURES`` +------------------ + +The following changes have been made that relate to +:term:`IMAGE_FEATURES`: + +- The value of ``IMAGE_FEATURES`` is now validated to ensure invalid + feature items are not added. Some users mistakenly add package names + to this variable instead of using + :term:`IMAGE_INSTALL` in order to have the + package added to the image, which does not work. This change is + intended to catch those kinds of situations. Valid ``IMAGE_FEATURES`` + are drawn from ``PACKAGE_GROUP`` definitions, + :term:`COMPLEMENTARY_GLOB` and a new + "validitems" varflag on ``IMAGE_FEATURES``. The "validitems" varflag + change allows additional features to be added if they are not + provided using the previous two mechanisms. + +- The previously deprecated "apps-console-core" ``IMAGE_FEATURES`` item + is no longer supported. Add "splash" to ``IMAGE_FEATURES`` if you + wish to have the splash screen enabled, since this is all that + apps-console-core was doing. + +.. _migration-1.5-run: + +``/run`` +-------- + +The ``/run`` directory from the Filesystem Hierarchy Standard 3.0 has +been introduced. You can find some of the implications for this change +`here `__. +The change also means that recipes that install files to ``/var/run`` +must be changed. You can find a guide on how to make these changes +`here `__. + +.. _migration-1.5-removal-of-package-manager-database-within-image-recipes: + +Removal of Package Manager Database Within Image Recipes +-------------------------------------------------------- + +The image ``core-image-minimal`` no longer adds +``remove_packaging_data_files`` to +:term:`ROOTFS_POSTPROCESS_COMMAND`. +This addition is now handled automatically when "package-management" is +not in :term:`IMAGE_FEATURES`. If you have custom +image recipes that make this addition, you should remove the lines, as +they are not needed and might interfere with correct operation of +postinstall scripts. + +.. _migration-1.5-images-now-rebuild-only-on-changes-instead-of-every-time: + +Images Now Rebuild Only on Changes Instead of Every Time +-------------------------------------------------------- + +The :ref:`ref-tasks-rootfs` and other related image +construction tasks are no longer marked as "nostamp". Consequently, they +will only be re-executed when their inputs have changed. Previous +versions of the OpenEmbedded build system always rebuilt the image when +requested rather when necessary. + +.. _migration-1.5-task-recipes: + +Task Recipes +------------ + +The previously deprecated ``task.bbclass`` has now been dropped. For +recipes that previously inherited from this class, you should rename +them from ``task-*`` to ``packagegroup-*`` and inherit packagegroup +instead. 
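As a rough sketch (recipe and package names are hypothetical), a former ``task-custom-tools.bb`` would be renamed to ``packagegroup-custom-tools.bb`` with contents along these lines: ::

   SUMMARY = "Custom set of command-line tools"
   inherit packagegroup
   RDEPENDS_${PN} = "dropbear rsync"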
+ +For more information, see the +":ref:`packagegroup.bbclass `" section. + +.. _migration-1.5-busybox: + +BusyBox +------- + +By default, we now split BusyBox into two binaries: one that is suid +root for those components that need it, and another for the rest of the +components. Splitting BusyBox allows for optimization that eliminates +the ``tinylogin`` recipe as recommended by upstream. You can disable +this split by setting +:term:`BUSYBOX_SPLIT_SUID` to "0". + +.. _migration-1.5-automated-image-testing: + +Automated Image Testing +----------------------- + +A new automated image testing framework has been added through the +:ref:`testimage.bbclass ` class. This +framework replaces the older ``imagetest-qemu`` framework. + +You can learn more about performing automated image tests in the +":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-1.5-build-history: + +Build History +------------- + +Following are changes to Build History: + +- Installed package sizes: ``installed-package-sizes.txt`` for an image + now records the size of the files installed by each package instead + of the size of each compressed package archive file. + +- The dependency graphs (``depends*.dot``) now use the actual package + names instead of replacing dashes, dots and plus signs with + underscores. + +- The ``buildhistory-diff`` and ``buildhistory-collect-srcrevs`` + utilities have improved command-line handling. Use the ``--help`` + option for each utility for more information on the new syntax. + +For more information on Build History, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining build output quality`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-1.5-udev: + +``udev`` +-------- + +Following are changes to ``udev``: + +- ``udev`` no longer brings in ``udev-extraconf`` automatically through + :term:`RRECOMMENDS`, since this was originally + intended to be optional. If you need the extra rules, then add + ``udev-extraconf`` to your image. + +- ``udev`` no longer brings in ``pciutils-ids`` or ``usbutils-ids`` + through ``RRECOMMENDS``. These are not needed by ``udev`` itself and + removing them saves around 350KB. + +.. _migration-1.5-removed-renamed-recipes: + +Removed and Renamed Recipes +--------------------------- + +- The ``linux-yocto`` 3.2 kernel has been removed. + +- ``libtool-nativesdk`` has been renamed to ``nativesdk-libtool``. + +- ``tinylogin`` has been removed. It has been replaced by a suid + portion of Busybox. See the "`BusyBox <#migration-1.5-busybox>`__" + section for more information. + +- ``external-python-tarball`` has been renamed to + ``buildtools-tarball``. + +- ``web-webkit`` has been removed. It has been functionally replaced by + ``midori``. + +- ``imake`` has been removed. It is no longer needed by any other + recipe. + +- ``transfig-native`` has been removed. It is no longer needed by any + other recipe. + +- ``anjuta-remote-run`` has been removed. Anjuta IDE integration has + not been officially supported for several releases. + +.. _migration-1.5-other-changes: + +Other Changes +------------- + +Following is a list of short entries describing other changes: + +- ``run-postinsts``: Make this generic. + +- ``base-files``: Remove the unnecessary ``media/``\ xxx directories. + +- ``alsa-state``: Provide an empty ``asound.conf`` by default. + +- ``classes/image``: Ensure + :term:`BAD_RECOMMENDATIONS` supports + pre-renamed package names. 
+ +- ``classes/rootfs_rpm``: Implement ``BAD_RECOMMENDATIONS`` for RPM. + +- ``systemd``: Remove ``systemd_unitdir`` if ``systemd`` is not in + :term:`DISTRO_FEATURES`. + +- ``systemd``: Remove ``init.d`` dir if ``systemd`` unit file is + present and ``sysvinit`` is not a distro feature. + +- ``libpam``: Deny all services for the ``OTHER`` entries. + +- ``image.bbclass``: Move ``runtime_mapping_rename`` to avoid conflict + with ``multilib``. See + `YOCTO #4993 `_ + in Bugzilla for more information. + +- ``linux-dtb``: Use kernel build system to generate the ``dtb`` files. + +- ``kern-tools``: Switch from guilt to new ``kgit-s2q`` tool. + diff --git a/poky/documentation/ref-manual/migration-1.6.rst b/poky/documentation/ref-manual/migration-1.6.rst new file mode 100644 index 000000000..b55be46e5 --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.6.rst @@ -0,0 +1,417 @@ +Moving to the Yocto Project 1.6 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.6 Release from the prior release. + +.. _migration-1.6-archiver-class: + +``archiver`` Class +------------------ + +The :ref:`archiver ` class has been rewritten +and its configuration has been simplified. For more details on the +source archiver, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-1.6-packaging-changes: + +Packaging Changes +----------------- + +The following packaging changes have been made: + +- The ``binutils`` recipe no longer produces a ``binutils-symlinks`` + package. ``update-alternatives`` is now used to handle the preferred + ``binutils`` variant on the target instead. + +- The tc (traffic control) utilities have been split out of the main + ``iproute2`` package and put into the ``iproute2-tc`` package. + +- The ``gtk-engines`` schemas have been moved to a dedicated + ``gtk-engines-schemas`` package. + +- The ``armv7a`` with thumb package architecture suffix has changed. + The suffix for these packages with the thumb optimization enabled is + "t2" as it should be. Use of this suffix was not the case in the 1.5 + release. Architecture names will change within package feeds as a + result. + +.. _migration-1.6-bitbake: + +BitBake +------- + +The following changes have been made to :term:`BitBake`. + +.. _migration-1.6-matching-branch-requirement-for-git-fetching: + +Matching Branch Requirement for Git Fetching +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +When fetching source from a Git repository using +:term:`SRC_URI`, BitBake will now validate the +:term:`SRCREV` value against the branch. You can specify +the branch using the following form: SRC_URI = +"git://server.name/repository;branch=branchname" If you do not specify a +branch, BitBake looks in the default "master" branch. + +Alternatively, if you need to bypass this check (e.g. if you are +fetching a revision corresponding to a tag that is not on any branch), +you can add ";nobranch=1" to the end of the URL within ``SRC_URI``. + +.. _migration-1.6-bitbake-deps: + +Python Definition substitutions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BitBake had some previously deprecated Python definitions within its +``bb`` module removed. You should use their sub-module counterparts +instead: + +- ``bb.MalformedUrl``: Use ``bb.fetch.MalformedUrl``. + +- ``bb.encodeurl``: Use ``bb.fetch.encodeurl``. 
+ +- ``bb.decodeurl``: Use ``bb.fetch.decodeurl`` + +- ``bb.mkdirhier``: Use ``bb.utils.mkdirhier``. + +- ``bb.movefile``: Use ``bb.utils.movefile``. + +- ``bb.copyfile``: Use ``bb.utils.copyfile``. + +- ``bb.which``: Use ``bb.utils.which``. + +- ``bb.vercmp_string``: Use ``bb.utils.vercmp_string``. + +- ``bb.vercmp``: Use ``bb.utils.vercmp``. + +.. _migration-1.6-bitbake-fetcher: + +SVK Fetcher +~~~~~~~~~~~ + +The SVK fetcher has been removed from BitBake. + +.. _migration-1.6-bitbake-console-output: + +Console Output Error Redirection +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The BitBake console UI will now output errors to ``stderr`` instead of +``stdout``. Consequently, if you are piping or redirecting the output of +``bitbake`` to somewhere else, and you wish to retain the errors, you +will need to add ``2>&1`` (or something similar) to the end of your +``bitbake`` command line. + +.. _migration-1.6-task-taskname-overrides: + +``task-``\ taskname Overrides +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``task-``\ taskname overrides have been adjusted so that tasks whose +names contain underscores have the underscores replaced by hyphens for +the override so that they now function properly. For example, the task +override for :ref:`ref-tasks-populate_sdk` is +``task-populate-sdk``. + +.. _migration-1.6-variable-changes: + +Changes to Variables +-------------------- + +The following variables have changed. For information on the +OpenEmbedded build system variables, see the "`Variables +Glossary <#ref-variables-glos>`__" Chapter. + +.. _migration-1.6-variable-changes-TMPDIR: + +``TMPDIR`` +~~~~~~~~~~ + +:term:`TMPDIR` can no longer be on an NFS mount. NFS does +not offer full POSIX locking and inode consistency and can cause +unexpected issues if used to store ``TMPDIR``. + +The check for this occurs on startup. If ``TMPDIR`` is detected on an +NFS mount, an error occurs. + +.. _migration-1.6-variable-changes-PRINC: + +``PRINC`` +~~~~~~~~~ + +The ``PRINC`` variable has been deprecated and triggers a warning if +detected during a build. For :term:`PR` increments on changes, +use the PR service instead. You can find out more about this service in +the ":ref:`dev-manual/dev-manual-common-tasks:working with a pr service`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-1.6-variable-changes-IMAGE_TYPES: + +``IMAGE_TYPES`` +~~~~~~~~~~~~~~~ + +The "sum.jffs2" option for :term:`IMAGE_TYPES` has +been replaced by the "jffs2.sum" option, which fits the processing +order. + +.. _migration-1.6-variable-changes-COPY_LIC_MANIFEST: + +``COPY_LIC_MANIFEST`` +~~~~~~~~~~~~~~~~~~~~~ + +The :term:`COPY_LIC_MANIFEST` variable must now +be set to "1" rather than any value in order to enable it. + +.. _migration-1.6-variable-changes-COPY_LIC_DIRS: + +``COPY_LIC_DIRS`` +~~~~~~~~~~~~~~~~~ + +The :term:`COPY_LIC_DIRS` variable must now be set +to "1" rather than any value in order to enable it. + +.. _migration-1.6-variable-changes-PACKAGE_GROUP: + +``PACKAGE_GROUP`` +~~~~~~~~~~~~~~~~~ + +The ``PACKAGE_GROUP`` variable has been renamed to +:term:`FEATURE_PACKAGES` to more accurately +reflect its purpose. You can still use ``PACKAGE_GROUP`` but the +OpenEmbedded build system produces a warning message when it encounters +the variable. + +.. 
_migration-1.6-variable-changes-variable-entry-behavior: + +Preprocess and Post Process Command Variable Behavior +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following variables now expect a semicolon separated list of +functions to call and not arbitrary shell commands: + + - :term:`ROOTFS_PREPROCESS_COMMAND` + - :term:`ROOTFS_POSTPROCESS_COMMAND` + - :term:`SDK_POSTPROCESS_COMMAND` + - :term:`POPULATE_SDK_POST_TARGET_COMMAND` + - :term:`POPULATE_SDK_POST_HOST_COMMAND` + - :term:`IMAGE_POSTPROCESS_COMMAND` + - :term:`IMAGE_PREPROCESS_COMMAND` + - :term:`ROOTFS_POSTUNINSTALL_COMMAND` + - :term:`ROOTFS_POSTINSTALL_COMMAND` + +For +migration purposes, you can simply wrap shell commands in a shell +function and then call the function. Here is an example: :: + + my_postprocess_function() { + echo "hello" > ${IMAGE_ROOTFS}/hello.txt + } + ROOTFS_POSTPROCESS_COMMAND += "my_postprocess_function; " + +.. _migration-1.6-package-test-ptest: + +Package Test (ptest) +-------------------- + +Package Tests (ptest) are built but not installed by default. For +information on using Package Tests, see the +":ref:`dev-manual/dev-manual-common-tasks:testing packages with ptest`" +section in the Yocto Project Development Tasks Manual. For information on the +``ptest`` class, see the ":ref:`ptest.bbclass `" +section. + +.. _migration-1.6-build-changes: + +Build Changes +------------- + +Separate build and source directories have been enabled by default for +selected recipes where it is known to work (a whitelist) and for all +recipes that inherit the :ref:`cmake ` class. In +future releases the :ref:`autotools ` class +will enable a separate build directory by default as well. Recipes +building Autotools-based software that fails to build with a separate +build directory should be changed to inherit from the +:ref:`autotools-brokensep ` class instead of +the ``autotools`` or ``autotools_stage``\ classes. + +.. _migration-1.6-building-qemu-native: + +``qemu-native`` +--------------- + +``qemu-native`` now builds without SDL-based graphical output support by +default. The following additional lines are needed in your +``local.conf`` to enable it: +:: + + PACKAGECONFIG_pn-qemu-native = "sdl" + ASSUME_PROVIDED += "libsdl-native" + +.. note:: + + The default + local.conf + contains these statements. Consequently, if you are building a + headless system and using a default + local.conf + file, you will need comment these two lines out. + +.. _migration-1.6-core-image-basic: + +``core-image-basic`` +-------------------- + +``core-image-basic`` has been renamed to ``core-image-full-cmdline``. + +In addition to ``core-image-basic`` being renamed, +``packagegroup-core-basic`` has been renamed to +``packagegroup-core-full-cmdline`` to match. + +.. _migration-1.6-licensing: + +Licensing +--------- + +The top-level ``LICENSE`` file has been changed to better describe the +license of the various components of :term:`OpenEmbedded-Core (OE-Core)`. However, +the licensing itself remains unchanged. + +Normally, this change would not cause any side-effects. However, some +recipes point to this file within +:term:`LIC_FILES_CHKSUM` (as +``${COREBASE}/LICENSE``) and thus the accompanying checksum must be +changed from 3f40d7994397109285ec7b81fdeb3b58 to +4d92cd373abda3937c2bc47fbc49d690. A better alternative is to have +``LIC_FILES_CHKSUM`` point to a file describing the license that is +distributed with the source that the recipe is building, if possible, +rather than pointing to ``${COREBASE}/LICENSE``. + +.. 
_migration-1.6-cflags-options: + +``CFLAGS`` Options +------------------ + +The "-fpermissive" option has been removed from the default +:term:`CFLAGS` value. You need to take action on +individual recipes that fail when building with this option. You need to +either patch the recipes to fix the issues reported by the compiler, or +you need to add "-fpermissive" to ``CFLAGS`` in the recipes. + +.. _migration-1.6-custom-images: + +Custom Image Output Types +------------------------- + +Custom image output types, as selected using +:term:`IMAGE_FSTYPES`, must declare their +dependencies on other image types (if any) using a new +:term:`IMAGE_TYPEDEP` variable. + +.. _migration-1.6-do-package-write-task: + +Tasks +----- + +The ``do_package_write`` task has been removed. The task is no longer +needed. + +.. _migration-1.6-update-alternatives-provider: + +``update-alternative`` Provider +------------------------------- + +The default ``update-alternatives`` provider has been changed from +``opkg`` to ``opkg-utils``. This change resolves some troublesome +circular dependencies. The runtime package has also been renamed from +``update-alternatives-cworth`` to ``update-alternatives-opkg``. + +.. _migration-1.6-virtclass-overrides: + +``virtclass`` Overrides +----------------------- + +The ``virtclass`` overrides are now deprecated. Use the equivalent class +overrides instead (e.g. ``virtclass-native`` becomes ``class-native``.) + +.. _migration-1.6-removed-renamed-recipes: + +Removed and Renamed Recipes +--------------------------- + +The following recipes have been removed: + +- ``packagegroup-toolset-native`` - This recipe is largely unused. + +- ``linux-yocto-3.8`` - Support for the Linux yocto 3.8 kernel has been + dropped. Support for the 3.10 and 3.14 kernels have been added with + the ``linux-yocto-3.10`` and ``linux-yocto-3.14`` recipes. + +- ``ocf-linux`` - This recipe has been functionally replaced using + ``cryptodev-linux``. + +- ``genext2fs`` - ``genext2fs`` is no longer used by the build system + and is unmaintained upstream. + +- ``js`` - This provided an ancient version of Mozilla's javascript + engine that is no longer needed. + +- ``zaurusd`` - The recipe has been moved to the ``meta-handheld`` + layer. + +- ``eglibc 2.17`` - Replaced by the ``eglibc 2.19`` recipe. + +- ``gcc 4.7.2`` - Replaced by the now stable ``gcc 4.8.2``. + +- ``external-sourcery-toolchain`` - this recipe is now maintained in + the ``meta-sourcery`` layer. + +- ``linux-libc-headers-yocto 3.4+git`` - Now using version 3.10 of the + ``linux-libc-headers`` by default. + +- ``meta-toolchain-gmae`` - This recipe is obsolete. + +- ``packagegroup-core-sdk-gmae`` - This recipe is obsolete. + +- ``packagegroup-core-standalone-gmae-sdk-target`` - This recipe is + obsolete. + +.. _migration-1.6-removed-classes: + +Removed Classes +--------------- + +The following classes have become obsolete and have been removed: + +- ``module_strip`` + +- ``pkg_metainfo`` + +- ``pkg_distribute`` + +- ``image-empty`` + +.. _migration-1.6-reference-bsps: + +Reference Board Support Packages (BSPs) +--------------------------------------- + +The following reference BSPs changes occurred: + +- The BeagleBoard (``beagleboard``) ARM reference hardware has been + replaced by the BeagleBone (``beaglebone``) hardware. + +- The RouterStation Pro (``routerstationpro``) MIPS reference hardware + has been replaced by the EdgeRouter Lite (``edgerouter``) hardware. 
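As with any other machine, you select the replacement reference hardware in your ``local.conf`` (machine name as listed above): ::

   MACHINE ?= "beaglebone"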
+ +The previous reference BSPs for the ``beagleboard`` and +``routerstationpro`` machines are still available in a new +``meta-yocto-bsp-old`` layer in the +:yocto_git:`Source Repositories <>` at +http://git.yoctoproject.org/cgit/cgit.cgi/meta-yocto-bsp-old/. + + diff --git a/poky/documentation/ref-manual/migration-1.7.rst b/poky/documentation/ref-manual/migration-1.7.rst new file mode 100644 index 000000000..82fd37d3a --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.7.rst @@ -0,0 +1,225 @@ +Moving to the Yocto Project 1.7 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.7 Release from the prior release. + +.. _migration-1.7-changes-to-setting-qemu-packageconfig-options: + +Changes to Setting QEMU ``PACKAGECONFIG`` Options in ``local.conf`` +------------------------------------------------------------------- + +The QEMU recipe now uses a number of +:term:`PACKAGECONFIG` options to enable various +optional features. The method used to set defaults for these options +means that existing ``local.conf`` files will need to be be modified to +append to ``PACKAGECONFIG`` for ``qemu-native`` and ``nativesdk-qemu`` +instead of setting it. In other words, to enable graphical output for +QEMU, you should now have these lines in ``local.conf``: +:: + + PACKAGECONFIG_append_pn-qemu-native = " sdl" + PACKAGECONFIG_append_pn-nativesdk-qemu = " sdl" + +.. _migration-1.7-minimum-git-version: + +Minimum Git version +------------------- + +The minimum :ref:`overview-manual/overview-manual-development-environment:git` +version required on the +build host is now 1.7.8 because the ``--list`` option is now required by +BitBake's Git fetcher. As always, if your host distribution does not +provide a version of Git that meets this requirement, you can use the +``buildtools-tarball`` that does. See the "`Required Git, tar, Python +and gcc Versions <#required-git-tar-python-and-gcc-versions>`__" section +for more information. + +.. _migration-1.7-autotools-class-changes: + +Autotools Class Changes +----------------------- + +The following :ref:`autotools ` class changes +occurred: + +- *A separate build directory is now used by default:* The + ``autotools`` class has been changed to use a directory for building + (:term:`B`), which is separate from the source directory + (:term:`S`). This is commonly referred to as ``B != S``, or + an out-of-tree build. + + If the software being built is already capable of building in a + directory separate from the source, you do not need to do anything. + However, if the software is not capable of being built in this + manner, you will need to either patch the software so that it can + build separately, or you will need to change the recipe to inherit + the :ref:`autotools-brokensep ` class + instead of the ``autotools`` or ``autotools_stage`` classes. + +- The ``--foreign`` option is no longer passed to ``automake`` when + running ``autoconf``: This option tells ``automake`` that a + particular software package does not follow the GNU standards and + therefore should not be expected to distribute certain files such as + ``ChangeLog``, ``AUTHORS``, and so forth. Because the majority of + upstream software packages already tell ``automake`` to enable + foreign mode themselves, the option is mostly superfluous. However, + some recipes will need patches for this change. You can easily make + the change by patching ``configure.ac`` so that it passes "foreign" + to ``AM_INIT_AUTOMAKE()``. 
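In most cases the patch is a one-line change to ``configure.ac`` (shown here as a sketch; keep any arguments the macro already passes): ::

   AM_INIT_AUTOMAKE([foreign])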
See `this + commit `__ + for an example showing how to make the patch. + +.. _migration-1.7-binary-configuration-scripts-disabled: + +Binary Configuration Scripts Disabled +------------------------------------- + +Some of the core recipes that package binary configuration scripts now +disable the scripts due to the scripts previously requiring error-prone +path substitution. Software that links against these libraries using +these scripts should use the much more robust ``pkg-config`` instead. +The list of recipes changed in this version (and their configuration +scripts) is as follows: +:: + + directfb (directfb-config) + freetype (freetype-config) + gpgme (gpgme-config) + libassuan (libassuan-config) + libcroco (croco-6.0-config) + libgcrypt (libgcrypt-config) + libgpg-error (gpg-error-config) + libksba (ksba-config) + libpcap (pcap-config) + libpcre (pcre-config) + libpng (libpng-config, libpng16-config) + libsdl (sdl-config) + libusb-compat (libusb-config) + libxml2 (xml2-config) + libxslt (xslt-config) + ncurses (ncurses-config) + neon (neon-config) + npth (npth-config) + pth (pth-config) + taglib (taglib-config) + +Additionally, support for ``pkg-config`` has been added to some recipes in the +previous list in the rare cases where the upstream software package does +not already provide it. + +.. _migration-1.7-glibc-replaces-eglibc: + +``eglibc 2.19`` Replaced with ``glibc 2.20`` +-------------------------------------------- + +Because ``eglibc`` and ``glibc`` were already fairly close, this +replacement should not require any significant changes to other software +that links to ``eglibc``. However, there were a number of minor changes +in ``glibc 2.20`` upstream that could require patching some software +(e.g. the removal of the ``_BSD_SOURCE`` feature test macro). + +``glibc 2.20`` requires version 2.6.32 or greater of the Linux kernel. +Thus, older kernels will no longer be usable in conjunction with it. + +For full details on the changes in ``glibc 2.20``, see the upstream +release notes +`here `__. + +.. _migration-1.7-kernel-module-autoloading: + +Kernel Module Autoloading +------------------------- + +The :term:`module_autoload_* ` variable is now +deprecated and a new +:term:`KERNEL_MODULE_AUTOLOAD` variable +should be used instead. Also, :term:`module_conf_* ` +must now be used in conjunction with a new +:term:`KERNEL_MODULE_PROBECONF` variable. +The new variables no longer require you to specify the module name as +part of the variable name. This change not only simplifies usage but +also allows the values of these variables to be appropriately +incorporated into task signatures and thus trigger the appropriate tasks +to re-execute when changed. You should replace any references to +``module_autoload_*`` with ``KERNEL_MODULE_AUTOLOAD``, and add any +modules for which ``module_conf_*`` is specified to +``KERNEL_MODULE_PROBECONF``. + +.. _migration-1.7-qa-check-changes: + +QA Check Changes +---------------- + +The following changes have occurred to the QA check process: + +- Additional QA checks ``file-rdeps`` and ``build-deps`` have been + added in order to verify that file dependencies are satisfied (e.g. + package contains a script requiring ``/bin/bash``) and build-time + dependencies are declared, respectively. For more information, please + see the "`QA Error and Warning Messages <#ref-qa-checks>`__" chapter. + +- Package QA checks are now performed during a new + :ref:`ref-tasks-package_qa` task rather than being + part of the :ref:`ref-tasks-package` task. 
This allows + more parallel execution. This change is unlikely to be an issue + except for highly customized recipes that disable packaging tasks + themselves by marking them as ``noexec``. For those packages, you + will need to disable the ``do_package_qa`` task as well. + +- Files being overwritten during the + :ref:`ref-tasks-populate_sysroot` task now + trigger an error instead of a warning. Recipes should not be + overwriting files written to the sysroot by other recipes. If you + have these types of recipes, you need to alter them so that they do + not overwrite these files. + + You might now receive this error after changes in configuration or + metadata resulting in orphaned files being left in the sysroot. If + you do receive this error, the way to resolve the issue is to delete + your :term:`TMPDIR` or to move it out of the way and + then re-start the build. Anything that has been fully built up to + that point and does not need rebuilding will be restored from the + shared state cache and the rest of the build will be able to proceed + as normal. + +.. _migration-1.7-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``x-load``: This recipe has been superseded by U-boot SPL for all + Cortex-based TI SoCs. For legacy boards, the ``meta-ti`` layer, which + contains a maintained recipe, should be used instead. + +- ``ubootchart``: This recipe is obsolete. A ``bootchart2`` recipe has + been added to functionally replace it. + +- ``linux-yocto 3.4``: Support for the linux-yocto 3.4 kernel has been + dropped. Support for the 3.10 and 3.14 kernels remains, while support + for version 3.17 has been added. + +- ``eglibc`` has been removed in favor of ``glibc``. See the + "```eglibc 2.19`` Replaced with + ``glibc 2.20`` <#migration-1.7-glibc-replaces-eglibc>`__" section for + more information. + +.. _migration-1.7-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous change occurred: + +- The build history feature now writes ``build-id.txt`` instead of + ``build-id``. Additionally, ``build-id.txt`` now contains the full + build header as printed by BitBake upon starting the build. You + should manually remove old "build-id" files from your existing build + history repositories to avoid confusion. For information on the build + history feature, see the + ":ref:`dev-manual/dev-manual-common-tasks:maintaining build output quality`" + section in the Yocto Project Development Tasks Manual. + + diff --git a/poky/documentation/ref-manual/migration-1.8.rst b/poky/documentation/ref-manual/migration-1.8.rst new file mode 100644 index 000000000..d601e6b63 --- /dev/null +++ b/poky/documentation/ref-manual/migration-1.8.rst @@ -0,0 +1,183 @@ +Moving to the Yocto Project 1.8 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 1.8 Release from the prior release. + +.. _migration-1.8-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``owl-video``: Functionality replaced by ``gst-player``. + +- ``gaku``: Functionality replaced by ``gst-player``. + +- ``gnome-desktop``: This recipe is now available in ``meta-gnome`` and + is no longer needed. + +- ``gsettings-desktop-schemas``: This recipe is now available in + ``meta-gnome`` and is no longer needed. 
+ +- ``python-argparse``: The ``argparse`` module is already provided in + the default Python distribution in a package named + ``python-argparse``. Consequently, the separate ``python-argparse`` + recipe is no longer needed. + +- ``telepathy-python, libtelepathy, telepathy-glib, telepathy-idle, telepathy-mission-control``: + All these recipes have moved to ``meta-oe`` and are consequently no + longer needed by any recipes in OpenEmbedded-Core. + +- ``linux-yocto_3.10`` and ``linux-yocto_3.17``: Support for the + linux-yocto 3.10 and 3.17 kernels has been dropped. Support for the + 3.14 kernel remains, while support for 3.19 kernel has been added. + +- ``poky-feed-config-opkg``: This recipe has become obsolete and is no + longer needed. Use ``distro-feed-config`` from ``meta-oe`` instead. + +- ``libav 0.8.x``: ``libav 9.x`` is now used. + +- ``sed-native``: No longer needed. A working version of ``sed`` is + expected to be provided by the host distribution. + +.. _migration-1.8-bluez: + +BlueZ 4.x / 5.x Selection +------------------------- + +Proper built-in support for selecting BlueZ 5.x in preference to the +default of 4.x now exists. To use BlueZ 5.x, simply add "bluez5" to your +:term:`DISTRO_FEATURES` value. If you had +previously added append files (``*.bbappend``) to make this selection, +you can now remove them. + +Additionally, a ``bluetooth`` class has been added to make selection of +the appropriate bluetooth support within a recipe a little easier. If +you wish to make use of this class in a recipe, add something such as +the following: :: + + inherit bluetooth + PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'bluetooth', '${BLUEZ}', '', d)}" + PACKAGECONFIG[bluez4] = "--enable-bluetooth,--disable-bluetooth,bluez4" + PACKAGECONFIG[bluez5] = "--enable-bluez5,--disable-bluez5,bluez5" + +.. _migration-1.8-kernel-build-changes: + +Kernel Build Changes +-------------------- + +The kernel build process was changed to place the source in a common +shared work area and to place build artifacts separately in the source +code tree. In theory, migration paths have been provided for most common +usages in kernel recipes but this might not work in all cases. In +particular, users need to ensure that ``${S}`` (source files) and +``${B}`` (build artifacts) are used correctly in functions such as +:ref:`ref-tasks-configure` and +:ref:`ref-tasks-install`. For kernel recipes that do not +inherit from ``kernel-yocto`` or include ``linux-yocto.inc``, you might +wish to refer to the ``linux.inc`` file in the ``meta-oe`` layer for the +kinds of changes you need to make. For reference, here is the +`commit `__ +where the ``linux.inc`` file in ``meta-oe`` was updated. + +Recipes that rely on the kernel source code and do not inherit the +module classes might need to add explicit dependencies on the +``do_shared_workdir`` kernel task, for example: :: + + do_configure[depends] += "virtual/kernel:do_shared_workdir" + +.. _migration-1.8-ssl: + +SSL 3.0 is Now Disabled in OpenSSL +---------------------------------- + +SSL 3.0 is now disabled when building OpenSSL. Disabling SSL 3.0 avoids +any lingering instances of the POODLE vulnerability. If you feel you +must re-enable SSL 3.0, then you can add an append file (``*.bbappend``) +for the ``openssl`` recipe to remove "-no-ssl3" from +:term:`EXTRA_OECONF`. + +.. _migration-1.8-default-sysroot-poisoning: + +Default Sysroot Poisoning +------------------------- + +``gcc's`` default sysroot and include directories are now "poisoned". 
In +other words, the sysroot and include directories are being redirected to +a non-existent location in order to catch when host directories are +being used due to the correct options not being passed. This poisoning +applies both to the cross-compiler used within the build and to the +cross-compiler produced in the SDK. + +If this change causes something in the build to fail, it almost +certainly means the various compiler flags and commands are not being +passed correctly to the underlying piece of software. In such cases, you +need to take corrective steps. + +.. _migration-1.8-rebuild-improvements: + +Rebuild Improvements +-------------------- + +Changes have been made to the :ref:`base `, +:ref:`autotools `, and +:ref:`cmake ` classes to clean out generated files +when the :ref:`ref-tasks-configure` task needs to be +re-executed. + +One of the improvements is to attempt to run "make clean" during the +``do_configure`` task if a ``Makefile`` exists. Some software packages +do not provide a working clean target within their make files. If you +have such recipes, you need to set +:term:`CLEANBROKEN` to "1" within the recipe, for example: :: + + CLEANBROKEN = "1" + +.. _migration-1.8-qa-check-and-validation-changes: + +QA Check and Validation Changes +------------------------------- + +The following QA Check and Validation Changes have occurred: + +- Usage of ``PRINC`` previously triggered a warning. It now triggers an + error. You should remove any remaining usage of ``PRINC`` in any + recipe or append file. + +- An additional QA check has been added to detect usage of ``${D}`` in + :term:`FILES` values where :term:`D` values + should not be used at all. The same check ensures that ``$D`` is used + in ``pkg_preinst/pkg_postinst/pkg_prerm/pkg_postrm`` functions + instead of ``${D}``. + +- :term:`S` now needs to be set to a valid value within a + recipe. If ``S`` is not set in the recipe, the directory is not + automatically created. If ``S`` does not point to a directory that + exists at the time the :ref:`ref-tasks-unpack` task + finishes, a warning will be shown. + +- :term:`LICENSE` is now validated for correct + formatting of multiple licenses. If the format is invalid (e.g. + multiple licenses are specified with no operators to specify how the + multiple licenses interact), then a warning will be shown. + +.. _migration-1.8-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes have occurred: + +- The ``send-error-report`` script now expects a "-s" option to be + specified before the server address. This assumes a server address is + being specified. + +- The ``oe-pkgdata-util`` script now expects a "-p" option to be + specified before the ``pkgdata`` directory, which is now optional. If + the ``pkgdata`` directory is not specified, the script will run + BitBake to query :term:`PKGDATA_DIR` from the + build environment. + + diff --git a/poky/documentation/ref-manual/migration-2.0.rst b/poky/documentation/ref-manual/migration-2.0.rst new file mode 100644 index 000000000..570486ba0 --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.0.rst @@ -0,0 +1,281 @@ +Moving to the Yocto Project 2.0 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.0 Release from the prior release. + +.. _migration-2.0-gcc-5: + +GCC 5 +----- + +The default compiler is now GCC 5.2. This change has required fixes for +compilation errors in a number of other recipes. 
+ +One important example is a fix for when the Linux kernel freezes at boot +time on ARM when built with GCC 5. If you are using your own kernel +recipe or source tree and building for ARM, you will likely need to +apply this +`patch `__. +The standard ``linux-yocto`` kernel source tree already has a workaround +for the same issue. + +For further details, see https://gcc.gnu.org/gcc-5/changes.html +and the porting guide at +https://gcc.gnu.org/gcc-5/porting_to.html. + +Alternatively, you can switch back to GCC 4.9 or 4.8 by setting +``GCCVERSION`` in your configuration, as follows: +:: + + GCCVERSION = "4.9%" + +.. _migration-2.0-Gstreamer-0.10-removed: + +Gstreamer 0.10 Removed +---------------------- + +Gstreamer 0.10 has been removed in favor of Gstreamer 1.x. As part of +the change, recipes for Gstreamer 0.10 and related software are now +located in ``meta-multimedia``. This change results in Qt4 having Phonon +and Gstreamer support in QtWebkit disabled by default. + +.. _migration-2.0-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been moved or removed: + +- ``bluez4``: The recipe is obsolete and has been moved due to + ``bluez5`` becoming fully integrated. The ``bluez4`` recipe now + resides in ``meta-oe``. + +- ``gamin``: The recipe is obsolete and has been removed. + +- ``gnome-icon-theme``: The recipe's functionally has been replaced by + ``adwaita-icon-theme``. + +- Gstreamer 0.10 Recipes: Recipes for Gstreamer 0.10 have been removed + in favor of the recipes for Gstreamer 1.x. + +- ``insserv``: The recipe is obsolete and has been removed. + +- ``libunique``: The recipe is no longer used and has been moved to + ``meta-oe``. + +- ``midori``: The recipe's functionally has been replaced by + ``epiphany``. + +- ``python-gst``: The recipe is obsolete and has been removed since it + only contains bindings for Gstreamer 0.10. + +- ``qt-mobility``: The recipe is obsolete and has been removed since it + requires ``Gstreamer 0.10``, which has been replaced. + +- ``subversion``: All 1.6.x versions of this recipe have been removed. + +- ``webkit-gtk``: The older 1.8.3 version of this recipe has been + removed in favor of ``webkitgtk``. + +.. _migration-2.0-bitbake-datastore-improvements: + +BitBake datastore improvements +------------------------------ + +The method by which BitBake's datastore handles overrides has changed. +Overrides are now applied dynamically and ``bb.data.update_data()`` is +now a no-op. Thus, ``bb.data.update_data()`` is no longer required in +order to apply the correct overrides. In practice, this change is +unlikely to require any changes to Metadata. However, these minor +changes in behavior exist: + +- All potential overrides are now visible in the variable history as + seen when you run the following: + :: + + $ bitbake -e + +- ``d.delVar('``\ VARNAME\ ``')`` and + ``d.setVar('``\ VARNAME\ ``', None)`` result in the variable and all + of its overrides being cleared out. Before the change, only the + non-overridden values were cleared. + +.. _migration-2.0-shell-message-function-changes: + +Shell Message Function Changes +------------------------------ + +The shell versions of the BitBake message functions (i.e. ``bbdebug``, +``bbnote``, ``bbwarn``, ``bbplain``, ``bberror``, and ``bbfatal``) are +now connected through to their BitBake equivalents ``bb.debug()``, +``bb.note()``, ``bb.warn()``, ``bb.plain()``, ``bb.error()``, and +``bb.fatal()``, respectively. 
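For instance, a message emitted from a hypothetical recipe function such as the following is now routed through ``bb.warn()``: ::

   do_install_append() {
       bbwarn "no configuration file provided, installing defaults"
   }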
Thus, those message functions that you +would expect to be printed by the BitBake UI are now actually printed. +In practice, this change means two things: + +- If you now see messages on the console that you did not previously + see as a result of this change, you might need to clean up the calls + to ``bbwarn``, ``bberror``, and so forth. Or, you might want to + simply remove the calls. + +- The ``bbfatal`` message function now suppresses the full error log in + the UI, which means any calls to ``bbfatal`` where you still wish to + see the full error log should be replaced by ``die`` or + ``bbfatal_log``. + +.. _migration-2.0-extra-development-debug-package-cleanup: + +Extra Development/Debug Package Cleanup +--------------------------------------- + +The following recipes have had extra ``dev/dbg`` packages removed: + +- ``acl`` + +- ``apmd`` + +- ``aspell`` + +- ``attr`` + +- ``augeas`` + +- ``bzip2`` + +- ``cogl`` + +- ``curl`` + +- ``elfutils`` + +- ``gcc-target`` + +- ``libgcc`` + +- ``libtool`` + +- ``libxmu`` + +- ``opkg`` + +- ``pciutils`` + +- ``rpm`` + +- ``sysfsutils`` + +- ``tiff`` + +- ``xz`` + +All of the above recipes now conform to the standard packaging scheme +where a single ``-dev``, ``-dbg``, and ``-staticdev`` package exists per +recipe. + +.. _migration-2.0-recipe-maintenance-tracking-data-moved-to-oe-core: + +Recipe Maintenance Tracking Data Moved to OE-Core +------------------------------------------------- + +Maintenance tracking data for recipes that was previously part of +``meta-yocto`` has been moved to :term:`OpenEmbedded-Core (OE-Core)`. The change +includes ``package_regex.inc`` and ``distro_alias.inc``, which are +typically enabled when using the ``distrodata`` class. Additionally, the +contents of ``upstream_tracking.inc`` has now been split out to the +relevant recipes. + +.. _migration-2.0-automatic-stale-sysroot-file-cleanup: + +Automatic Stale Sysroot File Cleanup +------------------------------------ + +Stale files from recipes that no longer exist in the current +configuration are now automatically removed from sysroot as well as +removed from any other place managed by shared state. This automatic +cleanup means that the build system now properly handles situations such +as renaming the build system side of recipes, removal of layers from +``bblayers.conf``, and :term:`DISTRO_FEATURES` +changes. + +Additionally, work directories for old versions of recipes are now +pruned. If you wish to disable pruning old work directories, you can set +the following variable in your configuration: +:: + + SSTATE_PRUNE_OBSOLETEWORKDIR = "0" + +.. _migration-2.0-linux-yocto-kernel-metadata-repository-now-split-from-source: + +``linux-yocto`` Kernel Metadata Repository Now Split from Source +---------------------------------------------------------------- + +The ``linux-yocto`` tree has up to now been a combined set of kernel +changes and configuration (meta) data carried in a single tree. While +this format is effective at keeping kernel configuration and source +modifications synchronized, it is not always obvious to developers how +to manipulate the Metadata as compared to the source. + +Metadata processing has now been removed from the +:ref:`kernel-yocto ` class and the external +Metadata repository ``yocto-kernel-cache``, which has always been used +to seed the ``linux-yocto`` "meta" branch. This separate ``linux-yocto`` +cache repository is now the primary location for this data. 
Due to this +change, ``linux-yocto`` is no longer able to process combined trees. +Thus, if you need to have your own combined kernel repository, you must +do the split there as well and update your recipes accordingly. See the +``meta/recipes-kernel/linux/linux-yocto_4.1.bb`` recipe for an example. + +.. _migration-2.0-additional-qa-checks: + +Additional QA checks +-------------------- + +The following QA checks have been added: + +- Added a "host-user-contaminated" check for ownership issues for + packaged files outside of ``/home``. The check looks for files that + are incorrectly owned by the user that ran BitBake instead of owned + by a valid user in the target system. + +- Added an "invalid-chars" check for invalid (non-UTF8) characters in + recipe metadata variable values (i.e. + :term:`DESCRIPTION`, + :term:`SUMMARY`, :term:`LICENSE`, and + :term:`SECTION`). Some package managers do not support + these characters. + +- Added an "invalid-packageconfig" check for any options specified in + :term:`PACKAGECONFIG` that do not match any + ``PACKAGECONFIG`` option defined for the recipe. + +.. _migration-2.0-miscellaneous: + +Miscellaneous Changes +--------------------- + +These additional changes exist: + +- ``gtk-update-icon-cache`` has been renamed to ``gtk-icon-utils``. + +- The ``tools-profile`` :term:`IMAGE_FEATURES` + item as well as its corresponding packagegroup and + ``packagegroup-core-tools-profile`` no longer bring in ``oprofile``. + Bringing in ``oprofile`` was originally added to aid compilation on + resource-constrained targets. However, this aid has not been widely + used and is not likely to be used going forward due to the more + powerful target platforms and the existence of better + cross-compilation tools. + +- The :term:`IMAGE_FSTYPES` variable's default + value now specifies ``ext4`` instead of ``ext3``. + +- All support for the ``PRINC`` variable has been removed. + +- The ``packagegroup-core-full-cmdline`` packagegroup no longer brings + in ``lighttpd`` due to the fact that bringing in ``lighttpd`` is not + really in line with the packagegroup's purpose, which is to add full + versions of command-line tools that by default are provided by + ``busybox``. + + diff --git a/poky/documentation/ref-manual/migration-2.1.rst b/poky/documentation/ref-manual/migration-2.1.rst new file mode 100644 index 000000000..a1fd3ea81 --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.1.rst @@ -0,0 +1,434 @@ +Moving to the Yocto Project 2.1 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.1 Release from the prior release. + +.. _migration-2.1-variable-expansion-in-python-functions: + +Variable Expansion in Python Functions +-------------------------------------- + +Variable expressions, such as ``${``\ VARNAME\ ``}`` no longer expand +automatically within Python functions. Suppressing expansion was done to +allow Python functions to construct shell scripts or other code for +situations in which you do not want such expressions expanded. For any +existing code that relies on these expansions, you need to change the +expansions to expand the value of individual variables through +``d.getVar()``. To alternatively expand more complex expressions, use +``d.expand()``. + +.. _migration-2.1-overrides-must-now-be-lower-case: + +Overrides Must Now be Lower-Case +-------------------------------- + +The convention for overrides has always been for them to be lower-case +characters. 
This practice is now a requirement as BitBake's datastore +now assumes lower-case characters in order to give a slight performance +boost during parsing. In practical terms, this requirement means that +anything that ends up in :term:`OVERRIDES` must now +appear in lower-case characters (e.g. values for ``MACHINE``, +``TARGET_ARCH``, ``DISTRO``, and also recipe names if +``_pn-``\ recipename overrides are to be effective). + +.. _migration-2.1-expand-parameter-to-getvar-and-getvarflag-now-mandatory: + +Expand Parameter to ``getVar()`` and ``getVarFlag()`` is Now Mandatory +---------------------------------------------------------------------- + +The expand parameter to ``getVar()`` and ``getVarFlag()`` previously +defaulted to False if not specified. Now, however, no default exists so +one must be specified. You must change any ``getVar()`` calls that do +not specify the final expand parameter to calls that do specify the +parameter. You can run the following ``sed`` command at the base of a +layer to make this change: +:: + + sed -e 's:\(\.getVar([^,()]*\)):\1, False):g' -i `grep -ril getVar *` + sed -e 's:\(\.getVarFlag([^,()]*,[^,()]*\)):\1, False):g' -i `grep -ril getVarFlag *` + +.. note:: + + The reason for this change is that it prepares the way for changing + the default to True in a future Yocto Project release. This future + change is a much more sensible default than False. However, the + change needs to be made gradually as a sudden change of the default + would potentially cause side-effects that would be difficult to + detect. + +.. _migration-2.1-makefile-environment-changes: + +Makefile Environment Changes +---------------------------- + +:term:`EXTRA_OEMAKE` now defaults to "" instead of +"-e MAKEFLAGS=". Setting ``EXTRA_OEMAKE`` to "-e MAKEFLAGS=" by default +was a historical accident that has required many classes (e.g. +``autotools``, ``module``) and recipes to override this default in order +to work with sensible build systems. When upgrading to the release, you +must edit any recipe that relies upon this old default by either setting +``EXTRA_OEMAKE`` back to "-e MAKEFLAGS=" or by explicitly setting any +required variable value overrides using ``EXTRA_OEMAKE``, which is +typically only needed when a Makefile sets a default value for a +variable that is inappropriate for cross-compilation using the "=" +operator rather than the "?=" operator. + +.. _migration-2.1-libexecdir-reverted-to-prefix-libexec: + +``libexecdir`` Reverted to ``${prefix}/libexec`` +------------------------------------------------ + +The use of ``${libdir}/${BPN}`` as ``libexecdir`` is different as +compared to all other mainstream distributions, which either uses +``${prefix}/libexec`` or ``${libdir}``. The use is also contrary to the +GNU Coding Standards (i.e. +https://www.gnu.org/prep/standards/html_node/Directory-Variables.html) +that suggest ``${prefix}/libexec`` and also notes that any +package-specific nesting should be done by the package itself. Finally, +having ``libexecdir`` change between recipes makes it very difficult for +different recipes to invoke binaries that have been installed into +``libexecdir``. The Filesystem Hierarchy Standard (i.e. +http://refspecs.linuxfoundation.org/FHS_3.0/fhs/ch04s07.html) now +recognizes the use of ``${prefix}/libexec/``, giving distributions the +choice between ``${prefix}/lib`` or ``${prefix}/libexec`` without +breaking FHS. + +.. 
_migration-2.1-ac-cv-sizeof-off-t-no-longer-cached-in-site-files: + +``ac_cv_sizeof_off_t`` is No Longer Cached in Site Files +-------------------------------------------------------- + +For recipes inheriting the :ref:`autotools ` +class, ``ac_cv_sizeof_off_t`` is no longer cached in the site files for +``autoconf``. The reason for this change is because the +``ac_cv_sizeof_off_t`` value is not necessarily static per architecture +as was previously assumed. Rather, the value changes based on whether +large file support is enabled. For most software that uses ``autoconf``, +this change should not be a problem. However, if you have a recipe that +bypasses the standard :ref:`ref-tasks-configure` task +from the ``autotools`` class and the software the recipe is building +uses a very old version of ``autoconf``, the recipe might be incapable +of determining the correct size of ``off_t`` during ``do_configure``. + +The best course of action is to patch the software as necessary to allow +the default implementation from the ``autotools`` class to work such +that ``autoreconf`` succeeds and produces a working configure script, +and to remove the overridden ``do_configure`` task such that the default +implementation does get used. + +.. _migration-2.1-image-generation-split-out-from-filesystem-generation: + +Image Generation is Now Split Out from Filesystem Generation +------------------------------------------------------------ + +Previously, for image recipes the :ref:`ref-tasks-rootfs` +task assembled the filesystem and then from that filesystem generated +images. With this Yocto Project release, image generation is split into +separate ```do_image_*`` <#ref-tasks-image>`__ tasks for clarity both in +operation and in the code. + +For most cases, this change does not present any problems. However, if +you have made customizations that directly modify the ``do_rootfs`` task +or that mention ``do_rootfs``, you might need to update those changes. +In particular, if you had added any tasks after ``do_rootfs``, you +should make edits so that those tasks are after the +```do_image_complete`` <#ref-tasks-image-complete>`__ task rather than +after ``do_rootfs`` so that the your added tasks run at the correct +time. + +A minor part of this restructuring is that the post-processing +definitions and functions have been moved from the +:ref:`image ` class to the +:ref:`rootfs-postcommands ` class. Functionally, +however, they remain unchanged. + +.. _migration-2.1-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed in the 2.1 release: + +- ``gcc`` version 4.8: Versions 4.9 and 5.3 remain. + +- ``qt4``: All support for Qt 4.x has been moved out to a separate + ``meta-qt4`` layer because Qt 4 is no longer supported upstream. + +- ``x11vnc``: Moved to the ``meta-oe`` layer. + +- ``linux-yocto-3.14``: No longer supported. + +- ``linux-yocto-3.19``: No longer supported. + +- ``libjpeg``: Replaced by the ``libjpeg-turbo`` recipe. + +- ``pth``: Became obsolete. + +- ``liboil``: Recipe is no longer needed and has been moved to the + ``meta-multimedia`` layer. + +- ``gtk-theme-torturer``: Recipe is no longer needed and has been moved + to the ``meta-gnome`` layer. + +- ``gnome-mime-data``: Recipe is no longer needed and has been moved to + the ``meta-gnome`` layer. + +- ``udev``: Replaced by the ``eudev`` recipe for compatibility when + using ``sysvinit`` with newer kernels. + +- ``python-pygtk``: Recipe became obsolete. + +- ``adt-installer``: Recipe became obsolete. 
See the "`ADT + Removed <#migration-2.1-adt-removed>`__" section for more + information. + +.. _migration-2.1-class-changes: + +Class Changes +------------- + +The following classes have changed: + +- ``autotools_stage``: Removed because the + :ref:`autotools ` class now provides its + functionality. Recipes that inherited from ``autotools_stage`` should + now inherit from ``autotools`` instead. + +- ``boot-directdisk``: Merged into the ``image-vm`` class. The + ``boot-directdisk`` class was rarely directly used. Consequently, + this change should not cause any issues. + +- ``bootimg``: Merged into the + :ref:`image-live ` class. The ``bootimg`` + class was rarely directly used. Consequently, this change should not + cause any issues. + +- ``packageinfo``: Removed due to its limited use by the Hob UI, which + has itself been removed. + +.. _migration-2.1-build-system-ui-changes: + +Build System User Interface Changes +----------------------------------- + +The following changes have been made to the build system user interface: + +- *Hob GTK+-based UI*: Removed because it is unmaintained and based on + the outdated GTK+ 2 library. The Toaster web-based UI is much more + capable and is actively maintained. See the + ":ref:`toaster-manual/toaster-manual-setup-and-use:using the toaster web interface`" + section in the Toaster User Manual for more information on this + interface. + +- *"puccho" BitBake UI*: Removed because is unmaintained and no longer + useful. + +.. _migration-2.1-adt-removed: + +ADT Removed +----------- + +The Application Development Toolkit (ADT) has been removed because its +functionality almost completely overlapped with the :ref:`standard +SDK ` and the +:ref:`extensible SDK `. For +information on these SDKs and how to build and use them, see the +:doc:`../sdk-manual/sdk-manual` manual. + +.. note:: + + The Yocto Project Eclipse IDE Plug-in is still supported and is not + affected by this change. + +.. _migration-2.1-poky-reference-distribution-changes: + +Poky Reference Distribution Changes +----------------------------------- + +The following changes have been made for the Poky distribution: + +- The ``meta-yocto`` layer has been renamed to ``meta-poky`` to better + match its purpose, which is to provide the Poky reference + distribution. The ``meta-yocto-bsp`` layer retains its original name + since it provides reference machines for the Yocto Project and it is + otherwise unrelated to Poky. References to ``meta-yocto`` in your + ``conf/bblayers.conf`` should automatically be updated, so you should + not need to change anything unless you are relying on this naming + elsewhere. + +- The :ref:`uninative ` class is now enabled + by default in Poky. This class attempts to isolate the build system + from the host distribution's C library and makes re-use of native + shared state artifacts across different host distributions practical. + With this class enabled, a tarball containing a pre-built C library + is downloaded at the start of the build. + + The ``uninative`` class is enabled through the + ``meta/conf/distro/include/yocto-uninative.inc`` file, which for + those not using the Poky distribution, can include to easily enable + the same functionality. + + Alternatively, if you wish to build your own ``uninative`` tarball, + you can do so by building the ``uninative-tarball`` recipe, making it + available to your build machines (e.g. over HTTP/HTTPS) and setting a + similar configuration as the one set by ``yocto-uninative.inc``. 
+ +- Static library generation, for most cases, is now disabled by default + in the Poky distribution. Disabling this generation saves some build + time as well as the size used for build output artifacts. + + Disabling this library generation is accomplished through a + ``meta/conf/distro/include/no-static-libs.inc``, which for those not + using the Poky distribution can easily include to enable the same + functionality. + + Any recipe that needs to opt-out of having the "--disable-static" + option specified on the configure command line either because it is + not a supported option for the configure script or because static + libraries are needed should set the following variable: + DISABLE_STATIC = "" + +- The separate ``poky-tiny`` distribution now uses the musl C library + instead of a heavily pared down ``glibc``. Using musl results in a + smaller distribution and facilitates much greater maintainability + because musl is designed to have a small footprint. + + If you have used ``poky-tiny`` and have customized the ``glibc`` + configuration you will need to redo those customizations with musl + when upgrading to the new release. + +.. _migration-2.1-packaging-changes: + +Packaging Changes +----------------- + +The following changes have been made to packaging: + +- The ``runuser`` and ``mountpoint`` binaries, which were previously in + the main ``util-linux`` package, have been split out into the + ``util-linux-runuser`` and ``util-linux-mountpoint`` packages, + respectively. + +- The ``python-elementtree`` package has been merged into the + ``python-xml`` package. + +.. _migration-2.1-tuning-file-changes: + +Tuning File Changes +------------------- + +The following changes have been made to the tuning files: + +- The "no-thumb-interwork" tuning feature has been dropped from the ARM + tune include files. Because interworking is required for ARM EABI, + attempting to disable it through a tuning feature no longer makes + sense. + + .. note:: + + Support for ARM OABI was deprecated in gcc 4.7. + +- The ``tune-cortexm*.inc`` and ``tune-cortexr4.inc`` files have been + removed because they are poorly tested. Until the OpenEmbedded build + system officially gains support for CPUs without an MMU, these tuning + files would probably be better maintained in a separate layer if + needed. + +.. _migration-2.1-supporting-gobject-introspection: + +Supporting GObject Introspection +-------------------------------- + +This release supports generation of GLib Introspective Repository (GIR) +files through GObject introspection, which is the standard mechanism for +accessing GObject-based software from runtime environments. You can +enable, disable, and test the generation of this data. See the +":ref:`dev-manual/dev-manual-common-tasks:enabling gobject introspection support`" +section in the Yocto Project Development Tasks Manual for more +information. + +.. _migration-2.1-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +These additional changes exist: + +- The minimum Git version has been increased to 1.8.3.1. If your host + distribution does not provide a sufficiently recent version, you can + install the buildtools, which will provide it. See the "`Required + Git, tar, Python and gcc + Versions <#required-git-tar-python-and-gcc-versions>`__" section for + more information on the buildtools tarball. + +- The buggy and incomplete support for the RPM version 4 package + manager has been removed. The well-tested and maintained support for + RPM version 5 remains. 
+ +- Previously, the following list of packages were removed if + package-management was not in + :term:`IMAGE_FEATURES`, regardless of any + dependencies: + :: + + update-rc.d + base-passwd + shadow + update-alternatives + + run-postinsts With the Yocto Project 2.1 release, these packages are + only removed if "read-only-rootfs" is in ``IMAGE_FEATURES``, since + they might still be needed for a read-write image even in the absence + of a package manager (e.g. if users need to be added, modified, or + removed at runtime). + +- The + :ref:`devtool modify ` + command now defaults to extracting the source since that is most + commonly expected. The "-x" or "--extract" options are now no-ops. If + you wish to provide your own existing source tree, you will now need + to specify either the "-n" or "--no-extract" options when running + ``devtool modify``. + +- If the formfactor for a machine is either not supplied or does not + specify whether a keyboard is attached, then the default is to assume + a keyboard is attached rather than assume no keyboard. This change + primarily affects the Sato UI. + +- The ``.debug`` directory packaging is now automatic. If your recipe + builds software that installs binaries into directories other than + the standard ones, you no longer need to take care of setting + ``FILES_${PN}-dbg`` to pick up the resulting ``.debug`` directories + as these directories are automatically found and added. + +- Inaccurate disk and CPU percentage data has been dropped from + ``buildstats`` output. This data has been replaced with + ``getrusage()`` data and corrected IO statistics. You will probably + need to update any custom code that reads the ``buildstats`` data. + +- The ``meta/conf/distro/include/package_regex.inc`` is now deprecated. + The contents of this file have been moved to individual recipes. + + .. note:: + + Because this file will likely be removed in a future Yocto Project + release, it is suggested that you remove any references to the + file that might be in your configuration. + +- The ``v86d/uvesafb`` has been removed from the ``genericx86`` and + ``genericx86-64`` reference machines, which are provided by the + ``meta-yocto-bsp`` layer. Most modern x86 boards do not rely on this + file and it only adds kernel error messages during startup. If you do + still need to support ``uvesafb``, you can simply add ``v86d`` to + your image. + +- Build sysroot paths are now removed from debug symbol files. Removing + these paths means that remote GDB using an unstripped build system + sysroot will no longer work (although this was never documented to + work). The supported method to accomplish something similar is to set + ``IMAGE_GEN_DEBUGFS`` to "1", which will generate a companion debug + image containing unstripped binaries and associated debug sources + alongside the image. + + diff --git a/poky/documentation/ref-manual/migration-2.2.rst b/poky/documentation/ref-manual/migration-2.2.rst new file mode 100644 index 000000000..59d0eeeb9 --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.2.rst @@ -0,0 +1,451 @@ +Moving to the Yocto Project 2.2 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.2 Release from the prior release. + +.. _migration-2.2-minimum-kernel-version: + +Minimum Kernel Version +---------------------- + +The minimum kernel version for the target system and for SDK is now +3.2.0, due to the upgrade to ``glibc 2.24``. 
Specifically, for +AArch64-based targets the version is 3.14. For Nios II-based targets, +the minimum kernel version is 3.19. + +.. note:: + + For x86 and x86_64, you can reset + OLDEST_KERNEL + to anything down to 2.6.32 if desired. + +.. _migration-2.2-staging-directories-in-sysroot-simplified: + +Staging Directories in Sysroot Has Been Simplified +-------------------------------------------------- + +The way directories are staged in sysroot has been simplified and +introduces the new :term:`SYSROOT_DIRS`, +:term:`SYSROOT_DIRS_NATIVE`, and +:term:`SYSROOT_DIRS_BLACKLIST`. See the +`v2 patch series on the OE-Core Mailing +List `__ +for additional information. + +.. _migration-2.2-removal-of-old-images-from-tmp-deploy-now-enabled: + +Removal of Old Images and Other Files in ``tmp/deploy`` Now Enabled +------------------------------------------------------------------- + +Removal of old images and other files in ``tmp/deploy/`` is now enabled +by default due to a new staging method used for those files. As a result +of this change, the ``RM_OLD_IMAGE`` variable is now redundant. + +.. _migration-2.2-python-changes: + +Python Changes +-------------- + +The following changes for Python occurred: + +.. _migration-2.2-bitbake-now-requires-python-3.4: + +BitBake Now Requires Python 3.4+ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BitBake requires Python 3.4 or greater. + +.. _migration-2.2-utf-8-locale-required-on-build-host: + +UTF-8 Locale Required on Build Host +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +A UTF-8 locale is required on the build host due to Python 3. Since +C.UTF-8 is not a standard, the default is en_US.UTF-8. + +.. _migration-2.2-metadata-now-must-use-python-3-syntax: + +Metadata Must Now Use Python 3 Syntax +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The metadata is now required to use Python 3 syntax. For help preparing +metadata, see any of the many Python 3 porting guides available. +Alternatively, you can reference the conversion commits for Bitbake and +you can use :term:`OpenEmbedded-Core (OE-Core)` as a guide for changes. Following are +particular areas of interest: + + - subprocess command-line pipes needing locale decoding + + - the syntax for octal values changed + + - the ``iter*()`` functions changed name \* iterators now return views, not lists + + - changed names for Python modules + +.. _migration-2.2-target-python-recipes-switched-to-python-3: + +Target Python Recipes Switched to Python 3 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Most target Python recipes have now been switched to Python 3. +Unfortunately, systems using RPM as a package manager and providing +online package-manager support through SMART still require Python 2. + +.. note:: + + Python 2 and recipes that use it can still be built for the target as + with previous versions. + +.. _migration-2.2-buildtools-tarball-includes-python-3: + +``buildtools-tarball`` Includes Python 3 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +``buildtools-tarball`` now includes Python 3. + +.. _migration-2.2-uclibc-replaced-by-musl: + +uClibc Replaced by musl +----------------------- + +uClibc has been removed in favor of musl. Musl has matured, is better +maintained, and is compatible with a wider range of applications as +compared to uClibc. + +.. _migration-2.2-B-no-longer-default-working-directory-for-tasks: + +``${B}`` No Longer Default Working Directory for Tasks +------------------------------------------------------ + +``${``\ :term:`B`\ ``}`` is no longer the default working +directory for tasks. 
Consequently, any custom tasks you define now need +to either have the +``[``\ :ref:`dirs `\ ``]`` flag +set, or the task needs to change into the appropriate working directory +manually (e.g using ``cd`` for a shell task). + +.. note:: + + The preferred method is to use the + [dirs] + flag. + +.. _migration-2.2-runqemu-ported-to-python: + +``runqemu`` Ported to Python +---------------------------- + +``runqemu`` has been ported to Python and has changed behavior in some +cases. Previous usage patterns continue to be supported. + +The new ``runqemu`` is a Python script. Machine knowledge is no longer +hardcoded into ``runqemu``. You can choose to use the ``qemuboot`` +configuration file to define the BSP's own arguments and to make it +bootable with ``runqemu``. If you use a configuration file, use the +following form: +:: + + image-name-machine.qemuboot.conf + +The configuration file +enables fine-grained tuning of options passed to QEMU without the +``runqemu`` script hard-coding any knowledge about different machines. +Using a configuration file is particularly convenient when trying to use +QEMU with machines other than the ``qemu*`` machines in +:term:`OpenEmbedded-Core (OE-Core)`. The ``qemuboot.conf`` file is generated by the +``qemuboot`` class when the root filesystem is being build (i.e. build +rootfs). QEMU boot arguments can be set in BSP's configuration file and +the ``qemuboot`` class will save them to ``qemuboot.conf``. + +If you want to use ``runqemu`` without a configuration file, use the +following command form: +:: + + $ runqemu machine rootfs kernel [options] + +Supported machines are as follows: + + - qemuarm + - qemuarm64 + - qemux86 + - qemux86-64 + - qemuppc + - qemumips + - qemumips64 + - qemumipsel + - qemumips64el + +Consider the +following example, which uses the ``qemux86-64`` machine, provides a +root filesystem, provides an image, and uses the ``nographic`` option: :: + + $ runqemu qemux86-64 tmp/deploy/images/qemux86-64/core-image-minimal-qemux86-64.ext4 tmp/deploy/images/qemux86-64/bzImage nographic + +Following is a list of variables that can be set in configuration files +such as ``bsp.conf`` to enable the BSP to be booted by ``runqemu``: + +.. note:: + + "QB" means "QEMU Boot". + +:: + + QB_SYSTEM_NAME: QEMU name (e.g. "qemu-system-i386") + QB_OPT_APPEND: Options to append to QEMU (e.g. "-show-cursor") + QB_DEFAULT_KERNEL: Default kernel to boot (e.g. "bzImage") + QB_DEFAULT_FSTYPE: Default FSTYPE to boot (e.g. "ext4") + QB_MEM: Memory (e.g. "-m 512") + QB_MACHINE: QEMU machine (e.g. "-machine virt") + QB_CPU: QEMU cpu (e.g. "-cpu qemu32") + QB_CPU_KVM: Similar to QB_CPU except used for kvm support (e.g. "-cpu kvm64") + QB_KERNEL_CMDLINE_APPEND: Options to append to the kernel's -append + option (e.g. "console=ttyS0 console=tty") + QB_DTB: QEMU dtb name + QB_AUDIO_DRV: QEMU audio driver (e.g. "alsa", set it when support audio) + QB_AUDIO_OPT: QEMU audio option (e.g. "-soundhw ac97,es1370"), which is used + when QB_AUDIO_DRV is set. + QB_KERNEL_ROOT: Kernel's root (e.g. /dev/vda) + QB_TAP_OPT: Network option for 'tap' mode (e.g. + "-netdev tap,id=net0,ifname=@TAP@,script=no,downscript=no -device virtio-net-device,netdev=net0"). + runqemu will replace "@TAP@" with the one that is used, such as tap0, tap1 ... + QB_SLIRP_OPT: Network option for SLIRP mode (e.g. "-netdev user,id=net0 -device virtio-net-device,netdev=net0") + QB_ROOTFS_OPT: Used as rootfs (e.g. + "-drive id=disk0,file=@ROOTFS@,if=none,format=raw -device virtio-blk-device,drive=disk0"). 
+ runqemu will replace "@ROOTFS@" with the one which is used, such as + core-image-minimal-qemuarm64.ext4. + QB_SERIAL_OPT: Serial port (e.g. "-serial mon:stdio") + QB_TCPSERIAL_OPT: tcp serial port option (e.g. + " -device virtio-serial-device -chardev socket,id=virtcon,port=@PORT@,host=127.0.0.1 -device virtconsole,chardev=virtcon" + runqemu will replace "@PORT@" with the port number which is used. + +To use ``runqemu``, set :term:`IMAGE_CLASSES` as +follows and run ``runqemu``: + +.. note:: + + For command-line syntax, use + runqemu help + . + +:: + + IMAGE_CLASSES += "qemuboot" + +.. _migration-2.2-default-linker-hash-style-changed: + +Default Linker Hash Style Changed +--------------------------------- + +The default linker hash style for ``gcc-cross`` is now "sysv" in order +to catch recipes that are building software without using the +OpenEmbedded :term:`LDFLAGS`. This change could result in +seeing some "No GNU_HASH in the elf binary" QA issues when building such +recipes. You need to fix these recipes so that they use the expected +``LDFLAGS``. Depending on how the software is built, the build system +used by the software (e.g. a Makefile) might need to be patched. +However, sometimes making this fix is as simple as adding the following +to the recipe: +:: + + TARGET_CC_ARCH += "${LDFLAGS}" + +.. _migration-2.2-kernel-image-base-name-no-longer-uses-kernel-imagetype: + +``KERNEL_IMAGE_BASE_NAME`` no Longer Uses ``KERNEL_IMAGETYPE`` +-------------------------------------------------------------- + +The ``KERNEL_IMAGE_BASE_NAME`` variable no longer uses the +:term:`KERNEL_IMAGETYPE` variable to create the +image's base name. Because the OpenEmbedded build system can now build +multiple kernel image types, this part of the kernel image base name as +been removed leaving only the following: +:: + + KERNEL_IMAGE_BASE_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}-${DATETIME}" + +If you have recipes or +classes that use ``KERNEL_IMAGE_BASE_NAME`` directly, you might need to +update the references to ensure they continue to work. + +.. _migration-2.2-bitbake-changes: + +BitBake Changes +--------------- + +The following changes took place for BitBake: + +- The "goggle" UI and standalone image-writer tool have been removed as + they both require GTK+ 2.0 and were not being maintained. + +- The Perforce fetcher now supports :term:`SRCREV` for + specifying the source revision to use, be it + ``${``\ :term:`AUTOREV`\ ``}``, changelist number, + p4date, or label, in preference to separate + :term:`SRC_URI` parameters to specify these. This + change is more in-line with how the other fetchers work for source + control systems. Recipes that fetch from Perforce will need to be + updated to use ``SRCREV`` in place of specifying the source revision + within ``SRC_URI``. + +- Some of BitBake's internal code structures for accessing the recipe + cache needed to be changed to support the new multi-configuration + functionality. These changes will affect external tools that use + BitBake's tinfoil module. For information on these changes, see the + changes made to the scripts supplied with OpenEmbedded-Core: + `1 `__ + and + `2 `__. + +- The task management code has been rewritten to avoid using ID + indirection in order to improve performance. This change is unlikely + to cause any problems for most users. However, the setscene + verification function as pointed to by + ``BB_SETSCENE_VERIFY_FUNCTION`` needed to change signature. 
+ Consequently, a new variable named ``BB_SETSCENE_VERIFY_FUNCTION2`` + has been added allowing multiple versions of BitBake to work with + suitably written metadata, which includes OpenEmbedded-Core and Poky. + Anyone with custom BitBake task scheduler code might also need to + update the code to handle the new structure. + +.. _migration-2.2-swabber-has-been-removed: + +Swabber has Been Removed +------------------------ + +Swabber, a tool that was intended to detect host contamination in the +build process, has been removed, as it has been unmaintained and unused +for some time and was never particularly effective. The OpenEmbedded +build system has since incorporated a number of mechanisms including +enhanced QA checks that mean that there is less of a need for such a +tool. + +.. _migration-2.2-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``augeas``: No longer needed and has been moved to ``meta-oe``. + +- ``directfb``: Unmaintained and has been moved to ``meta-oe``. + +- ``gcc``: Removed 4.9 version. Versions 5.4 and 6.2 are still present. + +- ``gnome-doc-utils``: No longer needed. + +- ``gtk-doc-stub``: Replaced by ``gtk-doc``. + +- ``gtk-engines``: No longer needed and has been moved to + ``meta-gnome``. + +- ``gtk-sato-engine``: Became obsolete. + +- ``libglade``: No longer needed and has been moved to ``meta-oe``. + +- ``libmad``: Unmaintained and functionally replaced by ``libmpg123``. + ``libmad`` has been moved to ``meta-oe``. + +- ``libowl``: Became obsolete. + +- ``libxsettings-client``: No longer needed. + +- ``oh-puzzles``: Functionally replaced by ``puzzles``. + +- ``oprofileui``: Became obsolete. OProfile has been largely supplanted + by perf. + +- ``packagegroup-core-directfb.bb``: Removed. + +- ``core-image-directfb.bb``: Removed. + +- ``pointercal``: No longer needed and has been moved to ``meta-oe``. + +- ``python-imaging``: No longer needed and moved to ``meta-python`` + +- ``python-pyrex``: No longer needed and moved to ``meta-python``. + +- ``sato-icon-theme``: Became obsolete. + +- ``swabber-native``: Swabber has been removed. See the `entry on + Swabber <#migration-2.2-swabber-has-been-removed>`__. + +- ``tslib``: No longer needed and has been moved to ``meta-oe``. + +- ``uclibc``: Removed in favor of musl. + +- ``xtscal``: No longer needed and moved to ``meta-oe`` + +.. _migration-2.2-removed-classes: + +Removed Classes +--------------- + +The following classes have been removed: + +- ``distutils-native-base``: No longer needed. + +- ``distutils3-native-base``: No longer needed. + +- ``sdl``: Only set :term:`DEPENDS` and + :term:`SECTION`, which are better set within the + recipe instead. + +- ``sip``: Mostly unused. + +- ``swabber``: See the `entry on + Swabber <#migration-2.2-swabber-has-been-removed>`__. + +.. _migration-2.2-minor-packaging-changes: + +Minor Packaging Changes +----------------------- + +The following minor packaging changes have occurred: + +- ``grub``: Split ``grub-editenv`` into its own package. + +- ``systemd``: Split container and vm related units into a new package, + systemd-container. + +- ``util-linux``: Moved ``prlimit`` to a separate + ``util-linux-prlimit`` package. + +.. _migration-2.2-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes have occurred: + +- ``package_regex.inc``: Removed because the definitions + ``package_regex.inc`` previously contained have been moved to their + respective recipes. 
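+
+   If you carried your own entries in ``package_regex.inc``, a hedged
+   sketch of the per-recipe equivalent is shown below; the URI and the
+   regular expression are illustrative placeholders only:
+   ::
+
+      UPSTREAM_CHECK_URI = "https://example.com/releases/"
+      UPSTREAM_CHECK_REGEX = "example-(?P<pver>\d+(\.\d+)+)\.tar\.gz"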
+ +- Both ``devtool add`` and ``recipetool create`` now use a fixed + :term:`SRCREV` by default when fetching from a Git + repository. You can override this in either case to use + ``${``\ :term:`AUTOREV`\ ``}`` instead by using the + ``-a`` or ``DASHDASHautorev`` command-line option + +- ``distcc``: GTK+ UI is now disabled by default. + +- ``packagegroup-core-tools-testapps``: Removed Piglit. + +- ``image.bbclass``: Renamed COMPRESS(ION) to CONVERSION. This change + means that ``COMPRESSIONTYPES``, ``COMPRESS_DEPENDS`` and + ``COMPRESS_CMD`` are deprecated in favor of ``CONVERSIONTYPES``, + ``CONVERSION_DEPENDS`` and ``CONVERSION_CMD``. The ``COMPRESS*`` + variable names will still work in the 2.2 release but metadata that + does not need to be backwards-compatible should be changed to use the + new names as the ``COMPRESS*`` ones will be removed in a future + release. + +- ``gtk-doc``: A full version of ``gtk-doc`` is now made available. + However, some old software might not be capable of using the current + version of ``gtk-doc`` to build documentation. You need to change + recipes that build such software so that they explicitly disable + building documentation with ``gtk-doc``. + + diff --git a/poky/documentation/ref-manual/migration-2.3.rst b/poky/documentation/ref-manual/migration-2.3.rst new file mode 100644 index 000000000..7f34f0cd7 --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.3.rst @@ -0,0 +1,530 @@ +Moving to the Yocto Project 2.3 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.3 Release from the prior release. + +.. _migration-2.3-recipe-specific-sysroots: + +Recipe-specific Sysroots +------------------------ + +The OpenEmbedded build system now uses one sysroot per recipe to resolve +long-standing issues with configuration script auto-detection of +undeclared dependencies. Consequently, you might find that some of your +previously written custom recipes are missing declared dependencies, +particularly those dependencies that are incidentally built earlier in a +typical build process and thus are already likely to be present in the +shared sysroot in previous releases. + +Consider the following: + +- *Declare Build-Time Dependencies:* Because of this new feature, you + must explicitly declare all build-time dependencies for your recipe. + If you do not declare these dependencies, they are not populated into + the sysroot for the recipe. + +- *Specify Pre-Installation and Post-Installation Native Tool + Dependencies:* You must specifically specify any special native tool + dependencies of ``pkg_preinst`` and ``pkg_postinst`` scripts by using + the :term:`PACKAGE_WRITE_DEPS` variable. + Specifying these dependencies ensures that these tools are available + if these scripts need to be run on the build host during the + :ref:`ref-tasks-rootfs` task. + + As an example, see the ``dbus`` recipe. You will see that this recipe + has a ``pkg_postinst`` that calls ``systemctl`` if "systemd" is in + :term:`DISTRO_FEATURES`. In the example, + ``systemd-systemctl-native`` is added to ``PACKAGE_WRITE_DEPS``, + which is also conditional on "systemd" being in ``DISTRO_FEATURES``. + +- Examine Recipes that Use ``SSTATEPOSTINSTFUNCS``: You need to + examine any recipe that uses ``SSTATEPOSTINSTFUNCS`` and determine + steps to take. + + Functions added to ``SSTATEPOSTINSTFUNCS`` are still called as they + were in previous Yocto Project releases. 
However, since a separate + sysroot is now being populated for every recipe and if existing + functions being called through ``SSTATEPOSTINSTFUNCS`` are doing + relocation, then you will need to change these to use a + post-installation script that is installed by a function added to + :term:`SYSROOT_PREPROCESS_FUNCS`. + + For an example, see the ``pixbufcache`` class in ``meta/classes/`` in + the :ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`. + + .. note:: + + The + SSTATEPOSTINSTFUNCS + variable itself is now deprecated in favor of the + do_populate_sysroot[postfuncs] + task. Consequently, if you do still have any function or functions + that need to be called after the sysroot component is created for + a recipe, then you would be well advised to take steps to use a + post installation script as described previously. Taking these + steps prepares your code for when + SSTATEPOSTINSTFUNCS + is removed in a future Yocto Project release. + +- *Specify the Sysroot when Using Certain External Scripts:* Because + the shared sysroot is now gone, the scripts + ``oe-find-native-sysroot`` and ``oe-run-native`` have been changed + such that you need to specify which recipe's + :term:`STAGING_DIR_NATIVE` is used. + +.. note:: + + You can find more information on how recipe-specific sysroots work in + the " + staging.bbclass + " section. + +.. _migration-2.3-path-variable: + +``PATH`` Variable +----------------- + +Within the environment used to run build tasks, the environment variable +``PATH`` is now sanitized such that the normal native binary paths +(``/bin``, ``/sbin``, ``/usr/bin`` and so forth) are removed and a +directory containing symbolic links linking only to the binaries from +the host mentioned in the :term:`HOSTTOOLS` and +:term:`HOSTTOOLS_NONFATAL` variables is added +to ``PATH``. + +Consequently, any native binaries provided by the host that you need to +call needs to be in one of these two variables at the configuration +level. + +Alternatively, you can add a native recipe (i.e. ``-native``) that +provides the binary to the recipe's :term:`DEPENDS` +value. + +.. note:: + + PATH + is not sanitized in the same way within + devshell + . If it were, you would have difficulty running host tools for + development and debugging within the shell. + +.. _migration-2.3-scripts: + +Changes to Scripts +------------------ + +The following changes to scripts took place: + +- ``oe-find-native-sysroot``: The usage for the + ``oe-find-native-sysroot`` script has changed to the following: + :: + + $ . oe-find-native-sysroot recipe + + You must now supply a recipe for recipe + as part of the command. Prior to the Yocto Project &DISTRO; release, it + was not necessary to provide the script with the command. + +- ``oe-run-native``: The usage for the ``oe-run-native`` script has + changed to the following: + :: + + $ oe-run-native native_recipe tool + + You must + supply the name of the native recipe and the tool you want to run as + part of the command. Prior to the Yocto Project DISTRO release, it + was not necessary to provide the native recipe with the command. + +- ``cleanup-workdir``: The ``cleanup-workdir`` script has been + removed because the script was found to be deleting files it should + not have, which lead to broken build trees. Rather than trying to + delete portions of :term:`TMPDIR` and getting it wrong, + it is recommended that you delete ``TMPDIR`` and have it restored + from shared state (sstate) on subsequent builds. 
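+
+   For example, assuming the default ``tmp/`` directory name inside your
+   build directory and ``core-image-minimal`` standing in for your own
+   image target, the clean-and-rebuild cycle is:
+   ::
+
+      $ rm -rf tmp
+      $ bitbake core-image-minimal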
+ +- ``wipe-sysroot``: The ``wipe-sysroot`` script has been removed as + it is no longer needed with recipe-specific sysroots. + +.. _migration-2.3-functions: + +Changes to Functions +-------------------- + +The previously deprecated ``bb.data.getVar()``, ``bb.data.setVar()``, +and related functions have been removed in favor of ``d.getVar()``, +``d.setVar()``, and so forth. + +You need to fix any references to these old functions. + +.. _migration-2.3-bitbake-changes: + +BitBake Changes +--------------- + +The following changes took place for BitBake: + +- *BitBake's Graphical Dependency Explorer UI Replaced:* BitBake's + graphical dependency explorer UI ``depexp`` was replaced by + ``taskexp`` ("Task Explorer"), which provides a graphical way of + exploring the ``task-depends.dot`` file. The data presented by Task + Explorer is much more accurate than the data that was presented by + ``depexp``. Being able to visualize the data is an often requested + feature as standard ``*.dot`` file viewers cannot usual cope with the + size of the ``task-depends.dot`` file. + +- *BitBake "-g" Output Changes:* The ``package-depends.dot`` and + ``pn-depends.dot`` files as previously generated using the + ``bitbake -g`` command have been removed. A ``recipe-depends.dot`` + file is now generated as a collapsed version of ``task-depends.dot`` + instead. + + The reason for this change is because ``package-depends.dot`` and + ``pn-depends.dot`` largely date back to a time before task-based + execution and do not take into account task-level dependencies + between recipes, which could be misleading. + +- *Mirror Variable Splitting Changes:* Mirror variables including + :term:`MIRRORS`, :term:`PREMIRRORS`, + and :term:`SSTATE_MIRRORS` can now separate + values entirely with spaces. Consequently, you no longer need "\\n". + BitBake looks for pairs of values, which simplifies usage. There + should be no change required to existing mirror variable values + themselves. + +- *The Subversion (SVN) Fetcher Uses an "ssh" Parameter and Not an + "rsh" Parameter:* The SVN fetcher now takes an "ssh" parameter + instead of an "rsh" parameter. This new optional parameter is used + when the "protocol" parameter is set to "svn+ssh". You can only use + the new parameter to specify the ``ssh`` program used by SVN. The SVN + fetcher passes the new parameter through the ``SVN_SSH`` environment + variable during the :ref:`ref-tasks-fetch` task. + + See the ":ref:`bitbake:svn-fetcher`" + section in the BitBake + User Manual for additional information. + +- ``BB_SETSCENE_VERIFY_FUNCTION`` and ``BB_SETSCENE_VERIFY_FUNCTION2`` + Removed: Because the mechanism they were part of is no longer + necessary with recipe-specific sysroots, the + ``BB_SETSCENE_VERIFY_FUNCTION`` and ``BB_SETSCENE_VERIFY_FUNCTION2`` + variables have been removed. + +.. _migration-2.3-absolute-symlinks: + +Absolute Symbolic Links +----------------------- + +Absolute symbolic links (symlinks) within staged files are no longer +permitted and now trigger an error. Any explicit creation of symlinks +can use the ``lnr`` script, which is a replacement for ``ln -r``. + +If the build scripts in the software that the recipe is building are +creating a number of absolute symlinks that need to be corrected, you +can inherit ``relative_symlinks`` within the recipe to turn those +absolute symlinks into relative symlinks. + +.. 
_migration-2.3-gplv2-and-gplv3-moves: + +GPLv2 Versions of GPLv3 Recipes Moved +------------------------------------- + +Older GPLv2 versions of GPLv3 recipes have moved to a separate +``meta-gplv2`` layer. + +If you use :term:`INCOMPATIBLE_LICENSE` to +exclude GPLv3 or set :term:`PREFERRED_VERSION` +to substitute a GPLv2 version of a GPLv3 recipe, then you must add the +``meta-gplv2`` layer to your configuration. + +.. note:: + + You can find + meta-gplv2 + layer in the OpenEmbedded layer index at + . + +These relocated GPLv2 recipes do not receive the same level of +maintenance as other core recipes. The recipes do not get security fixes +and upstream no longer maintains them. In fact, the upstream community +is actively hostile towards people that use the old versions of the +recipes. Moving these recipes into a separate layer both makes the +different needs of the recipes clearer and clearly identifies the number +of these recipes. + +.. note:: + + The long-term solution might be to move to BSD-licensed replacements + of the GPLv3 components for those that need to exclude GPLv3-licensed + components from the target system. This solution will be investigated + for future Yocto Project releases. + +.. _migration-2.3-package-management-changes: + +Package Management Changes +-------------------------- + +The following package management changes took place: + +- Smart package manager is replaced by DNF package manager. Smart has + become unmaintained upstream, is not ported to Python 3.x. + Consequently, Smart needed to be replaced. DNF is the only feasible + candidate. + + The change in functionality is that the on-target runtime package + management from remote package feeds is now done with a different + tool that has a different set of command-line options. If you have + scripts that call the tool directly, or use its API, they need to be + fixed. + + For more information, see the `DNF + Documentation `__. + +- Rpm 5.x is replaced with Rpm 4.x. This is done for two major reasons: + + - DNF is API-incompatible with Rpm 5.x and porting it and + maintaining the port is non-trivial. + + - Rpm 5.x itself has limited maintenance upstream, and the Yocto + Project is one of the very few remaining users. + +- Berkeley DB 6.x is removed and Berkeley DB 5.x becomes the default: + + - Version 6.x of Berkeley DB has largely been rejected by the open + source community due to its AGPLv3 license. As a result, most + mainstream open source projects that require DB are still + developed and tested with DB 5.x. + + - In OE-core, the only thing that was requiring DB 6.x was Rpm 5.x. + Thus, no reason exists to continue carrying DB 6.x in OE-core. + +- ``createrepo`` is replaced with ``createrepo_c``. + + ``createrepo_c`` is the current incarnation of the tool that + generates remote repository metadata. It is written in C as compared + to ``createrepo``, which is written in Python. ``createrepo_c`` is + faster and is maintained. + +- Architecture-independent RPM packages are "noarch" instead of "all". + + This change was made because too many places in DNF/RPM4 stack + already make that assumption. Only the filenames and the architecture + tag has changed. Nothing else has changed in OE-core system, + particularly in the :ref:`allarch.bbclass ` + class. + +- Signing of remote package feeds using ``PACKAGE_FEED_SIGN`` is not + currently supported. This issue will be fully addressed in a future + Yocto Project release. 
See `defect + 11209 `__ + for more information on a solution to package feed signing with RPM + in the Yocto Project 2.3 release. + +- OPKG now uses the libsolv backend for resolving package dependencies + by default. This is vastly superior to OPKG's internal ad-hoc solver + that was previously used. This change does have a small impact on + disk (around 500 KB) and memory footprint. + + .. note:: + + For further details on this change, see the + commit message + . + +.. _migration-2.3-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``linux-yocto 4.8``: Version 4.8 has been removed. Versions 4.1 + (LTSI), 4.4 (LTS), 4.9 (LTS/LTSI) and 4.10 are now present. + +- ``python-smartpm``: Functionally replaced by ``dnf``. + +- ``createrepo``: Replaced by the ``createrepo-c`` recipe. + +- ``rpmresolve``: No longer needed with the move to RPM 4 as RPM + itself is used instead. + +- ``gstreamer``: Removed the GStreamer Git version recipes as they + have been stale. ``1.10.``\ x recipes are still present. + +- ``alsa-conf-base``: Merged into ``alsa-conf`` since ``libasound`` + depended on both. Essentially, no way existed to install only one of + these. + +- ``tremor``: Moved to ``meta-multimedia``. Fixed-integer Vorbis + decoding is not needed by current hardware. Thus, GStreamer's ivorbis + plugin has been disabled by default eliminating the need for the + ``tremor`` recipe in :term:`OpenEmbedded-Core (OE-Core)`. + +- ``gummiboot``: Replaced by ``systemd-boot``. + +.. _migration-2.3-wic-changes: + +Wic Changes +----------- + +The following changes have been made to Wic: + +.. note:: + + For more information on Wic, see the " + Creating Partitioned Images Using Wic + " section in the Yocto Project Development Tasks Manual. + +- *Default Output Directory Changed:* Wic's default output directory is + now the current directory by default instead of the unusual + ``/var/tmp/wic``. + + The "-o" and "--outdir" options remain unchanged and are used to + specify your preferred output directory if you do not want to use the + default directory. + +- *fsimage Plug-in Removed:* The Wic fsimage plugin has been removed as + it duplicates functionality of the rawcopy plugin. + +.. _migration-2.3-qa-changes: + +QA Changes +---------- + +The following QA checks have changed: + +- ``unsafe-references-in-binaries``: The + ``unsafe-references-in-binaries`` QA check, which was disabled by + default, has now been removed. This check was intended to detect + binaries in ``/bin`` that link to libraries in ``/usr/lib`` and have + the case where the user has ``/usr`` on a separate filesystem to + ``/``. + + The removed QA check was buggy. Additionally, ``/usr`` residing on a + separate partition from ``/`` is now a rare configuration. + Consequently, ``unsafe-references-in-binaries`` was removed. + +- ``file-rdeps``: The ``file-rdeps`` QA check is now an error by + default instead of a warning. Because it is an error instead of a + warning, you need to address missing runtime dependencies. + + For additional information, see the + :ref:`insane ` class and the "`Errors and + Warnings <#qa-errors-and-warnings>`__" section. + +.. _migration-2.3-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes have occurred: + +- In this release, a number of recipes have been changed to ignore the + ``largefile`` :term:`DISTRO_FEATURES` item, + enabling large file support unconditionally. 
This feature has always + been enabled by default. Disabling the feature has not been widely + tested. + + .. note:: + + Future releases of the Yocto Project will remove entirely the + ability to disable the + largefile + feature, which would make it unconditionally enabled everywhere. + +- If the :term:`DISTRO_VERSION` value contains + the value of the :term:`DATE` variable, which is the + default between Poky releases, the ``DATE`` value is explicitly + excluded from ``/etc/issue`` and ``/etc/issue.net``, which is + displayed at the login prompt, in order to avoid conflicts with + Multilib enabled. Regardless, the ``DATE`` value is inaccurate if the + ``base-files`` recipe is restored from shared state (sstate) rather + than rebuilt. + + If you need the build date recorded in ``/etc/issue*`` or anywhere + else in your image, a better method is to define a post-processing + function to do it and have the function called from + :term:`ROOTFS_POSTPROCESS_COMMAND`. + Doing so ensures the value is always up-to-date with the created + image. + +- Dropbear's ``init`` script now disables DSA host keys by default. + This change is in line with the systemd service file, which supports + RSA keys only, and with recent versions of OpenSSH, which deprecates + DSA host keys. + +- The :ref:`buildhistory ` class now + correctly uses tabs as separators between all columns in + ``installed-package-sizes.txt`` in order to aid import into other + tools. + +- The ``USE_LDCONFIG`` variable has been replaced with the "ldconfig" + ``DISTRO_FEATURES`` feature. Distributions that previously set: + :: + + USE_LDCONFIG = "0" + + should now instead use the following: + + :: + + DISTRO_FEATURES_BACKFILL_CONSIDERED_append = " ldconfig" + +- The default value of + :term:`COPYLEFT_LICENSE_INCLUDE` now + includes all versions of AGPL licenses in addition to GPL and LGPL. + + .. note:: + + The default list is not intended to be guaranteed as a complete + safe list. You should seek legal advice based on what you are + distributing if you are unsure. + +- Kernel module packages are now suffixed with the kernel version in + order to allow module packages from multiple kernel versions to + co-exist on a target system. If you wish to return to the previous + naming scheme that does not include the version suffix, use the + following: + :: + + KERNEL_MODULE_PACKAGE_SUFFIX to "" + +- Removal of ``libtool`` ``*.la`` files is now enabled by default. The + ``*.la`` files are not actually needed on Linux and relocating them + is an unnecessary burden. + + If you need to preserve these ``.la`` files (e.g. in a custom + distribution), you must change + :term:`INHERIT_DISTRO` such that + "remove-libtool" is not included in the value. + +- Extensible SDKs built for GCC 5+ now refuse to install on a + distribution where the host GCC version is 4.8 or 4.9. This change + resulted from the fact that the installation is known to fail due to + the way the ``uninative`` shared state (sstate) package is built. See + the :ref:`uninative ` class for additional + information. + +- All native and nativesdk recipes now use a separate + ``DISTRO_FEATURES`` value instead of sharing the value used by + recipes for the target, in order to avoid unnecessary rebuilds. + + The ``DISTRO_FEATURES`` for ``native`` recipes is + :term:`DISTRO_FEATURES_NATIVE` added to + an intersection of ``DISTRO_FEATURES`` and + :term:`DISTRO_FEATURES_FILTER_NATIVE`. 
+ + For nativesdk recipes, the corresponding variables are + :term:`DISTRO_FEATURES_NATIVESDK` + and + :term:`DISTRO_FEATURES_FILTER_NATIVESDK`. + +- The ``FILESDIR`` variable, which was previously deprecated and rarely + used, has now been removed. You should change any recipes that set + ``FILESDIR`` to set :term:`FILESPATH` instead. + +- The ``MULTIMACH_HOST_SYS`` variable has been removed as it is no + longer needed with recipe-specific sysroots. + + diff --git a/poky/documentation/ref-manual/migration-2.4.rst b/poky/documentation/ref-manual/migration-2.4.rst new file mode 100644 index 000000000..260b3204b --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.4.rst @@ -0,0 +1,327 @@ +Moving to the Yocto Project 2.4 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.4 Release from the prior release. + +.. _migration-2.4-memory-resident-mode: + +Memory Resident Mode +-------------------- + +A persistent mode is now available in BitBake's default operation, +replacing its previous "memory resident mode" (i.e. +``oe-init-build-env-memres``). Now you only need to set +:term:`BB_SERVER_TIMEOUT` to a timeout (in +seconds) and BitBake's server stays resident for that amount of time +between invocations. The ``oe-init-build-env-memres`` script has been +removed since a separate environment setup script is no longer needed. + +.. _migration-2.4-packaging-changes: + +Packaging Changes +----------------- + +This section provides information about packaging changes that have +occurred: + +- ``python3`` Changes: + + - The main "python3" package now brings in all of the standard + Python 3 distribution rather than a subset. This behavior matches + what is expected based on traditional Linux distributions. If you + wish to install a subset of Python 3, specify ``python-core`` plus + one or more of the individual packages that are still produced. + + - ``python3``: The ``bz2.py``, ``lzma.py``, and + ``_compression.py`` scripts have been moved from the + ``python3-misc`` package to the ``python3-compression`` package. + +- ``binutils``: The ``libbfd`` library is now packaged in a separate + "libbfd" package. This packaging saves space when certain tools (e.g. + ``perf``) are installed. In such cases, the tools only need + ``libbfd`` rather than all the packages in ``binutils``. + +- ``util-linux`` Changes: + + - The ``su`` program is now packaged in a separate "util-linux-su" + package, which is only built when "pam" is listed in the + :term:`DISTRO_FEATURES` variable. + ``util-linux`` should not be installed unless it is needed because + ``su`` is normally provided through the shadow file format. The + main ``util-linux`` package has runtime dependencies (i.e. + :term:`RDEPENDS`) on the ``util-linux-su`` package + when "pam" is in ``DISTRO_FEATURES``. + + - The ``switch_root`` program is now packaged in a separate + "util-linux-switch-root" package for small initramfs images that + do not need the whole ``util-linux`` package or the busybox + binary, which are both much larger than ``switch_root``. The main + ``util-linux`` package has a recommended runtime dependency (i.e. + :term:`RRECOMMENDS`) on the + ``util-linux-switch-root`` package. + + - The ``ionice`` program is now packaged in a separate + "util-linux-ionice" package. The main ``util-linux`` package has a + recommended runtime dependency (i.e. ``RRECOMMENDS``) on the + ``util-linux-ionice`` package. 
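+
+      If an image relies on ``ionice`` without following package
+      recommendations, a minimal sketch (``util-linux-ionice`` is used as
+      the example here; adjust it to whichever split-out package you
+      need) is to install the package explicitly, for instance from
+      ``local.conf``:
+      ::
+
+         IMAGE_INSTALL_append = " util-linux-ionice"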
+ +- ``initscripts``: The ``sushell`` program is now packaged in a + separate "initscripts-sushell" package. This packaging change allows + systems to pull ``sushell`` in when ``selinux`` is enabled. The + change also eliminates needing to pull in the entire ``initscripts`` + package. The main ``initscripts`` package has a runtime dependency + (i.e. ``RDEPENDS``) on the ``sushell`` package when "selinux" is in + ``DISTRO_FEATURES``. + +- ``glib-2.0``: The ``glib-2.0`` package now has a recommended + runtime dependency (i.e. ``RRECOMMENDS``) on the ``shared-mime-info`` + package, since large portions of GIO are not useful without the MIME + database. You can remove the dependency by using the + :term:`BAD_RECOMMENDATIONS` variable if + ``shared-mime-info`` is too large and is not required. + +- *Go Standard Runtime:* The Go standard runtime has been split out + from the main ``go`` recipe into a separate ``go-runtime`` recipe. + +.. _migration-2.4-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``acpitests``: This recipe is not maintained. + +- ``autogen-native``: No longer required by Grub, oe-core, or + meta-oe. + +- ``bdwgc``: Nothing in OpenEmbedded-Core requires this recipe. It + has moved to meta-oe. + +- ``byacc``: This recipe was only needed by rpm 5.x and has moved to + meta-oe. + +- ``gcc (5.4)``: The 5.4 series dropped the recipe in favor of 6.3 / + 7.2. + +- ``gnome-common``: Deprecated upstream and no longer needed. + +- ``go-bootstrap-native``: Go 1.9 does its own bootstrapping so this + recipe has been removed. + +- ``guile``: This recipe was only needed by ``autogen-native`` and + ``remake``. The recipe is no longer needed by either of these + programs. + +- ``libclass-isa-perl``: This recipe was previously needed for LSB 4, + no longer needed. + +- ``libdumpvalue-perl``: This recipe was previously needed for LSB 4, + no longer needed. + +- ``libenv-perl``: This recipe was previously needed for LSB 4, no + longer needed. + +- ``libfile-checktree-perl``: This recipe was previously needed for + LSB 4, no longer needed. + +- ``libi18n-collate-perl``: This recipe was previously needed for LSB + 4, no longer needed. + +- ``libiconv``: This recipe was only needed for ``uclibc``, which was + removed in the previous release. ``glibc`` and ``musl`` have their + own implementations. ``meta-mingw`` still needs ``libiconv``, so it + has been moved to ``meta-mingw``. + +- ``libpng12``: This recipe was previously needed for LSB. The + current ``libpng`` is 1.6.x. + +- ``libpod-plainer-perl``: This recipe was previously needed for LSB + 4, no longer needed. + +- ``linux-yocto (4.1)``: This recipe was removed in favor of 4.4, + 4.9, 4.10 and 4.12. + +- ``mailx``: This recipe was previously only needed for LSB + compatibility, and upstream is defunct. + +- ``mesa (git version only)``: The git version recipe was stale with + respect to the release version. + +- ``ofono (git version only)``: The git version recipe was stale with + respect to the release version. + +- ``portmap``: This recipe is obsolete and is superseded by + ``rpcbind``. + +- ``python3-pygpgme``: This recipe is old and unmaintained. It was + previously required by ``dnf``, which has switched to official + ``gpgme`` Python bindings. + +- ``python-async``: This recipe has been removed in favor of the + Python 3 version. + +- ``python-gitdb``: This recipe has been removed in favor of the + Python 3 version. 
+ +- ``python-git``: This recipe was removed in favor of the Python 3 + version. + +- ``python-mako``: This recipe was removed in favor of the Python 3 + version. + +- ``python-pexpect``: This recipe was removed in favor of the Python + 3 version. + +- ``python-ptyprocess``: This recipe was removed in favor of Python + the 3 version. + +- ``python-pycurl``: Nothing is using this recipe in + OpenEmbedded-Core (i.e. ``meta-oe``). + +- ``python-six``: This recipe was removed in favor of the Python 3 + version. + +- ``python-smmap``: This recipe was removed in favor of the Python 3 + version. + +- ``remake``: Using ``remake`` as the provider of ``virtual/make`` is + broken. Consequently, this recipe is not needed in OpenEmbedded-Core. + +.. _migration-2.4-kernel-device-tree-move: + +Kernel Device Tree Move +----------------------- + +Kernel Device Tree support is now easier to enable in a kernel recipe. +The Device Tree code has moved to a +:ref:`kernel-devicetree ` class. +Functionality is automatically enabled for any recipe that inherits the +:ref:`kernel ` class and sets the +:term:`KERNEL_DEVICETREE` variable. The +previous mechanism for doing this, +``meta/recipes-kernel/linux/linux-dtb.inc``, is still available to avoid +breakage, but triggers a deprecation warning. Future releases of the +Yocto Project will remove ``meta/recipes-kernel/linux/linux-dtb.inc``. +It is advisable to remove any ``require`` statements that request +``meta/recipes-kernel/linux/linux-dtb.inc`` from any custom kernel +recipes you might have. This will avoid breakage in post 2.4 releases. + +.. _migration-2.4-package-qa-changes: + +Package QA Changes +------------------ + +The following package QA changes took place: + +- The "unsafe-references-in-scripts" QA check has been removed. + +- If you refer to ``${COREBASE}/LICENSE`` within + :term:`LIC_FILES_CHKSUM` you receive a + warning because this file is a description of the license for + OE-Core. Use ``${COMMON_LICENSE_DIR}/MIT`` if your recipe is + MIT-licensed and you cannot use the preferred method of referring to + a file within the source tree. + +.. _migration-2.4-readme-changes: + +``README`` File Changes +----------------------- + +The following are changes to ``README`` files: + +- The main Poky ``README`` file has been moved to the ``meta-poky`` + layer and has been renamed ``README.poky``. A symlink has been + created so that references to the old location work. + +- The ``README.hardware`` file has been moved to ``meta-yocto-bsp``. A + symlink has been created so that references to the old location work. + +- A ``README.qemu`` file has been created with coverage of the + ``qemu*`` machines. + +.. _migration-2.4-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following are additional changes: + +- The ``ROOTFS_PKGMANAGE_BOOTSTRAP`` variable and any references to it + have been removed. You should remove this variable from any custom + recipes. + +- The ``meta-yocto`` directory has been removed. + + .. note:: + + In the Yocto Project 2.1 release + meta-yocto + was renamed to + meta-poky + and the + meta-yocto + subdirectory remained to avoid breaking existing configurations. + +- The ``maintainers.inc`` file, which tracks maintainers by listing a + primary person responsible for each recipe in OE-Core, has been moved + from ``meta-poky`` to OE-Core (i.e. from + ``meta-poky/conf/distro/include`` to ``meta/conf/distro/include``). 
+
+- The :ref:`buildhistory ` class now makes
+  a single commit per build rather than one commit per subdirectory in
+  the repository. This behavior assumes the commits are enabled with
+  :term:`BUILDHISTORY_COMMIT` = "1", which
+  is typical. Previously, the ``buildhistory`` class made one commit
+  per subdirectory in the repository in order to make it easier to see
+  the changes for a particular subdirectory. To view a particular
+  change, specify that subdirectory as the last parameter on the
+  ``git show`` or ``git diff`` commands.
+
+- The ``x86-base.inc`` file, which is included by all x86-based machine
+  configurations, now sets :term:`IMAGE_FSTYPES`
+  using ``?=`` to "live" rather than appending with ``+=``. This change
+  makes the default easier to override.
+
+- BitBake fires multiple "BuildStarted" events when multiconfig is
+  enabled (one per configuration). For more information, see the
+  ":ref:`Events `" section in the BitBake User
+  Manual.
+
+- By default, the ``security_flags.inc`` file sets a
+  :term:`GCCPIE` variable with an option to enable
+  Position Independent Executables (PIE) within ``gcc``. Enabling PIE
+  in the GNU C Compiler (GCC) makes Return Oriented Programming (ROP)
+  attacks much more difficult to execute.
+
+- OE-Core now provides a ``bitbake-layers`` plugin that implements a
+  "create-layer" subcommand. The implementation of this subcommand has
+  resulted in the ``yocto-layer`` script being deprecated and will
+  likely be removed in the next Yocto Project release.
+
+- The ``vmdk``, ``vdi``, and ``qcow2`` image file types are now used in
+  conjunction with the "wic" image type through ``CONVERSION_CMD``.
+  Consequently, the equivalent image types are now ``wic.vmdk``,
+  ``wic.vdi``, and ``wic.qcow2``, respectively.
+
+- ``do_image_<type>[depends]`` has replaced ``IMAGE_DEPENDS_<type>``.
+  If you have your own classes that implement custom image types, then
+  you need to update them.
+
+- OpenSSL 1.1 has been introduced. However, the default is still 1.0.x
+  through the :term:`PREFERRED_VERSION`
+  variable. This preference is set due to the remaining
+  compatibility issues with other software. The
+  :term:`PROVIDES` variable in the openssl 1.0 recipe
+  now includes "openssl10" as a marker that can be used in
+  :term:`DEPENDS` within recipes that build software
+  that still depends on OpenSSL 1.0.
+
+- To ensure consistent behavior, BitBake's "-r" and "-R" options (i.e.
+  prefile and postfile), which are used to read or post-read additional
+  configuration files from the command line, now only affect the
+  current BitBake command. Before these BitBake changes, these options
+  would "stick" for future executions.
+
+
diff --git a/poky/documentation/ref-manual/migration-2.5.rst b/poky/documentation/ref-manual/migration-2.5.rst
new file mode 100644
index 000000000..a2adc1775
--- /dev/null
+++ b/poky/documentation/ref-manual/migration-2.5.rst
@@ -0,0 +1,310 @@
+Moving to the Yocto Project 2.5 Release
+=======================================
+
+This section provides migration information for moving to the Yocto
+Project 2.5 Release from the prior release.
+
+.. _migration-2.5-packaging-changes:
+
+Packaging Changes
+-----------------
+
+This section provides information about packaging changes that have
+occurred:
+
+- ``bind-libs``: The libraries packaged by the bind recipe are in a
+  separate ``bind-libs`` package.
+
+- ``libfm-gtk``: The ``libfm`` GTK+ bindings are split into a
+  separate ``libfm-gtk`` package.
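+
+  If your image needs the GTK+ bindings and does not already pull the
+  new package in through dependencies, you can add it explicitly. A
+  minimal sketch, assuming the addition is made from ``local.conf`` or
+  an image recipe::
+
+     IMAGE_INSTALL_append = " libfm-gtk"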
+ +- ``flex-libfl``: The flex recipe splits out libfl into a separate + ``flex-libfl`` package to avoid too many dependencies being pulled in + where only the library is needed. + +- ``grub-efi``: The ``grub-efi`` configuration is split into a + separate ``grub-bootconf`` recipe. However, the dependency + relationship from ``grub-efi`` is through a virtual/grub-bootconf + provider making it possible to have your own recipe provide the + dependency. Alternatively, you can use a BitBake append file to bring + the configuration back into the ``grub-efi`` recipe. + +- *armv7a Legacy Package Feed Support:* Legacy support is removed for + transitioning from ``armv7a`` to ``armv7a-vfp-neon`` in package + feeds, which was previously enabled by setting + ``PKGARCHCOMPAT_ARMV7A``. This transition occurred in 2011 and active + package feeds should by now be updated to the new naming. + +.. _migration-2.5-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- ``gcc``: The version 6.4 recipes are replaced by 7.x. + +- ``gst-player``: Renamed to ``gst-examples`` as per upstream. + +- ``hostap-utils``: This software package is obsolete. + +- ``latencytop``: This recipe is no longer maintained upstream. The + last release was in 2009. + +- ``libpfm4``: The only file that requires this recipe is + ``oprofile``, which has been removed. + +- ``linux-yocto``: The version 4.4, 4.9, and 4.10 recipes have been + removed. Versions 4.12, 4.14, and 4.15 remain. + +- ``man``: This recipe has been replaced by modern ``man-db`` + +- ``mkelfimage``: This tool has been removed in the upstream coreboot + project, and is no longer needed with the removal of the ELF image + type. + +- ``nativesdk-postinst-intercept``: This recipe is not maintained. + +- ``neon``: This software package is no longer maintained upstream + and is no longer needed by anything in OpenEmbedded-Core. + +- ``oprofile``: The functionality of this recipe is replaced by + ``perf`` and keeping compatibility on an ongoing basis with ``musl`` + is difficult. + +- ``pax``: This software package is obsolete. + +- ``stat``: This software package is not maintained upstream. + ``coreutils`` provides a modern stat binary. + +- ``zisofs-tools-native``: This recipe is no longer needed because + the compressed ISO image feature has been removed. + +.. _migration-2.5-scripts-and-tools-changes: + +Scripts and Tools Changes +------------------------- + +The following are changes to scripts and tools: + +- ``yocto-bsp``, ``yocto-kernel``, and ``yocto-layer``: The + ``yocto-bsp``, ``yocto-kernel``, and ``yocto-layer`` scripts + previously shipped with poky but not in OpenEmbedded-Core have been + removed. These scripts are not maintained and are outdated. In many + cases, they are also limited in scope. The + ``bitbake-layers create-layer`` command is a direct replacement for + ``yocto-layer``. See the documentation to create a BSP or kernel + recipe in the ":ref:`bsp-guide/bsp:bsp kernel recipe example`" section. + +- ``devtool finish``: ``devtool finish`` now exits with an error if + there are uncommitted changes or a rebase/am in progress in the + recipe's source repository. If this error occurs, there might be + uncommitted changes that will not be included in updates to the + patches applied by the recipe. A -f/--force option is provided for + situations that the uncommitted changes are inconsequential and you + want to proceed regardless. 
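+
+  For example, a hypothetical invocation that forces ``devtool finish``
+  to complete a recipe named ``foo`` into a layer at ``../meta-mylayer``
+  (both names are placeholders) would look like this::
+
+     devtool finish -f foo ../meta-mylayer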
+ +- ``scripts/oe-setup-rpmrepo`` script: The functionality of + ``scripts/oe-setup-rpmrepo`` is replaced by + ``bitbake package-index``. + +- ``scripts/test-dependencies.sh`` script: The script is largely made + obsolete by the recipe-specific sysroots functionality introduced in + the previous release. + +.. _migration-2.5-bitbake-changes: + +BitBake Changes +--------------- + +The following are BitBake changes: + +- The ``--runall`` option has changed. There are two different + behaviors people might want: + + - *Behavior A:* For a given target (or set of targets) look through + the task graph and run task X only if it is present and will be + built. + + - *Behavior B:* For a given target (or set of targets) look through + the task graph and run task X if any recipe in the taskgraph has + such a target, even if it is not in the original task graph. + + The ``--runall`` option now performs "Behavior B". Previously + ``--runall`` behaved like "Behavior A". A ``--runonly`` option has + been added to retain the ability to perform "Behavior A". + +- Several explicit "run this task for all recipes in the dependency + tree" tasks have been removed (e.g. ``fetchall``, ``checkuriall``, + and the ``*all`` tasks provided by the ``distrodata`` and + ``archiver`` classes). There is a BitBake option to complete this for + any arbitrary task. For example: + :: + + bitbake -c fetchall + + should now be replaced with: + :: + + bitbake --runall=fetch + +.. _migration-2.5-python-and-python3-changes: + +Python and Python 3 Changes +--------------------------- + +The following are auto-packaging changes to Python and Python 3: + +The script-managed ``python-*-manifest.inc`` files that were previously +used to generate Python and Python 3 packages have been replaced with a +JSON-based file that is easier to read and maintain. A new task is +available for maintainers of the Python recipes to update the JSON file +when upgrading to new Python versions. You can now edit the file +directly instead of having to edit a script and run it to update the +file. + +One particular change to note is that the Python recipes no longer have +build-time provides for their packages. This assumes ``python-foo`` is +one of the packages provided by the Python recipe. You can no longer run +``bitbake python-foo`` or have a +:term:`DEPENDS` on ``python-foo``, +but doing either of the following causes the package to work as +expected: :: + + IMAGE_INSTALL_append = " python-foo" + +or :: + + RDEPENDS_${PN} = "python-foo" + +The earlier build-time provides behavior was a quirk of the +way the Python manifest file was created. For more information on this +change please see `this +commit `__. + +.. _migration-2.5-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following are additional changes: + +- The ``kernel`` class supports building packages for multiple kernels. + If your kernel recipe or ``.bbappend`` file mentions packaging at + all, you should replace references to the kernel in package names + with ``${KERNEL_PACKAGE_NAME}``. For example, if you disable + automatic installation of the kernel image using + ``RDEPENDS_kernel-base = ""`` you can avoid warnings using + ``RDEPENDS_${KERNEL_PACKAGE_NAME}-base = ""`` instead. + +- The ``buildhistory`` class commits changes to the repository by + default so you no longer need to set ``BUILDHISTORY_COMMIT = "1"``. + If you want to disable commits you need to set + ``BUILDHISTORY_COMMIT = "0"`` in your configuration. 
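+
+  As a small sketch, a ``local.conf`` fragment that enables build
+  history but opts out of the automatic commits, using only the
+  variable described above, would be::
+
+     INHERIT += "buildhistory"
+     BUILDHISTORY_COMMIT = "0"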
+ +- The ``beaglebone`` reference machine has been renamed to + ``beaglebone-yocto``. The ``beaglebone-yocto`` BSP is a reference + implementation using only mainline components available in + OpenEmbedded-Core and ``meta-yocto-bsp``, whereas Texas Instruments + maintains a full-featured BSP in the ``meta-ti`` layer. This rename + avoids the previous name clash that existed between the two BSPs. + +- The ``update-alternatives`` class no longer works with SysV ``init`` + scripts because this usage has been problematic. Also, the + ``sysklogd`` recipe no longer uses ``update-alternatives`` because it + is incompatible with other implementations. + +- By default, the :ref:`cmake ` class uses + ``ninja`` instead of ``make`` for building. This improves build + performance. If a recipe is broken with ``ninja``, then the recipe + can set ``OECMAKE_GENERATOR = "Unix Makefiles"`` to change back to + ``make``. + +- The previously deprecated ``base_*`` functions have been removed in + favor of their replacements in ``meta/lib/oe`` and + ``bitbake/lib/bb``. These are typically used from recipes and + classes. Any references to the old functions must be updated. The + following table shows the removed functions and their replacements: + + +------------------------------+----------------------------------------------------------+ + | *Removed* | *Replacement* | + +==============================+==========================================================+ + | base_path_join() | oe.path.join() | + +------------------------------+----------------------------------------------------------+ + | base_path_relative() | oe.path.relative() | + +------------------------------+----------------------------------------------------------+ + | base_path_out() | oe.path.format_display() | + +------------------------------+----------------------------------------------------------+ + | base_read_file() | oe.utils.read_file() | + +------------------------------+----------------------------------------------------------+ + | base_ifelse() | oe.utils.ifelse() | + +------------------------------+----------------------------------------------------------+ + | base_conditional() | oe.utils.conditional() | + +------------------------------+----------------------------------------------------------+ + | base_less_or_equal() | oe.utils.less_or_equal() | + +------------------------------+----------------------------------------------------------+ + | base_version_less_or_equal() | oe.utils.version_less_or_equal() | + +------------------------------+----------------------------------------------------------+ + | base_contains() | bb.utils.contains() | + +------------------------------+----------------------------------------------------------+ + | base_both_contain() | oe.utils.both_contain() | + +------------------------------+----------------------------------------------------------+ + | base_prune_suffix() | oe.utils.prune_suffix() | + +------------------------------+----------------------------------------------------------+ + | oe_filter() | oe.utils.str_filter() | + +------------------------------+----------------------------------------------------------+ + | oe_filter_out() | oe.utils.str_filter_out() (or use the \_remove operator) | + +------------------------------+----------------------------------------------------------+ + +- Using ``exit 1`` to explicitly defer a postinstall script until first + boot is now deprecated since it is not an obvious mechanism and can + mask actual errors. 
If you want to explicitly defer a postinstall to
+  first boot on the target rather than at ``rootfs`` creation time, use
+  ``pkg_postinst_ontarget()`` or call
+  ``postinst_intercept delay_to_first_boot`` from ``pkg_postinst()``.
+  Any failure of a ``pkg_postinst()`` script (including ``exit 1``)
+  will trigger a warning during ``do_rootfs``.
+
+  For more information, see the
+  ":ref:`dev-manual/dev-manual-common-tasks:post-installation scripts`"
+  section in the Yocto Project Development Tasks Manual.
+
+- The ``elf`` image type has been removed. This image type was removed
+  because the ``mkelfimage`` tool that was required to create it is no
+  longer provided by coreboot upstream and required updating every time
+  ``binutils`` updated.
+
+- Support for .iso image compression (previously enabled through
+  ``COMPRESSISO = "1"``) has been removed. The userspace tools
+  (``zisofs-tools``) are unmaintained and ``squashfs`` provides better
+  performance and compression. In order to build a live image with
+  squashfs+lz4 compression enabled you should now set
+  ``LIVE_ROOTFS_TYPE = "squashfs-lz4"`` and ensure that ``live`` is in
+  ``IMAGE_FSTYPES``.
+
+- Recipes with an unconditional dependency on ``libpam`` are only
+  buildable with ``pam`` in ``DISTRO_FEATURES``. If the dependency is
+  truly optional then it is recommended that the dependency be
+  conditional upon ``pam`` being in ``DISTRO_FEATURES``.
+
+- For EFI-based machines, the bootloader (``grub-efi`` by default) is
+  installed into the image at /boot. Wic can be used to split the
+  bootloader into separate boot and rootfs partitions if necessary.
+
+- Patches whose context does not match exactly (i.e. where patch
+  reports "fuzz" when applying) will generate a warning. For an example
+  of this see `this
+  commit `__.
+
+- Layers are expected to set ``LAYERSERIES_COMPAT_layername`` to match
+  the version(s) of OpenEmbedded-Core they are compatible with. This is
+  specified as codenames using spaces to separate multiple values (e.g.
+  "rocko sumo"). If a layer does not set
+  ``LAYERSERIES_COMPAT_layername``, a warning will be shown. If a layer
+  sets a value that does not include the current version ("sumo" for
+  the 2.5 release), then an error will be produced.
+
+- The ``TZ`` environment variable is set to "UTC" within the build
+  environment in order to fix reproducibility problems in some recipes.
+
+
diff --git a/poky/documentation/ref-manual/migration-2.6.rst b/poky/documentation/ref-manual/migration-2.6.rst
new file mode 100644
index 000000000..f16aaaa97
--- /dev/null
+++ b/poky/documentation/ref-manual/migration-2.6.rst
@@ -0,0 +1,476 @@
+Moving to the Yocto Project 2.6 Release
+=======================================
+
+This section provides migration information for moving to the Yocto
+Project 2.6 Release from the prior release.
+
+.. _migration-2.6-gcc-changes:
+
+GCC 8.2 is Now Used by Default
+------------------------------
+
+The GNU Compiler Collection version 8.2 is now used by default for
+compilation. For more information on what has changed in the GCC 8.x
+release, see https://gcc.gnu.org/gcc-8/changes.html.
+
+If you still need to compile with version 7.x, GCC 7.3 is also provided.
+You can select this version by setting the
+:term:`GCCVERSION` variable to "7.%" in
+your configuration.
+
+.. _migration-2.6-removed-recipes:
+
+Removed Recipes
+---------------
+
+The following recipes have been removed:
+
+- *beecrypt*: No longer needed since moving to RPM 4.
+- *bigreqsproto*: Replaced by ``xorgproto``. +- *calibrateproto*: Removed in favor of ``xinput``. +- *compositeproto*: Replaced by ``xorgproto``. +- *damageproto*: Replaced by ``xorgproto``. +- *dmxproto*: Replaced by ``xorgproto``. +- *dri2proto*: Replaced by ``xorgproto``. +- *dri3proto*: Replaced by ``xorgproto``. +- *eee-acpi-scripts*: Became obsolete. +- *fixesproto*: Replaced by ``xorgproto``. +- *fontsproto*: Replaced by ``xorgproto``. +- *fstests*: Became obsolete. +- *gccmakedep*: No longer used. +- *glproto*: Replaced by ``xorgproto``. +- *gnome-desktop3*: No longer needed. This recipe has moved to ``meta-oe``. +- *icon-naming-utils*: No longer used since the Sato theme was removed in 2016. +- *inputproto*: Replaced by ``xorgproto``. +- *kbproto*: Replaced by ``xorgproto``. +- *libusb-compat*: Became obsolete. +- *libuser*: Became obsolete. +- *libnfsidmap*: No longer an external requirement since ``nfs-utils`` 2.2.1. ``libnfsidmap`` is now integrated. +- *libxcalibrate*: No longer needed with ``xinput`` +- *mktemp*: Became obsolete. The ``mktemp`` command is provided by both ``busybox`` and ``coreutils``. +- *ossp-uuid*: Is not being maintained and has mostly been replaced by ``uuid.h`` in ``util-linux``. +- *pax-utils*: No longer needed. Previous QA tests that did use this recipe are now done at build time. +- *pcmciautils*: Became obsolete. +- *pixz*: No longer needed. ``xz`` now supports multi-threaded compression. +- *presentproto*: Replaced by ``xorgproto``. +- *randrproto*: Replaced by ``xorgproto``. +- *recordproto*: Replaced by ``xorgproto``. +- *renderproto*: Replaced by ``xorgproto``. +- *resourceproto*: Replaced by ``xorgproto``. +- *scrnsaverproto*: Replaced by ``xorgproto``. +- *trace-cmd*: Became obsolete. ``perf`` replaced this recipe's functionally. +- *videoproto*: Replaced by ``xorgproto``. +- *wireless-tools*: Became obsolete. Superseded by ``iw``. +- *xcmiscproto*: Replaced by ``xorgproto``. +- *xextproto*: Replaced by ``xorgproto``. +- *xf86dgaproto*: Replaced by ``xorgproto``. +- *xf86driproto*: Replaced by ``xorgproto``. +- *xf86miscproto*: Replaced by ``xorgproto``. +- *xf86-video-omapfb*: Became obsolete. Use kernel modesetting driver instead. +- *xf86-video-omap*: Became obsolete. Use kernel modesetting driver instead. +- *xf86vidmodeproto*: Replaced by ``xorgproto``. +- *xineramaproto*: Replaced by ``xorgproto``. +- *xproto*: Replaced by ``xorgproto``. +- *yasm*: No longer needed since previous usages are now satisfied by ``nasm``. + +.. _migration-2.6-packaging-changes: + +Packaging Changes +----------------- + +The following packaging changes have been made: + +- *cmake*: ``cmake.m4`` and ``toolchain`` files have been moved to + the main package. + +- *iptables*: The ``iptables`` modules have been split into + separate packages. + +- *alsa-lib*: ``libasound`` is now in the main ``alsa-lib`` package + instead of ``libasound``. + +- *glibc*: ``libnss-db`` is now in its own package along with a + ``/var/db/makedbs.sh`` script to update databases. + +- *python and python3*: The main package has been removed from + the recipe. You must install specific packages or ``python-modules`` + / ``python3-modules`` for everything. + +- *systemtap*: Moved ``systemtap-exporter`` into its own package. + +.. _migration-2.6-xorg-protocol-dependencies: + +XOrg Protocol dependencies +-------------------------- + +The ``*proto`` upstream repositories have been combined into one +"xorgproto" repository. 
Thus, the corresponding recipes have also been +combined into a single ``xorgproto`` recipe. Any recipes that depend +upon the older ``*proto`` recipes need to be changed to depend on the +newer ``xorgproto`` recipe instead. + +For names of recipes removed because of this repository change, see the +`Removed Recipes <#migration-2.6-removed-recipes>`__ section. + +.. _migration-2.6-distutils-distutils3-fetching-dependencies: + +``distutils`` and ``distutils3`` Now Prevent Fetching Dependencies During the ``do_configure`` Task +--------------------------------------------------------------------------------------------------- + +Previously, it was possible for Python recipes that inherited the +:ref:`distutils ` and +:ref:`distutils3 ` classes to fetch code +during the :ref:`ref-tasks-configure` task to satisfy +dependencies mentioned in ``setup.py`` if those dependencies were not +provided in the sysroot (i.e. recipes providing the dependencies were +missing from :term:`DEPENDS`). + +.. note:: + + This change affects classes beyond just the two mentioned (i.e. + distutils + and + distutils3 + ). Any recipe that inherits + distutils\* + classes are affected. For example, the + setuptools + and + setuptools3 + recipes are affected since they inherit the + distutils\* + classes. + +Fetching these types of dependencies that are not provided in the +sysroot negatively affects the ability to reproduce builds. This type of +fetching is now explicitly disabled. Consequently, any missing +dependencies in Python recipes that use these classes now result in an +error during the ``do_configure`` task. + +.. _migration-2.6-linux-yocto-configuration-audit-issues-now-correctly-reported: + +``linux-yocto`` Configuration Audit Issues Now Correctly Reported +----------------------------------------------------------------- + +Due to a bug, the kernel configuration audit functionality was not +writing out any resulting warnings during the build. This issue is now +corrected. You might notice these warnings now if you have a custom +kernel configuration with a ``linux-yocto`` style kernel recipe. + +.. _migration-2.6-image-kernel-artifact-naming-changes: + +Image/Kernel Artifact Naming Changes +------------------------------------ + +The following changes have been made: + +- Name variables (e.g. :term:`IMAGE_NAME`) use a new + ``IMAGE_VERSION_SUFFIX`` variable instead of + :term:`DATETIME`. Using ``IMAGE_VERSION_SUFFIX`` + allows easier and more direct changes. + + The ``IMAGE_VERSION_SUFFIX`` variable is set in the ``bitbake.conf`` + configuration file as follows: + :: + + IMAGE_VERSION_SUFFIX = "-${DATETIME}" + +- Several variables have changed names for consistency: + :: + + Old Variable Name New Variable Name + ======================================================== + KERNEL_IMAGE_BASE_NAME :term:`KERNEL_IMAGE_NAME` + KERNEL_IMAGE_SYMLINK_NAME :term:`KERNEL_IMAGE_LINK_NAME` + MODULE_TARBALL_BASE_NAME :term:`MODULE_TARBALL_NAME` + MODULE_TARBALL_SYMLINK_NAME :term:`MODULE_TARBALL_LINK_NAME` + INITRAMFS_BASE_NAME :term:`INITRAMFS_NAME` + +- The ``MODULE_IMAGE_BASE_NAME`` variable has been removed. The module + tarball name is now controlled directly with the + :term:`MODULE_TARBALL_NAME` variable. + +- The :term:`KERNEL_DTB_NAME` and + :term:`KERNEL_DTB_LINK_NAME` variables + have been introduced to control kernel Device Tree Binary (DTB) + artifact names instead of mangling ``KERNEL_IMAGE_*`` variables. 
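+
+  Purely as an illustration, a machine or distro configuration that
+  wants the DTB symlinks named after the machine alone could override
+  the new variable (a hypothetical sketch, not the shipped default)::
+
+     KERNEL_DTB_LINK_NAME = "${MACHINE}"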
+ +- The :term:`KERNEL_FIT_NAME` and + :term:`KERNEL_FIT_LINK_NAME` variables + have been introduced to specify the name of flattened image tree + (FIT) kernel images similar to other deployed artifacts. + +- The :term:`MODULE_TARBALL_NAME` and + :term:`MODULE_TARBALL_LINK_NAME` + variable values no longer include the "module-" prefix or ".tgz" + suffix. These parts are now hardcoded so that the values are + consistent with other artifact naming variables. + +- Added the :term:`INITRAMFS_LINK_NAME` + variable so that the symlink can be controlled similarly to other + artifact types. + +- :term:`INITRAMFS_NAME` now uses + "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" instead + of "${PV}-${PR}-${MACHINE}-${DATETIME}", which makes it consistent + with other variables. + +.. _migration-2.6-serial-console-deprecated: + +``SERIAL_CONSOLE`` Deprecated +----------------------------- + +The :term:`SERIAL_CONSOLE` variable has been +functionally replaced by the +:term:`SERIAL_CONSOLES` variable for some time. +With the Yocto Project 2.6 release, ``SERIAL_CONSOLE`` has been +officially deprecated. + +``SERIAL_CONSOLE`` will continue to work as before for the 2.6 release. +However, for the sake of future compatibility, it is recommended that +you replace all instances of ``SERIAL_CONSOLE`` with +``SERIAL_CONSOLES``. + +.. note:: + + The only difference in usage is that + SERIAL_CONSOLES + expects entries to be separated using semicolons as compared to + SERIAL_CONSOLE + , which expects spaces. + +.. _migration-2.6-poky-sets-unknown-configure-option-to-qa-error: + +Configure Script Reports Unknown Options as Errors +-------------------------------------------------- + +If the configure script reports an unknown option, this now triggers a +QA error instead of a warning. Any recipes that previously got away with +specifying such unknown options now need to be fixed. + +.. _migration-2.6-override-changes: + +Override Changes +---------------- + +The following changes have occurred: + +- The ``virtclass-native`` and ``virtclass-nativesdk`` Overrides Have + Been Removed: The ``virtclass-native`` and ``virtclass-nativesdk`` + overrides have been deprecated since 2012 in favor of + ``class-native`` and ``class-nativesdk``, respectively. Both + ``virtclass-native`` and ``virtclass-nativesdk`` are now dropped. + + .. note:: + + The + virtclass-multilib- + overrides for multilib are still valid. + +- The ``forcevariable`` Override Now Has a Higher Priority Than + ``libc`` Overrides: The ``forcevariable`` override is documented to + be the highest priority override. However, due to a long-standing + quirk of how :term:`OVERRIDES` is set, the ``libc`` + overrides (e.g. ``libc-glibc``, ``libc-musl``, and so forth) + erroneously had a higher priority. This issue is now corrected. + + It is likely this change will not cause any problems. However, it is + possible with some unusual configurations that you might see a change + in behavior if you were relying on the previous behavior. Be sure to + check how you use ``forcevariable`` and ``libc-*`` overrides in your + custom layers and configuration files to ensure they make sense. + +- The ``build-${BUILD_OS}`` Override Has Been Removed: The + ``build-${BUILD_OS}``, which is typically ``build-linux``, override + has been removed because building on a host operating system other + than a recent version of Linux is neither supported nor recommended. + Dropping the override avoids giving the impression that other host + operating systems might be supported. 
+
+- The "_remove" operator now preserves whitespace. Consequently, when
+  specifying list items to remove, be aware that leading and trailing
+  whitespace resulting from the removal is retained.
+
+  See the ":ref:`bitbake:removing-override-style-syntax`"
+  section in the BitBake User Manual for a detailed example.
+
+.. _migration-2.6-systemd-configuration-now-split-out-to-system-conf:
+
+``systemd`` Configuration is Now Split Into ``systemd-conf``
+------------------------------------------------------------
+
+The configuration for the ``systemd`` recipe has been moved into a
+``systemd-conf`` recipe. Moving this configuration to a separate recipe
+prevents the ``systemd`` recipe from becoming machine-specific for cases
+where machine-specific configurations need to be applied (e.g. for
+``qemu*`` machines).
+
+Currently, the new recipe packages the following files:
+::
+
+   ${sysconfdir}/machine-id
+   ${sysconfdir}/systemd/coredump.conf
+   ${sysconfdir}/systemd/journald.conf
+   ${sysconfdir}/systemd/logind.conf
+   ${sysconfdir}/systemd/system.conf
+   ${sysconfdir}/systemd/user.conf
+
+If you previously used bbappend files to modify the ``systemd`` recipe to
+change any of the listed files, you must do so for the ``systemd-conf``
+recipe instead.
+
+.. _migration-2.6-automatic-testing-changes:
+
+Automatic Testing Changes
+-------------------------
+
+This section provides information about automatic testing changes:
+
+- ``TEST_IMAGE`` Variable Removed: Prior to this release, you set the
+  ``TEST_IMAGE`` variable to "1" to enable automatic testing for
+  successfully built images. The ``TEST_IMAGE`` variable no longer
+  exists and has been replaced by the
+  :term:`TESTIMAGE_AUTO` variable.
+
+- Inheriting the ``testimage`` and ``testsdk`` Classes: Best
+  practices now dictate that you use the
+  :term:`IMAGE_CLASSES` variable rather than the
+  :term:`INHERIT` variable when you inherit the
+  :ref:`testimage ` and
+  :ref:`testsdk ` classes used for automatic
+  testing.
+
+.. _migration-2.6-openssl-changes:
+
+OpenSSL Changes
+---------------
+
+`OpenSSL `__ has been upgraded from 1.0 to
+1.1. By default, this upgrade could cause problems for recipes that have
+both versions in their dependency chains. The problem is that both
+versions cannot be installed together at build time.
+
+.. note::
+
+   It is possible to have both versions of the library at runtime.
+
+.. _migration-2.6-bitbake-changes:
+
+BitBake Changes
+---------------
+
+The server logfile ``bitbake-cookerdaemon.log`` is now always placed in
+the :term:`Build Directory` instead of the current
+directory.
+
+.. _migration-2.6-security-changes:
+
+Security Changes
+----------------
+
+The Poky distribution now uses security compiler flags by default.
+Inclusion of these flags could cause new failures due to stricter
+checking for various potential security issues in code.
+
+.. _migration-2.6-post-installation-changes:
+
+Post Installation Changes
+-------------------------
+
+You must explicitly mark post installs to defer to the target. If you
+want to explicitly defer a postinstall to first boot on the target
+rather than at rootfs creation time, use ``pkg_postinst_ontarget()`` or
+call ``postinst_intercept delay_to_first_boot`` from ``pkg_postinst()``.
+Any failure of a ``pkg_postinst()`` script (including exit 1) triggers
+an error during the :ref:`ref-tasks-rootfs` task.
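+
+As a minimal sketch, a post-installation script deferred to first boot
+for a hypothetical recipe (the command it runs is a placeholder) could
+be written as::
+
+   pkg_postinst_ontarget_${PN}() {
+       # Runs on the target at first boot instead of during do_rootfs.
+       # "myapp --rebuild-cache" stands in for whatever real work the
+       # script needs to do on the running system.
+       myapp --rebuild-cache
+   }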
+ +For more information on post-installation behavior, see the +":ref:`dev-manual/dev-manual-common-tasks:post-installation scripts`" +section in the Yocto Project Development Tasks Manual. + +.. _migration-2.6-python-3-profile-guided-optimizations: + +Python 3 Profile-Guided Optimization +------------------------------------ + +The ``python3`` recipe now enables profile-guided optimization. Using +this optimization requires a little extra build time in exchange for +improved performance on the target at runtime. Additionally, the +optimization is only enabled if the current +:term:`MACHINE` has support for user-mode emulation in +QEMU (i.e. "qemu-usermode" is in +:term:`MACHINE_FEATURES`, which it is by +default). + +If you wish to disable Python profile-guided optimization regardless of +the value of ``MACHINE_FEATURES``, then ensure that +:term:`PACKAGECONFIG` for the ``python3`` recipe +does not contain "pgo". You could accomplish the latter using the +following at the configuration level: +:: + + PACKAGECONFIG_remove_pn-python3 = "pgo" + +Alternatively, you can set ``PACKAGECONFIG`` using an append file +for the ``python3`` recipe. + +.. _migration-2.6-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes occurred: + +- Default to using the Thumb-2 instruction set for armv7a and above. If + you have any custom recipes that build software that needs to be + built with the ARM instruction set, change the recipe to set the + instruction set as follows: + :: + + ARM_INSTRUCTION_SET = "arm" + +- ``run-postinsts`` no longer uses ``/etc/*-postinsts`` for + ``dpkg/opkg`` in favor of built-in postinst support. RPM behavior + remains unchanged. + +- The ``NOISO`` and ``NOHDD`` variables are no longer used. You now + control building ``*.iso`` and ``*.hddimg`` image types directly by + using the :term:`IMAGE_FSTYPES` variable. + +- The ``scripts/contrib/mkefidisk.sh`` has been removed in favor of + Wic. + +- ``kernel-modules`` has been removed from + :term:`RRECOMMENDS` for ``qemumips`` and + ``qemumips64`` machines. Removal also impacts the ``x86-base.inc`` + file. + + .. note:: + + genericx86 + and + genericx86-64 + retain + kernel-modules + as part of the + RRECOMMENDS + variable setting. + +- The ``LGPLv2_WHITELIST_GPL-3.0`` variable has been removed. If you + are setting this variable in your configuration, set or append it to + the ``WHITELIST_GPL-3.0`` variable instead. + +- ``${ASNEEDED}`` is now included in the + :term:`TARGET_LDFLAGS` variable directly. The + remaining definitions from ``meta/conf/distro/include/as-needed.inc`` + have been moved to corresponding recipes. + +- Support for DSA host keys has been dropped from the OpenSSH recipes. + If you are still using DSA keys, you must switch over to a more + secure algorithm as recommended by OpenSSH upstream. + +- The ``dhcp`` recipe now uses the ``dhcpd6.conf`` configuration file + in ``dhcpd6.service`` for IPv6 DHCP rather than re-using + ``dhcpd.conf``, which is now reserved for IPv4. + + diff --git a/poky/documentation/ref-manual/migration-2.7.rst b/poky/documentation/ref-manual/migration-2.7.rst new file mode 100644 index 000000000..7e628fc3e --- /dev/null +++ b/poky/documentation/ref-manual/migration-2.7.rst @@ -0,0 +1,180 @@ +Moving to the Yocto Project 2.7 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 2.7 Release from the prior release. + +.. 
_migration-2.7-bitbake-changes: + +BitBake Changes +--------------- + +The following changes have been made to BitBake: + +- BitBake now checks anonymous Python functions and pure Python + functions (e.g. ``def funcname:``) in the metadata for tab + indentation. If found, BitBake produces a warning. + +- Bitbake now checks + :term:`BBFILE_COLLECTIONS` for duplicate + entries and triggers an error if any are found. + +.. _migration-2.7-eclipse-support-dropped: + +Eclipse Support Removed +----------------------- + +Support for the Eclipse IDE has been removed. Support continues for +those releases prior to 2.7 that did include support. The 2.7 release +does not include the Eclipse Yocto plugin. + +.. _migration-2.7-qemu-native-splits-system-and-user-mode-parts: + +``qemu-native`` Splits the System and User-Mode Parts +----------------------------------------------------- + +The system and user-mode parts of ``qemu-native`` are now split. +``qemu-native`` provides the user-mode components and +``qemu-system-native`` provides the system components. If you have +recipes that depend on QEMU's system emulation functionality at build +time, they should now depend upon ``qemu-system-native`` instead of +``qemu-native``. + +.. _migration-2.7-upstream-tracking.inc-removed: + +The ``upstream-tracking.inc`` File Has Been Removed +--------------------------------------------------- + +The previously deprecated ``upstream-tracking.inc`` file is now removed. +Any ``UPSTREAM_TRACKING*`` variables are now set in the corresponding +recipes instead. + +Remove any references you have to the ``upstream-tracking.inc`` file in +your configuration. + +.. _migration-2.7-distro-features-libc-removed: + +The ``DISTRO_FEATURES_LIBC`` Variable Has Been Removed +------------------------------------------------------ + +The ``DISTRO_FEATURES_LIBC`` variable is no longer used. The ability to +configure glibc using kconfig has been removed for quite some time +making the ``libc-*`` features set no longer effective. + +Remove any references you have to ``DISTRO_FEATURES_LIBC`` in your own +layers. + +.. _migration-2.7-license-values: + +License Value Corrections +------------------------- + +The following corrections have been made to the +:term:`LICENSE` values set by recipes: + +- *socat*: Corrected ``LICENSE`` to be "GPLv2" rather than "GPLv2+". +- *libgfortran*: Set license to "GPL-3.0-with-GCC-exception". +- *elfutils*: Removed "Elfutils-Exception" and set to "GPLv2" for shared libraries + +.. _migration-2.7-packaging-changes: + +Packaging Changes +----------------- + +This section provides information about packaging changes. + +- ``bind``: The ``nsupdate`` binary has been moved to the + ``bind-utils`` package. + +- Debug split: The default debug split has been changed to create + separate source packages (i.e. package_name\ ``-dbg`` and + package_name\ ``-src``). If you are currently using ``dbg-pkgs`` in + :term:`IMAGE_FEATURES` to bring in debug + symbols and you still need the sources, you must now also add + ``src-pkgs`` to ``IMAGE_FEATURES``. Source packages remain in the + target portion of the SDK by default, unless you have set your own + value for :term:`SDKIMAGE_FEATURES` that + does not include ``src-pkgs``. + +- Mount all using ``util-linux``: ``/etc/default/mountall`` has moved + into the -mount sub-package. + +- Splitting binaries using ``util-linux``: ``util-linux`` now splits + each binary into its own package for fine-grained control. 
The main + ``util-linux`` package pulls in the individual binary packages using + the :term:`RRECOMMENDS` and + :term:`RDEPENDS` variables. As a result, existing + images should not see any changes assuming + :term:`NO_RECOMMENDATIONS` is not set. + +- ``netbase/base-files``: ``/etc/hosts`` has moved from ``netbase`` to + ``base-files``. + +- ``tzdata``: The main package has been converted to an empty meta + package that pulls in all ``tzdata`` packages by default. + +- ``lrzsz``: This package has been removed from + ``packagegroup-self-hosted`` and + ``packagegroup-core-tools-testapps``. The X/Y/ZModem support is less + likely to be needed on modern systems. If you are relying on these + packagegroups to include the ``lrzsz`` package in your image, you now + need to explicitly add the package. + +.. _migration-2.7-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed: + +- *gcc*: Drop version 7.3 recipes. Version 8.3 now remains. +- *linux-yocto*: Drop versions 4.14 and 4.18 recipes. Versions 4.19 and 5.0 remain. +- *go*: Drop version 1.9 recipes. Versions 1.11 and 1.12 remain. +- *xvideo-tests*: Became obsolete. +- *libart-lgpl*: Became obsolete. +- *gtk-icon-utils-native*: These tools are now provided by gtk+3-native +- *gcc-cross-initial*: No longer needed. gcc-cross/gcc-crosssdk is now used instead. +- *gcc-crosssdk-initial*: No longer needed. gcc-cross/gcc-crosssdk is now used instead. +- *glibc-initial*: Removed because the benefits of having it for site_config are currently outweighed by the cost of building the recipe. + +.. _migration-2.7-removed-classes: + +Removed Classes +--------------- + +The following classes have been removed: + +- *distutils-tools*: This class was never used. +- *bugzilla.bbclass*: Became obsolete. +- *distrodata*: This functionally has been replaced by a more modern tinfoil-based implementation. + +.. _migration-2.7-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes occurred: + +- The ``distro`` subdirectory of the Poky repository has been removed + from the top-level ``scripts`` directory. + +- Perl now builds for the target using + `perl-cross `_ for better + maintainability and improved build performance. This change should + not present any problems unless you have heavily customized your Perl + recipe. + +- ``arm-tunes``: Removed the "-march" option if mcpu is already added. + +- ``update-alternatives``: Convert file renames to + :term:`PACKAGE_PREPROCESS_FUNCS` + +- ``base/pixbufcache``: Obsolete ``sstatecompletions`` code has been + removed. + +- :ref:`native ` class: + :term:`RDEPENDS` handling has been enabled. + +- ``inetutils``: This recipe has rsh disabled. + + diff --git a/poky/documentation/ref-manual/migration-3.0.rst b/poky/documentation/ref-manual/migration-3.0.rst new file mode 100644 index 000000000..e1305dfcc --- /dev/null +++ b/poky/documentation/ref-manual/migration-3.0.rst @@ -0,0 +1,321 @@ +Moving to the Yocto Project 3.0 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 3.0 Release from the prior release. + +.. _migration-3.0-init-system-selection: + +Init System Selection +--------------------- + +Changing the init system manager previously required setting a number of +different variables. You can now change the manager by setting the +``INIT_MANAGER`` variable and the corresponding include files (i.e. +``conf/distro/include/init-manager-*.conf``). 
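+
+For example, a distro configuration that wants systemd as its init
+manager would set (a minimal sketch using one of the supported values
+listed just below)::
+
+   INIT_MANAGER = "systemd"
+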
Include files are provided +for four values: "none", "sysvinit", "systemd", and "mdev-busybox". The +default value, "none", for ``INIT_MANAGER`` should allow your current +settings to continue working. However, it is advisable to explicitly set +``INIT_MANAGER``. + +.. _migration-3.0-lsb-support-removed: + +LSB Support Removed +------------------- + +Linux Standard Base (LSB) as a standard is not current, and is not well +suited for embedded applications. Support can be continued in a separate +layer if needed. However, presently LSB support has been removed from +the core. + +As a result of this change, the ``poky-lsb`` derivative distribution +configuration that was also used for testing alternative configurations +has been replaced with a ``poky-altcfg`` distribution that has LSB parts +removed. + +.. _migration-3.0-removed-recipes: + +Removed Recipes +--------------- + +The following recipes have been removed. + +- ``core-image-lsb-dev``: Part of removed LSB support. + +- ``core-image-lsb``: Part of removed LSB support. + +- ``core-image-lsb-sdk``: Part of removed LSB support. + +- ``cve-check-tool``: Functionally replaced by the ``cve-update-db`` + recipe and ``cve-check`` class. + +- ``eglinfo``: No longer maintained. ``eglinfo`` from ``mesa-demos`` is + an adequate and maintained alternative. + +- ``gcc-8.3``: Version 8.3 removed. Replaced by 9.2. + +- ``gnome-themes-standard``: Only needed by gtk+ 2.x, which has been + removed. + +- ``gtk+``: GTK+ 2 is obsolete and has been replaced by gtk+3. + +- ``irda-utils``: Has become obsolete. IrDA support has been removed + from the Linux kernel in version 4.17 and later. + +- ``libnewt-python``: ``libnewt`` Python support merged into main + ``libnewt`` recipe. + +- ``libsdl``: Replaced by newer ``libsdl2``. + +- ``libx11-diet``: Became obsolete. + +- ``libxx86dga``: Removed obsolete client library. + +- ``libxx86misc``: Removed. Library is redundant. + +- ``linux-yocto``: Version 5.0 removed, which is now redundant (5.2 / + 4.19 present). + +- ``lsbinitscripts``: Part of removed LSB support. + +- ``lsb``: Part of removed LSB support. + +- ``lsbtest``: Part of removed LSB support. + +- ``openssl10``: Replaced by newer ``openssl`` version 1.1. + +- ``packagegroup-core-lsb``: Part of removed LSB support. + +- ``python-nose``: Removed the Python 2.x version of the recipe. + +- ``python-numpy``: Removed the Python 2.x version of the recipe. + +- ``python-scons``: Removed the Python 2.x version of the recipe. + +- ``source-highlight``: No longer needed. + +- ``stress``: Replaced by ``stress-ng``. + +- ``vulkan``: Split into ``vulkan-loader``, ``vulkan-headers``, and + ``vulkan-tools``. + +- ``weston-conf``: Functionality moved to ``weston-init``. + +.. _migration-3.0-packaging-changes: + +Packaging Changes +----------------- + +The following packaging changes have occurred. + +- The `Epiphany `__ browser + has been dropped from ``packagegroup-self-hosted`` as it has not been + needed inside ``build-appliance-image`` for quite some time and was + causing resource problems. + +- ``libcap-ng`` Python support has been moved to a separate + ``libcap-ng-python`` recipe to streamline the build process when the + Python bindings are not needed. + +- ``libdrm`` now packages the file ``amdgpu.ids`` into a separate + ``libdrm-amdgpu`` package. + +- ``python3``: The ``runpy`` module is now in the ``python3-core`` + package as it is required to support the common "python3 -m" command + usage. 
+ +- ``distcc`` now provides separate ``distcc-client`` and + ``distcc-server`` packages as typically one or the other are needed, + rather than both. + +- ``python*-setuptools`` recipes now separately package the + ``pkg_resources`` module in a ``python-pkg-resources`` / + ``python3-pkg-resources`` package as the module is useful independent + of the rest of the setuptools package. The main ``python-setuptools`` + / ``python3-setuptools`` package depends on this new package so you + should only need to update dependencies unless you want to take + advantage of the increased granularity. + +.. _migration-3.0-cve-checking: + +CVE Checking +------------ + +``cve-check-tool`` has been functionally replaced by a new +``cve-update-db`` recipe and functionality built into the ``cve-check`` +class. The result uses NVD JSON data feeds rather than the deprecated +XML feeds that ``cve-check-tool`` was using, supports CVSSv3 scoring, +and makes other improvements. + +Additionally, the ``CVE_CHECK_CVE_WHITELIST`` variable has been replaced +by ``CVE_CHECK_WHITELIST``. + +.. _migration-3.0-bitbake-changes: + +Bitbake Changes +--------------- + +The following BitBake changes have occurred. + +- ``addtask`` statements now properly validate dependent tasks. + Previously, an invalid task was silently ignored. With this change, + the invalid task generates a warning. + +- Other invalid ``addtask`` and ``deltask`` usages now trigger these + warnings: "multiple target tasks arguments with addtask / deltask", + and "multiple before/after clauses". + +- The "multiconfig" prefix is now shortened to "mc". "multiconfig" will + continue to work, however it may be removed in a future release. + +- The ``bitbake -g`` command no longer generates a + ``recipe-depends.dot`` file as the contents (i.e. a reprocessed + version of ``task-depends.dot``) were confusing. + +- The ``bb.build.FuncFailed`` exception, previously raised by + ``bb.build.exec_func()`` when certain other exceptions have occurred, + has been removed. The real underlying exceptions will be raised + instead. If you have calls to ``bb.build.exec_func()`` in custom + classes or ``tinfoil-using`` scripts, any references to + ``bb.build.FuncFailed`` should be cleaned up. + +- Additionally, the ``bb.build.exec_func()`` no longer accepts the + "pythonexception" parameter. The function now always raises + exceptions. Remove this argument in any calls to + ``bb.build.exec_func()`` in custom classes or scripts. + +- The + :term:`bitbake:BB_SETSCENE_VERIFY_FUNCTION2` + is no longer used. In the unlikely event that you have any references + to it, they should be removed. + +- The ``RunQueueExecuteScenequeue`` and ``RunQueueExecuteTasks`` events + have been removed since setscene tasks are now executed as part of + the normal runqueue. Any event handling code in custom classes or + scripts that handles these two events need to be updated. + +- The arguments passed to functions used with + :term:`bitbake:BB_HASHCHECK_FUNCTION` + have changed. If you are using your own custom hash check function, + see + http://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?id=40a5e193c4ba45c928fccd899415ea56b5417725 + for details. + +- Task specifications in ``BB_TASKDEPDATA`` and class implementations + used in signature generator classes now use ":" everywhere + rather than the "." delimiter that was being used in some places. + This change makes it consistent with all areas in the code. 
Custom + signature generator classes and code that reads ``BB_TASKDEPDATA`` + need to be updated to use ':' as a separator rather than '.'. + +.. _migration-3.0-sanity-checks: + +Sanity Checks +------------- + +The following sanity check changes occurred. + +- :term:`SRC_URI` is now checked for usage of two + problematic items: + + - "${PN}" prefix/suffix use - Warnings always appear if ${PN} is + used. You must fix the issue regardless of whether multiconfig or + anything else that would cause prefixing/suffixing to happen. + + - Github archive tarballs - these are not guaranteed to be stable. + Consequently, it is likely that the tarballs will be refreshed and + thus the SRC_URI checksums will fail to apply. It is recommended + that you fetch either an official release tarball or a specific + revision from the actual Git repository instead. + + Either one of these items now trigger a warning by default. If you + wish to disable this check, remove ``src-uri-bad`` from + :term:`WARN_QA`. + +- The ``file-rdeps`` runtime dependency check no longer expands + :term:`RDEPENDS` recursively as there is no mechanism + to ensure they can be fully computed, and thus races sometimes result + in errors either showing up or not. Thus, you might now see errors + for missing runtime dependencies that were previously satisfied + recursively. Here is an example: package A contains a shell script + starting with ``#!/bin/bash`` but has no dependency on bash. However, + package A depends on package B, which does depend on bash. You need + to add the missing dependency or dependencies to resolve the warning. + +- Setting ``DEPENDS_${PN}`` anywhere (i.e. typically in a recipe) now + triggers an error. The error is triggered because + :term:`DEPENDS` is not a package-specific variable + unlike RDEPENDS. You should set ``DEPENDS`` instead. + +- systemd currently does not work well with the musl C library because + only upstream officially supports linking the library with glibc. + Thus, a warning is shown when building systemd in conjunction with + musl. + +.. _migration-3.0-miscellaneous-changes: + +Miscellaneous Changes +--------------------- + +The following miscellaneous changes have occurred. + +- The ``gnome`` class has been removed because it now does very little. + You should update recipes that previously inherited this class to do + the following: inherit gnomebase gtk-icon-cache gconf mime + +- The ``meta/recipes-kernel/linux/linux-dtb.inc`` file has been + removed. This file was previously deprecated in favor of setting + :term:`KERNEL_DEVICETREE` in any kernel + recipe and only produced a warning. Remove any ``include`` or + ``require`` statements pointing to this file. + +- :term:`TARGET_CFLAGS`, + :term:`TARGET_CPPFLAGS`, + :term:`TARGET_CXXFLAGS`, and + :term:`TARGET_LDFLAGS` are no longer exported + to the external environment. This change did not require any changes + to core recipes, which is a good indicator that no changes will be + required. However, if for some reason the software being built by one + of your recipes is expecting these variables to be set, then building + the recipe will fail. In such cases, you must either export the + variable or variables in the recipe or change the scripts so that + exporting is not necessary. + +- You must change the host distro identifier used in + :term:`NATIVELSBSTRING` to use all lowercase + characters even if it does not contain a version number. 
This change + is necessary only if you are not using ``uninative`` and + :term:`SANITY_TESTED_DISTROS`. + +- In the ``base-files`` recipe, writing the hostname into + ``/etc/hosts`` and ``/etc/hostname`` is now done within the main + :ref:`ref-tasks-install` function rather than in the + ``do_install_basefilesissue`` function. The reason for the change is + because ``do_install_basefilesissue`` is more easily overridden + without having to duplicate the hostname functionality. If you have + done the latter (e.g. in a ``base-files`` bbappend), then you should + remove it from your customized ``do_install_basefilesissue`` + function. + +- The ``wic --expand`` command now uses commas to separate "key:value" + pairs rather than hyphens. + + .. note:: + + The wic command-line help is not updated. + + You must update any scripts or commands where you use + ``wic --expand`` with multiple "key:value" pairs. + +- UEFI image variable settings have been moved from various places to a + central ``conf/image-uefi.conf``. This change should not influence + any existing configuration as the ``meta/conf/image-uefi.conf`` in + the core metadata sets defaults that can be overridden in the same + manner as before. + +- ``conf/distro/include/world-broken.inc`` has been removed. For cases + where certain recipes need to be disabled when using the musl C + library, these recipes now have ``COMPATIBLE_HOST_libc-musl`` set + with a comment that explains why. + + diff --git a/poky/documentation/ref-manual/migration-3.1.rst b/poky/documentation/ref-manual/migration-3.1.rst new file mode 100644 index 000000000..92c8c7761 --- /dev/null +++ b/poky/documentation/ref-manual/migration-3.1.rst @@ -0,0 +1,276 @@ +Moving to the Yocto Project 3.1 Release +======================================= + +This section provides migration information for moving to the Yocto +Project 3.1 Release from the prior release. + +.. _migration-3.1-minimum-system-requirements: + +Minimum system requirements +--------------------------- + +The following versions / requirements of build host components have been +updated: + +- gcc 5.0 + +- python 3.5 + +- tar 1.28 + +- ``rpcgen`` is now required on the host (part of the ``libc-dev-bin`` + package on Ubuntu, Debian and related distributions, and the + ``glibc`` package on RPM-based distributions). + +Additionally, the ``makeinfo`` and ``pod2man`` tools are *no longer* +required on the host. + +.. _migration-3.1-mpc8315e-rdb-removed: + +mpc8315e-rdb machine removed +---------------------------- + +The MPC8315E-RDB machine is old/obsolete and unobtainable, thus given +the maintenance burden the ``mpc8315e-rdb`` machine configuration that +supported it has been removed in this release. The removal does leave a +gap in official PowerPC reference hardware support; this may change in +future if a suitable machine with accompanying support resources is +found. + +.. _migration-3.1-python-2-removed: + +Python 2 removed +---------------- + +Due to the expiration of upstream support in January 2020, support for +Python 2 has now been removed; it is recommended that you use Python 3 +instead. If absolutely needed there is a meta-python2 community layer +containing Python 2, related classes and various Python 2-based modules, +however it should not be considered as supported. + +.. 
_migration-3.1-reproducible-builds: + +Reproducible builds now enabled by default +------------------------------------------ + +In order to avoid unnecessary differences in output files (aiding binary +reproducibility), the Poky distribution configuration +(``DISTRO = "poky"``) now inherits the ``reproducible_build`` class by +default. + +.. _migration-3.1-ptest-feature-impact: + +Impact of ptest feature is now more significant +----------------------------------------------- + +The Poky distribution configuration (``DISTRO = "poky"``) enables ptests +by default to enable runtime testing of various components. In this +release, a dependency needed to be added that has resulted in a +significant increase in the number of components that will be built just +when building a simple image such as core-image-minimal. If you do not +need runtime tests enabled for core components, then it is recommended +that you remove "ptest" from +:term:`DISTRO_FEATURES` to save a significant +amount of build time e.g. by adding the following in your configuration: +:: + + DISTRO_FEATURES_remove = "ptest" + +.. _migration-3.1-removed-recipes: + +Removed recipes +--------------- + +The following recipes have been removed: + +- ``chkconfig``: obsolete + +- ``console-tools``: obsolete + +- ``enchant``: replaced by ``enchant2`` + +- ``foomatic-filters``: obsolete + +- ``libidn``: no longer needed, moved to meta-oe + +- ``libmodulemd``: replaced by ``libmodulemd-v1`` + +- ``linux-yocto``: drop 4.19, 5.2 version recipes (5.4 now provided) + +- ``nspr``: no longer needed, moved to meta-oe + +- ``nss``: no longer needed, moved to meta-oe + +- ``python``: Python 2 removed (Python 3 preferred) + +- ``python-setuptools``: Python 2 version removed (python3-setuptools + preferred) + +- ``sysprof``: no longer needed, moved to meta-oe + +- ``texi2html``: obsolete + +- ``u-boot-fw-utils``: functionally replaced by ``libubootenv`` + +.. _migration-3.1-features-check: + +features_check class replaces distro_features_check +--------------------------------------------------- + +The ``distro_features_check`` class has had its functionality expanded, +now supporting ``ANY_OF_MACHINE_FEATURES``, +``REQUIRED_MACHINE_FEATURES``, ``CONFLICT_MACHINE_FEATURES``, +``ANY_OF_COMBINED_FEATURES``, ``REQUIRED_COMBINED_FEATURES``, +``CONFLICT_COMBINED_FEATURES``. As a result the class has now been +renamed to ``features_check``; the ``distro_features_check`` class still +exists but generates a warning and redirects to the new class. In +preparation for a future removal of the old class it is recommended that +you update recipes currently inheriting ``distro_features_check`` to +inherit ``features_check`` instead. + +.. _migration-3.1-removed-classes: + +Removed classes +--------------- + +The following classes have been removed: + +- ``distutils-base``: moved to meta-python2 + +- ``distutils``: moved to meta-python2 + +- ``libc-common``: merged into the glibc recipe as nothing else used + it. + +- ``python-dir``: moved to meta-python2 + +- ``pythonnative``: moved to meta-python2 + +- ``setuptools``: moved to meta-python2 + +- ``tinderclient``: dropped as it was obsolete. + +.. _migration-3.1-src-uri-checksums: + +SRC_URI checksum behaviour +-------------------------- + +Previously, recipes by tradition included both SHA256 and MD5 checksums +for remotely fetched files in :term:`SRC_URI`, even +though only one is actually mandated. 
However, the MD5 checksum does not +add much given its inherent weakness; thus when a checksum fails only +the SHA256 sum will now be printed. The md5sum will still be verified if +it is specified. + +.. _migration-3.1-npm: + +npm fetcher changes +------------------- + +The npm fetcher has been completely reworked in this release. The npm +fetcher now only fetches the package source itself and no longer the +dependencies; there is now also an npmsw fetcher which explicitly +fetches the shrinkwrap file and the dependencies. This removes the +slightly awkward ``NPM_LOCKDOWN`` and ``NPM_SHRINKWRAP`` variables which +pointed to local files; the lockdown file is no longer needed at all. +Additionally, the package name in ``npm://`` entries in +:term:`SRC_URI` is now specified using a ``package`` +parameter instead of the earlier ``name`` which overlapped with the +generic ``name`` parameter. All recipes using the npm fetcher will need +to be changed as a result. + +An example of the new scheme: :: + + SRC_URI = "npm://registry.npmjs.org;package=array-flatten;version=1.1.1 \\ + npmsw://${THISDIR}/npm-shrinkwrap.json" + +Another example where the sources are fetched from git rather than an npm repository: :: + + SRC_URI = "git://github.com/foo/bar.git;protocol=https \ + npmsw://${THISDIR}/npm-shrinkwrap.json" + +devtool and recipetool have also been updated to match with the npm +fetcher changes. Other than producing working and more complete recipes +for npm sources, there is also a minor change to the command line for +devtool: the ``--fetch-dev`` option has been renamed to ``--npm-dev`` as +it is npm-specific. + +.. _migration-3.1-packaging-changes: + +Packaging changes +----------------- + +- ``intltool`` has been removed from ``packagegroup-core-sdk`` as it is + rarely needed to build modern software - gettext can do most of the + things it used to be needed for. ``intltool`` has also been removed + from ``packagegroup-core-self-hosted`` as it is not needed to for + standard builds. + +- git: ``git-am``, ``git-difftool``, ``git-submodule``, and + ``git-request-pull`` are no longer perl-based, so are now installed + with the main ``git`` package instead of within ``git-perltools``. + +- The ``ldconfig`` binary built as part of glibc has now been moved to + its own ``ldconfig`` package (note no ``glibc-`` prefix). This + package is in the :term:`RRECOMMENDS` of the main + ``glibc`` package if ``ldconfig`` is present in + :term:`DISTRO_FEATURES`. + +- ``libevent`` now splits each shared library into its own package (as + Debian does). Since these are shared libraries and will be pulled in + through the normal shared library dependency handling, there should + be no impact to existing configurations other than less unnecessary + libraries being installed in some cases. + +- linux-firmware now has a new package for ``bcm4366c`` and includes + available NVRAM config files into the ``bcm43340``, ``bcm43362``, + ``bcm43430`` and ``bcm4356-pcie`` packages. + +- ``harfbuzz`` now splits the new ``libharfbuzz-subset.so`` library + into its own package to reduce the main package size in cases where + ``libharfbuzz-subset.so`` is not needed. + +.. 
_migration-3.1-package-qa-warnings: + +Additional warnings +------------------- + +Warnings will now be shown at ``do_package_qa`` time in the following +circumstances: + +- A recipe installs ``.desktop`` files containing ``MimeType`` keys but + does not inherit the new ``mime-xdg`` class + +- A recipe installs ``.xml`` files into ``${datadir}/mime/packages`` + but does not inherit the ``mime`` class + +.. _migration-3.1-x86-live-wic: + +``wic`` image type now used instead of ``live`` by default for x86 +------------------------------------------------------------------ + +``conf/machine/include/x86-base.inc`` (inherited by most x86 machine +configurations) now specifies ``wic`` instead of ``live`` by default in +:term:`IMAGE_FSTYPES`. The ``live`` image type will +likely be removed in a future release so it is recommended that you use +``wic`` instead. + +.. _migration-3.1-misc: + +Miscellaneous changes +--------------------- + +- The undocumented ``SRC_DISTRIBUTE_LICENSES`` variable has now been + removed in favour of a new ``AVAILABLE_LICENSES`` variable which is + dynamically set based upon license files found in + ``${COMMON_LICENSE_DIR}`` and ``${LICENSE_PATH}``. + +- The tune definition for big-endian microblaze machines is now + ``microblaze`` instead of ``microblazeeb``. + +- ``newlib`` no longer has built-in syscalls. ``libgloss`` should then + provide the syscalls, ``crt0.o`` and other functions that are no + longer part of ``newlib`` itself. If you are using + ``TCLIBC = "newlib"`` this now means that you must link applications + with both ``newlib`` and ``libgloss``, whereas before ``newlib`` + would run in many configurations by itself. diff --git a/poky/documentation/ref-manual/migration-general.rst b/poky/documentation/ref-manual/migration-general.rst new file mode 100644 index 000000000..182482ec4 --- /dev/null +++ b/poky/documentation/ref-manual/migration-general.rst @@ -0,0 +1,54 @@ +General Migration Considerations +================================ + +Some considerations are not tied to a specific Yocto Project release. +This section presents information you should consider when migrating to +any new Yocto Project release. + +- *Dealing with Customized Recipes*: + + Issues could arise if you take + older recipes that contain customizations and simply copy them + forward expecting them to work after you migrate to new Yocto Project + metadata. For example, suppose you have a recipe in your layer that + is a customized version of a core recipe copied from the earlier + release, rather than through the use of an append file. When you + migrate to a newer version of Yocto Project, the metadata (e.g. + perhaps an include file used by the recipe) could have changed in a + way that would break the build. Say, for example, a function is + removed from an include file and the customized recipe tries to call + that function. + + You could "forward-port" all your customizations in your recipe so + that everything works for the new release. However, this is not the + optimal solution as you would have to repeat this process with each + new release if changes occur that give rise to problems. + + The better solution (where practical) is to use append files + (``*.bbappend``) to capture any customizations you want to make to a + recipe. Doing so, isolates your changes from the main recipe making + them much more manageable. However, sometimes it is not practical to + use an append file. 
A good example of this is when introducing a + newer or older version of a recipe in another layer. + +- *Updating Append Files*: + + Since append files generally only contain + your customizations, they often do not need to be adjusted for new + releases. However, if the ``.bbappend`` file is specific to a + particular version of the recipe (i.e. its name does not use the % + wildcard) and the version of the recipe to which it is appending has + changed, then you will at a minimum need to rename the append file to + match the name of the recipe file. A mismatch between an append file + and its corresponding recipe file (``.bb``) will trigger an error + during parsing. + + Depending on the type of customization the append file applies, other + incompatibilities might occur when you upgrade. For example, if your + append file applies a patch and the recipe to which it is appending + is updated to a newer version, the patch might no longer apply. If + this is the case and assuming the patch is still needed, you must + modify the patch file so that it does apply. + + + diff --git a/poky/documentation/ref-manual/migration.rst b/poky/documentation/ref-manual/migration.rst new file mode 100644 index 000000000..6c6119dae --- /dev/null +++ b/poky/documentation/ref-manual/migration.rst @@ -0,0 +1,30 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************************************** +Migrating to a Newer Yocto Project Release +****************************************** + +This chapter provides information you can use to migrate work to a newer +Yocto Project release. You can find the same information in the release +notes for a given release. + +.. toctree:: + + migration-general + migration-1.3 + migration-1.4 + migration-1.5 + migration-1.6 + migration-1.7 + migration-1.8 + migration-2.0 + migration-2.1 + migration-2.2 + migration-2.3 + migration-2.4 + migration-2.5 + migration-2.6 + migration-2.7 + migration-3.0 + migration-3.1 + diff --git a/poky/documentation/ref-manual/ref-classes.rst b/poky/documentation/ref-manual/ref-classes.rst new file mode 100644 index 000000000..60ce8efd2 --- /dev/null +++ b/poky/documentation/ref-manual/ref-classes.rst @@ -0,0 +1,2963 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******* +Classes +******* + +Class files are used to abstract common functionality and share it +amongst multiple recipe (``.bb``) files. To use a class file, you simply +make sure the recipe inherits the class. In most cases, when a recipe +inherits a class it is enough to enable its features. There are cases, +however, where in the recipe you might need to set variables or override +some default behavior. + +Any :term:`Metadata` usually found in a recipe can also be +placed in a class file. Class files are identified by the extension +``.bbclass`` and are usually placed in a ``classes/`` directory beneath +the ``meta*/`` directory found in the :term:`Source Directory`. +Class files can also be pointed to by +:term:`BUILDDIR` (e.g. ``build/``) in the same way as +``.conf`` files in the ``conf`` directory. Class files are searched for +in :term:`BBPATH` using the same method by which ``.conf`` +files are searched. + +This chapter discusses only the most useful and important classes. Other +classes do exist within the ``meta/classes`` directory in the Source +Directory. You can reference the ``.bbclass`` files directly for more +information. + +.. 
_ref-classes-allarch:
+
+``allarch.bbclass``
+===================
+
+The ``allarch`` class is inherited by recipes that do not produce
+architecture-specific output. The class disables functionality that is
+normally needed for recipes that produce executable binaries (such as
+building the cross-compiler and a C library as pre-requisites, and
+splitting out of debug symbols during packaging).
+
+.. note::
+
+   Unlike some distro recipes (e.g. Debian), OpenEmbedded recipes that
+   produce packages that depend on tunings through use of the
+   :term:`RDEPENDS` and
+   :term:`TUNE_PKGARCH` variables should never be
+   configured for all architectures using ``allarch``. This is the case
+   even if the recipes do not produce architecture-specific output.
+
+   Configuring such recipes for all architectures causes the
+   ``do_package_write_*`` tasks to
+   have different signatures for the machines with different tunings.
+   Additionally, unnecessary rebuilds occur every time an image for a
+   different ``MACHINE`` is built even when the recipe never changes.
+
+By default, all recipes inherit the :ref:`base <ref-classes-base>` and
+:ref:`package <ref-classes-package>` classes, which enable
+functionality needed for recipes that produce executable output. If your
+recipe, for example, only produces packages that contain configuration
+files, media files, or scripts (e.g. Python and Perl), then it should
+inherit the ``allarch`` class.
+
+.. _ref-classes-archiver:
+
+``archiver.bbclass``
+====================
+
+The ``archiver`` class supports releasing source code and other
+materials with the binaries.
+
+For more details on the source archiver, see the
+":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`"
+section in the Yocto Project Development Tasks Manual. You can also see
+the :term:`ARCHIVER_MODE` variable for information
+about the variable flags (varflags) that help control archive creation.
+
+.. _ref-classes-autotools:
+
+``autotools*.bbclass``
+======================
+
+The ``autotools*`` classes support Autotooled packages.
+
+The ``autoconf``, ``automake``, and ``libtool`` packages bring
+standardization. This class defines a set of tasks (e.g. ``configure``,
+``compile`` and so forth) that work for all Autotooled packages. It
+should usually be enough to define a few standard variables and then
+simply ``inherit autotools``. These classes can also work with software
+that emulates Autotools. For more information, see the
+":ref:`new-recipe-autotooled-package`" section
+in the Yocto Project Development Tasks Manual.
+
+By default, the ``autotools*`` classes use out-of-tree builds (i.e.
+``autotools.bbclass`` building with ``B != S``).
+
+If the software being built by a recipe does not support using
+out-of-tree builds, you should have the recipe inherit the
+``autotools-brokensep`` class. The ``autotools-brokensep`` class behaves
+the same as the ``autotools`` class but builds with :term:`B`
+== :term:`S`. This method is useful when out-of-tree build
+support is either not present or is broken.
+
+.. note::
+
+   It is recommended that out-of-tree support be fixed and used if at
+   all possible.
+
+It's useful to have some idea of how the tasks defined by the
+``autotools*`` classes work and what they do behind the scenes.
+
+- :ref:`ref-tasks-configure` - Regenerates the
+  configure script (using ``autoreconf``) and then launches it with a
+  standard set of arguments used during cross-compilation.
You can pass + additional parameters to ``configure`` through the ``EXTRA_OECONF`` + or :term:`PACKAGECONFIG_CONFARGS` + variables. + +- :ref:`ref-tasks-compile` - Runs ``make`` with + arguments that specify the compiler and linker. You can pass + additional arguments through the ``EXTRA_OEMAKE`` variable. + +- :ref:`ref-tasks-install` - Runs ``make install`` and + passes in ``${``\ :term:`D`\ ``}`` as ``DESTDIR``. + +.. _ref-classes-base: + +``base.bbclass`` +================ + +The ``base`` class is special in that every ``.bb`` file implicitly +inherits the class. This class contains definitions for standard basic +tasks such as fetching, unpacking, configuring (empty by default), +compiling (runs any ``Makefile`` present), installing (empty by default) +and packaging (empty by default). These classes are often overridden or +extended by other classes such as the +:ref:`autotools ` class or the +:ref:`package ` class. + +The class also contains some commonly used functions such as +``oe_runmake``, which runs ``make`` with the arguments specified in +:term:`EXTRA_OEMAKE` variable as well as the +arguments passed directly to ``oe_runmake``. + +.. _ref-classes-bash-completion: + +``bash-completion.bbclass`` +=========================== + +Sets up packaging and dependencies appropriate for recipes that build +software that includes bash-completion data. + +.. _ref-classes-bin-package: + +``bin_package.bbclass`` +======================= + +The ``bin_package`` class is a helper class for recipes that extract the +contents of a binary package (e.g. an RPM) and install those contents +rather than building the binary from source. The binary package is +extracted and new packages in the configured output package format are +created. Extraction and installation of proprietary binaries is a good +example use for this class. + +.. note:: + + For RPMs and other packages that do not contain a subdirectory, you + should specify an appropriate fetcher parameter to point to the + subdirectory. For example, if BitBake is using the Git fetcher ( + git:// + ), the "subpath" parameter limits the checkout to a specific subpath + of the tree. Here is an example where + ${BP} + is used so that the files are extracted into the subdirectory + expected by the default value of + S + : + :: + + SRC_URI = "git://example.com/downloads/somepackage.rpm;subpath=${BP}" + + + See the " + Fetchers + " section in the BitBake User Manual for more information on + supported BitBake Fetchers. + +.. _ref-classes-binconfig: + +``binconfig.bbclass`` +===================== + +The ``binconfig`` class helps to correct paths in shell scripts. + +Before ``pkg-config`` had become widespread, libraries shipped shell +scripts to give information about the libraries and include paths needed +to build software (usually named ``LIBNAME-config``). This class assists +any recipe using such scripts. + +During staging, the OpenEmbedded build system installs such scripts into +the ``sysroots/`` directory. Inheriting this class results in all paths +in these scripts being changed to point into the ``sysroots/`` directory +so that all builds that use the script use the correct directories for +the cross compiling layout. See the +:term:`BINCONFIG_GLOB` variable for more +information. + +.. 
_ref-classes-binconfig-disabled: + +``binconfig-disabled.bbclass`` +============================== + +An alternative version of the :ref:`binconfig ` +class, which disables binary configuration scripts by making them return +an error in favor of using ``pkg-config`` to query the information. The +scripts to be disabled should be specified using the +:term:`BINCONFIG` variable within the recipe inheriting +the class. + +.. _ref-classes-blacklist: + +``blacklist.bbclass`` +===================== + +The ``blacklist`` class prevents the OpenEmbedded build system from +building specific recipes (blacklists them). To use this class, inherit +the class globally and set :term:`PNBLACKLIST` for +each recipe you wish to blacklist. Specify the :term:`PN` +value as a variable flag (varflag) and provide a reason, which is +reported, if the package is requested to be built as the value. For +example, if you want to blacklist a recipe called "exoticware", you add +the following to your ``local.conf`` or distribution configuration: +:: + + INHERIT += "blacklist" + PNBLACKLIST[exoticware] = "Not supported by our organization." + +.. _ref-classes-buildhistory: + +``buildhistory.bbclass`` +======================== + +The ``buildhistory`` class records a history of build output metadata, +which can be used to detect possible regressions as well as used for +analysis of the build output. For more information on using Build +History, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining build output quality`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-classes-buildstats: + +``buildstats.bbclass`` +====================== + +The ``buildstats`` class records performance statistics about each task +executed during the build (e.g. elapsed time, CPU usage, and I/O usage). + +When you use this class, the output goes into the +:term:`BUILDSTATS_BASE` directory, which defaults +to ``${TMPDIR}/buildstats/``. You can analyze the elapsed time using +``scripts/pybootchartgui/pybootchartgui.py``, which produces a cascading +chart of the entire build process and can be useful for highlighting +bottlenecks. + +Collecting build statistics is enabled by default through the +:term:`USER_CLASSES` variable from your +``local.conf`` file. Consequently, you do not have to do anything to +enable the class. However, if you want to disable the class, simply +remove "buildstats" from the ``USER_CLASSES`` list. + +.. _ref-classes-buildstats-summary: + +``buildstats-summary.bbclass`` +============================== + +When inherited globally, prints statistics at the end of the build on +sstate re-use. In order to function, this class requires the +:ref:`buildstats ` class be enabled. + +.. _ref-classes-ccache: + +``ccache.bbclass`` +================== + +The ``ccache`` class enables the C/C++ Compiler Cache for the build. +This class is used to give a minor performance boost during the build. +However, using the class can lead to unexpected side-effects. Thus, it +is recommended that you do not use this class. See +http://ccache.samba.org/ for information on the C/C++ Compiler +Cache. + +.. _ref-classes-chrpath: + +``chrpath.bbclass`` +=================== + +The ``chrpath`` class is a wrapper around the "chrpath" utility, which +is used during the build process for ``nativesdk``, ``cross``, and +``cross-canadian`` recipes to change ``RPATH`` records within binaries +in order to make them relocatable. + +.. 
_ref-classes-clutter: + +``clutter.bbclass`` +=================== + +The ``clutter`` class consolidates the major and minor version naming +and other common items used by Clutter and related recipes. + +.. note:: + + Unlike some other classes related to specific libraries, recipes + building other software that uses Clutter do not need to inherit this + class unless they use the same recipe versioning scheme that the + Clutter and related recipes do. + +.. _ref-classes-cmake: + +``cmake.bbclass`` +================= + +The ``cmake`` class allows for recipes that need to build software using +the `CMake `__ build system. You can use +the :term:`EXTRA_OECMAKE` variable to specify +additional configuration options to be passed using the ``cmake`` +command line. + +On the occasion that you would be installing custom CMake toolchain +files supplied by the application being built, you should install them +to the preferred CMake Module directory: ``${D}${datadir}/cmake/`` +Modules during +:ref:`ref-tasks-install`. + +.. _ref-classes-cml1: + +``cml1.bbclass`` +================ + +The ``cml1`` class provides basic support for the Linux kernel style +build configuration system. + +.. _ref-classes-compress_doc: + +``compress_doc.bbclass`` +======================== + +Enables compression for man pages and info pages. This class is intended +to be inherited globally. The default compression mechanism is gz (gzip) +but you can select an alternative mechanism by setting the +:term:`DOC_COMPRESS` variable. + +.. _ref-classes-copyleft_compliance: + +``copyleft_compliance.bbclass`` +=============================== + +The ``copyleft_compliance`` class preserves source code for the purposes +of license compliance. This class is an alternative to the ``archiver`` +class and is still used by some users even though it has been deprecated +in favor of the :ref:`archiver ` class. + +.. _ref-classes-copyleft_filter: + +``copyleft_filter.bbclass`` +=========================== + +A class used by the :ref:`archiver ` and +:ref:`copyleft_compliance ` classes +for filtering licenses. The ``copyleft_filter`` class is an internal +class and is not intended to be used directly. + +.. _ref-classes-core-image: + +``core-image.bbclass`` +====================== + +The ``core-image`` class provides common definitions for the +``core-image-*`` image recipes, such as support for additional +:term:`IMAGE_FEATURES`. + +.. _ref-classes-cpan: + +``cpan*.bbclass`` +================= + +The ``cpan*`` classes support Perl modules. + +Recipes for Perl modules are simple. These recipes usually only need to +point to the source's archive and then inherit the proper class file. +Building is split into two methods depending on which method the module +authors used. + +- Modules that use old ``Makefile.PL``-based build system require + ``cpan.bbclass`` in their recipes. + +- Modules that use ``Build.PL``-based build system require using + ``cpan_build.bbclass`` in their recipes. + +Both build methods inherit the ``cpan-base`` class for basic Perl +support. + +.. _ref-classes-cross: + +``cross.bbclass`` +================= + +The ``cross`` class provides support for the recipes that build the +cross-compilation tools. + +.. _ref-classes-cross-canadian: + +``cross-canadian.bbclass`` +========================== + +The ``cross-canadian`` class provides support for the recipes that build +the Canadian Cross-compilation tools for SDKs. 
See the +":ref:`overview-manual/overview-manual-concepts:cross-development toolchain generation`" +section in the Yocto Project Overview and Concepts Manual for more +discussion on these cross-compilation tools. + +.. _ref-classes-crosssdk: + +``crosssdk.bbclass`` +==================== + +The ``crosssdk`` class provides support for the recipes that build the +cross-compilation tools used for building SDKs. See the +":ref:`overview-manual/overview-manual-concepts:cross-development toolchain generation`" +section in the Yocto Project Overview and Concepts Manual for more +discussion on these cross-compilation tools. + +.. _ref-classes-debian: + +``debian.bbclass`` +================== + +The ``debian`` class renames output packages so that they follow the +Debian naming policy (i.e. ``glibc`` becomes ``libc6`` and +``glibc-devel`` becomes ``libc6-dev``.) Renaming includes the library +name and version as part of the package name. + +If a recipe creates packages for multiple libraries (shared object files +of ``.so`` type), use the :term:`LEAD_SONAME` +variable in the recipe to specify the library on which to apply the +naming scheme. + +.. _ref-classes-deploy: + +``deploy.bbclass`` +================== + +The ``deploy`` class handles deploying files to the +:term:`DEPLOY_DIR_IMAGE` directory. The main +function of this class is to allow the deploy step to be accelerated by +shared state. Recipes that inherit this class should define their own +:ref:`ref-tasks-deploy` function to copy the files to be +deployed to :term:`DEPLOYDIR`, and use ``addtask`` to +add the task at the appropriate place, which is usually after +:ref:`ref-tasks-compile` or +:ref:`ref-tasks-install`. The class then takes care of +staging the files from ``DEPLOYDIR`` to ``DEPLOY_DIR_IMAGE``. + +.. _ref-classes-devshell: + +``devshell.bbclass`` +==================== + +The ``devshell`` class adds the ``do_devshell`` task. Distribution +policy dictates whether to include this class. See the ":ref:`platdev-appdev-devshell`" +section in the Yocto Project Development Tasks Manual for more +information about using ``devshell``. + +.. _ref-classes-devupstream: + +``devupstream.bbclass`` +======================= + +The ``devupstream`` class uses +:term:`BBCLASSEXTEND` to add a variant of the +recipe that fetches from an alternative URI (e.g. Git) instead of a +tarball. Following is an example: +:: + + BBCLASSEXTEND = "devupstream:target" + SRC_URI_class-devupstream = "git://git.example.com/example" + SRCREV_class-devupstream = "abcd1234" + +Adding the above statements to your recipe creates a variant that has +:term:`DEFAULT_PREFERENCE` set to "-1". +Consequently, you need to select the variant of the recipe to use it. +Any development-specific adjustments can be done by using the +``class-devupstream`` override. Here is an example: +:: + + DEPENDS_append_class-devupstream = " gperf-native" + do_configure_prepend_class-devupstream() { + touch ${S}/README + } + +The class +currently only supports creating a development variant of the target +recipe, not ``native`` or ``nativesdk`` variants. + +The ``BBCLASSEXTEND`` syntax (i.e. ``devupstream:target``) provides +support for ``native`` and ``nativesdk`` variants. Consequently, this +functionality can be added in a future release. + +Support for other version control systems such as Subversion is limited +due to BitBake's automatic fetch dependencies (e.g. +``subversion-native``). + +.. 
_ref-classes-distro_features_check: + +``distro_features_check.bbclass`` +================================= + +The ``distro_features_check`` class allows individual recipes to check +for required and conflicting +:term:`DISTRO_FEATURES`. + +This class provides support for the +:term:`REQUIRED_DISTRO_FEATURES` and +:term:`CONFLICT_DISTRO_FEATURES` +variables. If any conditions specified in the recipe using the above +variables are not met, the recipe will be skipped. + +.. _ref-classes-distutils: + +``distutils*.bbclass`` +====================== + +The ``distutils*`` classes support recipes for Python version 2.x +extensions, which are simple. These recipes usually only need to point +to the source's archive and then inherit the proper class. Building is +split into two methods depending on which method the module authors +used. + +- Extensions that use an Autotools-based build system require Autotools + and the classes based on ``distutils`` in their recipes. + +- Extensions that use build systems based on ``distutils`` require the + ``distutils`` class in their recipes. + +- Extensions that use build systems based on ``setuptools`` require the + :ref:`setuptools ` class in their recipes. + +The ``distutils-common-base`` class is required by some of the +``distutils*`` classes to provide common Python2 support. + +.. _ref-classes-distutils3: + +``distutils3*.bbclass`` +======================= + +The ``distutils3*`` classes support recipes for Python version 3.x +extensions, which are simple. These recipes usually only need to point +to the source's archive and then inherit the proper class. Building is +split into three methods depending on which method the module authors +used. + +- Extensions that use an Autotools-based build system require Autotools + and ``distutils``-based classes in their recipes. + +- Extensions that use ``distutils``-based build systems require the + ``distutils`` class in their recipes. + +- Extensions that use build systems based on ``setuptools3`` require + the :ref:`setuptools3 ` class in their + recipes. + +The ``distutils3*`` classes either inherit their corresponding +``distutils*`` class or replicate them using a Python3 version instead +(e.g. ``distutils3-base`` inherits ``distutils-common-base``, which is +the same as ``distutils-base`` but inherits ``python3native`` instead of +``pythonnative``). + +.. _ref-classes-externalsrc: + +``externalsrc.bbclass`` +======================= + +The ``externalsrc`` class supports building software from source code +that is external to the OpenEmbedded build system. Building software +from an external source tree means that the build system's normal fetch, +unpack, and patch process is not used. + +By default, the OpenEmbedded build system uses the :term:`S` +and :term:`B` variables to locate unpacked recipe source code +and to build it, respectively. When your recipe inherits the +``externalsrc`` class, you use the +:term:`EXTERNALSRC` and +:term:`EXTERNALSRC_BUILD` variables to +ultimately define ``S`` and ``B``. + +By default, this class expects the source code to support recipe builds +that use the :term:`B` variable to point to the directory in +which the OpenEmbedded build system places the generated objects built +from the recipes. 
By default, the ``B`` directory is set to the
+following, which is separate from the source directory (``S``):
+::
+
+   ${WORKDIR}/${BPN}/${PV}/
+
+See these variables for more information:
+:term:`WORKDIR`, :term:`BPN`, and
+:term:`PV`.
+
+For more information on the ``externalsrc`` class, see the comments in
+``meta/classes/externalsrc.bbclass`` in the :term:`Source Directory`.
+For information on how to use the
+``externalsrc`` class, see the
+":ref:`dev-manual/dev-manual-common-tasks:building software from an external source`"
+section in the Yocto Project Development Tasks Manual.
+
+.. _ref-classes-extrausers:
+
+``extrausers.bbclass``
+======================
+
+The ``extrausers`` class allows additional user and group configuration
+to be applied at the image level. Inheriting this class either globally
+or from an image recipe allows additional user and group operations to
+be performed using the
+:term:`EXTRA_USERS_PARAMS` variable.
+
+.. note::
+
+   The user and group operations added using the ``extrausers`` class
+   are not tied to a specific recipe outside of the recipe for the
+   image. Thus, the operations can be performed across the image as a
+   whole. Use the ``useradd`` class to add user and group configuration
+   to a specific recipe.
+
+Here is an example that uses this class in an image recipe:
+::
+
+   inherit extrausers
+   EXTRA_USERS_PARAMS = "\
+       useradd -p '' tester; \
+       groupadd developers; \
+       userdel nobody; \
+       groupdel -g video; \
+       groupmod -g 1020 developers; \
+       usermod -s /bin/sh tester; \
+       "
+
+Here is an example that adds two users named "tester-jim" and
+"tester-sue" and assigns passwords:
+::
+
+   inherit extrausers
+   EXTRA_USERS_PARAMS = "\
+       useradd -P tester01 tester-jim; \
+       useradd -P tester01 tester-sue; \
+       "
+
+Finally, here is an example that sets the root password to "1876*18":
+::
+
+   inherit extrausers
+   EXTRA_USERS_PARAMS = "\
+       usermod -P 1876*18 root; \
+       "
+
+.. _ref-classes-fontcache:
+
+``fontcache.bbclass``
+=====================
+
+The ``fontcache`` class generates the proper post-install and
+post-remove (postinst and postrm) scriptlets for font packages. These
+scriptlets call ``fc-cache`` (part of ``Fontconfig``) to add the fonts
+to the font information cache. Since the cache files are
+architecture-specific, ``fc-cache`` runs using QEMU if the postinst
+scriptlets need to be run on the build host during image creation.
+
+If the fonts being installed are in packages other than the main
+package, set :term:`FONT_PACKAGES` to specify the
+packages containing the fonts.
+
+.. _ref-classes-fs-uuid:
+
+``fs-uuid.bbclass``
+===================
+
+The ``fs-uuid`` class extracts the UUID from
+``${``\ :term:`ROOTFS`\ ``}``, which must have been built
+by the time that this function gets called. The ``fs-uuid`` class only
+works on ``ext`` file systems and depends on ``tune2fs``.
+
+.. _ref-classes-gconf:
+
+``gconf.bbclass``
+=================
+
+The ``gconf`` class provides common functionality for recipes that need
+to install GConf schemas. The schemas will be put into a separate
+package (``${``\ :term:`PN`\ ``}-gconf``) that is created
+automatically when this class is inherited. This package uses the
+appropriate post-install and post-remove (postinst/postrm) scriptlets to
+register and unregister the schemas in the target image.
+
+.. _ref-classes-gettext:
+
+``gettext.bbclass``
+===================
+
+The ``gettext`` class provides support for building software that uses
+the GNU ``gettext`` internationalization and localization system.
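+As a minimal sketch (the recipe context here is hypothetical), enabling the
+class is usually just a matter of the standard ``inherit`` line, which also
+pulls in the appropriate ``gettext`` build-time dependencies:
+::
+
+   inherit gettext
+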
All +recipes building software that use ``gettext`` should inherit this +class. + +.. _ref-classes-gnomebase: + +``gnomebase.bbclass`` +===================== + +The ``gnomebase`` class is the base class for recipes that build +software from the GNOME stack. This class sets +:term:`SRC_URI` to download the source from the GNOME +mirrors as well as extending :term:`FILES` with the typical +GNOME installation paths. + +.. _ref-classes-gobject-introspection: + +``gobject-introspection.bbclass`` +================================= + +Provides support for recipes building software that supports GObject +introspection. This functionality is only enabled if the +"gobject-introspection-data" feature is in +:term:`DISTRO_FEATURES` as well as +"qemu-usermode" being in +:term:`MACHINE_FEATURES`. + +.. note:: + + This functionality is backfilled by default and, if not applicable, + should be disabled through + DISTRO_FEATURES_BACKFILL_CONSIDERED + or + MACHINE_FEATURES_BACKFILL_CONSIDERED + , respectively. + +.. _ref-classes-grub-efi: + +``grub-efi.bbclass`` +==================== + +The ``grub-efi`` class provides ``grub-efi``-specific functions for +building bootable images. + +This class supports several variables: + +- :term:`INITRD`: Indicates list of filesystem images to + concatenate and use as an initial RAM disk (initrd) (optional). + +- :term:`ROOTFS`: Indicates a filesystem image to include + as the root filesystem (optional). + +- :term:`GRUB_GFXSERIAL`: Set this to "1" to have + graphics and serial in the boot menu. + +- :term:`LABELS`: A list of targets for the automatic + configuration. + +- :term:`APPEND`: An override list of append strings for + each ``LABEL``. + +- :term:`GRUB_OPTS`: Additional options to add to the + configuration (optional). Options are delimited using semi-colon + characters (``;``). + +- :term:`GRUB_TIMEOUT`: Timeout before executing + the default ``LABEL`` (optional). + +.. _ref-classes-gsettings: + +``gsettings.bbclass`` +===================== + +The ``gsettings`` class provides common functionality for recipes that +need to install GSettings (glib) schemas. The schemas are assumed to be +part of the main package. Appropriate post-install and post-remove +(postinst/postrm) scriptlets are added to register and unregister the +schemas in the target image. + +.. _ref-classes-gtk-doc: + +``gtk-doc.bbclass`` +=================== + +The ``gtk-doc`` class is a helper class to pull in the appropriate +``gtk-doc`` dependencies and disable ``gtk-doc``. + +.. _ref-classes-gtk-icon-cache: + +``gtk-icon-cache.bbclass`` +========================== + +The ``gtk-icon-cache`` class generates the proper post-install and +post-remove (postinst/postrm) scriptlets for packages that use GTK+ and +install icons. These scriptlets call ``gtk-update-icon-cache`` to add +the fonts to GTK+'s icon cache. Since the cache files are +architecture-specific, ``gtk-update-icon-cache`` is run using QEMU if +the postinst scriptlets need to be run on the build host during image +creation. + +.. _ref-classes-gtk-immodules-cache: + +``gtk-immodules-cache.bbclass`` +=============================== + +The ``gtk-immodules-cache`` class generates the proper post-install and +post-remove (postinst/postrm) scriptlets for packages that install GTK+ +input method modules for virtual keyboards. These scriptlets call +``gtk-update-icon-cache`` to add the input method modules to the cache. 
+Since the cache files are architecture-specific, ``gtk-update-icon-cache``
+is run using QEMU if the postinst scriptlets need to be run on the build
+host during image creation.
+
+If the input method modules being installed are in packages other than
+the main package, set
+:term:`GTKIMMODULES_PACKAGES` to specify
+the packages containing the modules.
+
+.. _ref-classes-gzipnative:
+
+``gzipnative.bbclass``
+======================
+
+The ``gzipnative`` class enables the use of different native versions of
+``gzip`` and ``pigz`` rather than the versions of these tools from the
+build host.
+
+.. _ref-classes-icecc:
+
+``icecc.bbclass``
+=================
+
+The ``icecc`` class supports
+`Icecream `__, which facilitates
+taking compile jobs and distributing them among remote machines.
+
+The class stages directories with symlinks from ``gcc`` and ``g++`` to
+``icecc``, for both native and cross compilers. Depending on each
+configure or compile, the OpenEmbedded build system adds the directories
+at the head of the ``PATH`` list and then sets the ``ICECC_CXX`` and
+``ICECC_CC`` variables, which are the paths to the ``g++`` and ``gcc``
+compilers, respectively.
+
+For the cross compiler, the class creates a ``tar.gz`` file that
+contains the Yocto Project toolchain and sets ``ICECC_VERSION``, which
+is the version of the cross-compiler used in the cross-development
+toolchain, accordingly.
+
+The class handles all three different compile stages (i.e. native,
+cross-kernel and target) and creates the necessary environment
+``tar.gz`` file to be used by the remote machines. The class also
+supports SDK generation.
+
+If :term:`ICECC_PATH` is not set in your
+``local.conf`` file, then the class tries to locate the ``icecc`` binary
+using ``which``. If :term:`ICECC_ENV_EXEC` is set
+in your ``local.conf`` file, the variable should point to the
+``icecc-create-env`` script provided by the user. If you do not point to
+a user-provided script, the build system uses the default script
+provided by the recipe ``icecc-create-env-native.bb``.
+
+.. note::
+
+   This script is a modified version and not the one that comes with
+   ``icecc``.
+
+If you do not want the Icecream distributed compile support to apply to
+specific recipes or classes, you can effectively "blacklist" them by
+listing the recipes and classes using the
+:term:`ICECC_USER_PACKAGE_BL` and
+:term:`ICECC_USER_CLASS_BL` variables,
+respectively, in your ``local.conf`` file. Doing so causes the
+OpenEmbedded build system to handle these compilations locally.
+
+Additionally, you can list recipes using the
+:term:`ICECC_USER_PACKAGE_WL` variable in
+your ``local.conf`` file to force ``icecc`` to be enabled for recipes
+using an empty :term:`PARALLEL_MAKE` variable.
+
+Inheriting the ``icecc`` class changes all sstate signatures.
+Consequently, if a development team has a dedicated build system that
+populates :term:`SSTATE_MIRRORS` and they want to
+reuse sstate from ``SSTATE_MIRRORS``, then all developers and the build
+system need to either inherit the ``icecc`` class or nobody should.
+
+At the distribution level, you can inherit the ``icecc`` class to be
+sure that all builders start with the same sstate signatures. After
+inheriting the class, you can then disable the feature by setting the
+:term:`ICECC_DISABLED` variable to "1" as follows:
+::
+
+   INHERIT_DISTRO_append = " icecc"
+   ICECC_DISABLED ??= "1"
+
+This practice
+makes sure everyone is using the same signatures but also requires
+individuals that do want to use Icecream to enable the feature
+individually as follows in your ``local.conf`` file:
+::
+
+   ICECC_DISABLED = ""
+
+.. _ref-classes-image:
+
+``image.bbclass``
+=================
+
+The ``image`` class helps support creating images in different formats.
+First, the root filesystem is created from packages using one of the
+``rootfs*.bbclass`` files (depending on the package format used) and
+then one or more image files are created.
+
+- The ``IMAGE_FSTYPES`` variable controls the types of images to
+  generate.
+
+- The ``IMAGE_INSTALL`` variable controls the list of packages to
+  install into the image.
+
+For information on customizing images, see the
+":ref:`usingpoky-extend-customimage`" section
+in the Yocto Project Development Tasks Manual. For information on how
+images are created, see the
+":ref:`images-dev-environment`" section in the
+Yocto Project Overview and Concepts Manual.
+
+.. _ref-classes-image-buildinfo:
+
+``image-buildinfo.bbclass``
+===========================
+
+The ``image-buildinfo`` class writes information to the target
+filesystem on ``/etc/build``.
+
+.. _ref-classes-image_types:
+
+``image_types.bbclass``
+=======================
+
+The ``image_types`` class defines all of the standard image output types
+that you can enable through the
+:term:`IMAGE_FSTYPES` variable. You can use this
+class as a reference on how to add support for custom image output
+types.
+
+By default, the :ref:`image <ref-classes-image>` class automatically
+enables the ``image_types`` class. The ``image`` class uses the
+``IMGCLASSES`` variable as follows:
+::
+
+   IMGCLASSES = "rootfs_${IMAGE_PKGTYPE} image_types ${IMAGE_CLASSES}"
+   IMGCLASSES += "${@['populate_sdk_base', 'populate_sdk_ext']['linux' in d.getVar("SDK_OS")]}"
+   IMGCLASSES += "${@bb.utils.contains_any('IMAGE_FSTYPES', 'live iso hddimg', 'image-live', '', d)}"
+   IMGCLASSES += "${@bb.utils.contains('IMAGE_FSTYPES', 'container', 'image-container', '', d)}"
+   IMGCLASSES += "image_types_wic"
+   IMGCLASSES += "rootfs-postcommands"
+   IMGCLASSES += "image-postinst-intercepts"
+   inherit ${IMGCLASSES}
+
+The ``image_types`` class also handles conversion and compression of images.
+
+.. note::
+
+   To build a VMware VMDK image, you need to add "wic.vmdk" to
+   ``IMAGE_FSTYPES``. This would also be similar for Virtual Box Virtual
+   Disk Image ("vdi") and QEMU Copy On Write Version 2 ("qcow2") images.
+
+.. _ref-classes-image-live:
+
+``image-live.bbclass``
+======================
+
+This class controls building "live" (i.e. HDDIMG and ISO) images. Live
+images contain syslinux for legacy booting, as well as the bootloader
+specified by :term:`EFI_PROVIDER` if
+:term:`MACHINE_FEATURES` contains "efi".
+
+Normally, you do not use this class directly. Instead, you add "live" to
+:term:`IMAGE_FSTYPES`.
+
+.. _ref-classes-image-mklibs:
+
+``image-mklibs.bbclass``
+========================
+
+The ``image-mklibs`` class enables the use of the ``mklibs`` utility
+during the :ref:`ref-tasks-rootfs` task, which optimizes
+the size of libraries contained in the image.
+
+By default, the class is enabled in the ``local.conf.template`` using
+the :term:`USER_CLASSES` variable as follows:
+::
+
+   USER_CLASSES ?= "buildstats image-mklibs image-prelink"
+
+.. 
_ref-classes-image-prelink: + +``image-prelink.bbclass`` +========================= + +The ``image-prelink`` class enables the use of the ``prelink`` utility +during the :ref:`ref-tasks-rootfs` task, which optimizes +the dynamic linking of shared libraries to reduce executable startup +time. + +By default, the class is enabled in the ``local.conf.template`` using +the :term:`USER_CLASSES` variable as follows: +:: + + USER_CLASSES ?= "buildstats image-mklibs image-prelink" + +.. _ref-classes-insane: + +``insane.bbclass`` +================== + +The ``insane`` class adds a step to the package generation process so +that output quality assurance checks are generated by the OpenEmbedded +build system. A range of checks are performed that check the build's +output for common problems that show up during runtime. Distribution +policy usually dictates whether to include this class. + +You can configure the sanity checks so that specific test failures +either raise a warning or an error message. Typically, failures for new +tests generate a warning. Subsequent failures for the same test would +then generate an error message once the metadata is in a known and good +condition. See the "`QA Error and Warning Messages <#ref-qa-checks>`__" +Chapter for a list of all the warning and error messages you might +encounter using a default configuration. + +Use the :term:`WARN_QA` and +:term:`ERROR_QA` variables to control the behavior of +these checks at the global level (i.e. in your custom distro +configuration). However, to skip one or more checks in recipes, you +should use :term:`INSANE_SKIP`. For example, to skip +the check for symbolic link ``.so`` files in the main package of a +recipe, add the following to the recipe. You need to realize that the +package name override, in this example ``${PN}``, must be used: +:: + + INSANE_SKIP_${PN} += "dev-so" + +Please keep in mind that the QA checks +exist in order to detect real or potential problems in the packaged +output. So exercise caution when disabling these checks. + +The following list shows the tests you can list with the ``WARN_QA`` and +``ERROR_QA`` variables: + +- ``already-stripped:`` Checks that produced binaries have not + already been stripped prior to the build system extracting debug + symbols. It is common for upstream software projects to default to + stripping debug symbols for output binaries. In order for debugging + to work on the target using ``-dbg`` packages, this stripping must be + disabled. + +- ``arch:`` Checks the Executable and Linkable Format (ELF) type, bit + size, and endianness of any binaries to ensure they match the target + architecture. This test fails if any binaries do not match the type + since there would be an incompatibility. The test could indicate that + the wrong compiler or compiler options have been used. Sometimes + software, like bootloaders, might need to bypass this check. + +- ``buildpaths:`` Checks for paths to locations on the build host + inside the output files. Currently, this test triggers too many false + positives and thus is not normally enabled. + +- ``build-deps:`` Determines if a build-time dependency that is + specified through :term:`DEPENDS`, explicit + :term:`RDEPENDS`, or task-level dependencies exists + to match any runtime dependency. This determination is particularly + useful to discover where runtime dependencies are detected and added + during packaging. 
If no explicit dependency has been specified within + the metadata, at the packaging stage it is too late to ensure that + the dependency is built, and thus you can end up with an error when + the package is installed into the image during the + :ref:`ref-tasks-rootfs` task because the auto-detected + dependency was not satisfied. An example of this would be where the + :ref:`update-rc.d ` class automatically + adds a dependency on the ``initscripts-functions`` package to + packages that install an initscript that refers to + ``/etc/init.d/functions``. The recipe should really have an explicit + ``RDEPENDS`` for the package in question on ``initscripts-functions`` + so that the OpenEmbedded build system is able to ensure that the + ``initscripts`` recipe is actually built and thus the + ``initscripts-functions`` package is made available. + +- ``compile-host-path:`` Checks the + :ref:`ref-tasks-compile` log for indications that + paths to locations on the build host were used. Using such paths + might result in host contamination of the build output. + +- ``debug-deps:`` Checks that all packages except ``-dbg`` packages + do not depend on ``-dbg`` packages, which would cause a packaging + bug. + +- ``debug-files:`` Checks for ``.debug`` directories in anything but + the ``-dbg`` package. The debug files should all be in the ``-dbg`` + package. Thus, anything packaged elsewhere is incorrect packaging. + +- ``dep-cmp:`` Checks for invalid version comparison statements in + runtime dependency relationships between packages (i.e. in + :term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, + :term:`RPROVIDES`, + :term:`RREPLACES`, and + :term:`RCONFLICTS` variable values). Any invalid + comparisons might trigger failures or undesirable behavior when + passed to the package manager. + +- ``desktop:`` Runs the ``desktop-file-validate`` program against any + ``.desktop`` files to validate their contents against the + specification for ``.desktop`` files. + +- ``dev-deps:`` Checks that all packages except ``-dev`` or + ``-staticdev`` packages do not depend on ``-dev`` packages, which + would be a packaging bug. + +- ``dev-so:`` Checks that the ``.so`` symbolic links are in the + ``-dev`` package and not in any of the other packages. In general, + these symlinks are only useful for development purposes. Thus, the + ``-dev`` package is the correct location for them. Some very rare + cases do exist for dynamically loaded modules where these symlinks + are needed instead in the main package. + +- ``file-rdeps:`` Checks that file-level dependencies identified by + the OpenEmbedded build system at packaging time are satisfied. For + example, a shell script might start with the line ``#!/bin/bash``. + This line would translate to a file dependency on ``/bin/bash``. Of + the three package managers that the OpenEmbedded build system + supports, only RPM directly handles file-level dependencies, + resolving them automatically to packages providing the files. + However, the lack of that functionality in the other two package + managers does not mean the dependencies do not still need resolving. + This QA check attempts to ensure that explicitly declared + :term:`RDEPENDS` exist to handle any file-level + dependency detected in packaged files. + +- ``files-invalid:`` Checks for :term:`FILES` variable + values that contain "//", which is invalid. 
+ +- ``host-user-contaminated:`` Checks that no package produced by the + recipe contains any files outside of ``/home`` with a user or group + ID that matches the user running BitBake. A match usually indicates + that the files are being installed with an incorrect UID/GID, since + target IDs are independent from host IDs. For additional information, + see the section describing the + :ref:`ref-tasks-install` task. + +- ``incompatible-license:`` Report when packages are excluded from + being created due to being marked with a license that is in + :term:`INCOMPATIBLE_LICENSE`. + +- ``install-host-path:`` Checks the + :ref:`ref-tasks-install` log for indications that + paths to locations on the build host were used. Using such paths + might result in host contamination of the build output. + +- ``installed-vs-shipped:`` Reports when files have been installed + within ``do_install`` but have not been included in any package by + way of the :term:`FILES` variable. Files that do not + appear in any package cannot be present in an image later on in the + build process. Ideally, all installed files should be packaged or not + installed at all. These files can be deleted at the end of + ``do_install`` if the files are not needed in any package. + +- ``invalid-chars:`` Checks that the recipe metadata variables + :term:`DESCRIPTION`, + :term:`SUMMARY`, :term:`LICENSE`, and + :term:`SECTION` do not contain non-UTF-8 characters. + Some package managers do not support such characters. + +- ``invalid-packageconfig:`` Checks that no undefined features are + being added to :term:`PACKAGECONFIG`. For + example, any name "foo" for which the following form does not exist: + :: + + PACKAGECONFIG[foo] = "..." + +- ``la:`` Checks ``.la`` files for any ``TMPDIR`` paths. Any ``.la`` + file containing these paths is incorrect since ``libtool`` adds the + correct sysroot prefix when using the files automatically itself. + +- ``ldflags:`` Ensures that the binaries were linked with the + :term:`LDFLAGS` options provided by the build system. + If this test fails, check that the ``LDFLAGS`` variable is being + passed to the linker command. + +- ``libdir:`` Checks for libraries being installed into incorrect + (possibly hardcoded) installation paths. For example, this test will + catch recipes that install ``/lib/bar.so`` when ``${base_libdir}`` is + "lib32". Another example is when recipes install + ``/usr/lib64/foo.so`` when ``${libdir}`` is "/usr/lib". + +- ``libexec:`` Checks if a package contains files in + ``/usr/libexec``. This check is not performed if the ``libexecdir`` + variable has been set explicitly to ``/usr/libexec``. + +- ``packages-list:`` Checks for the same package being listed + multiple times through the :term:`PACKAGES` variable + value. Installing the package in this manner can cause errors during + packaging. + +- ``perm-config:`` Reports lines in ``fs-perms.txt`` that have an + invalid format. + +- ``perm-line:`` Reports lines in ``fs-perms.txt`` that have an + invalid format. + +- ``perm-link:`` Reports lines in ``fs-perms.txt`` that specify + 'link' where the specified target already exists. + +- ``perms:`` Currently, this check is unused but reserved. + +- ``pkgconfig:`` Checks ``.pc`` files for any + :term:`TMPDIR`/:term:`WORKDIR` paths. + Any ``.pc`` file containing these paths is incorrect since + ``pkg-config`` itself adds the correct sysroot prefix when the files + are accessed. 
+ +- ``pkgname:`` Checks that all packages in + :term:`PACKAGES` have names that do not contain + invalid characters (i.e. characters other than 0-9, a-z, ., +, and + -). + +- ``pkgv-undefined:`` Checks to see if the ``PKGV`` variable is + undefined during :ref:`ref-tasks-package`. + +- ``pkgvarcheck:`` Checks through the variables + :term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, + :term:`RCONFLICTS`, + :term:`RPROVIDES`, + :term:`RREPLACES`, :term:`FILES`, + :term:`ALLOW_EMPTY`, ``pkg_preinst``, + ``pkg_postinst``, ``pkg_prerm`` and ``pkg_postrm``, and reports if + there are variable sets that are not package-specific. Using these + variables without a package suffix is bad practice, and might + unnecessarily complicate dependencies of other packages within the + same recipe or have other unintended consequences. + +- ``pn-overrides:`` Checks that a recipe does not have a name + (:term:`PN`) value that appears in + :term:`OVERRIDES`. If a recipe is named such that + its ``PN`` value matches something already in ``OVERRIDES`` (e.g. + ``PN`` happens to be the same as :term:`MACHINE` or + :term:`DISTRO`), it can have unexpected consequences. + For example, assignments such as ``FILES_${PN} = "xyz"`` effectively + turn into ``FILES = "xyz"``. + +- ``rpaths:`` Checks for rpaths in the binaries that contain build + system paths such as ``TMPDIR``. If this test fails, bad ``-rpath`` + options are being passed to the linker commands and your binaries + have potential security issues. + +- ``split-strip:`` Reports that splitting or stripping debug symbols + from binaries has failed. + +- ``staticdev:`` Checks for static library files (``*.a``) in + non-``staticdev`` packages. + +- ``symlink-to-sysroot:`` Checks for symlinks in packages that point + into :term:`TMPDIR` on the host. Such symlinks will + work on the host, but are clearly invalid when running on the target. + +- ``textrel:`` Checks for ELF binaries that contain relocations in + their ``.text`` sections, which can result in a performance impact at + runtime. See the explanation for the + ```ELF binary`` <#qa-issue-textrel>`__ message for more information + regarding runtime performance issues. + +- ``unlisted-pkg-lics:`` Checks that all declared licenses applying + for a package are also declared on the recipe level (i.e. any license + in ``LICENSE_*`` should appear in :term:`LICENSE`). + +- ``useless-rpaths:`` Checks for dynamic library load paths (rpaths) + in the binaries that by default on a standard system are searched by + the linker (e.g. ``/lib`` and ``/usr/lib``). While these paths will + not cause any breakage, they do waste space and are unnecessary. + +- ``var-undefined:`` Reports when variables fundamental to packaging + (i.e. :term:`WORKDIR`, + :term:`DEPLOY_DIR`, :term:`D`, + :term:`PN`, and :term:`PKGD`) are undefined + during :ref:`ref-tasks-package`. + +- ``version-going-backwards:`` If Build History is enabled, reports + when a package being written out has a lower version than the + previously written package under the same name. If you are placing + output packages into a feed and upgrading packages on a target system + using that feed, the version of a package going backwards can result + in the target system not correctly upgrading to the "new" version of + the package. + + .. note:: + + If you are not using runtime package management on your target + system, then you do not need to worry about this situation. 
+
+-  ``xorg-driver-abi:`` Checks that all packages containing Xorg
+   drivers have ABI dependencies. The ``xserver-xorg`` recipe provides
+   driver ABI names. All drivers should depend on the ABI versions that
+   they have been built against. Driver recipes that include
+   ``xorg-driver-input.inc`` or ``xorg-driver-video.inc`` will
+   automatically get these versions. Consequently, you should only need
+   to explicitly add dependencies to binary driver recipes.
+
+.. _ref-classes-insserv:
+
+``insserv.bbclass``
+===================
+
+The ``insserv`` class uses the ``insserv`` utility to update the order
+of symbolic links in ``/etc/rc?.d/`` within an image based on
+dependencies specified by LSB headers in the ``init.d`` scripts
+themselves.
+
+.. _ref-classes-kernel:
+
+``kernel.bbclass``
+==================
+
+The ``kernel`` class handles building Linux kernels. The class contains
+code to build all kernel trees. All needed headers are staged into the
+``STAGING_KERNEL_DIR`` directory to allow out-of-tree module builds
+using the :ref:`module <ref-classes-module>` class.
+
+This means that each built kernel module is packaged separately and
+inter-module dependencies are created by parsing the ``modinfo`` output.
+If all modules are required, then installing the ``kernel-modules``
+package installs all packages with modules and various other kernel
+packages such as ``kernel-vmlinux``.
+
+The ``kernel`` class contains logic that allows you to embed an initial
+RAM filesystem (initramfs) image when you build the kernel image. For
+information on how to build an initramfs, see the
+":ref:`building-an-initramfs-image`" section in
+the Yocto Project Development Tasks Manual.
+
+Various other classes are used by the ``kernel`` and ``module`` classes
+internally including the :ref:`kernel-arch <ref-classes-kernel-arch>`,
+:ref:`module-base <ref-classes-module-base>`, and
+:ref:`linux-kernel-base <ref-classes-linux-kernel-base>` classes.
+
+.. _ref-classes-kernel-arch:
+
+``kernel-arch.bbclass``
+=======================
+
+The ``kernel-arch`` class sets the ``ARCH`` environment variable for
+Linux kernel compilation (including modules).
+
+.. _ref-classes-kernel-devicetree:
+
+``kernel-devicetree.bbclass``
+=============================
+
+The ``kernel-devicetree`` class, which is inherited by the
+:ref:`kernel <ref-classes-kernel>` class, supports device tree
+generation.
+
+.. _ref-classes-kernel-fitimage:
+
+``kernel-fitimage.bbclass``
+===========================
+
+The ``kernel-fitimage`` class provides support to pack a kernel Image,
+device trees and a RAM disk into a single FIT image. In theory, a FIT
+image can support any number of kernels, RAM disks and device trees.
+However, ``kernel-fitimage`` currently only supports limited use
+cases: just one kernel image, an optional RAM disk, and any number of
+device trees.
+
+To create a FIT image, it is required that :term:`KERNEL_CLASSES`
+is set to "kernel-fitimage" and :term:`KERNEL_IMAGETYPE`
+is set to "fitImage".
+
+The options for the device tree compiler passed to ``mkimage -D``
+when creating the FIT image are specified using the
+:term:`UBOOT_MKIMAGE_DTCOPTS` variable.
+
+Only a single kernel can be added to the FIT image created by
+``kernel-fitimage`` and the kernel image in FIT is mandatory. The
+address where the kernel image is to be loaded by U-Boot is
+specified by :term:`UBOOT_LOADADDRESS` and the entry point by
+:term:`UBOOT_ENTRYPOINT`.
+
+Multiple device trees can be added to the FIT image created by
+``kernel-fitimage`` and the device tree is optional.
+The address where the device tree is to be loaded by U-Boot is
+specified by :term:`UBOOT_DTBO_LOADADDRESS` for device tree overlays
+and by :term:`UBOOT_DTB_LOADADDRESS` for device tree binaries.
+
+Only a single RAM disk can be added to the FIT image created by
+``kernel-fitimage`` and the RAM disk in FIT is optional.
+The address where the RAM disk image is to be loaded by U-Boot
+is specified by :term:`UBOOT_RD_LOADADDRESS` and the entry point by
+:term:`UBOOT_RD_ENTRYPOINT`. The RAM disk is added to the FIT image
+when :term:`INITRAMFS_IMAGE` is specified.
+
+The FIT image generated by the ``kernel-fitimage`` class is signed when
+the variables :term:`UBOOT_SIGN_ENABLE`, :term:`UBOOT_MKIMAGE_DTCOPTS`,
+:term:`UBOOT_SIGN_KEYDIR` and :term:`UBOOT_SIGN_KEYNAME` are set
+appropriately. The default values used for :term:`FIT_HASH_ALG` and
+:term:`FIT_SIGN_ALG` in ``kernel-fitimage`` are "sha256" and
+"rsa2048" respectively.
+
+
+.. _ref-classes-kernel-grub:
+
+``kernel-grub.bbclass``
+=======================
+
+The ``kernel-grub`` class updates the boot area and the boot menu with
+the kernel as the priority boot mechanism while installing an RPM to
+update the kernel on a deployed target.
+
+.. _ref-classes-kernel-module-split:
+
+``kernel-module-split.bbclass``
+===============================
+
+The ``kernel-module-split`` class provides common functionality for
+splitting Linux kernel modules into separate packages.
+
+.. _ref-classes-kernel-uboot:
+
+``kernel-uboot.bbclass``
+========================
+
+The ``kernel-uboot`` class provides support for building from
+vmlinux-style kernel sources.
+
+.. _ref-classes-kernel-uimage:
+
+``kernel-uimage.bbclass``
+=========================
+
+The ``kernel-uimage`` class provides support for packing the kernel
+image into the uImage format.
+
+.. _ref-classes-kernel-yocto:
+
+``kernel-yocto.bbclass``
+========================
+
+The ``kernel-yocto`` class provides common functionality for building
+from linux-yocto style kernel source repositories.
+
+.. _ref-classes-kernelsrc:
+
+``kernelsrc.bbclass``
+=====================
+
+The ``kernelsrc`` class sets the Linux kernel source and version.
+
+.. _ref-classes-lib_package:
+
+``lib_package.bbclass``
+=======================
+
+The ``lib_package`` class supports recipes that build libraries and
+produce executable binaries, where those binaries should not be
+installed by default along with the library. Instead, the binaries are
+added to a separate ``${``\ :term:`PN`\ ``}-bin`` package to
+make their installation optional.
+
+.. _ref-classes-libc*:
+
+``libc*.bbclass``
+=================
+
+The ``libc*`` classes support recipes that build packages with ``libc``:
+
+-  The ``libc-common`` class provides common support for building with
+   ``libc``.
+
+-  The ``libc-package`` class supports packaging up ``glibc`` and
+   ``eglibc``.
+
+.. _ref-classes-license:
+
+``license.bbclass``
+===================
+
+The ``license`` class provides license manifest creation and license
+exclusion. This class is enabled by default using the default value for
+the :term:`INHERIT_DISTRO` variable.
+
+.. _ref-classes-linux-kernel-base:
+
+``linux-kernel-base.bbclass``
+=============================
+
+The ``linux-kernel-base`` class provides common functionality for
+recipes that build out of the Linux kernel source tree. These builds
+go beyond the kernel itself. For example, the Perf recipe also
+inherits this class.
+
+.. 
_ref-classes-linuxloader: + +``linuxloader.bbclass`` +======================= + +Provides the function ``linuxloader()``, which gives the value of the +dynamic loader/linker provided on the platform. This value is used by a +number of other classes. + +.. _ref-classes-logging: + +``logging.bbclass`` +=================== + +The ``logging`` class provides the standard shell functions used to log +messages for various BitBake severity levels (i.e. ``bbplain``, +``bbnote``, ``bbwarn``, ``bberror``, ``bbfatal``, and ``bbdebug``). + +This class is enabled by default since it is inherited by the ``base`` +class. + +.. _ref-classes-meta: + +``meta.bbclass`` +================ + +The ``meta`` class is inherited by recipes that do not build any output +packages themselves, but act as a "meta" target for building other +recipes. + +.. _ref-classes-metadata_scm: + +``metadata_scm.bbclass`` +======================== + +The ``metadata_scm`` class provides functionality for querying the +branch and revision of a Source Code Manager (SCM) repository. + +The :ref:`base ` class uses this class to print the +revisions of each layer before starting every build. The +``metadata_scm`` class is enabled by default because it is inherited by +the ``base`` class. + +.. _ref-classes-migrate_localcount: + +``migrate_localcount.bbclass`` +============================== + +The ``migrate_localcount`` class verifies a recipe's localcount data and +increments it appropriately. + +.. _ref-classes-mime: + +``mime.bbclass`` +================ + +The ``mime`` class generates the proper post-install and post-remove +(postinst/postrm) scriptlets for packages that install MIME type files. +These scriptlets call ``update-mime-database`` to add the MIME types to +the shared database. + +.. _ref-classes-mirrors: + +``mirrors.bbclass`` +=================== + +The ``mirrors`` class sets up some standard +:term:`MIRRORS` entries for source code mirrors. These +mirrors provide a fall-back path in case the upstream source specified +in :term:`SRC_URI` within recipes is unavailable. + +This class is enabled by default since it is inherited by the +:ref:`base ` class. + +.. _ref-classes-module: + +``module.bbclass`` +================== + +The ``module`` class provides support for building out-of-tree Linux +kernel modules. The class inherits the +:ref:`module-base ` and +:ref:`kernel-module-split ` classes, +and implements the :ref:`ref-tasks-compile` and +:ref:`ref-tasks-install` tasks. The class provides +everything needed to build and package a kernel module. + +For general information on out-of-tree Linux kernel modules, see the +":ref:`kernel-dev/kernel-dev-common:incorporating out-of-tree modules`" +section in the Yocto Project Linux Kernel Development Manual. + +.. _ref-classes-module-base: + +``module-base.bbclass`` +======================= + +The ``module-base`` class provides the base functionality for building +Linux kernel modules. Typically, a recipe that builds software that +includes one or more kernel modules and has its own means of building +the module inherits this class as opposed to inheriting the +:ref:`module ` class. + +.. _ref-classes-multilib*: + +``multilib*.bbclass`` +===================== + +The ``multilib*`` classes provide support for building libraries with +different target optimizations or target architectures and installing +them side-by-side in the same image. 
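+
+As a quick illustration, enabling an additional 32-bit ("lib32")
+multilib on a 64-bit x86 machine typically amounts to a ``local.conf``
+configuration along the lines of the following sketch (the "lib32"
+identifier and the "x86" tune value are examples only and depend on
+your machine):
+::
+
+   # "lib32" and the "x86" tune are illustrative; adjust for your machine.
+   require conf/multilib.conf
+   MULTILIBS = "multilib:lib32"
+   DEFAULTTUNE_virtclass-multilib-lib32 = "x86"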
+ +For more information on using the Multilib feature, see the +":ref:`combining-multiple-versions-library-files-into-one-image`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-classes-native: + +``native.bbclass`` +================== + +The ``native`` class provides common functionality for recipes that +build tools to run on the `build host <#hardware-build-system-term>`__ +(i.e. tools that use the compiler or other tools from the build host). + +You can create a recipe that builds tools that run natively on the host +a couple different ways: + +- Create a myrecipe\ ``-native.bb`` recipe that inherits the ``native`` + class. If you use this method, you must order the inherit statement + in the recipe after all other inherit statements so that the + ``native`` class is inherited last. + + .. note:: + + When creating a recipe this way, the recipe name must follow this + naming convention: + :: + + myrecipe-native.bb + + + Not using this naming convention can lead to subtle problems + caused by existing code that depends on that naming convention. + +- Create or modify a target recipe that contains the following: + :: + + BBCLASSEXTEND = "native" + + Inside the + recipe, use ``_class-native`` and ``_class-target`` overrides to + specify any functionality specific to the respective native or target + case. + +Although applied differently, the ``native`` class is used with both +methods. The advantage of the second method is that you do not need to +have two separate recipes (assuming you need both) for native and +target. All common parts of the recipe are automatically shared. + +.. _ref-classes-nativesdk: + +``nativesdk.bbclass`` +===================== + +The ``nativesdk`` class provides common functionality for recipes that +wish to build tools to run as part of an SDK (i.e. tools that run on +:term:`SDKMACHINE`). + +You can create a recipe that builds tools that run on the SDK machine a +couple different ways: + +- Create a ``nativesdk-``\ myrecipe\ ``.bb`` recipe that inherits the + ``nativesdk`` class. If you use this method, you must order the + inherit statement in the recipe after all other inherit statements so + that the ``nativesdk`` class is inherited last. + +- Create a ``nativesdk`` variant of any recipe by adding the following: + :: + + BBCLASSEXTEND = "nativesdk" + + Inside the + recipe, use ``_class-nativesdk`` and ``_class-target`` overrides to + specify any functionality specific to the respective SDK machine or + target case. + +.. note:: + + When creating a recipe, you must follow this naming convention: + :: + + nativesdk-myrecipe.bb + + + Not doing so can lead to subtle problems because code exists that + depends on the naming convention. + +Although applied differently, the ``nativesdk`` class is used with both +methods. The advantage of the second method is that you do not need to +have two separate recipes (assuming you need both) for the SDK machine +and the target. All common parts of the recipe are automatically shared. + +.. _ref-classes-nopackages: + +``nopackages.bbclass`` +====================== + +Disables packaging tasks for those recipes and classes where packaging +is not needed. + +.. _ref-classes-npm: + +``npm.bbclass`` +=============== + +Provides support for building Node.js software fetched using the `node +package manager (NPM) `__. + +.. note:: + + Currently, recipes inheriting this class must use the + npm:// + fetcher to have dependencies fetched and packaged automatically. 
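+
+As a rough sketch of what this looks like in a recipe, the ``npm://``
+URL names the registry and the fetcher parameters select the package
+(the package name below is purely illustrative):
+::
+
+   # "myapp" is a placeholder package name.
+   SRC_URI = "npm://registry.npmjs.org;package=myapp;version=${PV}"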
+ +For information on how to create NPM packages, see the +":ref:`dev-manual/dev-manual-common-tasks:creating node package manager (npm) packages`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-classes-oelint: + +``oelint.bbclass`` +================== + +The ``oelint`` class is an obsolete lint checking tool that exists in +``meta/classes`` in the :term:`Source Directory`. + +A number of classes exist that could be generally useful in OE-Core but +are never actually used within OE-Core itself. The ``oelint`` class is +one such example. However, being aware of this class can reduce the +proliferation of different versions of similar classes across multiple +layers. + +.. _ref-classes-own-mirrors: + +``own-mirrors.bbclass`` +======================= + +The ``own-mirrors`` class makes it easier to set up your own +:term:`PREMIRRORS` from which to first fetch source +before attempting to fetch it from the upstream specified in +:term:`SRC_URI` within each recipe. + +To use this class, inherit it globally and specify +:term:`SOURCE_MIRROR_URL`. Here is an example: +:: + + INHERIT += "own-mirrors" + SOURCE_MIRROR_URL = "http://example.com/my-source-mirror" + +You can specify only a single URL +in ``SOURCE_MIRROR_URL``. + +.. _ref-classes-package: + +``package.bbclass`` +=================== + +The ``package`` class supports generating packages from a build's +output. The core generic functionality is in ``package.bbclass``. The +code specific to particular package types resides in these +package-specific classes: +:ref:`package_deb `, +:ref:`package_rpm `, +:ref:`package_ipk `, and +:ref:`package_tar `. + +.. note:: + + The + package_tar + class is broken and not supported. It is recommended that you do not + use this class. + +You can control the list of resulting package formats by using the +``PACKAGE_CLASSES`` variable defined in your ``conf/local.conf`` +configuration file, which is located in the :term:`Build Directory`. +When defining the variable, you can +specify one or more package types. Since images are generated from +packages, a packaging class is needed to enable image generation. The +first class listed in this variable is used for image generation. + +If you take the optional step to set up a repository (package feed) on +the development host that can be used by DNF, you can install packages +from the feed while you are running the image on the target (i.e. +runtime installation of packages). For more information, see the +":ref:`dev-manual/dev-manual-common-tasks:using runtime package management`" +section in the Yocto Project Development Tasks Manual. + +The package-specific class you choose can affect build-time performance +and has space ramifications. In general, building a package with IPK +takes about thirty percent less time as compared to using RPM to build +the same or similar package. This comparison takes into account a +complete build of the package with all dependencies previously built. +The reason for this discrepancy is because the RPM package manager +creates and processes more :term:`Metadata` than the IPK package +manager. Consequently, you might consider setting ``PACKAGE_CLASSES`` to +"package_ipk" if you are building smaller systems. + +Before making your package manager decision, however, you should +consider some further things about using RPM: + +- RPM starts to provide more abilities than IPK due to the fact that it + processes more Metadata. 
For example, this information includes + individual file types, file checksum generation and evaluation on + install, sparse file support, conflict detection and resolution for + Multilib systems, ACID style upgrade, and repackaging abilities for + rollbacks. + +- For smaller systems, the extra space used for the Berkeley Database + and the amount of metadata when using RPM can affect your ability to + perform on-device upgrades. + +You can find additional information on the effects of the package class +at these two Yocto Project mailing list links: + +- https://lists.yoctoproject.org/pipermail/poky/2011-May/006362.html + +- https://lists.yoctoproject.org/pipermail/poky/2011-May/006363.html + +.. _ref-classes-package_deb: + +``package_deb.bbclass`` +======================= + +The ``package_deb`` class provides support for creating packages that +use the Debian (i.e. ``.deb``) file format. The class ensures the +packages are written out in a ``.deb`` file format to the +``${``\ :term:`DEPLOY_DIR_DEB`\ ``}`` directory. + +This class inherits the :ref:`package ` class and +is enabled through the :term:`PACKAGE_CLASSES` +variable in the ``local.conf`` file. + +.. _ref-classes-package_ipk: + +``package_ipk.bbclass`` +======================= + +The ``package_ipk`` class provides support for creating packages that +use the IPK (i.e. ``.ipk``) file format. The class ensures the packages +are written out in a ``.ipk`` file format to the +``${``\ :term:`DEPLOY_DIR_IPK`\ ``}`` directory. + +This class inherits the :ref:`package ` class and +is enabled through the :term:`PACKAGE_CLASSES` +variable in the ``local.conf`` file. + +.. _ref-classes-package_rpm: + +``package_rpm.bbclass`` +======================= + +The ``package_rpm`` class provides support for creating packages that +use the RPM (i.e. ``.rpm``) file format. The class ensures the packages +are written out in a ``.rpm`` file format to the +``${``\ :term:`DEPLOY_DIR_RPM`\ ``}`` directory. + +This class inherits the :ref:`package ` class and +is enabled through the :term:`PACKAGE_CLASSES` +variable in the ``local.conf`` file. + +.. _ref-classes-package_tar: + +``package_tar.bbclass`` +======================= + +The ``package_tar`` class provides support for creating tarballs. The +class ensures the packages are written out in a tarball format to the +``${``\ :term:`DEPLOY_DIR_TAR`\ ``}`` directory. + +This class inherits the :ref:`package ` class and +is enabled through the :term:`PACKAGE_CLASSES` +variable in the ``local.conf`` file. + +.. note:: + + You cannot specify the + package_tar + class first using the + PACKAGE_CLASSES + variable. You must use + .deb + , + .ipk + , or + .rpm + file formats for your image or SDK. + +.. _ref-classes-packagedata: + +``packagedata.bbclass`` +======================= + +The ``packagedata`` class provides common functionality for reading +``pkgdata`` files found in :term:`PKGDATA_DIR`. These +files contain information about each output package produced by the +OpenEmbedded build system. + +This class is enabled by default because it is inherited by the +:ref:`package ` class. + +.. _ref-classes-packagegroup: + +``packagegroup.bbclass`` +======================== + +The ``packagegroup`` class sets default values appropriate for package +group recipes (e.g. ``PACKAGES``, ``PACKAGE_ARCH``, ``ALLOW_EMPTY``, and +so forth). It is highly recommended that all package group recipes +inherit this class. 
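+
+For example, a minimal package group recipe might look like the
+following sketch, where the packages pulled in through ``RDEPENDS``
+are purely illustrative:
+::
+
+   SUMMARY = "Basic tools for my images"
+   # The packagegroup class provides sensible defaults for PACKAGES,
+   # PACKAGE_ARCH, ALLOW_EMPTY and so forth.
+   inherit packagegroup
+
+   # Illustrative package names; replace with the packages you need.
+   RDEPENDS_${PN} = "dropbear rsync"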
+ +For information on how to use this class, see the +":ref:`usingpoky-extend-customimage-customtasks`" +section in the Yocto Project Development Tasks Manual. + +Previously, this class was called the ``task`` class. + +.. _ref-classes-patch: + +``patch.bbclass`` +================= + +The ``patch`` class provides all functionality for applying patches +during the :ref:`ref-tasks-patch` task. + +This class is enabled by default because it is inherited by the +:ref:`base ` class. + +.. _ref-classes-perlnative: + +``perlnative.bbclass`` +====================== + +When inherited by a recipe, the ``perlnative`` class supports using the +native version of Perl built by the build system rather than using the +version provided by the build host. + +.. _ref-classes-pixbufcache: + +``pixbufcache.bbclass`` +======================= + +The ``pixbufcache`` class generates the proper post-install and +post-remove (postinst/postrm) scriptlets for packages that install +pixbuf loaders, which are used with ``gdk-pixbuf``. These scriptlets +call ``update_pixbuf_cache`` to add the pixbuf loaders to the cache. +Since the cache files are architecture-specific, ``update_pixbuf_cache`` +is run using QEMU if the postinst scriptlets need to be run on the build +host during image creation. + +If the pixbuf loaders being installed are in packages other than the +recipe's main package, set +:term:`PIXBUF_PACKAGES` to specify the packages +containing the loaders. + +.. _ref-classes-pkgconfig: + +``pkgconfig.bbclass`` +===================== + +The ``pkgconfig`` class provides a standard way to get header and +library information by using ``pkg-config``. This class aims to smooth +integration of ``pkg-config`` into libraries that use it. + +During staging, BitBake installs ``pkg-config`` data into the +``sysroots/`` directory. By making use of sysroot functionality within +``pkg-config``, the ``pkgconfig`` class no longer has to manipulate the +files. + +.. _ref-classes-populate-sdk: + +``populate_sdk.bbclass`` +======================== + +The ``populate_sdk`` class provides support for SDK-only recipes. For +information on advantages gained when building a cross-development +toolchain using the :ref:`ref-tasks-populate_sdk` +task, see the ":ref:`sdk-manual/sdk-appendix-obtain:building an sdk installer`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) manual. + +.. _ref-classes-populate-sdk-*: + +``populate_sdk_*.bbclass`` +========================== + +The ``populate_sdk_*`` classes support SDK creation and consist of the +following classes: + +- ``populate_sdk_base``: The base class supporting SDK creation under + all package managers (i.e. DEB, RPM, and opkg). + +- ``populate_sdk_deb``: Supports creation of the SDK given the Debian + package manager. + +- ``populate_sdk_rpm``: Supports creation of the SDK given the RPM + package manager. + +- ``populate_sdk_ipk``: Supports creation of the SDK given the opkg + (IPK format) package manager. + +- ``populate_sdk_ext``: Supports extensible SDK creation under all + package managers. + +The ``populate_sdk_base`` class inherits the appropriate +``populate_sdk_*`` (i.e. ``deb``, ``rpm``, and ``ipk``) based on +:term:`IMAGE_PKGTYPE`. + +The base class ensures all source and destination directories are +established and then populates the SDK. 
After populating the SDK, the +``populate_sdk_base`` class constructs two sysroots: +``${``\ :term:`SDK_ARCH`\ ``}-nativesdk``, which +contains the cross-compiler and associated tooling, and the target, +which contains a target root filesystem that is configured for the SDK +usage. These two images reside in :term:`SDK_OUTPUT`, +which consists of the following: +:: + + ${SDK_OUTPUT}/${SDK_ARCH}-nativesdk-pkgs + ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/target-pkgs + +Finally, the base populate SDK class creates the toolchain environment +setup script, the tarball of the SDK, and the installer. + +The respective ``populate_sdk_deb``, ``populate_sdk_rpm``, and +``populate_sdk_ipk`` classes each support the specific type of SDK. +These classes are inherited by and used with the ``populate_sdk_base`` +class. + +For more information on the cross-development toolchain generation, see +the ":ref:`overview-manual/overview-manual-concepts:cross-development toolchain generation`" +section in the Yocto Project Overview and Concepts Manual. For +information on advantages gained when building a cross-development +toolchain using the :ref:`ref-tasks-populate_sdk` +task, see the +":ref:`sdk-manual/sdk-appendix-obtain:building an sdk installer`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) manual. + +.. _ref-classes-prexport: + +``prexport.bbclass`` +==================== + +The ``prexport`` class provides functionality for exporting +:term:`PR` values. + +.. note:: + + This class is not intended to be used directly. Rather, it is enabled + when using " + bitbake-prserv-tool export + ". + +.. _ref-classes-primport: + +``primport.bbclass`` +==================== + +The ``primport`` class provides functionality for importing +:term:`PR` values. + +.. note:: + + This class is not intended to be used directly. Rather, it is enabled + when using " + bitbake-prserv-tool import + ". + +.. _ref-classes-prserv: + +``prserv.bbclass`` +================== + +The ``prserv`` class provides functionality for using a :ref:`PR +service ` in order to +automatically manage the incrementing of the :term:`PR` +variable for each recipe. + +This class is enabled by default because it is inherited by the +:ref:`package ` class. However, the OpenEmbedded +build system will not enable the functionality of this class unless +:term:`PRSERV_HOST` has been set. + +.. _ref-classes-ptest: + +``ptest.bbclass`` +================= + +The ``ptest`` class provides functionality for packaging and installing +runtime tests for recipes that build software that provides these tests. + +This class is intended to be inherited by individual recipes. However, +the class' functionality is largely disabled unless "ptest" appears in +:term:`DISTRO_FEATURES`. See the +":ref:`dev-manual/dev-manual-common-tasks:testing packages with ptest`" +section in the Yocto Project Development Tasks Manual for more information +on ptest. + +.. _ref-classes-ptest-gnome: + +``ptest-gnome.bbclass`` +======================= + +Enables package tests (ptests) specifically for GNOME packages, which +have tests intended to be executed with ``gnome-desktop-testing``. + +For information on setting up and running ptests, see the +":ref:`dev-manual/dev-manual-common-tasks:testing packages with ptest`" +section in the Yocto Project Development Tasks Manual. + +.. 
_ref-classes-python-dir:
+
+``python-dir.bbclass``
+======================
+
+The ``python-dir`` class provides the base version, location, and site
+package location for Python.
+
+.. _ref-classes-python3native:
+
+``python3native.bbclass``
+=========================
+
+The ``python3native`` class supports using the native version of Python
+3 built by the build system rather than the version provided by the
+build host.
+
+.. _ref-classes-pythonnative:
+
+``pythonnative.bbclass``
+========================
+
+When inherited by a recipe, the ``pythonnative`` class supports using
+the native version of Python built by the build system rather than using
+the version provided by the build host.
+
+.. _ref-classes-qemu:
+
+``qemu.bbclass``
+================
+
+The ``qemu`` class provides functionality for recipes that either need
+QEMU or test for the existence of QEMU. Typically, this class is used to
+run programs for a target system on the build host using QEMU's
+application emulation mode.
+
+.. _ref-classes-recipe_sanity:
+
+``recipe_sanity.bbclass``
+=========================
+
+The ``recipe_sanity`` class checks for the presence of any host system
+recipe prerequisites that might affect the build (e.g. variables that
+are set or software that is present).
+
+.. _ref-classes-relocatable:
+
+``relocatable.bbclass``
+=======================
+
+The ``relocatable`` class enables relocation of binaries when they are
+installed into the sysroot.
+
+This class makes use of the :ref:`chrpath <ref-classes-chrpath>` class
+and is used by both the :ref:`cross <ref-classes-cross>` and
+:ref:`native <ref-classes-native>` classes.
+
+.. _ref-classes-remove-libtool:
+
+``remove-libtool.bbclass``
+==========================
+
+The ``remove-libtool`` class adds a post function to the
+:ref:`ref-tasks-install` task to remove all ``.la`` files
+installed by ``libtool``. Removing these files results in them being
+absent from both the sysroot and target packages.
+
+If a recipe needs the ``.la`` files to be installed, then the recipe can
+override the removal by setting ``REMOVE_LIBTOOL_LA`` to "0" as follows:
+::
+
+   REMOVE_LIBTOOL_LA = "0"
+
+.. note::
+
+   The ``remove-libtool`` class is not enabled by default.
+
+.. _ref-classes-report-error:
+
+``report-error.bbclass``
+========================
+
+The ``report-error`` class supports enabling the :ref:`error reporting
+tool <dev-manual/dev-manual-common-tasks:using the error reporting tool>`,
+which allows you to submit build error information to a central database.
+
+The class collects debug information for recipe, recipe version, task,
+machine, distro, build system, target system, host distro, branch,
+commit, and log. From the information, report files using a JSON format
+are created and stored in
+``${``\ :term:`LOG_DIR`\ ``}/error-report``.
+
+.. _ref-classes-rm-work:
+
+``rm_work.bbclass``
+===================
+
+The ``rm_work`` class supports deletion of temporary workspace, which
+can ease your hard drive demands during builds.
+
+The OpenEmbedded build system can use a substantial amount of disk space
+during the build process. A portion of this space is the work files
+under the ``${TMPDIR}/work`` directory for each recipe. Once the build
+system generates the packages for a recipe, the work files for that
+recipe are no longer needed. However, by default, the build system
+preserves these files for inspection and possible debugging purposes. 
If
+you would rather have these files deleted to save disk space as the
+build progresses, you can enable ``rm_work`` by adding the following to
+your ``local.conf`` file, which is found in the :term:`Build Directory`.
+::
+
+   INHERIT += "rm_work"
+
+If you are
+modifying and building source code out of the work directory for a
+recipe, enabling ``rm_work`` will potentially result in your changes to
+the source being lost. To exclude some recipes from having their work
+directories deleted by ``rm_work``, you can add the names of the recipe
+or recipes you are working on to the ``RM_WORK_EXCLUDE`` variable, which
+can also be set in your ``local.conf`` file. Here is an example:
+::
+
+   RM_WORK_EXCLUDE += "busybox glibc"
+
+.. _ref-classes-rootfs*:
+
+``rootfs*.bbclass``
+===================
+
+The ``rootfs*`` classes support creating the root filesystem for an
+image and consist of the following classes:
+
+-  The ``rootfs-postcommands`` class, which defines filesystem
+   post-processing functions for image recipes.
+
+-  The ``rootfs_deb`` class, which supports creation of root filesystems
+   for images built using ``.deb`` packages.
+
+-  The ``rootfs_rpm`` class, which supports creation of root filesystems
+   for images built using ``.rpm`` packages.
+
+-  The ``rootfs_ipk`` class, which supports creation of root filesystems
+   for images built using ``.ipk`` packages.
+
+-  The ``rootfsdebugfiles`` class, which installs additional files found
+   on the build host directly into the root filesystem.
+
+The root filesystem is created from packages using one of the
+``rootfs*.bbclass`` files as determined by the
+:term:`PACKAGE_CLASSES` variable.
+
+For information on how root filesystem images are created, see the
+":ref:`image-generation-dev-environment`"
+section in the Yocto Project Overview and Concepts Manual.
+
+.. _ref-classes-sanity:
+
+``sanity.bbclass``
+==================
+
+The ``sanity`` class checks to see if prerequisite software is present
+on the host system so that users can be notified of potential problems
+that might affect their build. The class also performs basic user
+configuration checks from the ``local.conf`` configuration file to
+prevent common mistakes that cause build failures. Distribution policy
+usually determines whether to include this class.
+
+.. _ref-classes-scons:
+
+``scons.bbclass``
+=================
+
+The ``scons`` class supports recipes that need to build software that
+uses the SCons build system. You can use the
+:term:`EXTRA_OESCONS` variable to specify
+additional configuration options you want to pass to the SCons command
+line.
+
+.. _ref-classes-sdl:
+
+``sdl.bbclass``
+===============
+
+The ``sdl`` class supports recipes that need to build software that uses
+the Simple DirectMedia Layer (SDL) library.
+
+.. _ref-classes-setuptools:
+
+``setuptools.bbclass``
+======================
+
+The ``setuptools`` class supports Python version 2.x extensions that use
+build systems based on ``setuptools``. If your recipe uses these build
+systems, the recipe needs to inherit the ``setuptools`` class.
+
+.. _ref-classes-setuptools3:
+
+``setuptools3.bbclass``
+=======================
+
+The ``setuptools3`` class supports Python version 3.x extensions that
+use build systems based on ``setuptools3``. If your recipe uses these
+build systems, the recipe needs to inherit the ``setuptools3`` class.
+
+.. _ref-classes-sign_rpm:
+
+``sign_rpm.bbclass``
+====================
+
+The ``sign_rpm`` class supports generating signed RPM packages.
+
+.. 
_ref-classes-sip:
+
+``sip.bbclass``
+===============
+
+The ``sip`` class supports recipes that build or package SIP-based
+Python bindings.
+
+.. _ref-classes-siteconfig:
+
+``siteconfig.bbclass``
+======================
+
+The ``siteconfig`` class provides functionality for handling site
+configuration. The class is used by the
+:ref:`autotools <ref-classes-autotools>` class to accelerate the
+:ref:`ref-tasks-configure` task.
+
+.. _ref-classes-siteinfo:
+
+``siteinfo.bbclass``
+====================
+
+The ``siteinfo`` class provides information about the targets that might
+be needed by other classes or recipes.
+
+As an example, consider Autotools, which can require tests that must
+execute on the target hardware. Since this is not possible in general
+when cross compiling, site information is used to provide cached test
+results so these tests can be skipped over but still make the correct
+values available. The ``meta/site`` directory contains test results
+sorted into different categories such as architecture, endianness, and
+the ``libc`` used. Site information provides a list of files containing
+data relevant to the current build in the ``CONFIG_SITE`` variable that
+Autotools automatically picks up.
+
+The class also provides variables like ``SITEINFO_ENDIANNESS`` and
+``SITEINFO_BITS`` that can be used elsewhere in the metadata.
+
+.. _ref-classes-spdx:
+
+``spdx.bbclass``
+================
+
+The ``spdx`` class integrates real-time license scanning, generation of
+SPDX standard output, and verification of license information during the
+build.
+
+.. note::
+
+   This class is currently at the prototype stage in the 1.6 release.
+
+.. _ref-classes-sstate:
+
+``sstate.bbclass``
+==================
+
+The ``sstate`` class provides support for Shared State (sstate). By
+default, the class is enabled through the
+:term:`INHERIT_DISTRO` variable's default value.
+
+For more information on sstate, see the
+":ref:`overview-manual/overview-manual-concepts:shared state cache`"
+section in the Yocto Project Overview and Concepts Manual.
+
+.. _ref-classes-staging:
+
+``staging.bbclass``
+===================
+
+The ``staging`` class installs files into individual recipe work
+directories for sysroots. The class contains the following key tasks:
+
+-  The :ref:`ref-tasks-populate_sysroot` task,
+   which is responsible for handling the files that end up in the recipe
+   sysroots.
+
+-  The
+   :ref:`ref-tasks-prepare_recipe_sysroot`
+   task (a "partner" task to the ``populate_sysroot`` task), which
+   installs the files into the individual recipe work directories (i.e.
+   :term:`WORKDIR`).
+
+The code in the ``staging`` class is complex and basically works in two
+stages:
+
+-  *Stage One:* The first stage addresses recipes that have files they
+   want to share with other recipes that have dependencies on the
+   originating recipe. Normally these dependencies are installed through
+   the :ref:`ref-tasks-install` task into
+   ``${``\ :term:`D`\ ``}``. The ``do_populate_sysroot`` task
+   copies a subset of these files into ``${SYSROOT_DESTDIR}``. This
+   subset of files is controlled by the
+   :term:`SYSROOT_DIRS`,
+   :term:`SYSROOT_DIRS_NATIVE`, and
+   :term:`SYSROOT_DIRS_BLACKLIST`
+   variables.
+
+   .. note::
+
+      Additionally, a recipe can customize the files further by
+      declaring a processing function in the
+      ``SYSROOT_PREPROCESS_FUNCS`` variable.
+
+   A shared state (sstate) object is built from these files and the
+   files are placed into a subdirectory of
+   `tmp/sysroots-components/ <#structure-build-tmp-sysroots-components>`__. 
+ The files are scanned for hardcoded paths to the original + installation location. If the location is found in text files, the + hardcoded locations are replaced by tokens and a list of the files + needing such replacements is created. These adjustments are referred + to as "FIXMEs". The list of files that are scanned for paths is + controlled by the :term:`SSTATE_SCAN_FILES` + variable. + +- *Stage Two:* The second stage addresses recipes that want to use + something from another recipe and declare a dependency on that recipe + through the :term:`DEPENDS` variable. The recipe will + have a + :ref:`ref-tasks-prepare_recipe_sysroot` + task and when this task executes, it creates the ``recipe-sysroot`` + and ``recipe-sysroot-native`` in the recipe work directory (i.e. + :term:`WORKDIR`). The OpenEmbedded build system + creates hard links to copies of the relevant files from + ``sysroots-components`` into the recipe work directory. + + .. note:: + + If hard links are not possible, the build system uses actual + copies. + + The build system then addresses any "FIXMEs" to paths as defined from + the list created in the first stage. + + Finally, any files in ``${bindir}`` within the sysroot that have the + prefix "``postinst-``" are executed. + + .. note:: + + Although such sysroot post installation scripts are not + recommended for general use, the files do allow some issues such + as user creation and module indexes to be addressed. + + Because recipes can have other dependencies outside of ``DEPENDS`` + (e.g. ``do_unpack[depends] += "tar-native:do_populate_sysroot"``), + the sysroot creation function ``extend_recipe_sysroot`` is also added + as a pre-function for those tasks whose dependencies are not through + ``DEPENDS`` but operate similarly. + + When installing dependencies into the sysroot, the code traverses the + dependency graph and processes dependencies in exactly the same way + as the dependencies would or would not be when installed from sstate. + This processing means, for example, a native tool would have its + native dependencies added but a target library would not have its + dependencies traversed or installed. The same sstate dependency code + is used so that builds should be identical regardless of whether + sstate was used or not. For a closer look, see the + ``setscene_depvalid()`` function in the + :ref:`sstate ` class. + + The build system is careful to maintain manifests of the files it + installs so that any given dependency can be installed as needed. The + sstate hash of the installed item is also stored so that if it + changes, the build system can reinstall it. + +.. _ref-classes-syslinux: + +``syslinux.bbclass`` +==================== + +The ``syslinux`` class provides syslinux-specific functions for building +bootable images. + +The class supports the following variables: + +- :term:`INITRD`: Indicates list of filesystem images to + concatenate and use as an initial RAM disk (initrd). This variable is + optional. + +- :term:`ROOTFS`: Indicates a filesystem image to include + as the root filesystem. This variable is optional. + +- :term:`AUTO_SYSLINUXMENU`: Enables creating + an automatic menu when set to "1". + +- :term:`LABELS`: Lists targets for automatic + configuration. + +- :term:`APPEND`: Lists append string overrides for each + label. + +- :term:`SYSLINUX_OPTS`: Lists additional options + to add to the syslinux file. Semicolon characters separate multiple + options. 
+ +- :term:`SYSLINUX_SPLASH`: Lists a background + for the VGA boot menu when you are using the boot menu. + +- :term:`SYSLINUX_DEFAULT_CONSOLE`: Set + to "console=ttyX" to change kernel boot default console. + +- :term:`SYSLINUX_SERIAL`: Sets an alternate + serial port. Or, turns off serial when the variable is set with an + empty string. + +- :term:`SYSLINUX_SERIAL_TTY`: Sets an + alternate "console=tty..." kernel boot argument. + +.. _ref-classes-systemd: + +``systemd.bbclass`` +=================== + +The ``systemd`` class provides support for recipes that install systemd +unit files. + +The functionality for this class is disabled unless you have "systemd" +in :term:`DISTRO_FEATURES`. + +Under this class, the recipe or Makefile (i.e. whatever the recipe is +calling during the :ref:`ref-tasks-install` task) +installs unit files into +``${``\ :term:`D`\ ``}${systemd_unitdir}/system``. If the unit +files being installed go into packages other than the main package, you +need to set :term:`SYSTEMD_PACKAGES` in your +recipe to identify the packages in which the files will be installed. + +You should set :term:`SYSTEMD_SERVICE` to the +name of the service file. You should also use a package name override to +indicate the package to which the value applies. If the value applies to +the recipe's main package, use ``${``\ :term:`PN`\ ``}``. Here +is an example from the connman recipe: +:: + + SYSTEMD_SERVICE_${PN} = "connman.service" + +Services are set up to start on boot automatically +unless you have set +:term:`SYSTEMD_AUTO_ENABLE` to "disable". + +For more information on ``systemd``, see the +":ref:`dev-manual/dev-manual-common-tasks:selecting an initialization manager`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-classes-systemd-boot: + +``systemd-boot.bbclass`` +======================== + +The ``systemd-boot`` class provides functions specific to the +systemd-boot bootloader for building bootable images. This is an +internal class and is not intended to be used directly. + +.. note:: + + The + systemd-boot + class is a result from merging the + gummiboot + class used in previous Yocto Project releases with the + systemd + project. + +Set the :term:`EFI_PROVIDER` variable to +"systemd-boot" to use this class. Doing so creates a standalone EFI +bootloader that is not dependent on systemd. + +For information on more variables used and supported in this class, see +the :term:`SYSTEMD_BOOT_CFG`, +:term:`SYSTEMD_BOOT_ENTRIES`, and +:term:`SYSTEMD_BOOT_TIMEOUT` variables. + +You can also see the `Systemd-boot +documentation `__ +for more information. + +.. _ref-classes-terminal: + +``terminal.bbclass`` +==================== + +The ``terminal`` class provides support for starting a terminal session. +The :term:`OE_TERMINAL` variable controls which +terminal emulator is used for the session. + +Other classes use the ``terminal`` class anywhere a separate terminal +session needs to be started. For example, the +:ref:`patch ` class assuming +:term:`PATCHRESOLVE` is set to "user", the +:ref:`cml1 ` class, and the +:ref:`devshell ` class all use the ``terminal`` +class. + +.. _ref-classes-testimage*: + +``testimage*.bbclass`` +====================== + +The ``testimage*`` classes support running automated tests against +images using QEMU and on actual hardware. The classes handle loading the +tests and starting the image. To use the classes, you need to perform +steps to set up the environment. + +.. 
note:: + + Best practices include using + IMAGE_CLASSES + rather than + INHERIT + to inherit the + testimage + class for automated image testing. + +The tests are commands that run on the target system over ``ssh``. Each +test is written in Python and makes use of the ``unittest`` module. + +The ``testimage.bbclass`` runs tests on an image when called using the +following: +:: + + $ bitbake -c testimage image + +The ``testimage-auto`` class +runs tests on an image after the image is constructed (i.e. +:term:`TESTIMAGE_AUTO` must be set to "1"). + +For information on how to enable, run, and create new tests, see the +":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-classes-testsdk: + +``testsdk.bbclass`` +=================== + +This class supports running automated tests against software development +kits (SDKs). The ``testsdk`` class runs tests on an SDK when called +using the following: +:: + + $ bitbake -c testsdk image + +.. note:: + + Best practices include using + IMAGE_CLASSES + rather than + INHERIT + to inherit the + testsdk + class for automated SDK testing. + +.. _ref-classes-texinfo: + +``texinfo.bbclass`` +=================== + +This class should be inherited by recipes whose upstream packages invoke +the ``texinfo`` utilities at build-time. Native and cross recipes are +made to use the dummy scripts provided by ``texinfo-dummy-native``, for +improved performance. Target architecture recipes use the genuine +Texinfo utilities. By default, they use the Texinfo utilities on the +host system. + +.. note:: + + If you want to use the Texinfo recipe shipped with the build system, + you can remove "texinfo-native" from + ASSUME_PROVIDED + and makeinfo from + SANITY_REQUIRED_UTILITIES + . + +.. _ref-classes-tinderclient: + +``tinderclient.bbclass`` +======================== + +The ``tinderclient`` class submits build results to an external +Tinderbox instance. + +.. note:: + + This class is currently unmaintained. + +.. _ref-classes-toaster: + +``toaster.bbclass`` +=================== + +The ``toaster`` class collects information about packages and images and +sends them as events that the BitBake user interface can receive. The +class is enabled when the Toaster user interface is running. + +This class is not intended to be used directly. + +.. _ref-classes-toolchain-scripts: + +``toolchain-scripts.bbclass`` +============================= + +The ``toolchain-scripts`` class provides the scripts used for setting up +the environment for installed SDKs. + +.. _ref-classes-typecheck: + +``typecheck.bbclass`` +===================== + +The ``typecheck`` class provides support for validating the values of +variables set at the configuration level against their defined types. +The OpenEmbedded build system allows you to define the type of a +variable using the "type" varflag. Here is an example: +:: + + IMAGE_FEATURES[type] = "list" + +.. _ref-classes-uboot-config: + +``uboot-config.bbclass`` +======================== + +The ``uboot-config`` class provides support for U-Boot configuration for +a machine. Specify the machine in your recipe as follows: +:: + + UBOOT_CONFIG ??= + UBOOT_CONFIG[foo] = "config,images" + +You can also specify the machine using this method: +:: + + UBOOT_MACHINE = "config" + +See the :term:`UBOOT_CONFIG` and :term:`UBOOT_MACHINE` variables for additional +information. + +.. 
_ref-classes-uninative: + +``uninative.bbclass`` +===================== + +Attempts to isolate the build system from the host distribution's C +library in order to make re-use of native shared state artifacts across +different host distributions practical. With this class enabled, a +tarball containing a pre-built C library is downloaded at the start of +the build. In the Poky reference distribution this is enabled by default +through ``meta/conf/distro/include/yocto-uninative.inc``. Other +distributions that do not derive from poky can also +"``require conf/distro/include/yocto-uninative.inc``" to use this. +Alternatively if you prefer, you can build the uninative-tarball recipe +yourself, publish the resulting tarball (e.g. via HTTP) and set +``UNINATIVE_URL`` and ``UNINATIVE_CHECKSUM`` appropriately. For an +example, see the ``meta/conf/distro/include/yocto-uninative.inc``. + +The ``uninative`` class is also used unconditionally by the extensible +SDK. When building the extensible SDK, ``uninative-tarball`` is built +and the resulting tarball is included within the SDK. + +.. _ref-classes-update-alternatives: + +``update-alternatives.bbclass`` +=============================== + +The ``update-alternatives`` class helps the alternatives system when +multiple sources provide the same command. This situation occurs when +several programs that have the same or similar function are installed +with the same name. For example, the ``ar`` command is available from +the ``busybox``, ``binutils`` and ``elfutils`` packages. The +``update-alternatives`` class handles renaming the binaries so that +multiple packages can be installed without conflicts. The ``ar`` command +still works regardless of which packages are installed or subsequently +removed. The class renames the conflicting binary in each package and +symlinks the highest priority binary during installation or removal of +packages. + +To use this class, you need to define a number of variables: + +- :term:`ALTERNATIVE` + +- :term:`ALTERNATIVE_LINK_NAME` + +- :term:`ALTERNATIVE_TARGET` + +- :term:`ALTERNATIVE_PRIORITY` + +These variables list alternative commands needed by a package, provide +pathnames for links, default links for targets, and so forth. For +details on how to use this class, see the comments in the +:yocto_git:`update-alternatives.bbclass ` +file. + +.. note:: + + You can use the + update-alternatives + command directly in your recipes. However, this class simplifies + things in most cases. + +.. _ref-classes-update-rc.d: + +``update-rc.d.bbclass`` +======================= + +The ``update-rc.d`` class uses ``update-rc.d`` to safely install an +initialization script on behalf of the package. The OpenEmbedded build +system takes care of details such as making sure the script is stopped +before a package is removed and started when the package is installed. + +Three variables control this class: ``INITSCRIPT_PACKAGES``, +``INITSCRIPT_NAME`` and ``INITSCRIPT_PARAMS``. See the variable links +for details. + +.. _ref-classes-useradd: + +``useradd*.bbclass`` +==================== + +The ``useradd*`` classes support the addition of users or groups for +usage by the package on the target. For example, if you have packages +that contain system services that should be run under their own user or +group, you can use these classes to enable creation of the user or +group. 
The ``meta-skeleton/recipes-skeleton/useradd/useradd-example.bb`` +recipe in the :term:`Source Directory` provides a simple +example that shows how to add three users and groups to two packages. +See the ``useradd-example.bb`` recipe for more information on how to use +these classes. + +The ``useradd_base`` class provides basic functionality for user or +groups settings. + +The ``useradd*`` classes support the +:term:`USERADD_PACKAGES`, +:term:`USERADD_PARAM`, +:term:`GROUPADD_PARAM`, and +:term:`GROUPMEMS_PARAM` variables. + +The ``useradd-staticids`` class supports the addition of users or groups +that have static user identification (``uid``) and group identification +(``gid``) values. + +The default behavior of the OpenEmbedded build system for assigning +``uid`` and ``gid`` values when packages add users and groups during +package install time is to add them dynamically. This works fine for +programs that do not care what the values of the resulting users and +groups become. In these cases, the order of the installation determines +the final ``uid`` and ``gid`` values. However, if non-deterministic +``uid`` and ``gid`` values are a problem, you can override the default, +dynamic application of these values by setting static values. When you +set static values, the OpenEmbedded build system looks in +:term:`BBPATH` for ``files/passwd`` and ``files/group`` +files for the values. + +To use static ``uid`` and ``gid`` values, you need to set some +variables. See the :term:`USERADDEXTENSION`, +:term:`USERADD_UID_TABLES`, +:term:`USERADD_GID_TABLES`, and +:term:`USERADD_ERROR_DYNAMIC` variables. +You can also see the :ref:`useradd ` class for +additional information. + +.. note:: + + You do not use the + useradd-staticids + class directly. You either enable or disable the class by setting the + USERADDEXTENSION + variable. If you enable or disable the class in a configured system, + TMPDIR + might contain incorrect + uid + and + gid + values. Deleting the + TMPDIR + directory will correct this condition. + +.. _ref-classes-utility-tasks: + +``utility-tasks.bbclass`` +========================= + +The ``utility-tasks`` class provides support for various "utility" type +tasks that are applicable to all recipes, such as +:ref:`ref-tasks-clean` and +:ref:`ref-tasks-listtasks`. + +This class is enabled by default because it is inherited by the +:ref:`base ` class. + +.. _ref-classes-utils: + +``utils.bbclass`` +================= + +The ``utils`` class provides some useful Python functions that are +typically used in inline Python expressions (e.g. ``${@...}``). One +example use is for ``bb.utils.contains()``. + +This class is enabled by default because it is inherited by the +:ref:`base ` class. + +.. _ref-classes-vala: + +``vala.bbclass`` +================ + +The ``vala`` class supports recipes that need to build software written +using the Vala programming language. + +.. _ref-classes-waf: + +``waf.bbclass`` +=============== + +The ``waf`` class supports recipes that need to build software that uses +the Waf build system. You can use the +:term:`EXTRA_OECONF` or +:term:`PACKAGECONFIG_CONFARGS` variables +to specify additional configuration options to be passed on the Waf +command line. diff --git a/poky/documentation/ref-manual/ref-devtool-reference.rst b/poky/documentation/ref-manual/ref-devtool-reference.rst new file mode 100644 index 000000000..eaca45ae2 --- /dev/null +++ b/poky/documentation/ref-manual/ref-devtool-reference.rst @@ -0,0 +1,625 @@ +.. 
SPDX-License-Identifier: CC-BY-2.0-UK + +*************************** +``devtool`` Quick Reference +*************************** + +The ``devtool`` command-line tool provides a number of features that +help you build, test, and package software. This command is available +alongside the ``bitbake`` command. Additionally, the ``devtool`` command +is a key part of the extensible SDK. + +This chapter provides a Quick Reference for the ``devtool`` command. For +more information on how to apply the command when using the extensible +SDK, see the ":doc:`../sdk-manual/sdk-extensible`" chapter in the Yocto +Project Application Development and the Extensible Software Development +Kit (eSDK) manual. + +.. _devtool-getting-help: + +Getting Help +============ + +The ``devtool`` command line is organized similarly to Git in that it +has a number of sub-commands for each function. You can run +``devtool --help`` to see all the commands: +:: + + $ devtool -h + NOTE: Starting bitbake server... + usage: devtool [--basepath BASEPATH] [--bbpath BBPATH] [-d] [-q] [--color COLOR] [-h] ... + + OpenEmbedded development tool + + options: + --basepath BASEPATH Base directory of SDK / build directory + --bbpath BBPATH Explicitly specify the BBPATH, rather than getting it from the metadata + -d, --debug Enable debug output + -q, --quiet Print only errors + --color COLOR Colorize output (where COLOR is auto, always, never) + -h, --help show this help message and exit + + subcommands: + Beginning work on a recipe: + add Add a new recipe + modify Modify the source for an existing recipe + upgrade Upgrade an existing recipe + Getting information: + status Show workspace status + latest-version Report the latest version of an existing recipe + check-upgrade-status Report upgradability for multiple (or all) recipes + search Search available recipes + Working on a recipe in the workspace: + build Build a recipe + rename Rename a recipe file in the workspace + edit-recipe Edit a recipe file + find-recipe Find a recipe file + configure-help Get help on configure script options + update-recipe Apply changes from external source tree to recipe + reset Remove a recipe from your workspace + finish Finish working on a recipe in your workspace + Testing changes on target: + deploy-target Deploy recipe output files to live target machine + undeploy-target Undeploy recipe output files in live target machine + build-image Build image including workspace recipe packages + Advanced: + create-workspace Set up workspace in an alternative location + extract Extract the source for an existing recipe + sync Synchronize the source tree for an existing recipe + menuconfig Alter build-time configuration for a recipe + import Import exported tar archive into workspace + export Export workspace into a tar archive + other: + selftest-reverse Reverse value (for selftest) + pluginfile Print the filename of this plugin + bbdir Print the BBPATH directory of this plugin + count How many times have this plugin been registered. + multiloaded How many times have this plugin been initialized + Use devtool --help to get help on a specific command + +As directed in the general help output, you can +get more syntax on a specific command by providing the command name and +using "--help": +:: + + $ devtool add --help + NOTE: Starting bitbake server... 
+ usage: devtool add [-h] [--same-dir | --no-same-dir] [--fetch URI] [--npm-dev] [--version VERSION] [--no-git] [--srcrev SRCREV | --autorev] [--srcbranch SRCBRANCH] [--binary] [--also-native] [--src-subdir SUBDIR] [--mirrors] + [--provides PROVIDES] + [recipename] [srctree] [fetchuri] + + Adds a new recipe to the workspace to build a specified source tree. Can optionally fetch a remote URI and unpack it to create the source tree. + + arguments: + recipename Name for new recipe to add (just name - no version, path or extension). If not specified, will attempt to auto-detect it. + srctree Path to external source tree. If not specified, a subdirectory of /media/build1/poky/build/workspace/sources will be used. + fetchuri Fetch the specified URI and extract it to create the source tree + + options: + -h, --help show this help message and exit + --same-dir, -s Build in same directory as source + --no-same-dir Force build in a separate build directory + --fetch URI, -f URI Fetch the specified URI and extract it to create the source tree (deprecated - pass as positional argument instead) + --npm-dev For npm, also fetch devDependencies + --version VERSION, -V VERSION + Version to use within recipe (PV) + --no-git, -g If fetching source, do not set up source tree as a git repository + --srcrev SRCREV, -S SRCREV + Source revision to fetch if fetching from an SCM such as git (default latest) + --autorev, -a When fetching from a git repository, set SRCREV in the recipe to a floating revision instead of fixed + --srcbranch SRCBRANCH, -B SRCBRANCH + Branch in source repository if fetching from an SCM such as git (default master) + --binary, -b Treat the source tree as something that should be installed verbatim (no compilation, same directory structure). Useful with binary packages e.g. RPMs. + --also-native Also add native variant (i.e. support building recipe for the build host as well as the target machine) + --src-subdir SUBDIR Specify subdirectory within source tree to use + --mirrors Enable PREMIRRORS and MIRRORS for source tree fetching (disable by default). + --provides PROVIDES, -p PROVIDES + Specify an alias for the item provided by the recipe. E.g. virtual/libgl + +.. _devtool-the-workspace-layer-structure: + +The Workspace Layer Structure +============================= + +``devtool`` uses a "Workspace" layer in which to accomplish builds. This +layer is not specific to any single ``devtool`` command but is rather a +common working area used across the tool. + +The following figure shows the workspace structure: + +.. image:: figures/build-workspace-directory.png + :align: center + :scale: 70% + +:: + + attic - A directory created if devtool believes it must preserve + anything when you run "devtool reset". For example, if you + run "devtool add", make changes to the recipe, and then + run "devtool reset", devtool takes notice that the file has + been changed and moves it into the attic should you still + want the recipe. + + README - Provides information on what is in workspace layer and how to + manage it. + + .devtool_md5 - A checksum file used by devtool. + + appends - A directory that contains *.bbappend files, which point to + external source. + + conf - A configuration directory that contains the layer.conf file. + + recipes - A directory containing recipes. This directory contains a + folder for each directory added whose name matches that of the + added recipe. devtool places the recipe.bb file + within that sub-directory. 
+ + sources - A directory containing a working copy of the source files used + when building the recipe. This is the default directory used + as the location of the source tree when you do not provide a + source tree path. This directory contains a folder for each + set of source files matched to a corresponding recipe. + +.. _devtool-adding-a-new-recipe-to-the-workspace: + +Adding a New Recipe to the Workspace Layer +========================================== + +Use the ``devtool add`` command to add a new recipe to the workspace +layer. The recipe you add should not exist - ``devtool`` creates it for +you. The source files the recipe uses should exist in an external area. + +The following example creates and adds a new recipe named ``jackson`` to +a workspace layer the tool creates. The source code built by the recipes +resides in ``/home/user/sources/jackson``: +:: + + $ devtool add jackson /home/user/sources/jackson + +If you add a recipe and the workspace layer does not exist, the command +creates the layer and populates it as described in "`The Workspace Layer +Structure <#devtool-the-workspace-layer-structure>`__" section. + +Running ``devtool add`` when the workspace layer exists causes the tool +to add the recipe, append files, and source files into the existing +workspace layer. The ``.bbappend`` file is created to point to the +external source tree. + +.. note:: + + If your recipe has runtime dependencies defined, you must be sure + that these packages exist on the target hardware before attempting to + run your application. If dependent packages (e.g. libraries) do not + exist on the target, your application, when run, will fail to find + those functions. For more information, see the + ":ref:`ref-manual/ref-devtool-reference:deploying your software on the target machine`" + section. + +By default, ``devtool add`` uses the latest revision (i.e. master) when +unpacking files from a remote URI. In some cases, you might want to +specify a source revision by branch, tag, or commit hash. You can +specify these options when using the ``devtool add`` command: + +- To specify a source branch, use the ``--srcbranch`` option: + :: + + $ devtool add --srcbranch DISTRO_NAME_NO_CAP jackson /home/user/sources/jackson + + In the previous example, you are checking out the DISTRO_NAME_NO_CAP + branch. + +- To specify a specific tag or commit hash, use the ``--srcrev`` + option: + :: + + $ devtool add --srcrev DISTRO_REL_TAG jackson /home/user/sources/jackson + $ devtool add --srcrev some_commit_hash /home/user/sources/jackson + + The previous examples check out the + DISTRO_REL_TAG tag and the commit associated with the + some_commit_hash hash. + +.. note:: + + If you prefer to use the latest revision every time the recipe is + built, use the options --autorev or -a. + +.. _devtool-extracting-the-source-for-an-existing-recipe: + +Extracting the Source for an Existing Recipe +============================================ + +Use the ``devtool extract`` command to extract the source for an +existing recipe. When you use this command, you must supply the root +name of the recipe (i.e. no version, paths, or extensions), and you must +supply the directory to which you want the source extracted. + +Additional command options let you control the name of a development +branch into which you can checkout the source and whether or not to keep +a temporary directory, which is useful for debugging. + +.. 
_devtool-synchronizing-a-recipes-extracted-source-tree: + +Synchronizing a Recipe's Extracted Source Tree +============================================== + +Use the ``devtool sync`` command to synchronize a previously extracted +source tree for an existing recipe. When you use this command, you must +supply the root name of the recipe (i.e. no version, paths, or +extensions), and you must supply the directory to which you want the +source extracted. + +Additional command options let you control the name of a development +branch into which you can checkout the source and whether or not to keep +a temporary directory, which is useful for debugging. + +.. _devtool-modifying-a-recipe: + +Modifying an Existing Recipe +============================ + +Use the ``devtool modify`` command to begin modifying the source of an +existing recipe. This command is very similar to the +```add`` <#devtool-adding-a-new-recipe-to-the-workspace>`__ command +except that it does not physically create the recipe in the workspace +layer because the recipe already exists in an another layer. + +The ``devtool modify`` command extracts the source for a recipe, sets it +up as a Git repository if the source had not already been fetched from +Git, checks out a branch for development, and applies any patches from +the recipe as commits on top. You can use the following command to +checkout the source files: +:: + + $ devtool modify recipe + +Using the above command form, ``devtool`` uses the existing recipe's +:term:`SRC_URI` statement to locate the upstream source, +extracts the source into the default sources location in the workspace. +The default development branch used is "devtool". + +.. _devtool-edit-an-existing-recipe: + +Edit an Existing Recipe +======================= + +Use the ``devtool edit-recipe`` command to run the default editor, which +is identified using the ``EDITOR`` variable, on the specified recipe. + +When you use the ``devtool edit-recipe`` command, you must supply the +root name of the recipe (i.e. no version, paths, or extensions). Also, +the recipe file itself must reside in the workspace as a result of the +``devtool add`` or ``devtool upgrade`` commands. However, you can +override that requirement by using the "-a" or "--any-recipe" option. +Using either of these options allows you to edit any recipe regardless +of its location. + +.. _devtool-updating-a-recipe: + +Updating a Recipe +================= + +Use the ``devtool update-recipe`` command to update your recipe with +patches that reflect changes you make to the source files. For example, +if you know you are going to work on some code, you could first use the +```devtool modify`` <#devtool-modifying-a-recipe>`__ command to extract +the code and set up the workspace. After which, you could modify, +compile, and test the code. + +When you are satisfied with the results and you have committed your +changes to the Git repository, you can then run the +``devtool update-recipe`` to create the patches and update the recipe: +:: + + $ devtool update-recipe recipe + +If you run the ``devtool update-recipe`` +without committing your changes, the command ignores the changes. + +Often, you might want to apply customizations made to your software in +your own layer rather than apply them to the original recipe. If so, you +can use the ``-a`` or ``--append`` option with the +``devtool update-recipe`` command. 
These options allow you to specify +the layer into which to write an append file: +:: + + $ devtool update-recipe recipe -a base-layer-directory + +The ``*.bbappend`` file is created at the +appropriate path within the specified layer directory, which may or may +not be in your ``bblayers.conf`` file. If an append file already exists, +the command updates it appropriately. + +.. _devtool-checking-on-the-upgrade-status-of-a-recipe: + +Checking on the Upgrade Status of a Recipe +========================================== + +Upstream recipes change over time. Consequently, you might find that you +need to determine if you can upgrade a recipe to a newer version. + +To check on the upgrade status of a recipe, use the +``devtool check-upgrade-status`` command. The command displays a table +of your current recipe versions, the latest upstream versions, the email +address of the recipe's maintainer, and any additional information such +as commit hash strings and reasons you might not be able to upgrade a +particular recipe. + +.. note:: + + - For the ``oe-core`` layer, recipe maintainers come from the + `maintainers.inc `_ + file. + + - If the recipe is using the :ref:`bitbake:git-fetcher` + rather than a + tarball, the commit hash points to the commit that matches the + recipe's latest version tag. + +As with all ``devtool`` commands, you can get help on the individual +command: +:: + + $ devtool check-upgrade-status -h + NOTE: Starting bitbake server... + usage: devtool check-upgrade-status [-h] [--all] [recipe [recipe ...]] + + Prints a table of recipes together with versions currently provided by recipes, and latest upstream versions, when there is a later version available + + arguments: + recipe Name of the recipe to report (omit to report upgrade info for all recipes) + + options: + -h, --help show this help message and exit + --all, -a Show all recipes, not just recipes needing upgrade + +Unless you provide a specific recipe name on the command line, the +command checks all recipes in all configured layers. + +Following is a partial example table that reports on all the recipes. +Notice the reported reason for not upgrading the ``base-passwd`` recipe. +In this example, while a new version is available upstream, you do not +want to use it because the dependency on ``cdebconf`` is not easily +satisfied. + +.. note:: + + When a reason for not upgrading displays, the reason is usually + written into the recipe using the RECIPE_NO_UPDATE_REASON + variable. See the base-passwd.bb recipe for an example. + +:: + + $ devtool check-upgrade-status ... + NOTE: acpid 2.0.30 2.0.31 Ross Burton + NOTE: u-boot-fw-utils 2018.11 2019.01 Marek Vasut d3689267f92c5956e09cc7d1baa4700141662bff + NOTE: u-boot-tools 2018.11 2019.01 Marek Vasut d3689267f92c5956e09cc7d1baa4700141662bff . . . + NOTE: base-passwd 3.5.29 3.5.45 Anuj Mittal cannot be updated due to: Version 3.5.38 requires cdebconf for update-passwd utility + NOTE: busybox 1.29.2 1.30.0 Andrej Valek + NOTE: dbus-test 1.12.10 1.12.12 Chen Qi + +.. _devtool-upgrading-a-recipe: + +Upgrading a Recipe +================== + +As software matures, upstream recipes are upgraded to newer versions. As +a developer, you need to keep your local recipes up-to-date with the +upstream version releases. Several methods exist by which you can +upgrade recipes. You can read about them in the ":ref:`gs-upgrading-recipes`" +section of the Yocto Project Development Tasks Manual. This section +overviews the ``devtool upgrade`` command. 
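As a quick reference for what follows, an upgrade invocation takes the same
general form as the other ``devtool`` commands; in the line below, recipe,
the source tree path, and the version are placeholders rather than values
taken from a real recipe:
::

   $ devtool upgrade recipe ~/sources/recipe --version new_version
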
+ +Before you upgrade a recipe, you can check on its upgrade status. See +the ":ref:`devtool-checking-on-the-upgrade-status-of-a-recipe`" section +for more information. + +The ``devtool upgrade`` command upgrades an existing recipe to a more +recent version of the recipe upstream. The command puts the upgraded +recipe file along with any associated files into a "workspace" and, if +necessary, extracts the source tree to a specified location. During the +upgrade, patches associated with the recipe are rebased or added as +needed. + +When you use the ``devtool upgrade`` command, you must supply the root +name of the recipe (i.e. no version, paths, or extensions), and you must +supply the directory to which you want the source extracted. Additional +command options let you control things such as the version number to +which you want to upgrade (i.e. the :term:`PV`), the source +revision to which you want to upgrade (i.e. the +:term:`SRCREV`), whether or not to apply patches, and so +forth. + +You can read more on the ``devtool upgrade`` workflow in the +":ref:`sdk-devtool-use-devtool-upgrade-to-create-a-version-of-the-recipe-that-supports-a-newer-version-of-the-software`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) manual. You can also see an example of +how to use ``devtool upgrade`` in the ":ref:`gs-using-devtool-upgrade`" +section in the Yocto Project Development Tasks Manual. + +.. _devtool-resetting-a-recipe: + +Resetting a Recipe +================== + +Use the ``devtool reset`` command to remove a recipe and its +configuration (e.g. the corresponding ``.bbappend`` file) from the +workspace layer. Realize that this command deletes the recipe and the +append file. The command does not physically move them for you. +Consequently, you must be sure to physically relocate your updated +recipe and the append file outside of the workspace layer before running +the ``devtool reset`` command. + +If the ``devtool reset`` command detects that the recipe or the append +files have been modified, the command preserves the modified files in a +separate "attic" subdirectory under the workspace layer. + +Here is an example that resets the workspace directory that contains the +``mtr`` recipe: +:: + + $ devtool reset mtr + NOTE: Cleaning sysroot for recipe mtr... + NOTE: Leaving source tree /home/scottrif/poky/build/workspace/sources/mtr as-is; if you no longer need it then please delete it manually + $ + +.. _devtool-building-your-recipe: + +Building Your Recipe +==================== + +Use the ``devtool build`` command to build your recipe. The +``devtool build`` command is equivalent to the +``bitbake -c populate_sysroot`` command. + +When you use the ``devtool build`` command, you must supply the root +name of the recipe (i.e. do not provide versions, paths, or extensions). +You can use either the "-s" or the "--disable-parallel-make" options to +disable parallel makes during the build. Here is an example: +:: + + $ devtool build recipe + +.. _devtool-building-your-image: + +Building Your Image +=================== + +Use the ``devtool build-image`` command to build an image, extending it +to include packages from recipes in the workspace. Using this command is +useful when you want an image that ready for immediate deployment onto a +device for testing. For proper integration into a final image, you need +to edit your custom image recipe appropriately. 
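For example, to make a workspace recipe a permanent part of your own image,
you could add it to :term:`IMAGE_INSTALL` within your custom image recipe.
The ``jackson`` recipe added earlier is used here purely as an illustration:
::

   IMAGE_INSTALL_append = " jackson"
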
+ +When you use the ``devtool build-image`` command, you must supply the +name of the image. This command has no command line options: +:: + + $ devtool build-image image + +.. _devtool-deploying-your-software-on-the-target-machine: + +Deploying Your Software on the Target Machine +============================================= + +Use the ``devtool deploy-target`` command to deploy the recipe's build +output to the live target machine: +:: + + $ devtool deploy-target recipe target + +The target is the address of the target machine, which must be running +an SSH server (i.e. ``user@hostname[:destdir]``). + +This command deploys all files installed during the +:ref:`ref-tasks-install` task. Furthermore, you do not +need to have package management enabled within the target machine. If +you do, the package manager is bypassed. + +.. note:: + + The ``deploy-target`` functionality is for development only. You + should never use it to update an image that will be used in + production. + +Some conditions exist that could prevent a deployed application from +behaving as expected. When both of the following conditions exist, your +application has the potential to not behave correctly when run on the +target: + +- You are deploying a new application to the target and the recipe you + used to build the application had correctly defined runtime + dependencies. + +- The target does not physically have the packages on which the + application depends installed. + +If both of these conditions exist, your application will not behave as +expected. The reason for this misbehavior is because the +``devtool deploy-target`` command does not deploy the packages (e.g. +libraries) on which your new application depends. The assumption is that +the packages are already on the target. Consequently, when a runtime +call is made in the application for a dependent function (e.g. a library +call), the function cannot be found. + +To be sure you have all the dependencies local to the target, you need +to be sure that the packages are pre-deployed (installed) on the target +before attempting to run your application. + +.. _devtool-removing-your-software-from-the-target-machine: + +Removing Your Software from the Target Machine +============================================== + +Use the ``devtool undeploy-target`` command to remove deployed build +output from the target machine. For the ``devtool undeploy-target`` +command to work, you must have previously used the +":ref:`devtool deploy-target `" +command. +:: + + $ devtool undeploy-target recipe target + +The target is the +address of the target machine, which must be running an SSH server (i.e. +``user@hostname``). + +.. _devtool-creating-the-workspace: + +Creating the Workspace Layer in an Alternative Location +======================================================= + +Use the ``devtool create-workspace`` command to create a new workspace +layer in your :term:`Build Directory`. When you create a +new workspace layer, it is populated with the ``README`` file and the +``conf`` directory only. + +The following example creates a new workspace layer in your current +working and by default names the workspace layer "workspace": +:: + + $ devtool create-workspace + +You can create a workspace layer anywhere by supplying a pathname with +the command. The following command creates a new workspace layer named +"new-workspace": +:: + + $ devtool create-workspace /home/scottrif/new-workspace + +.. 
_devtool-get-the-status-of-the-recipes-in-your-workspace: + +Get the Status of the Recipes in Your Workspace +=============================================== + +Use the ``devtool status`` command to list the recipes currently in your +workspace. Information includes the paths to their respective external +source trees. + +The ``devtool status`` command has no command-line options: +:: + + $ devtool status + +Following is sample output after using +:ref:`devtool add ` +to create and add the ``mtr_0.86.bb`` recipe to the ``workspace`` directory: +:: + + $ devtool status mtr + :/home/scottrif/poky/build/workspace/sources/mtr (/home/scottrif/poky/build/workspace/recipes/mtr/mtr_0.86.bb) + $ + +.. _devtool-search-for-available-target-recipes: + +Search for Available Target Recipes +=================================== + +Use the ``devtool search`` command to search for available target +recipes. The command matches the recipe name, package name, description, +and installed files. The command displays the recipe name as a result of +a match. + +When you use the ``devtool search`` command, you must supply a keyword. +The command uses the keyword when searching for a match. diff --git a/poky/documentation/ref-manual/ref-features.rst b/poky/documentation/ref-manual/ref-features.rst new file mode 100644 index 000000000..ae5a0e3b2 --- /dev/null +++ b/poky/documentation/ref-manual/ref-features.rst @@ -0,0 +1,353 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******** +Features +******** + +This chapter provides a reference of shipped machine and distro features +you can include as part of your image, a reference on image features you +can select, and a reference on feature backfilling. + +Features provide a mechanism for working out which packages should be +included in the generated images. Distributions can select which +features they want to support through the ``DISTRO_FEATURES`` variable, +which is set or appended to in a distribution's configuration file such +as ``poky.conf``, ``poky-tiny.conf``, ``poky-lsb.conf`` and so forth. +Machine features are set in the ``MACHINE_FEATURES`` variable, which is +set in the machine configuration file and specifies the hardware +features for a given machine. + +These two variables combine to work out which kernel modules, utilities, +and other packages to include. A given distribution can support a +selected subset of features so some machine features might not be +included if the distribution itself does not support them. + +One method you can use to determine which recipes are checking to see if +a particular feature is contained or not is to ``grep`` through the +:term:`Metadata` for the feature. Here is an example that +discovers the recipes whose build is potentially changed based on a +given feature: +:: + + $ cd poky + $ git grep 'contains.*MACHINE_FEATURES.*feature' + +.. _ref-features-machine: + +Machine Features +================ + +The items below are features you can use with +:term:`MACHINE_FEATURES`. Features do not have a +one-to-one correspondence to packages, and they can go beyond simply +controlling the installation of a package or packages. Sometimes a +feature can influence how certain recipes are built. For example, a +feature might determine whether a particular configure option is +specified within the :ref:`ref-tasks-configure` task +for a particular recipe. 
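For illustration only, a recipe can make a configure option conditional on a
machine feature with a construct such as the following, where the "alsa"
feature and the configure switches are hypothetical stand-ins rather than
lines taken from an actual recipe:
::

   EXTRA_OECONF += "${@bb.utils.contains('MACHINE_FEATURES', 'alsa', '--enable-alsa', '--disable-alsa', d)}"
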
+ +This feature list only represents features as shipped with the Yocto +Project metadata: + +- *acpi:* Hardware has ACPI (x86/x86_64 only) + +- *alsa:* Hardware has ALSA audio drivers + +- *apm:* Hardware uses APM (or APM emulation) + +- *bluetooth:* Hardware has integrated BT + +- *efi:* Support for booting through EFI + +- *ext2:* Hardware HDD or Microdrive + +- *keyboard:* Hardware has a keyboard + +- *pcbios:* Support for booting through BIOS + +- *pci:* Hardware has a PCI bus + +- *pcmcia:* Hardware has PCMCIA or CompactFlash sockets + +- *phone:* Mobile phone (voice) support + +- *qvga:* Machine has a QVGA (320x240) display + +- *rtc:* Machine has a Real-Time Clock + +- *screen:* Hardware has a screen + +- *serial:* Hardware has serial support (usually RS232) + +- *touchscreen:* Hardware has a touchscreen + +- *usbgadget:* Hardware is USB gadget device capable + +- *usbhost:* Hardware is USB Host capable + +- *vfat:* FAT file system support + +- *wifi:* Hardware has integrated WiFi + +.. _ref-features-distro: + +Distro Features +=============== + +The items below are features you can use with +:term:`DISTRO_FEATURES` to enable features across +your distribution. Features do not have a one-to-one correspondence to +packages, and they can go beyond simply controlling the installation of +a package or packages. In most cases, the presence or absence of a +feature translates to the appropriate option supplied to the configure +script during the :ref:`ref-tasks-configure` task for +the recipes that optionally support the feature. + +Some distro features are also machine features. These select features +make sense to be controlled both at the machine and distribution +configuration level. See the +:term:`COMBINED_FEATURES` variable for more +information. + +This list only represents features as shipped with the Yocto Project +metadata: + +- *alsa:* Include ALSA support (OSS compatibility kernel modules + installed if available). + +- *api-documentation:* Enables generation of API documentation during + recipe builds. The resulting documentation is added to SDK tarballs + when the ``bitbake -c populate_sdk`` command is used. See the + ":ref:`sdk-manual/sdk-appendix-customizing-standard:adding api documentation to the standard sdk`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + +- *bluetooth:* Include bluetooth support (integrated BT only). + +- *cramfs:* Include CramFS support. + +- *directfb:* Include DirectFB support. + +- *ext2:* Include tools for supporting for devices with internal + HDD/Microdrive for storing files (instead of Flash only devices). + +- *ipsec:* Include IPSec support. + +- *ipv6:* Include IPv6 support. + +- *keyboard:* Include keyboard support (e.g. keymaps will be loaded + during boot). + +- *ldconfig:* Include support for ldconfig and ``ld.so.conf`` on the + target. + +- *nfs:* Include NFS client support (for mounting NFS exports on + device). + +- *opengl:* Include the Open Graphics Library, which is a + cross-language, multi-platform application programming interface used + for rendering two and three-dimensional graphics. + +- *pci:* Include PCI bus support. + +- *pcmcia:* Include PCMCIA/CompactFlash support. + +- *ppp:* Include PPP dialup support. + +- *ptest:* Enables building the package tests where supported by + individual recipes. 
For more information on package tests, see the + ":ref:`dev-manual/dev-manual-common-tasks:testing packages with ptest`" section + in the Yocto Project Development Tasks Manual. + +- *smbfs:* Include SMB networks client support (for mounting + Samba/Microsoft Windows shares on device). + +- *systemd:* Include support for this ``init`` manager, which is a full + replacement of for ``init`` with parallel starting of services, + reduced shell overhead, and other features. This ``init`` manager is + used by many distributions. + +- *usbgadget:* Include USB Gadget Device support (for USB + networking/serial/storage). + +- *usbhost:* Include USB Host support (allows to connect external + keyboard, mouse, storage, network etc). + +- *usrmerge:* Merges the ``/bin``, ``/sbin``, ``/lib``, and ``/lib64`` + directories into their respective counterparts in the ``/usr`` + directory to provide better package and application compatibility. + +- *wayland:* Include the Wayland display server protocol and the + library that supports it. + +- *wifi:* Include WiFi support (integrated only). + +- *x11:* Include the X server and libraries. + +.. _ref-features-image: + +Image Features +============== + +The contents of images generated by the OpenEmbedded build system can be +controlled by the :term:`IMAGE_FEATURES` and +:term:`EXTRA_IMAGE_FEATURES` variables that +you typically configure in your image recipes. Through these variables, +you can add several different predefined packages such as development +utilities or packages with debug information needed to investigate +application problems or profile applications. + +The following image features are available for all images: + +- *allow-empty-password:* Allows Dropbear and OpenSSH to accept root + logins and logins from accounts having an empty password string. + +- *dbg-pkgs:* Installs debug symbol packages for all packages installed + in a given image. + +- *debug-tweaks:* Makes an image suitable for development (e.g. allows + root logins without passwords and enables post-installation logging). + See the 'allow-empty-password', 'empty-root-password', and + 'post-install-logging' features in this list for additional + information. + +- *dev-pkgs:* Installs development packages (headers and extra library + links) for all packages installed in a given image. + +- *doc-pkgs:* Installs documentation packages for all packages + installed in a given image. + +- *empty-root-password:* Sets the root password to an empty string, + which allows logins with a blank password. + +- *package-management:* Installs package management tools and preserves + the package manager database. + +- *post-install-logging:* Enables logging postinstall script runs to + the ``/var/log/postinstall.log`` file on first boot of the image on + the target system. + + .. note:: + + To make the + /var/log + directory on the target persistent, use the + VOLATILE_LOG_DIR + variable by setting it to "no". + +- *ptest-pkgs:* Installs ptest packages for all ptest-enabled recipes. + +- *read-only-rootfs:* Creates an image whose root filesystem is + read-only. See the + ":ref:`dev-manual/dev-manual-common-tasks:creating a read-only root filesystem`" + section in the Yocto Project Development Tasks Manual for more + information. + +- *splash:* Enables showing a splash screen during boot. By default, + this screen is provided by ``psplash``, which does allow + customization. 
If you prefer to use an alternative splash screen + package, you can do so by setting the ``SPLASH`` variable to a + different package name (or names) within the image recipe or at the + distro configuration level. + +- *staticdev-pkgs:* Installs static development packages, which are + static libraries (i.e. ``*.a`` files), for all packages installed in + a given image. + +Some image features are available only when you inherit the +:ref:`core-image ` class. The current list of +these valid features is as follows: + +- *hwcodecs:* Installs hardware acceleration codecs. + +- *nfs-server:* Installs an NFS server. + +- *perf:* Installs profiling tools such as ``perf``, ``systemtap``, and + ``LTTng``. For general information on user-space tools, see the + :doc:`../sdk-manual/sdk-manual` manual. + +- *ssh-server-dropbear:* Installs the Dropbear minimal SSH server. + +- *ssh-server-openssh:* Installs the OpenSSH SSH server, which is more + full-featured than Dropbear. Note that if both the OpenSSH SSH server + and the Dropbear minimal SSH server are present in + ``IMAGE_FEATURES``, then OpenSSH will take precedence and Dropbear + will not be installed. + +- *tools-debug:* Installs debugging tools such as ``strace`` and + ``gdb``. For information on GDB, see the + ":ref:`platdev-gdb-remotedebug`" section + in the Yocto Project Development Tasks Manual. For information on + tracing and profiling, see the :doc:`../profile-manual/profile-manual`. + +- *tools-sdk:* Installs a full SDK that runs on the device. + +- *tools-testapps:* Installs device testing tools (e.g. touchscreen + debugging). + +- *x11:* Installs the X server. + +- *x11-base:* Installs the X server with a minimal environment. + +- *x11-sato:* Installs the OpenedHand Sato environment. + +.. _ref-features-backfill: + +Feature Backfilling +=================== + +Sometimes it is necessary in the OpenEmbedded build system to extend +:term:`MACHINE_FEATURES` or +:term:`DISTRO_FEATURES` to control functionality +that was previously enabled and not able to be disabled. For these +cases, we need to add an additional feature item to appear in one of +these variables, but we do not want to force developers who have +existing values of the variables in their configuration to add the new +feature in order to retain the same overall level of functionality. +Thus, the OpenEmbedded build system has a mechanism to automatically +"backfill" these added features into existing distro or machine +configurations. You can see the list of features for which this is done +by finding the +:term:`DISTRO_FEATURES_BACKFILL` and +:term:`MACHINE_FEATURES_BACKFILL` +variables in the ``meta/conf/bitbake.conf`` file. + +Because such features are backfilled by default into all configurations +as described in the previous paragraph, developers who wish to disable +the new features need to be able to selectively prevent the backfilling +from occurring. They can do this by adding the undesired feature or +features to the +:term:`DISTRO_FEATURES_BACKFILL_CONSIDERED` +or +:term:`MACHINE_FEATURES_BACKFILL_CONSIDERED` +variables for distro features and machine features respectively. + +Here are two examples to help illustrate feature backfilling: + +- *The "pulseaudio" distro feature option*: Previously, PulseAudio + support was enabled within the Qt and GStreamer frameworks. Because + of this, the feature is backfilled and thus enabled for all distros + through the ``DISTRO_FEATURES_BACKFILL`` variable in the + ``meta/conf/bitbake.conf`` file. 
However, your distro needs to + disable the feature. You can disable the feature without affecting + other existing distro configurations that need PulseAudio support by + adding "pulseaudio" to ``DISTRO_FEATURES_BACKFILL_CONSIDERED`` in + your distro's ``.conf`` file. Adding the feature to this variable + when it also exists in the ``DISTRO_FEATURES_BACKFILL`` variable + prevents the build system from adding the feature to your + configuration's ``DISTRO_FEATURES``, effectively disabling the + feature for that particular distro. + +- *The "rtc" machine feature option*: Previously, real time clock (RTC) + support was enabled for all target devices. Because of this, the + feature is backfilled and thus enabled for all machines through the + ``MACHINE_FEATURES_BACKFILL`` variable in the + ``meta/conf/bitbake.conf`` file. However, your target device does not + have this capability. You can disable RTC support for your device + without affecting other machines that need RTC support by adding the + feature to your machine's ``MACHINE_FEATURES_BACKFILL_CONSIDERED`` + list in the machine's ``.conf`` file. Adding the feature to this + variable when it also exists in the ``MACHINE_FEATURES_BACKFILL`` + variable prevents the build system from adding the feature to your + configuration's ``MACHINE_FEATURES``, effectively disabling RTC + support for that particular machine. diff --git a/poky/documentation/ref-manual/ref-images.rst b/poky/documentation/ref-manual/ref-images.rst new file mode 100644 index 000000000..f0229c3bb --- /dev/null +++ b/poky/documentation/ref-manual/ref-images.rst @@ -0,0 +1,139 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****** +Images +****** + +The OpenEmbedded build system provides several example images to satisfy +different needs. When you issue the ``bitbake`` command you provide a +"top-level" recipe that essentially begins the build for the type of +image you want. + +.. note:: + + Building an image without GNU General Public License Version 3 + (GPLv3), GNU Lesser General Public License Version 3 (LGPLv3), and + the GNU Affero General Public License Version 3 (AGPL-3.0) components + is only supported for minimal and base images. Furthermore, if you + are going to build an image using non-GPLv3 and similarly licensed + components, you must make the following changes in the + local.conf + file before using the BitBake command to build the minimal or base + image: + :: + + 1. Comment out the EXTRA_IMAGE_FEATURES line + 2. Set INCOMPATIBLE_LICENSE = "GPL-3.0 LGPL-3.0 AGPL-3.0" + + +From within the ``poky`` Git repository, you can use the following +command to display the list of directories within the :term:`Source Directory` +that contain image recipe files: :: + + $ ls meta*/recipes*/images/*.bb + +Following is a list of supported recipes: + +- ``build-appliance-image``: An example virtual machine that contains + all the pieces required to run builds using the build system as well + as the build system itself. You can boot and run the image using + either the `VMware + Player `__ or + `VMware + Workstation `__. + For more information on this image, see the :yocto_home:`Build + Appliance ` page + on the Yocto Project website. + +- ``core-image-base``: A console-only image that fully supports the + target device hardware. + +- ``core-image-clutter``: An image with support for the Open GL-based + toolkit Clutter, which enables development of rich and animated + graphical user interfaces. 
+ +- ``core-image-full-cmdline``: A console-only image with more + full-featured Linux system functionality installed. + +- ``core-image-lsb``: An image that conforms to the Linux Standard Base + (LSB) specification. This image requires a distribution configuration + that enables LSB compliance (e.g. ``poky-lsb``). If you build + ``core-image-lsb`` without that configuration, the image will not be + LSB-compliant. + +- ``core-image-lsb-dev``: A ``core-image-lsb`` image that is suitable + for development work using the host. The image includes headers and + libraries you can use in a host development environment. This image + requires a distribution configuration that enables LSB compliance + (e.g. ``poky-lsb``). If you build ``core-image-lsb-dev`` without that + configuration, the image will not be LSB-compliant. + +- ``core-image-lsb-sdk``: A ``core-image-lsb`` that includes everything + in the cross-toolchain but also includes development headers and + libraries to form a complete standalone SDK. This image requires a + distribution configuration that enables LSB compliance (e.g. + ``poky-lsb``). If you build ``core-image-lsb-sdk`` without that + configuration, the image will not be LSB-compliant. This image is + suitable for development using the target. + +- ``core-image-minimal``: A small image just capable of allowing a + device to boot. + +- ``core-image-minimal-dev``: A ``core-image-minimal`` image suitable + for development work using the host. The image includes headers and + libraries you can use in a host development environment. + +- ``core-image-minimal-initramfs``: A ``core-image-minimal`` image that + has the Minimal RAM-based Initial Root Filesystem (initramfs) as part + of the kernel, which allows the system to find the first "init" + program more efficiently. See the + :term:`PACKAGE_INSTALL` variable for + additional information helpful when working with initramfs images. + +- ``core-image-minimal-mtdutils``: A ``core-image-minimal`` image that + has support for the Minimal MTD Utilities, which let the user + interact with the MTD subsystem in the kernel to perform operations + on flash devices. + +- ``core-image-rt``: A ``core-image-minimal`` image plus a real-time + test suite and tools appropriate for real-time use. + +- ``core-image-rt-sdk``: A ``core-image-rt`` image that includes + everything in the cross-toolchain. The image also includes + development headers and libraries to form a complete stand-alone SDK + and is suitable for development using the target. + +- ``core-image-sato``: An image with Sato support, a mobile environment + and visual style that works well with mobile devices. The image + supports X11 with a Sato theme and applications such as a terminal, + editor, file manager, media player, and so forth. + +- ``core-image-sato-dev``: A ``core-image-sato`` image suitable for + development using the host. The image includes libraries needed to + build applications on the device itself, testing and profiling tools, + and debug symbols. This image was formerly ``core-image-sdk``. + +- ``core-image-sato-sdk``: A ``core-image-sato`` image that includes + everything in the cross-toolchain. The image also includes + development headers and libraries to form a complete standalone SDK + and is suitable for development using the target. + +- ``core-image-testmaster``: A "master" image designed to be used for + automated runtime testing. 
Provides a "known good" image that is + deployed to a separate partition so that you can boot into it and use + it to deploy a second image to be tested. You can find more + information about runtime testing in the + ":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" + section in the Yocto Project Development Tasks Manual. + +- ``core-image-testmaster-initramfs``: A RAM-based Initial Root + Filesystem (initramfs) image tailored for use with the + ``core-image-testmaster`` image. + +- ``core-image-weston``: A very basic Wayland image with a terminal. + This image provides the Wayland protocol libraries and the reference + Weston compositor. For more information, see the + ":ref:`dev-manual/dev-manual-common-tasks:using wayland and weston`" + section in the Yocto Project Development Tasks Manual. + +- ``core-image-x11``: A very basic X11 image with a terminal. diff --git a/poky/documentation/ref-manual/ref-images.xml b/poky/documentation/ref-manual/ref-images.xml index aaeda5522..6f10a6fd2 100644 --- a/poky/documentation/ref-manual/ref-images.xml +++ b/poky/documentation/ref-manual/ref-images.xml @@ -9,7 +9,7 @@ The OpenEmbedded build system provides several example images to satisfy different needs. - When you issue the bitbake command you provide a “top-level” recipe + When you issue the bitbake command you provide a "top-level" recipe that essentially begins the build for the type of image you want. @@ -100,7 +100,7 @@ core-image-minimal-initramfs: A core-image-minimal image that has the Minimal RAM-based Initial Root Filesystem (initramfs) as part of the kernel, - which allows the system to find the first “init” program more efficiently. + which allows the system to find the first "init" program more efficiently. See the PACKAGE_INSTALL variable for additional information helpful when working with diff --git a/poky/documentation/ref-manual/ref-kickstart.rst b/poky/documentation/ref-manual/ref-kickstart.rst new file mode 100644 index 000000000..45222de05 --- /dev/null +++ b/poky/documentation/ref-manual/ref-kickstart.rst @@ -0,0 +1,212 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************************************* +OpenEmbedded Kickstart (``.wks``) Reference +******************************************* + +.. _openembedded-kickstart-wks-reference: + +Introduction +============ + +The current Wic implementation supports only the basic kickstart +partitioning commands: ``partition`` (or ``part`` for short) and +``bootloader``. + +.. note:: + + Future updates will implement more commands and options. If you use + anything that is not specifically supported, results can be + unpredictable. + +This chapter provides a reference on the available kickstart commands. +The information lists the commands, their syntax, and meanings. +Kickstart commands are based on the Fedora kickstart versions but with +modifications to reflect Wic capabilities. You can see the original +documentation for those commands at the following link: +http://pykickstart.readthedocs.io/en/latest/kickstart-docs.html + +Command: part or partition +========================== + +Either of these commands creates a partition on the system and uses the +following syntax: +:: + + part [mntpoint] + partition [mntpoint] + +If you do not +provide mntpoint, Wic creates a partition but does not mount it. 
+ +The ``mntpoint`` is where the partition is mounted and must be in one of +the following forms: + +- ``/path``: For example, "/", "/usr", or "/home" + +- ``swap``: The created partition is used as swap space + +Specifying a mntpoint causes the partition to automatically be mounted. +Wic achieves this by adding entries to the filesystem table (fstab) +during image generation. In order for Wic to generate a valid fstab, you +must also provide one of the ``--ondrive``, ``--ondisk``, or +``--use-uuid`` partition options as part of the command. + +.. note:: + + The mount program must understand the PARTUUID syntax you use with + --use-uuid + and non-root + mountpoint + , including swap. The busybox versions of these application are + currently excluded. + +Here is an example that uses "/" as the mountpoint. The command uses +``--ondisk`` to force the partition onto the ``sdb`` disk: part / +--source rootfs --ondisk sdb --fstype=ext3 --label platform --align 1024 + +Here is a list that describes other supported options you can use with +the ``part`` and ``partition`` commands: + +- ``--size``: The minimum partition size in MBytes. Specify an + integer value such as 500. Do not append the number with "MB". You do + not need this option if you use ``--source``. + +- ``--fixed-size``: The exact partition size in MBytes. You cannot + specify with ``--size``. An error occurs when assembling the disk + image if the partition data is larger than ``--fixed-size``. + +- ``--source``: This option is a Wic-specific option that names the + source of the data that populates the partition. The most common + value for this option is "rootfs", but you can use any value that + maps to a valid source plugin. For information on the source plugins, + see the ":ref:`dev-manual/dev-manual-common-tasks:using the wic plugin interface`" + section in the Yocto Project Development Tasks Manual. + + If you use ``--source rootfs``, Wic creates a partition as large as + needed and fills it with the contents of the root filesystem pointed + to by the ``-r`` command-line option or the equivalent rootfs derived + from the ``-e`` command-line option. The filesystem type used to + create the partition is driven by the value of the ``--fstype`` + option specified for the partition. See the entry on ``--fstype`` + that follows for more information. + + If you use ``--source plugin-name``, Wic creates a partition as large + as needed and fills it with the contents of the partition that is + generated by the specified plugin name using the data pointed to by + the ``-r`` command-line option or the equivalent rootfs derived from + the ``-e`` command-line option. Exactly what those contents are and + filesystem type used are dependent on the given plugin + implementation. + + If you do not use the ``--source`` option, the ``wic`` command + creates an empty partition. Consequently, you must use the ``--size`` + option to specify the size of the empty partition. + +- ``--ondisk`` or ``--ondrive``: Forces the partition to be created + on a particular disk. + +- ``--fstype``: Sets the file system type for the partition. Valid + values are: + + - ``ext4`` + + - ``ext3`` + + - ``ext2`` + + - ``btrfs`` + + - ``squashfs`` + + - ``swap`` + +- ``--fsoptions``: Specifies a free-form string of options to be used + when mounting the filesystem. This string is copied into the + ``/etc/fstab`` file of the installed system and should be enclosed in + quotes. If not specified, the default string is "defaults". 
+ +- ``--label label``: Specifies the label to give to the filesystem to + be made on the partition. If the given label is already in use by + another filesystem, a new label is created for the partition. + +- ``--active``: Marks the partition as active. + +- ``--align (in KBytes)``: This option is a Wic-specific option that + says to start partitions on boundaries given x KBytes. + +- ``--no-table``: This option is a Wic-specific option. Using the + option reserves space for the partition and causes it to become + populated. However, the partition is not added to the partition + table. + +- ``--exclude-path``: This option is a Wic-specific option that + excludes the given relative path from the resulting image. This + option is only effective with the rootfs source plugin. + +- ``--extra-space``: This option is a Wic-specific option that adds + extra space after the space filled by the content of the partition. + The final size can exceed the size specified by the ``--size`` + option. The default value is 10 Mbytes. + +- ``--overhead-factor``: This option is a Wic-specific option that + multiplies the size of the partition by the option's value. You must + supply a value greater than or equal to "1". The default value is + "1.3". + +- ``--part-name``: This option is a Wic-specific option that + specifies a name for GPT partitions. + +- ``--part-type``: This option is a Wic-specific option that + specifies the partition type globally unique identifier (GUID) for + GPT partitions. You can find the list of partition type GUIDs at + http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs. + +- ``--use-uuid``: This option is a Wic-specific option that causes + Wic to generate a random GUID for the partition. The generated + identifier is used in the bootloader configuration to specify the + root partition. + +- ``--uuid``: This option is a Wic-specific option that specifies the + partition UUID. + +- ``--fsuuid``: This option is a Wic-specific option that specifies + the filesystem UUID. You can generate or modify + :term:`WKS_FILE` with this option if a preconfigured + filesystem UUID is added to the kernel command line in the bootloader + configuration before you run Wic. + +- ``--system-id``: This option is a Wic-specific option that + specifies the partition system ID, which is a one byte long, + hexadecimal parameter with or without the 0x prefix. + +- ``--mkfs-extraopts``: This option specifies additional options to + pass to the ``mkfs`` utility. Some default options for certain + filesystems do not take effect. See Wic's help on kickstart (i.e. + ``wic help kickstart``). + +Command: bootloader +=================== + +This command specifies how the bootloader should be configured and +supports the following options: + +.. note:: + + Bootloader functionality and boot partitions are implemented by the + various + --source + plugins that implement bootloader functionality. The bootloader + command essentially provides a means of modifying bootloader + configuration. + +- ``--timeout``: Specifies the number of seconds before the + bootloader times out and boots the default option. + +- ``--append``: Specifies kernel parameters. These parameters will be + added to the syslinux ``APPEND`` or ``grub`` kernel command line. + +- ``--configfile``: Specifies a user-defined configuration file for + the bootloader. You can provide a full pathname for the file or a + file that exists in the ``canned-wks`` folder. This option overrides + all other bootloader options. 
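Putting the ``part`` and ``bootloader`` commands together, a complete
kickstart file can be as short as the following sketch. The disk name,
filesystem types, kernel parameters, and the assumption that the
``bootimg-pcbios`` source plugin provides the boot partition are all
illustrative and need to be adapted to your machine:
::

   part /boot --source bootimg-pcbios --ondisk sda --label boot --active --align 1024
   part / --source rootfs --ondisk sda --fstype=ext4 --label platform --align 1024

   bootloader --timeout=0 --append="rootwait console=ttyS0,115200"
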
diff --git a/poky/documentation/ref-manual/ref-manual.rst b/poky/documentation/ref-manual/ref-manual.rst new file mode 100644 index 000000000..a106af21d --- /dev/null +++ b/poky/documentation/ref-manual/ref-manual.rst @@ -0,0 +1,31 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +============================== +Yocto Project Reference Manual +============================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + ref-system-requirements + ref-terms + ref-release-process + migration + ref-structure + ref-classes + ref-tasks + ref-devtool-reference + ref-kickstart + ref-qa-checks + ref-images + ref-features + ref-variables + ref-varlocality + faq + resources + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/ref-manual/ref-qa-checks.rst b/poky/documentation/ref-manual/ref-qa-checks.rst new file mode 100644 index 000000000..3e76ac150 --- /dev/null +++ b/poky/documentation/ref-manual/ref-qa-checks.rst @@ -0,0 +1,533 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************************** +QA Error and Warning Messages +***************************** + +.. _qa-introduction: + +Introduction +============ + +When building a recipe, the OpenEmbedded build system performs various +QA checks on the output to ensure that common issues are detected and +reported. Sometimes when you create a new recipe to build new software, +it will build with no problems. When this is not the case, or when you +have QA issues building any software, it could take a little time to +resolve them. + +While it is tempting to ignore a QA message or even to disable QA +checks, it is best to try and resolve any reported QA issues. This +chapter provides a list of the QA messages and brief explanations of the +issues you could encounter so that you can properly resolve problems. + +The next section provides a list of all QA error and warning messages +based on a default configuration. Each entry provides the message or +error form along with an explanation. + +.. note:: + + - At the end of each message, the name of the associated QA test (as + listed in the ":ref:`insane.bbclass `" + section) appears within square brackets. + + - As mentioned, this list of error and warning messages is for QA + checks only. The list does not cover all possible build errors or + warnings you could encounter. + + - Because some QA checks are disabled by default, this list does not + include all possible QA check errors and warnings. + +.. _qa-errors-and-warnings: + +Errors and Warnings +=================== + +- ``: is using libexec please relocate to [libexec]`` + + The specified package contains files in ``/usr/libexec`` when the + distro configuration uses a different path for ```` By + default, ```` is ``$prefix/libexec``. However, this + default can be changed (e.g. ``${libdir}``). + +   + +- ``package contains bad RPATH in file [rpaths]`` + + The specified binary produced by the recipe contains dynamic library + load paths (rpaths) that contain build system paths such as + :term:`TMPDIR`, which are incorrect for the target and + could potentially be a security issue. Check for bad ``-rpath`` + options being passed to the linker in your + :ref:`ref-tasks-compile` log. Depending on the build + system used by the software being built, there might be a configure + option to disable rpath usage completely within the build of the + software. 
+ +   + +- ``: contains probably-redundant RPATH [useless-rpaths]`` + + The specified binary produced by the recipe contains dynamic library + load paths (rpaths) that on a standard system are searched by default + by the linker (e.g. ``/lib`` and ``/usr/lib``). While these paths + will not cause any breakage, they do waste space and are unnecessary. + Depending on the build system used by the software being built, there + might be a configure option to disable rpath usage completely within + the build of the software. + +   + +- `` requires , but no providers in its RDEPENDS [file-rdeps]`` + + A file-level dependency has been identified from the specified + package on the specified files, but there is no explicit + corresponding entry in :term:`RDEPENDS`. If + particular files are required at runtime then ``RDEPENDS`` should be + declared in the recipe to ensure the packages providing them are + built. + +   + +- `` rdepends on , but it isn't a build dependency? [build-deps]`` + + A runtime dependency exists between the two specified packages, but + there is nothing explicit within the recipe to enable the + OpenEmbedded build system to ensure that dependency is satisfied. + This condition is usually triggered by an + :term:`RDEPENDS` value being added at the packaging + stage rather than up front, which is usually automatic based on the + contents of the package. In most cases, you should change the recipe + to add an explicit ``RDEPENDS`` for the dependency. + +   + +- ``non -dev/-dbg/nativesdk- package contains symlink .so: path '' [dev-so]`` + + Symlink ``.so`` files are for development only, and should therefore + go into the ``-dev`` package. This situation might occur if you add + ``*.so*`` rather than ``*.so.*`` to a non-dev package. Change + :term:`FILES` (and possibly + :term:`PACKAGES`) such that the specified ``.so`` + file goes into an appropriate ``-dev`` package. + +   + +- ``non -staticdev package contains static .a library: path '' [staticdev]`` + + Static ``.a`` library files should go into a ``-staticdev`` package. + Change :term:`FILES` (and possibly + :term:`PACKAGES`) such that the specified ``.a`` file + goes into an appropriate ``-staticdev`` package. + +   + +- ``: found library in wrong location [libdir]`` + + The specified file may have been installed into an incorrect + (possibly hardcoded) installation path. For example, this test will + catch recipes that install ``/lib/bar.so`` when ``${base_libdir}`` is + "lib32". Another example is when recipes install + ``/usr/lib64/foo.so`` when ``${libdir}`` is "/usr/lib". False + positives occasionally exist. For these cases add "libdir" to + :term:`INSANE_SKIP` for the package. + +   + +- ``non debug package contains .debug directory: path [debug-files]`` + + The specified package contains a ``.debug`` directory, which should + not appear in anything but the ``-dbg`` package. This situation might + occur if you add a path which contains a ``.debug`` directory and do + not explicitly add the ``.debug`` directory to the ``-dbg`` package. + If this is the case, add the ``.debug`` directory explicitly to + ``FILES_${PN}-dbg``. See :term:`FILES` for additional + information on ``FILES``. + +   + +- ``Architecture did not match ( to ) on [arch]`` + + By default, the OpenEmbedded build system checks the Executable and + Linkable Format (ELF) type, bit size, and endianness of any binaries + to ensure they match the target architecture. 
This test fails if any + binaries do not match the type since there would be an + incompatibility. The test could indicate that the wrong compiler or + compiler options have been used. Sometimes software, like + bootloaders, might need to bypass this check. If the file you receive + the error for is firmware that is not intended to be executed within + the target operating system or is intended to run on a separate + processor within the device, you can add "arch" to + :term:`INSANE_SKIP` for the package. Another + option is to check the :ref:`ref-tasks-compile` log + and verify that the compiler options being used are correct. + +   + +- ``Bit size did not match ( to ) on [arch]`` + + By default, the OpenEmbedded build system checks the Executable and + Linkable Format (ELF) type, bit size, and endianness of any binaries + to ensure they match the target architecture. This test fails if any + binaries do not match the type since there would be an + incompatibility. The test could indicate that the wrong compiler or + compiler options have been used. Sometimes software, like + bootloaders, might need to bypass this check. If the file you receive + the error for is firmware that is not intended to be executed within + the target operating system or is intended to run on a separate + processor within the device, you can add "arch" to + :term:`INSANE_SKIP` for the package. Another + option is to check the :ref:`ref-tasks-compile` log + and verify that the compiler options being used are correct. + +   + +- ``Endianness did not match ( to ) on [arch]`` + + By default, the OpenEmbedded build system checks the Executable and + Linkable Format (ELF) type, bit size, and endianness of any binaries + to ensure they match the target architecture. This test fails if any + binaries do not match the type since there would be an + incompatibility. The test could indicate that the wrong compiler or + compiler options have been used. Sometimes software, like + bootloaders, might need to bypass this check. If the file you receive + the error for is firmware that is not intended to be executed within + the target operating system or is intended to run on a separate + processor within the device, you can add "arch" to + :term:`INSANE_SKIP` for the package. Another + option is to check the :ref:`ref-tasks-compile` log + and verify that the compiler options being used are correct. + +   + +- ``ELF binary '' has relocations in .text [textrel]`` + + The specified ELF binary contains relocations in its ``.text`` + sections. This situation can result in a performance impact at + runtime. + + Typically, the way to solve this performance issue is to add "-fPIC" + or "-fpic" to the compiler command-line options. For example, given + software that reads :term:`CFLAGS` when you build it, + you could add the following to your recipe: + :: + + CFLAGS_append = " -fPIC " + + For more information on text relocations at runtime, see + http://www.akkadia.org/drepper/textrelocs.html. + +   + +- ``No GNU_HASH in the elf binary: '' [ldflags]`` + + This indicates that binaries produced when building the recipe have + not been linked with the :term:`LDFLAGS` options + provided by the build system. Check to be sure that the ``LDFLAGS`` + variable is being passed to the linker command. 
A common workaround + for this situation is to pass in ``LDFLAGS`` using + :term:`TARGET_CC_ARCH` within the recipe as + follows: + :: + + TARGET_CC_ARCH += "${LDFLAGS}" + +   + +- ``Package contains Xorg driver () but no xorg-abi- dependencies [xorg-driver-abi]`` + + The specified package contains an Xorg driver, but does not have a + corresponding ABI package dependency. The xserver-xorg recipe + provides driver ABI names. All drivers should depend on the ABI + versions that they have been built against. Driver recipes that + include ``xorg-driver-input.inc`` or ``xorg-driver-video.inc`` will + automatically get these versions. Consequently, you should only need + to explicitly add dependencies to binary driver recipes. + +   + +- ``The /usr/share/info/dir file is not meant to be shipped in a particular package. [infodir]`` + + The ``/usr/share/info/dir`` should not be packaged. Add the following + line to your :ref:`ref-tasks-install` task or to your + ``do_install_append`` within the recipe as follows: + :: + + rm ${D}${infodir}/dir +   + +- ``Symlink in points to TMPDIR [symlink-to-sysroot]`` + + The specified symlink points into :term:`TMPDIR` on the + host. Such symlinks will work on the host. However, they are clearly + invalid when running on the target. You should either correct the + symlink to use a relative path or remove the symlink. + +   + +- `` failed sanity test (workdir) in path [la]`` + + The specified ``.la`` file contains :term:`TMPDIR` + paths. Any ``.la`` file containing these paths is incorrect since + ``libtool`` adds the correct sysroot prefix when using the files + automatically itself. + +   + +- `` failed sanity test (tmpdir) in path [pkgconfig]`` + + The specified ``.pc`` file contains + :term:`TMPDIR`\ ``/``\ :term:`WORKDIR` + paths. Any ``.pc`` file containing these paths is incorrect since + ``pkg-config`` itself adds the correct sysroot prefix when the files + are accessed. + +   + +- `` rdepends on [debug-deps]`` + + A dependency exists between the specified non-dbg package (i.e. a + package whose name does not end in ``-dbg``) and a package that is a + ``dbg`` package. The ``dbg`` packages contain debug symbols and are + brought in using several different methods: + + - Using the ``dbg-pkgs`` + :term:`IMAGE_FEATURES` value. + + - Using :term:`IMAGE_INSTALL`. + + - As a dependency of another ``dbg`` package that was brought in + using one of the above methods. + + The dependency might have been automatically added because the + ``dbg`` package erroneously contains files that it should not contain + (e.g. a non-symlink ``.so`` file) or it might have been added + manually (e.g. by adding to :term:`RDEPENDS`). + +   + +- `` rdepends on [dev-deps]`` + + A dependency exists between the specified non-dev package (a package + whose name does not end in ``-dev``) and a package that is a ``dev`` + package. The ``dev`` packages contain development headers and are + usually brought in using several different methods: + + - Using the ``dev-pkgs`` + :term:`IMAGE_FEATURES` value. + + - Using :term:`IMAGE_INSTALL`. + + - As a dependency of another ``dev`` package that was brought in + using one of the above methods. + + The dependency might have been automatically added (because the + ``dev`` package erroneously contains files that it should not have + (e.g. a non-symlink ``.so`` file) or it might have been added + manually (e.g. by adding to :term:`RDEPENDS`). 
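+
+   For instance, if the ``-dev`` package erroneously ships a runtime
+   (non-symlink) ``.so`` file such as a plugin, one common approach is to
+   keep that file in the main package instead. A minimal sketch using a
+   hypothetical ``libfoo.so`` (substitute the file actually reported by
+   the check):
+   ::
+
+      # Keep unversioned .so files out of ${PN}-dev for this recipe
+      FILES_SOLIBSDEV = ""
+      FILES_${PN} += "${libdir}/libfoo.so"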
+ +   + +- ``_ is invalid: () only comparisons <, =, >, <=, and >= are allowed [dep-cmp]`` + + If you are adding a versioned dependency relationship to one of the + dependency variables (:term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, + :term:`RPROVIDES`, + :term:`RREPLACES`, or + :term:`RCONFLICTS`), you must only use the named + comparison operators. Change the versioned dependency values you are + adding to match those listed in the message. + +   + +- ``: The compile log indicates that host include and/or library paths were used. Please check the log '' for more information. [compile-host-path]`` + + The log for the :ref:`ref-tasks-compile` task + indicates that paths on the host were searched for files, which is + not appropriate when cross-compiling. Look for "is unsafe for + cross-compilation" or "CROSS COMPILE Badness" in the specified log + file. + +   + +- ``: The install log indicates that host include and/or library paths were used. Please check the log '' for more information. [install-host-path]`` + + The log for the :ref:`ref-tasks-install` task + indicates that paths on the host were searched for files, which is + not appropriate when cross-compiling. Look for "is unsafe for + cross-compilation" or "CROSS COMPILE Badness" in the specified log + file. + +   + +- ``This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities. Rerun configure task after fixing this. The path was ''`` + + The log for the :ref:`ref-tasks-configure` task + indicates that paths on the host were searched for files, which is + not appropriate when cross-compiling. Look for "is unsafe for + cross-compilation" or "CROSS COMPILE Badness" in the specified log + file. + +   + +- `` doesn't match the [a-z0-9.+-]+ regex [pkgname]`` + + The convention within the OpenEmbedded build system (sometimes + enforced by the package manager itself) is to require that package + names are all lower case and to allow a restricted set of characters. + If your recipe name does not match this, or you add packages to + :term:`PACKAGES` that do not conform to the + convention, then you will receive this error. Rename your recipe. Or, + if you have added a non-conforming package name to ``PACKAGES``, + change the package name appropriately. + +   + +- ``: configure was passed unrecognized options: [unknown-configure-option]`` + + The configure script is reporting that the specified options are + unrecognized. This situation could be because the options were + previously valid but have been removed from the configure script. Or, + there was a mistake when the options were added and there is another + option that should be used instead. If you are unsure, consult the + upstream build documentation, the ``./configure --help`` output, and + the upstream change log or release notes. Once you have worked out + what the appropriate change is, you can update + :term:`EXTRA_OECONF`, + :term:`PACKAGECONFIG_CONFARGS`, or the + individual :term:`PACKAGECONFIG` option values + accordingly. + +   + +- ``Recipe has PN of "" which is in OVERRIDES, this can result in unexpected behavior. [pn-overrides]`` + + The specified recipe has a name (:term:`PN`) value that + appears in :term:`OVERRIDES`. If a recipe is named + such that its ``PN`` value matches something already in ``OVERRIDES`` + (e.g. ``PN`` happens to be the same as :term:`MACHINE` + or :term:`DISTRO`), it can have unexpected + consequences. 
For example, assignments such as + ``FILES_${PN} = "xyz"`` effectively turn into ``FILES = "xyz"``. + Rename your recipe (or if ``PN`` is being set explicitly, change the + ``PN`` value) so that the conflict does not occur. See + :term:`FILES` for additional information. + +   + +- ``: Variable is set as not being package specific, please fix this. [pkgvarcheck]`` + + Certain variables (:term:`RDEPENDS`, + :term:`RRECOMMENDS`, + :term:`RSUGGESTS`, + :term:`RCONFLICTS`, + :term:`RPROVIDES`, + :term:`RREPLACES`, :term:`FILES`, + ``pkg_preinst``, ``pkg_postinst``, ``pkg_prerm``, ``pkg_postrm``, and + :term:`ALLOW_EMPTY`) should always be set specific + to a package (i.e. they should be set with a package name override + such as ``RDEPENDS_${PN} = "value"`` rather than + ``RDEPENDS = "value"``). If you receive this error, correct any + assignments to these variables within your recipe. + +   + +- ``File '' from was already stripped, this will prevent future debugging! [already-stripped]`` + + Produced binaries have already been stripped prior to the build + system extracting debug symbols. It is common for upstream software + projects to default to stripping debug symbols for output binaries. + In order for debugging to work on the target using ``-dbg`` packages, + this stripping must be disabled. + + Depending on the build system used by the software being built, + disabling this stripping could be as easy as specifying an additional + configure option. If not, disabling stripping might involve patching + the build scripts. In the latter case, look for references to "strip" + or "STRIP", or the "-s" or "-S" command-line options being specified + on the linker command line (possibly through the compiler command + line if preceded with "-Wl,"). + + .. note:: + + Disabling stripping here does not mean that the final packaged + binaries will be unstripped. Once the OpenEmbedded build system + splits out debug symbols to the + -dbg + package, it will then strip the symbols from the binaries. + +   + +- `` is listed in PACKAGES multiple times, this leads to packaging errors. [packages-list]`` + + Package names must appear only once in the + :term:`PACKAGES` variable. You might receive this + error if you are attempting to add a package to ``PACKAGES`` that is + already in the variable's value. + +   + +- ``FILES variable for package contains '//' which is invalid. Attempting to fix this but you should correct the metadata. [files-invalid]`` + + The string "//" is invalid in a Unix path. Correct all occurrences + where this string appears in a :term:`FILES` variable so + that there is only a single "/". + +   + +- ``: Files/directories were installed but not shipped in any package [installed-vs-shipped]`` + + Files have been installed within the + :ref:`ref-tasks-install` task but have not been + included in any package by way of the :term:`FILES` + variable. Files that do not appear in any package cannot be present + in an image later on in the build process. You need to do one of the + following: + + - Add the files to ``FILES`` for the package you want them to appear + in (e.g. ``FILES_${``\ :term:`PN`\ ``}`` for the main + package). + + - Delete the files at the end of the ``do_install`` task if the + files are not needed in any package. + +   + +- ``- was registered as shlib provider for , changing it to - because it was built later`` + + This message means that both ```` and ```` + provide the specified shared library. You can expect this message + when a recipe has been renamed. 
However, if that is not the case, the + message might indicate that a private version of a library is being + erroneously picked up as the provider for a common library. If that + is the case, you should add the library's ``.so`` file name to + :term:`PRIVATE_LIBS` in the recipe that provides + the private version of the library. + +- ``LICENSE_ includes licenses () that are not listed in LICENSE [unlisted-pkg-lics]`` + + The :term:`LICENSE` of the recipe should be a superset + of all the licenses of all packages produced by this recipe. In other + words, any license in ``LICENSE_*`` should also appear in + :term:`LICENSE`. + +   + +Configuring and Disabling QA Checks +=================================== + +You can configure the QA checks globally so that specific check failures +either raise a warning or an error message, using the +:term:`WARN_QA` and :term:`ERROR_QA` +variables, respectively. You can also disable checks within a particular +recipe using :term:`INSANE_SKIP`. For information on +how to work with the QA checks, see the +":ref:`insane.bbclass `" section. + +.. note:: + + Please keep in mind that the QA checks exist in order to detect real + or potential problems in the packaged output. So exercise caution + when disabling these checks. diff --git a/poky/documentation/ref-manual/ref-release-process.rst b/poky/documentation/ref-manual/ref-release-process.rst new file mode 100644 index 000000000..be041e725 --- /dev/null +++ b/poky/documentation/ref-manual/ref-release-process.rst @@ -0,0 +1,193 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************************************************** +Yocto Project Releases and the Stable Release Process +***************************************************** + +The Yocto Project release process is predictable and consists of both +major and minor (point) releases. This brief chapter provides +information on how releases are named, their life cycle, and their +stability. + +Major and Minor Release Cadence +=============================== + +The Yocto Project delivers major releases (e.g. DISTRO) using a six +month cadence roughly timed each April and October of the year. +Following are examples of some major YP releases with their codenames +also shown. See the "`Major Release +Codenames <#major-release-codenames>`__" section for information on +codenames used with major releases. + + - 2.2 (Morty) + - 2.1 (Krogoth) + - 2.0 (Jethro) + +While the cadence is never perfect, this timescale facilitates +regular releases that have strong QA cycles while not overwhelming users +with too many new releases. The cadence is predictable and avoids many +major holidays in various geographies. + +The Yocto project delivers minor (point) releases on an unscheduled +basis and are usually driven by the accumulation of enough significant +fixes or enhancements to the associated major release. Following are +some example past point releases: + + - 2.1.1 + - 2.1.2 + - 2.2.1 + +The point release +indicates a point in the major release branch where a full QA cycle and +release process validates the content of the new branch. + +.. note:: + + Realize that there can be patches merged onto the stable release + branches as and when they become available. + +Major Release Codenames +======================= + +Each major release receives a codename that identifies the release in +the :ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`. 
+The concept is that branches of :term:`Metadata` with the same +codename are likely to be compatible and thus work together. + +.. note:: + + Codenames are associated with major releases because a Yocto Project + release number (e.g. DISTRO) could conflict with a given layer or + company versioning scheme. Codenames are unique, interesting, and + easily identifiable. + +Releases are given a nominal release version as well but the codename is +used in repositories for this reason. You can find information on Yocto +Project releases and codenames at +https://wiki.yoctoproject.org/wiki/Releases. + +Stable Release Process +====================== + +Once released, the release enters the stable release process at which +time a person is assigned as the maintainer for that stable release. +This maintainer monitors activity for the release by investigating and +handling nominated patches and backport activity. Only fixes and +enhancements that have first been applied on the "master" branch (i.e. +the current, in-development branch) are considered for backporting to a +stable release. + +.. note:: + + The current Yocto Project policy regarding backporting is to consider + bug fixes and security fixes only. Policy dictates that features are + not backported to a stable release. This policy means generic recipe + version upgrades are unlikely to be accepted for backporting. The + exception to this policy occurs when a strong reason exists such as + the fix happens to also be the preferred upstream approach. + +Stable release branches have strong maintenance for about a year after +their initial release. Should significant issues be found for any +release regardless of its age, fixes could be backported to older +releases. For issues that are not backported given an older release, +Community LTS trees and branches exist where community members share +patches for older releases. However, these types of patches do not go +through the same release process as do point releases. You can find more +information about stable branch maintenance at +https://wiki.yoctoproject.org/wiki/Stable_branch_maintenance. + +Testing and Quality Assurance +============================= + +Part of the Yocto Project development and release process is quality +assurance through the execution of test strategies. Test strategies +provide the Yocto Project team a way to ensure a release is validated. +Additionally, because the test strategies are visible to you as a +developer, you can validate your projects. This section overviews the +available test infrastructure used in the Yocto Project. For information +on how to run available tests on your projects, see the +":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" +section in the Yocto Project Development Tasks Manual. + +The QA/testing infrastructure is woven into the project to the point +where core developers take some of it for granted. The infrastructure +consists of the following pieces: + +- ``bitbake-selftest``: A standalone command that runs unit tests on + key pieces of BitBake and its fetchers. + +- :ref:`sanity.bbclass `: This automatically + included class checks the build environment for missing tools (e.g. + ``gcc``) or common misconfigurations such as + :term:`MACHINE` set incorrectly. + +- :ref:`insane.bbclass `: This class checks the + generated output from builds for sanity. For example, if building for + an ARM target, did the build produce ARM binaries. If, for example, + the build produced PPC binaries then there is a problem. 
+ +- :ref:`testimage.bbclass `: This class + performs runtime testing of images after they are built. The tests + are usually used with :doc:`QEMU <../dev-manual/dev-manual-qemu>` + to boot the images and check the combined runtime result boot + operation and functions. However, the test can also use the IP + address of a machine to test. + +- :ref:`ptest `: + Runs tests against packages produced during the build for a given + piece of software. The test allows the packages to be be run within a + target image. + +- ``oe-selftest``: Tests combination BitBake invocations. These tests + operate outside the OpenEmbedded build system itself. The + ``oe-selftest`` can run all tests by default or can run selected + tests or test suites. + + .. note:: + + Running + oe-selftest + requires host packages beyond the "Essential" grouping. See the " + Required Packages for the Build Host + " section for more information. + +Originally, much of this testing was done manually. However, significant +effort has been made to automate the tests so that more people can use +them and the Yocto Project development team can run them faster and more +efficiently. + +The Yocto Project's main Autobuilder (https://autobuilder.yoctoproject.org/) +publicly tests each Yocto Project release's code in the +:term:`OpenEmbedded-Core (OE-Core)`, Poky, and BitBake repositories. The testing +occurs for both the current state of the "master" branch and also for +submitted patches. Testing for submitted patches usually occurs in the +"ross/mut" branch in the ``poky-contrib`` repository (i.e. the +master-under-test branch) or in the "master-next" branch in the ``poky`` +repository. + +.. note:: + + You can find all these branches in the Yocto Project + Source Repositories + . + +Testing within these public branches ensures in a publicly visible way +that all of the main supposed architectures and recipes in OE-Core +successfully build and behave properly. + +Various features such as ``multilib``, sub architectures (e.g. ``x32``, +``poky-tiny``, ``musl``, ``no-x11`` and and so forth), +``bitbake-selftest``, and ``oe-selftest`` are tested as part of the QA +process of a release. Complete testing and validation for a release +takes the Autobuilder workers several hours. + +.. note:: + + The Autobuilder workers are non-homogeneous, which means regular + testing across a variety of Linux distributions occurs. The + Autobuilder is limited to only testing QEMU-based setups and not real + hardware. + +Finally, in addition to the Autobuilder's tests, the Yocto Project QA +team also performs testing on a variety of platforms, which includes +actual hardware, to ensure expected results. diff --git a/poky/documentation/ref-manual/ref-structure.rst b/poky/documentation/ref-manual/ref-structure.rst new file mode 100644 index 000000000..48a443331 --- /dev/null +++ b/poky/documentation/ref-manual/ref-structure.rst @@ -0,0 +1,890 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************** +Source Directory Structure +************************** + +The :term:`Source Directory` consists of numerous files, +directories and subdirectories; understanding their locations and +contents is key to using the Yocto Project effectively. This chapter +describes the Source Directory and gives information about those files +and directories. 
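+
+As a quick orientation, the top level of the Source Directory (for
+example, a ``poky`` checkout) contains entries similar to the following
+abridged sketch. The exact set of files varies between releases, and a
+``build/`` directory is added once you source the build environment
+setup script:
+::
+
+   bitbake/          meta/             meta-skeleton/     scripts/
+   documentation/    meta-poky/        meta-yocto-bsp/    oe-init-build-env
+   LICENSE           meta-selftest/    README             README.hardware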
+ +For information on how to establish a local Source Directory on your +development system, see the +":ref:`dev-manual/dev-manual-start:locating yocto project source files`" +section in the Yocto Project Development Tasks Manual. + +.. note:: + + The OpenEmbedded build system does not support file or directory + names that contain spaces. Be sure that the Source Directory you use + does not contain these types of names. + +.. _structure-core: + +Top-Level Core Components +========================= + +This section describes the top-level components of the :term:`Source Directory`. + +.. _structure-core-bitbake: + +``bitbake/`` +------------ + +This directory includes a copy of BitBake for ease of use. The copy +usually matches the current stable BitBake release from the BitBake +project. BitBake, a :term:`Metadata` interpreter, reads the +Yocto Project Metadata and runs the tasks defined by that data. Failures +are usually caused by errors in your Metadata and not from BitBake +itself; consequently, most users do not need to worry about BitBake. + +When you run the ``bitbake`` command, the main BitBake executable (which +resides in the ``bitbake/bin/`` directory) starts. Sourcing the +environment setup script (i.e. :ref:`structure-core-script`) places +the ``scripts/`` and ``bitbake/bin/`` directories (in that order) into +the shell's ``PATH`` environment variable. + +For more information on BitBake, see the :doc:`BitBake User Manual +`. + +.. _structure-core-build: + +``build/`` +---------- + +This directory contains user configuration files and the output +generated by the OpenEmbedded build system in its standard configuration +where the source tree is combined with the output. The :term:`Build Directory` +is created initially when you ``source`` +the OpenEmbedded build environment setup script (i.e. +:ref:`structure-core-script`). + +It is also possible to place output and configuration files in a +directory separate from the :term:`Source Directory` by +providing a directory name when you ``source`` the setup script. For +information on separating output from your local Source Directory files +(commonly described as an "out of tree" build), see the +":ref:`structure-core-script`" section. + +.. _handbook: + +``documentation/`` +------------------ + +This directory holds the source for the Yocto Project documentation as +well as templates and tools that allow you to generate PDF and HTML +versions of the manuals. Each manual is contained in its own sub-folder; +for example, the files for this reference manual reside in the +``ref-manual/`` directory. + +.. _structure-core-meta: + +``meta/`` +--------- + +This directory contains the minimal, underlying OpenEmbedded-Core +metadata. The directory holds recipes, common classes, and machine +configuration for strictly emulated targets (``qemux86``, ``qemuarm``, +and so forth.) + +.. _structure-core-meta-poky: + +``meta-poky/`` +-------------- + +Designed above the ``meta/`` content, this directory adds just enough +metadata to define the Poky reference distribution. + +.. _structure-core-meta-yocto-bsp: + +``meta-yocto-bsp/`` +------------------- + +This directory contains the Yocto Project reference hardware Board +Support Packages (BSPs). For more information on BSPs, see the +:doc:`../bsp-guide/bsp-guide`. + +.. _structure-meta-selftest: + +``meta-selftest/`` +------------------ + +This directory adds additional recipes and append files used by the +OpenEmbedded selftests to verify the behavior of the build system. 
You +do not have to add this layer to your ``bblayers.conf`` file unless you +want to run the selftests. + +.. _structure-meta-skeleton: + +``meta-skeleton/`` +------------------ + +This directory contains template recipes for BSP and kernel development. + +.. _structure-core-scripts: + +``scripts/`` +------------ + +This directory contains various integration scripts that implement extra +functionality in the Yocto Project environment (e.g. QEMU scripts). The +:ref:`structure-core-script` script prepends this directory to the +shell's ``PATH`` environment variable. + +The ``scripts`` directory has useful scripts that assist in contributing +back to the Yocto Project, such as ``create-pull-request`` and +``send-pull-request``. + +.. _structure-core-script: + +``oe-init-build-env`` +--------------------- + +This script sets up the OpenEmbedded build environment. Running this +script with the ``source`` command in a shell makes changes to ``PATH`` +and sets other core BitBake variables based on the current working +directory. You need to run an environment setup script before running +BitBake commands. The script uses other scripts within the ``scripts`` +directory to do the bulk of the work. + +When you run this script, your Yocto Project environment is set up, a +:term:`Build Directory` is created, your working +directory becomes the Build Directory, and you are presented with some +simple suggestions as to what to do next, including a list of some +possible targets to build. Here is an example: +:: + + $ source oe-init-build-env + + ### Shell environment set up for builds. ### + + You can now run 'bitbake ' + + Common targets are: + core-image-minimal + core-image-sato + meta-toolchain + meta-ide-support + + You can also run generated qemu images with a command like 'runqemu qemux86-64' + +The default output of the ``oe-init-build-env`` script is from the +``conf-notes.txt`` file, which is found in the ``meta-poky`` directory +within the :term:`Source Directory`. If you design a +custom distribution, you can include your own version of this +configuration file to mention the targets defined by your distribution. +See the +":ref:`dev-manual/dev-manual-common-tasks:creating a custom template configuration directory`" +section in the Yocto Project Development Tasks Manual for more +information. + +By default, running this script without a Build Directory argument +creates the ``build/`` directory in your current working directory. If +you provide a Build Directory argument when you ``source`` the script, +you direct the OpenEmbedded build system to create a Build Directory of +your choice. For example, the following command creates a Build +Directory named ``mybuilds/`` that is outside of the :term:`Source Directory`: +:: + + $ source OE_INIT_FILE ~/mybuilds + +The OpenEmbedded build system uses the template configuration files, which +are found by default in the ``meta-poky/conf/`` directory in the Source +Directory. See the +":ref:`dev-manual/dev-manual-common-tasks:creating a custom template configuration directory`" +section in the Yocto Project Development Tasks Manual for more +information. + +.. note:: + + The OpenEmbedded build system does not support file or directory + names that contain spaces. If you attempt to run the + OE_INIT_FILE + script from a Source Directory that contains spaces in either the + filenames or directory names, the script returns an error indicating + no such file or directory. Be sure to use a Source Directory free of + names containing spaces. + +.. 
_structure-basic-top-level: + +``LICENSE, README, and README.hardware`` +---------------------------------------- + +These files are standard top-level files. + +.. _structure-build: + +The Build Directory - ``build/`` +================================ + +The OpenEmbedded build system creates the :term:`Build Directory` +when you run the build environment setup +script :ref:`structure-core-script`. If you do not give the Build +Directory a specific name when you run the setup script, the name +defaults to ``build/``. + +For subsequent parsing and processing, the name of the Build directory +is available via the :term:`TOPDIR` variable. + +.. _structure-build-buildhistory: + +``build/buildhistory/`` +----------------------- + +The OpenEmbedded build system creates this directory when you enable +build history via the ``buildhistory`` class file. The directory +organizes build information into image, packages, and SDK +subdirectories. For information on the build history feature, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining build output quality`" +section in the Yocto Project Development Tasks Manual. + +.. _structure-build-conf-local.conf: + +``build/conf/local.conf`` +------------------------- + +This configuration file contains all the local user configurations for +your build environment. The ``local.conf`` file contains documentation +on the various configuration options. Any variable set here overrides +any variable set elsewhere within the environment unless that variable +is hard-coded within a file (e.g. by using '=' instead of '?='). Some +variables are hard-coded for various reasons but such variables are +relatively rare. + +At a minimum, you would normally edit this file to select the target +``MACHINE``, which package types you wish to use +(:term:`PACKAGE_CLASSES`), and the location from +which you want to access downloaded files (``DL_DIR``). + +If ``local.conf`` is not present when you start the build, the +OpenEmbedded build system creates it from ``local.conf.sample`` when you +``source`` the top-level build environment setup script +:ref:`structure-core-script`. + +The source ``local.conf.sample`` file used depends on the +``$TEMPLATECONF`` script variable, which defaults to ``meta-poky/conf/`` +when you are building from the Yocto Project development environment, +and to ``meta/conf/`` when you are building from the OpenEmbedded-Core +environment. Because the script variable points to the source of the +``local.conf.sample`` file, this implies that you can configure your +build environment from any layer by setting the variable in the +top-level build environment setup script as follows: +:: + + TEMPLATECONF=your_layer/conf + +Once the build process gets the sample +file, it uses ``sed`` to substitute final +``${``\ :term:`OEROOT`\ ``}`` values for all +``##OEROOT##`` values. + +.. note:: + + You can see how the + TEMPLATECONF + variable is used by looking at the + scripts/oe-setup-builddir + script in the + Source Directory + . You can find the Yocto Project version of the + local.conf.sample + file in the + meta-poky/conf + directory. + +.. _structure-build-conf-bblayers.conf: + +``build/conf/bblayers.conf`` +---------------------------- + +This configuration file defines +:ref:`layers `, +which are directory trees, traversed (or walked) by BitBake. The +``bblayers.conf`` file uses the :term:`BBLAYERS` +variable to list the layers BitBake tries to find. 
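+
+For illustration, a newly generated ``bblayers.conf`` typically looks
+similar to the following sketch; the absolute paths depend on where your
+Source Directory is located, and the exact sample content can differ
+between releases:
+::
+
+   POKY_BBLAYERS_CONF_VERSION = "2"
+
+   BBPATH = "${TOPDIR}"
+   BBFILES ?= ""
+
+   BBLAYERS ?= " \
+     /home/user/poky/meta \
+     /home/user/poky/meta-poky \
+     /home/user/poky/meta-yocto-bsp \
+     "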
+ +If ``bblayers.conf`` is not present when you start the build, the +OpenEmbedded build system creates it from ``bblayers.conf.sample`` when +you ``source`` the top-level build environment setup script (i.e. +:ref:`structure-core-script`). + +As with the ``local.conf`` file, the source ``bblayers.conf.sample`` +file used depends on the ``$TEMPLATECONF`` script variable, which +defaults to ``meta-poky/conf/`` when you are building from the Yocto +Project development environment, and to ``meta/conf/`` when you are +building from the OpenEmbedded-Core environment. Because the script +variable points to the source of the ``bblayers.conf.sample`` file, this +implies that you can base your build from any layer by setting the +variable in the top-level build environment setup script as follows: +:: + + TEMPLATECONF=your_layer/conf + +Once the build process gets the sample file, it uses ``sed`` to substitute final +``${``\ :term:`OEROOT`\ ``}`` values for all ``##OEROOT##`` values. + +.. note:: + + You can see how the + TEMPLATECONF + variable + scripts/oe-setup-builddir + script in the + Source Directory + . You can find the Yocto Project version of the + bblayers.conf.sample + file in the + meta-poky/conf/ + directory. + +.. _structure-build-conf-sanity_info: + +``build/cache/sanity_info`` +--------------------------- + +This file indicates the state of the sanity checks and is created during +the build. + +.. _structure-build-downloads: + +``build/downloads/`` +-------------------- + +This directory contains downloaded upstream source tarballs. You can +reuse the directory for multiple builds or move the directory to another +location. You can control the location of this directory through the +``DL_DIR`` variable. + +.. _structure-build-sstate-cache: + +``build/sstate-cache/`` +----------------------- + +This directory contains the shared state cache. You can reuse the +directory for multiple builds or move the directory to another location. +You can control the location of this directory through the +``SSTATE_DIR`` variable. + +.. _structure-build-tmp: + +``build/tmp/`` +-------------- + +The OpenEmbedded build system creates and uses this directory for all +the build system's output. The :term:`TMPDIR` variable +points to this directory. + +BitBake creates this directory if it does not exist. As a last resort, +to clean up a build and start it from scratch (other than the +downloads), you can remove everything in the ``tmp`` directory or get +rid of the directory completely. If you do, you should also completely +remove the ``build/sstate-cache`` directory. + +.. _structure-build-tmp-buildstats: + +``build/tmp/buildstats/`` +------------------------- + +This directory stores the build statistics. + +.. _structure-build-tmp-cache: + +``build/tmp/cache/`` +-------------------- + +When BitBake parses the metadata (recipes and configuration files), it +caches the results in ``build/tmp/cache/`` to speed up future builds. +The results are stored on a per-machine basis. + +During subsequent builds, BitBake checks each recipe (together with, for +example, any files included or appended to it) to see if they have been +modified. Changes can be detected, for example, through file +modification time (mtime) changes and hashing of file contents. If no +changes to the file are detected, then the parsed result stored in the +cache is reused. If the file has changed, it is reparsed. + +.. 
_structure-build-tmp-deploy: + +``build/tmp/deploy/`` +--------------------- + +This directory contains any "end result" output from the OpenEmbedded +build process. The :term:`DEPLOY_DIR` variable points +to this directory. For more detail on the contents of the ``deploy`` +directory, see the +":ref:`images-dev-environment`" and +":ref:`sdk-dev-environment`" sections in the Yocto +Project Overview and Concepts Manual. + +.. _structure-build-tmp-deploy-deb: + +``build/tmp/deploy/deb/`` +------------------------- + +This directory receives any ``.deb`` packages produced by the build +process. The packages are sorted into feeds for different architecture +types. + +.. _structure-build-tmp-deploy-rpm: + +``build/tmp/deploy/rpm/`` +------------------------- + +This directory receives any ``.rpm`` packages produced by the build +process. The packages are sorted into feeds for different architecture +types. + +.. _structure-build-tmp-deploy-ipk: + +``build/tmp/deploy/ipk/`` +------------------------- + +This directory receives ``.ipk`` packages produced by the build process. + +.. _structure-build-tmp-deploy-licenses: + +``build/tmp/deploy/licenses/`` +------------------------------ + +This directory receives package licensing information. For example, the +directory contains sub-directories for ``bash``, ``busybox``, and +``glibc`` (among others) that in turn contain appropriate ``COPYING`` +license files with other licensing information. For information on +licensing, see the +":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`" +section in the Yocto Project Development Tasks Manual. + +.. _structure-build-tmp-deploy-images: + +``build/tmp/deploy/images/`` +---------------------------- + +This directory is populated with the basic output objects of the build +(think of them as the "generated artifacts" of the build process), +including things like the boot loader image, kernel, root filesystem and +more. If you want to flash the resulting image from a build onto a +device, look here for the necessary components. + +Be careful when deleting files in this directory. You can safely delete +old images from this directory (e.g. ``core-image-*``). However, the +kernel (``*zImage*``, ``*uImage*``, etc.), bootloader and other +supplementary files might be deployed here prior to building an image. +Because these files are not directly produced from the image, if you +delete them they will not be automatically re-created when you build the +image again. + +If you do accidentally delete files here, you will need to force them to +be re-created. In order to do that, you will need to know the target +that produced them. For example, these commands rebuild and re-create +the kernel files: +:: + + $ bitbake -c clean virtual/kernel + $ bitbake virtual/kernel + +.. _structure-build-tmp-deploy-sdk: + +``build/tmp/deploy/sdk/`` +------------------------- + +The OpenEmbedded build system creates this directory to hold toolchain +installer scripts which, when executed, install the sysroot that matches +your target hardware. You can find out more about these installers in +the ":ref:`sdk-manual/sdk-appendix-obtain:building an sdk installer`" +section in the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) manual. + +.. _structure-build-tmp-sstate-control: + +``build/tmp/sstate-control/`` +----------------------------- + +The OpenEmbedded build system uses this directory for the shared state +manifest files. 
The shared state code uses these files to record the +files installed by each sstate task so that the files can be removed +when cleaning the recipe or when a newer version is about to be +installed. The build system also uses the manifests to detect and +produce a warning when files from one task are overwriting those from +another. + +.. _structure-build-tmp-sysroots-components: + +``build/tmp/sysroots-components/`` +---------------------------------- + +This directory is the location of the sysroot contents that the task +:ref:`ref-tasks-prepare_recipe_sysroot` +links or copies into the recipe-specific sysroot for each recipe listed +in :term:`DEPENDS`. Population of this directory is +handled through shared state, while the path is specified by the +:term:`COMPONENTS_DIR` variable. Apart from a few +unusual circumstances, handling of the ``sysroots-components`` directory +should be automatic, and recipes should not directly reference +``build/tmp/sysroots-components``. + +.. _structure-build-tmp-sysroots: + +``build/tmp/sysroots/`` +----------------------- + +Previous versions of the OpenEmbedded build system used to create a +global shared sysroot per machine along with a native sysroot. Beginning +with the DISTRO version of the Yocto Project, sysroots exist in +recipe-specific :term:`WORKDIR` directories. Thus, the +``build/tmp/sysroots/`` directory is unused. + +.. note:: + + The + build/tmp/sysroots/ + directory can still be populated using the + bitbake build-sysroots + command and can be used for compatibility in some cases. However, in + general it is not recommended to populate this directory. Individual + recipe-specific sysroots should be used. + +.. _structure-build-tmp-stamps: + +``build/tmp/stamps/`` +--------------------- + +This directory holds information that BitBake uses for accounting +purposes to track what tasks have run and when they have run. The +directory is sub-divided by architecture, package name, and version. +Following is an example: +stamps/all-poky-linux/distcc-config/1.0-r0.do_build-2fdd....2do Although +the files in the directory are empty of data, BitBake uses the filenames +and timestamps for tracking purposes. + +For information on how BitBake uses stamp files to determine if a task +should be rerun, see the +":ref:`overview-manual/overview-manual-concepts:stamp files and the rerunning of tasks`" +section in the Yocto Project Overview and Concepts Manual. + +.. _structure-build-tmp-log: + +``build/tmp/log/`` +------------------ + +This directory contains general logs that are not otherwise placed using +the package's ``WORKDIR``. Examples of logs are the output from the +``do_check_pkg`` or ``do_distro_check`` tasks. Running a build does not +necessarily mean this directory is created. + +.. _structure-build-tmp-work: + +``build/tmp/work/`` +------------------- + +This directory contains architecture-specific work sub-directories for +packages built by BitBake. All tasks execute from the appropriate work +directory. For example, the source for a particular package is unpacked, +patched, configured and compiled all within its own work directory. +Within the work directory, organization is based on the package group +and version for which the source is being compiled as defined by the +:term:`WORKDIR`. + +It is worth considering the structure of a typical work directory. As an +example, consider ``linux-yocto-kernel-3.0`` on the machine ``qemux86`` +built within the Yocto Project. 
For this package, a work directory of +``tmp/work/qemux86-poky-linux/linux-yocto/3.0+git1+<.....>``, referred +to as the ``WORKDIR``, is created. Within this directory, the source is +unpacked to ``linux-qemux86-standard-build`` and then patched by Quilt. +(See the ":ref:`using-a-quilt-workflow`" section in +the Yocto Project Development Tasks Manual for more information.) Within +the ``linux-qemux86-standard-build`` directory, standard Quilt +directories ``linux-3.0/patches`` and ``linux-3.0/.pc`` are created, and +standard Quilt commands can be used. + +There are other directories generated within ``WORKDIR``. The most +important directory is ``WORKDIR/temp/``, which has log files for each +task (``log.do_*.pid``) and contains the scripts BitBake runs for each +task (``run.do_*.pid``). The ``WORKDIR/image/`` directory is where "make +install" places its output that is then split into sub-packages within +``WORKDIR/packages-split/``. + +.. _structure-build-tmp-work-tunearch-recipename-version: + +``build/tmp/work/tunearch/recipename/version/`` +----------------------------------------------- + +The recipe work directory - ``${WORKDIR}``. + +As described earlier in the +"```build/tmp/sysroots/`` <#structure-build-tmp-sysroots>`__" section, +beginning with the DISTRO release of the Yocto Project, the OpenEmbedded +build system builds each recipe in its own work directory (i.e. +:term:`WORKDIR`). The path to the work directory is +constructed using the architecture of the given build (e.g. +:term:`TUNE_PKGARCH`, +:term:`MACHINE_ARCH`, or "allarch"), the recipe +name, and the version of the recipe (i.e. +:term:`PE`\ ``:``\ :term:`PV`\ ``-``\ :term:`PR`). + +A number of key subdirectories exist within each recipe work directory: + +- ``${WORKDIR}/temp``: Contains the log files of each task executed for + this recipe, the "run" files for each executed task, which contain + the code run, and a ``log.task_order`` file, which lists the order in + which tasks were executed. + +- ``${WORKDIR}/image``: Contains the output of the + :ref:`ref-tasks-install` task, which corresponds to + the ``${``\ :term:`D`\ ``}`` variable in that task. + +- ``${WORKDIR}/pseudo``: Contains the pseudo database and log for any + tasks executed under pseudo for the recipe. + +- ``${WORKDIR}/sysroot-destdir``: Contains the output of the + :ref:`ref-tasks-populate_sysroot` task. + +- ``${WORKDIR}/package``: Contains the output of the + :ref:`ref-tasks-package` task before the output is + split into individual packages. + +- ``${WORKDIR}/packages-split``: Contains the output of the + ``do_package`` task after the output has been split into individual + packages. Subdirectories exist for each individual package created by + the recipe. + +- ``${WORKDIR}/recipe-sysroot``: A directory populated with the target + dependencies of the recipe. This directory looks like the target + filesystem and contains libraries that the recipe might need to link + against (e.g. the C library). + +- ``${WORKDIR}/recipe-sysroot-native``: A directory populated with the + native dependencies of the recipe. This directory contains the tools + the recipe needs to build (e.g. the compiler, Autoconf, libtool, and + so forth). + +- ``${WORKDIR}/build``: This subdirectory applies only to recipes that + support builds where the source is separate from the build artifacts. + The OpenEmbedded build system uses this directory as a separate build + directory (i.e. ``${``\ :term:`B`\ ``}``). + +.. 
_structure-build-work-shared: + +``build/tmp/work-shared/`` +-------------------------- + +For efficiency, the OpenEmbedded build system creates and uses this +directory to hold recipes that share a work directory with other +recipes. In practice, this is only used for ``gcc`` and its variants +(e.g. ``gcc-cross``, ``libgcc``, ``gcc-runtime``, and so forth). + +.. _structure-meta: + +The Metadata - ``meta/`` +======================== + +As mentioned previously, :term:`Metadata` is the core of the +Yocto Project. Metadata has several important subdivisions: + +.. _structure-meta-classes: + +``meta/classes/`` +----------------- + +This directory contains the ``*.bbclass`` files. Class files are used to +abstract common code so it can be reused by multiple packages. Every +package inherits the ``base.bbclass`` file. Examples of other important +classes are ``autotools.bbclass``, which in theory allows any +Autotool-enabled package to work with the Yocto Project with minimal +effort. Another example is ``kernel.bbclass`` that contains common code +and functions for working with the Linux kernel. Functions like image +generation or packaging also have their specific class files such as +``image.bbclass``, ``rootfs_*.bbclass`` and ``package*.bbclass``. + +For reference information on classes, see the +":ref:`ref-manual/ref-classes:Classes`" chapter. + +.. _structure-meta-conf: + +``meta/conf/`` +-------------- + +This directory contains the core set of configuration files that start +from ``bitbake.conf`` and from which all other configuration files are +included. See the include statements at the end of the ``bitbake.conf`` +file and you will note that even ``local.conf`` is loaded from there. +While ``bitbake.conf`` sets up the defaults, you can often override +these by using the (``local.conf``) file, machine file or the +distribution configuration file. + +.. _structure-meta-conf-machine: + +``meta/conf/machine/`` +---------------------- + +This directory contains all the machine configuration files. If you set +``MACHINE = "qemux86"``, the OpenEmbedded build system looks for a +``qemux86.conf`` file in this directory. The ``include`` directory +contains various data common to multiple machines. If you want to add +support for a new machine to the Yocto Project, look in this directory. + +.. _structure-meta-conf-distro: + +``meta/conf/distro/`` +--------------------- + +The contents of this directory controls any distribution-specific +configurations. For the Yocto Project, the ``defaultsetup.conf`` is the +main file here. This directory includes the versions and the ``SRCDATE`` +definitions for applications that are configured here. An example of an +alternative configuration might be ``poky-bleeding.conf``. Although this +file mainly inherits its configuration from Poky. + +.. _structure-meta-conf-machine-sdk: + +``meta/conf/machine-sdk/`` +-------------------------- + +The OpenEmbedded build system searches this directory for configuration +files that correspond to the value of +:term:`SDKMACHINE`. By default, 32-bit and 64-bit x86 +files ship with the Yocto Project that support some SDK hosts. However, +it is possible to extend that support to other SDK hosts by adding +additional configuration files in this subdirectory within another +layer. + +.. _structure-meta-files: + +``meta/files/`` +--------------- + +This directory contains common license files and several text files used +by the build system. 
The text files contain minimal device information +and lists of files and directories with known permissions. + +.. _structure-meta-lib: + +``meta/lib/`` +------------- + +This directory contains OpenEmbedded Python library code used during the +build process. + +.. _structure-meta-recipes-bsp: + +``meta/recipes-bsp/`` +--------------------- + +This directory contains anything linking to specific hardware or +hardware configuration information such as "u-boot" and "grub". + +.. _structure-meta-recipes-connectivity: + +``meta/recipes-connectivity/`` +------------------------------ + +This directory contains libraries and applications related to +communication with other devices. + +.. _structure-meta-recipes-core: + +``meta/recipes-core/`` +---------------------- + +This directory contains what is needed to build a basic working Linux +image including commonly used dependencies. + +.. _structure-meta-recipes-devtools: + +``meta/recipes-devtools/`` +-------------------------- + +This directory contains tools that are primarily used by the build +system. The tools, however, can also be used on targets. + +.. _structure-meta-recipes-extended: + +``meta/recipes-extended/`` +-------------------------- + +This directory contains non-essential applications that add features +compared to the alternatives in core. You might need this directory for +full tool functionality or for Linux Standard Base (LSB) compliance. + +.. _structure-meta-recipes-gnome: + +``meta/recipes-gnome/`` +----------------------- + +This directory contains all things related to the GTK+ application +framework. + +.. _structure-meta-recipes-graphics: + +``meta/recipes-graphics/`` +-------------------------- + +This directory contains X and other graphically related system +libraries. + +.. _structure-meta-recipes-kernel: + +``meta/recipes-kernel/`` +------------------------ + +This directory contains the kernel and generic applications and +libraries that have strong kernel dependencies. + +.. _structure-meta-recipes-lsb4: + +``meta/recipes-lsb4/`` +---------------------- + +This directory contains recipes specifically added to support the Linux +Standard Base (LSB) version 4.x. + +.. _structure-meta-recipes-multimedia: + +``meta/recipes-multimedia/`` +---------------------------- + +This directory contains codecs and support utilities for audio, images +and video. + +.. _structure-meta-recipes-rt: + +``meta/recipes-rt/`` +-------------------- + +This directory contains package and image recipes for using and testing +the ``PREEMPT_RT`` kernel. + +.. _structure-meta-recipes-sato: + +``meta/recipes-sato/`` +---------------------- + +This directory contains the Sato demo/reference UI/UX and its associated +applications and configuration data. + +.. _structure-meta-recipes-support: + +``meta/recipes-support/`` +------------------------- + +This directory contains recipes used by other recipes, but that are not +directly included in images (i.e. dependencies of other recipes). + +.. _structure-meta-site: + +``meta/site/`` +-------------- + +This directory contains a list of cached results for various +architectures. Because certain "autoconf" test results cannot be +determined when cross-compiling due to the tests not able to run on a +live system, the information in this directory is passed to "autoconf" +for the various architectures. + +.. _structure-meta-recipes-txt: + +``meta/recipes.txt`` +-------------------- + +This file is a description of the contents of ``recipes-*``. 
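+
+Since ``recipes.txt`` simply summarizes the ``recipes-*`` categories
+described above, a quick way to see them on disk is to list them
+directly (the column layout below is illustrative):
+::
+
+   $ ls -d meta/recipes-*
+   meta/recipes-bsp           meta/recipes-core       meta/recipes-extended
+   meta/recipes-connectivity  meta/recipes-devtools   meta/recipes-gnome
+   meta/recipes-graphics      meta/recipes-kernel     meta/recipes-lsb4
+   meta/recipes-multimedia    meta/recipes-rt         meta/recipes-sato
+   meta/recipes-support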
diff --git a/poky/documentation/ref-manual/ref-system-requirements.rst b/poky/documentation/ref-manual/ref-system-requirements.rst new file mode 100644 index 000000000..56218e4eb --- /dev/null +++ b/poky/documentation/ref-manual/ref-system-requirements.rst @@ -0,0 +1,437 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************* +System Requirements +******************* + +Welcome to the Yocto Project Reference Manual! This manual provides +reference information for the current release of the Yocto Project, and +is most effectively used after you have an understanding of the basics +of the Yocto Project. The manual is neither meant to be read as a +starting point to the Yocto Project, nor read from start to finish. +Rather, use this manual to find variable definitions, class +descriptions, and so forth as needed during the course of using the +Yocto Project. + +For introductory information on the Yocto Project, see the +:yocto_home:`Yocto Project Website <>` and the +":ref:`overview-manual/overview-manual-development-environment:the yocto project development environment`" +chapter in the Yocto Project Overview and Concepts Manual. + +If you want to use the Yocto Project to quickly build an image without +having to understand concepts, work through the +:doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document. You can find "how-to" +information in the :doc:`../dev-manual/dev-manual`. You can find Yocto Project overview +and conceptual information in the :doc:`../overview-manual/overview-manual`. + +.. note:: + + For more information about the Yocto Project Documentation set, see + the " + Links and Related Documentation + " section. + +.. _detailed-supported-distros: + +Supported Linux Distributions +============================= + +Currently, the Yocto Project is supported on the following +distributions: + +- Ubuntu 16.04 (LTS) + +- Ubuntu 18.04 (LTS) + +- Ubuntu 20.04 + +- Fedora 30 + +- Fedora 31 + +- Fedora 32 + +- CentOS 7.x + +- CentOS 8.x + +- Debian GNU/Linux 8.x (Jessie) + +- Debian GNU/Linux 9.x (Stretch) + +- Debian GNU/Linux 10.x (Buster) + +- OpenSUSE Leap 15.1 + + +.. note:: + + - While the Yocto Project Team attempts to ensure all Yocto Project + releases are one hundred percent compatible with each officially + supported Linux distribution, instances might exist where you + encounter a problem while using the Yocto Project on a specific + distribution. + + - Yocto Project releases are tested against the stable Linux + distributions in the above list. The Yocto Project should work + on other distributions but validation is not performed against + them. + + - In particular, the Yocto Project does not support and currently + has no plans to support rolling-releases or development + distributions due to their constantly changing nature. We welcome + patches and bug reports, but keep in mind that our priority is on + the supported platforms listed below. + + - You may use Windows Subsystem For Linux v2 to set up a build host + using Windows 10, but validation is not performed against build + hosts using WSLv2. + + - The Yocto Project is not compatible with WSLv1, it is + compatible but not officially supported nor validated with + WSLv2, if you still decide to use WSL please upgrade to WSLv2. + + - If you encounter problems, please go to `Yocto Project + Bugzilla `__ and submit a bug. We are + interested in hearing about your experience. 
For information on
+     how to submit a bug, see the Yocto Project
+     :yocto_wiki:`Bugzilla wiki page `
+     and the ":ref:`dev-manual/dev-manual-common-tasks:submitting a defect against the yocto project`"
+     section in the Yocto Project Development Tasks Manual.
+
+
+Required Packages for the Build Host
+====================================
+
+The list of packages you need on the host development system can be
+large when covering all build scenarios using the Yocto Project. This
+section describes required packages according to Linux distribution and
+function.
+
+.. _ubuntu-packages:
+
+Ubuntu and Debian
+-----------------
+
+The following list shows the required packages by function given a
+supported Ubuntu or Debian Linux distribution:
+
+.. note::
+
+   - If your build system has the ``oss4-dev`` package installed, you
+     might experience QEMU build failures due to the package installing
+     its own custom ``/usr/include/linux/soundcard.h`` on the Debian
+     system. If you run into this situation, either of the following
+     solutions will resolve the problem:
+     ::
+
+        $ sudo apt-get build-dep qemu
+        $ sudo apt-get remove oss4-dev
+
+   - For Debian-8, ``python3-git`` and ``pylint3`` are no longer
+     available via ``apt-get``.
+     ::
+
+        $ sudo pip3 install GitPython pylint==1.9.5
+
+- *Essentials:* Packages needed to build an image on a headless system:
+  ::
+
+     $ sudo apt-get install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
+
+- *Documentation:* Packages needed if you are going to build out the
+  Yocto Project documentation manuals:
+  ::
+
+     $ sudo apt-get install make xsltproc docbook-utils fop dblatex xmlto
+
+Fedora Packages
+---------------
+
+The following list shows the required packages by function given a
+supported Fedora Linux distribution:
+
+- *Essentials:* Packages needed to build an image for a headless
+  system:
+  ::
+
+     $ sudo dnf install &FEDORA_HOST_PACKAGES_ESSENTIAL;
+
+- *Documentation:* Packages needed if you are going to build out the
+  Yocto Project documentation manuals:
+  ::
+
+     $ sudo dnf install docbook-style-dsssl docbook-style-xsl \
+     docbook-dtds docbook-utils fop libxslt dblatex xmlto
+
+openSUSE Packages
+-----------------
+
+The following list shows the required packages by function given a
+supported openSUSE Linux distribution:
+
+- *Essentials:* Packages needed to build an image for a headless
+  system:
+  ::
+
+     $ sudo zypper install &OPENSUSE_HOST_PACKAGES_ESSENTIAL;
+
+- *Documentation:* Packages needed if you are going to build out the
+  Yocto Project documentation manuals:
+  ::
+
+     $ sudo zypper install dblatex xmlto
+
+CentOS-7 Packages
+-----------------
+
+The following list shows the required packages by function given a
+supported CentOS-7 Linux distribution:
+
+- *Essentials:* Packages needed to build an image for a headless
+  system:
+  ::
+
+     $ sudo yum install &CENTOS7_HOST_PACKAGES_ESSENTIAL;
+
+  .. note::
+
+     - Extra Packages for Enterprise Linux (i.e. ``epel-release``) is
+       a collection of packages from Fedora built on RHEL/CentOS for
+       easy installation of packages not included in enterprise Linux
+       by default. You need to install these packages separately.
+
+     - The ``makecache`` command consumes additional Metadata from
+       ``epel-release``.
+
+- *Documentation:* Packages needed if you are going to build out the
+  Yocto Project documentation manuals:
+  ::
+
+     $ sudo yum install docbook-style-dsssl docbook-style-xsl \
+     docbook-dtds docbook-utils fop libxslt dblatex xmlto
+
+CentOS-8 Packages
+-----------------
+
+The following list shows the required packages by function given a
+supported CentOS-8 Linux distribution:
+
+- *Essentials:* Packages needed to build an image for a headless
+  system:
+  ::
+
+     $ sudo dnf install &CENTOS8_HOST_PACKAGES_ESSENTIAL;
+
+  .. note::
+
+     - Extra Packages for Enterprise Linux (i.e. ``epel-release``) is
+       a collection of packages from Fedora built on RHEL/CentOS for
+       easy installation of packages not included in enterprise Linux
+       by default. You need to install these packages separately.
+
+     - The ``PowerTools`` repo provides additional packages such as
+       ``rpcgen`` and ``texinfo``.
+
+     - The ``makecache`` command consumes additional Metadata from
+       ``epel-release``.
+
+- *Documentation:* Packages needed if you are going to build out the
+  Yocto Project documentation manuals:
+  ::
+
+     $ sudo dnf install docbook-style-dsssl docbook-style-xsl \
+     docbook-dtds docbook-utils fop libxslt dblatex xmlto
+
+Required Git, tar, Python and gcc Versions
+==========================================
+
+In order to use the build system, your host development system must meet
+the following version requirements for Git, tar, and Python:
+
+- Git 1.8.3.1 or greater
+
+- tar 1.28 or greater
+
+- Python 3.5.0 or greater
+
+If your host development system does not meet all these requirements,
+you can resolve this by installing a ``buildtools`` tarball that
+contains these tools. You can get the tarball one of two ways: download
+a pre-built tarball or use BitBake to build the tarball.
+
+In addition, your host development system must meet the following
+version requirement for gcc:
+
+- gcc 5.0 or greater
+
+If your host development system does not meet this requirement, you can
+resolve this by installing a ``buildtools-extended`` tarball that
+contains additional tools, the equivalent of ``buildtools-essential``.
+
+Installing a Pre-Built ``buildtools`` Tarball with ``install-buildtools`` script
+--------------------------------------------------------------------------------
+
+The ``install-buildtools`` script is the easiest of the three methods by
+which you can get these tools. It downloads a pre-built buildtools
+installer and automatically installs the tools for you:
+
+1. Execute the ``install-buildtools`` script. Here is an example:
+   ::
+
+      $ cd poky
+      $ scripts/install-buildtools --without-extended-buildtools \
+        --base-url https://downloads.yoctoproject.org/releases/yocto \
+        --release yocto-&DISTRO; \
+        --installer-version &DISTRO;
+
+   During execution, the buildtools tarball will be downloaded, the
+   checksum of the download will be verified, the installer will be run
+   for you, and some basic checks will be run to make sure the
+   installation is functional.
+
+   To avoid the need for ``sudo`` privileges, the ``install-buildtools``
+   script will by default tell the installer to install in:
+   ::
+
+      /path/to/poky/buildtools
+
+   If your host development system needs the additional tools provided
+   in the ``buildtools-extended`` tarball, you can instead execute the
+   ``install-buildtools`` script with the default parameters:
+   ::
+
+      $ cd poky
+      $ scripts/install-buildtools
+
+2. Source the tools environment setup script by using a command like the
+   following:
+   ::
+
+      $ source /path/to/poky/buildtools/environment-setup-x86_64-pokysdk-linux
+
+   Of course, you need to supply your installation directory and be sure to
+   use the right file (i.e. i586 or x86_64).
+
+   After you have sourced the setup script, the tools are added to
+   ``PATH`` and any other environment variables required to run the
+   tools are initialized. The results are working versions of Git, tar,
+   Python and ``chrpath``. In the case of the ``buildtools-extended``
+   tarball, you also get working versions of additional tools, including
+   ``gcc``, ``make`` and the other tools included in
+   ``packagegroup-core-buildessential``.
+
+Downloading a Pre-Built ``buildtools`` Tarball
+----------------------------------------------
+
+Downloading and running a pre-built buildtools installer is the easier
+of the two remaining methods by which you can get these tools:
+
+1. Locate and download the ``*.sh`` at &YOCTO_RELEASE_DL_URL;/buildtools/
+
+2. Execute the installation script. Here is an example for the
+   traditional installer:
+   ::
+
+      $ sh ~/Downloads/x86_64-buildtools-nativesdk-standalone-&DISTRO;.sh
+
+   Here is an example for the extended installer:
+   ::
+
+      $ sh ~/Downloads/x86_64-buildtools-extended-nativesdk-standalone-&DISTRO;.sh
+
+   During execution, a prompt appears that allows you to choose the
+   installation directory. For example, you could choose the following:
+   /home/your_username/buildtools
+
+3. Source the tools environment setup script by using a command like the
+   following:
+   ::
+
+      $ source /home/your_username/buildtools/environment-setup-i586-poky-linux
+
+   Of course, you need to supply your installation directory and be sure to
+   use the right file (i.e. i586 or x86_64).
+
+   After you have sourced the setup script, the tools are added to
+   ``PATH`` and any other environment variables required to run the
+   tools are initialized. The results are working versions of Git, tar,
+   Python and ``chrpath``. In the case of the ``buildtools-extended``
+   tarball, you also get working versions of additional tools, including
+   ``gcc``, ``make`` and the other tools included in
+   ``packagegroup-core-buildessential``.
+
+Building Your Own ``buildtools`` Tarball
+----------------------------------------
+
+Building and running your own buildtools installer applies only when you
+have a build host that can already run BitBake. In this case, you use
+that machine to build the ``.sh`` file and then take steps to transfer
+and run it on a machine that does not meet the minimal Git, tar, and
+Python (or gcc) requirements.
+
+Here are the steps to take to build and run your own buildtools
+installer:
+
+1. On the machine that is able to run BitBake, be sure you have set up
+   your build environment with the setup script
+   (:ref:`structure-core-script`).
+
+2. Run the BitBake command to build the tarball:
+   ::
+
+      $ bitbake buildtools-tarball
+
+   or run the BitBake command to build the extended tarball:
+   ::
+
+      $ bitbake buildtools-extended-tarball
+
+   .. note::
+
+      The ``SDKMACHINE`` variable in your ``local.conf`` file determines
+      whether you build tools for a 32-bit or 64-bit system.
+
+   Once the build completes, you can find the ``.sh`` file that installs
+   the tools in the ``tmp/deploy/sdk`` subdirectory of the
+   :term:`Build Directory`. The installer file has the string
+   "buildtools" (or "buildtools-extended") in the name.
+
+3.
Transfer the ``.sh`` file from the build host to the machine that + does not meet the Git, tar, or Python (or gcc) requirements. + +4. On the machine that does not meet the requirements, run the ``.sh`` + file to install the tools. Here is an example for the traditional + installer: + :: + + $ sh ~/Downloads/x86_64-buildtools-nativesdk-standalone-&DISTRO;.sh + + Here is an example for the extended installer: + :: + + $ sh ~/Downloads/x86_64-buildtools-extended-nativesdk-standalone-&DISTRO;.sh + + During execution, a prompt appears that allows you to choose the + installation directory. For example, you could choose the following: + /home/your_username/buildtools + +5. Source the tools environment setup script by using a command like the + following: + :: + + $ source /home/your_username/buildtools/environment-setup-x86_64-poky-linux + + Of course, you need to supply your installation directory and be sure to + use the right file (i.e. i586 or x86_64). + + After you have sourced the setup script, the tools are added to + ``PATH`` and any other environment variables required to run the + tools are initialized. The results are working versions versions of + Git, tar, Python and ``chrpath``. And in the case of the + ``buildtools-extended`` tarball, additional working versions of tools + including ``gcc``, ``make`` and the other tools included in + ``packagegroup-core-buildessential``. diff --git a/poky/documentation/ref-manual/ref-tasks.rst b/poky/documentation/ref-manual/ref-tasks.rst new file mode 100644 index 000000000..dcdff05dc --- /dev/null +++ b/poky/documentation/ref-manual/ref-tasks.rst @@ -0,0 +1,875 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***** +Tasks +***** + +Tasks are units of execution for BitBake. Recipes (``.bb`` files) use +tasks to complete configuring, compiling, and packaging software. This +chapter provides a reference of the tasks defined in the OpenEmbedded +build system. + +Normal Recipe Build Tasks +========================= + +The following sections describe normal tasks associated with building a +recipe. For more information on tasks and dependencies, see the +":ref:`Tasks `" and +":ref:`Dependencies `" sections in the +BitBake User Manual. + +.. _ref-tasks-build: + +``do_build`` +------------ + +The default task for all recipes. This task depends on all other normal +tasks required to build a recipe. + +.. _ref-tasks-compile: + +``do_compile`` +-------------- + +Compiles the source code. This task runs with the current working +directory set to ``${``\ :term:`B`\ ``}``. + +The default behavior of this task is to run the ``oe_runmake`` function +if a makefile (``Makefile``, ``makefile``, or ``GNUmakefile``) is found. +If no such file is found, the ``do_compile`` task does nothing. + +.. _ref-tasks-compile_ptest_base: + +``do_compile_ptest_base`` +------------------------- + +Compiles the runtime test suite included in the software being built. + +.. _ref-tasks-configure: + +``do_configure`` +---------------- + +Configures the source by enabling and disabling any build-time and +configuration options for the software being built. The task runs with +the current working directory set to ``${``\ :term:`B`\ ``}``. + +The default behavior of this task is to run ``oe_runmake clean`` if a +makefile (``Makefile``, ``makefile``, or ``GNUmakefile``) is found and +:term:`CLEANBROKEN` is not set to "1". If no such +file is found or the ``CLEANBROKEN`` variable is set to "1", the +``do_configure`` task does nothing. + +.. 
_ref-tasks-configure_ptest_base: + +``do_configure_ptest_base`` +--------------------------- + +Configures the runtime test suite included in the software being built. + +.. _ref-tasks-deploy: + +``do_deploy`` +------------- + +Writes output files that are to be deployed to +``${``\ :term:`DEPLOY_DIR_IMAGE`\ ``}``. The +task runs with the current working directory set to +``${``\ :term:`B`\ ``}``. + +Recipes implementing this task should inherit the +:ref:`deploy ` class and should write the output +to ``${``\ :term:`DEPLOYDIR`\ ``}``, which is not to be +confused with ``${DEPLOY_DIR}``. The ``deploy`` class sets up +``do_deploy`` as a shared state (sstate) task that can be accelerated +through sstate use. The sstate mechanism takes care of copying the +output from ``${DEPLOYDIR}`` to ``${DEPLOY_DIR_IMAGE}``. + +.. note:: + + Do not write the output directly to + ${DEPLOY_DIR_IMAGE} + , as this causes the sstate mechanism to malfunction. + +The ``do_deploy`` task is not added as a task by default and +consequently needs to be added manually. If you want the task to run +after :ref:`ref-tasks-compile`, you can add it by doing +the following: addtask deploy after do_compile Adding ``do_deploy`` +after other tasks works the same way. + +.. note:: + + You do not need to add + before do_build + to the + addtask + command (though it is harmless), because the + base + class contains the following: + :: + + do_build[recrdeptask] += "do_deploy" + + + See the " + Dependencies + " section in the BitBake User Manual for more information. + +If the ``do_deploy`` task re-executes, any previous output is removed +(i.e. "cleaned"). + +.. _ref-tasks-fetch: + +``do_fetch`` +------------ + +Fetches the source code. This task uses the +:term:`SRC_URI` variable and the argument's prefix to +determine the correct :ref:`fetcher ` +module. + +.. _ref-tasks-image: + +``do_image`` +------------ + +Starts the image generation process. The ``do_image`` task runs after +the OpenEmbedded build system has run the +:ref:`ref-tasks-rootfs` task during which packages are +identified for installation into the image and the root filesystem is +created, complete with post-processing. + +The ``do_image`` task performs pre-processing on the image through the +:term:`IMAGE_PREPROCESS_COMMAND` and +dynamically generates supporting ``do_image_*`` tasks as needed. + +For more information on image creation, see the ":ref:`image-generation-dev-environment`" +section in the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-image-complete: + +``do_image_complete`` +--------------------- + +Completes the image generation process. The ``do_image_complete`` task +runs after the OpenEmbedded build system has run the +:ref:`ref-tasks-image` task during which image +pre-processing occurs and through dynamically generated ``do_image_*`` +tasks the image is constructed. + +The ``do_image_complete`` task performs post-processing on the image +through the +:term:`IMAGE_POSTPROCESS_COMMAND`. + +For more information on image creation, see the +":ref:`image-generation-dev-environment`" +section in the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-install: + +``do_install`` +-------------- + +Copies files that are to be packaged into the holding area +``${``\ :term:`D`\ ``}``. This task runs with the current +working directory set to ``${``\ :term:`B`\ ``}``, which is the +compilation directory. The ``do_install`` task, as well as other tasks +that either directly or indirectly depend on the installed files (e.g. 
+:ref:`ref-tasks-package`, ``do_package_write_*``, and +:ref:`ref-tasks-rootfs`), run under +:ref:`fakeroot `. + +.. note:: + + When installing files, be careful not to set the owner and group IDs + of the installed files to unintended values. Some methods of copying + files, notably when using the recursive ``cp`` command, can preserve + the UID and/or GID of the original file, which is usually not what + you want. The ``host-user-contaminated`` QA check checks for files + that probably have the wrong ownership. + + Safe methods for installing files include the following: + + - The ``install`` utility. This utility is the preferred method. + + - The ``cp`` command with the "--no-preserve=ownership" option. + + - The ``tar`` command with the "--no-same-owner" option. See the + ``bin_package.bbclass`` file in the ``meta/classes`` directory of + the :term:`Source Directory` for an example. + +.. _ref-tasks-install_ptest_base: + +``do_install_ptest_base`` +------------------------- + +Copies the runtime test suite files from the compilation directory to a +holding area. + +.. _ref-tasks-package: + +``do_package`` +-------------- + +Analyzes the content of the holding area +``${``\ :term:`D`\ ``}`` and splits the content into subsets +based on available packages and files. This task makes use of the +:term:`PACKAGES` and :term:`FILES` +variables. + +The ``do_package`` task, in conjunction with the +:ref:`ref-tasks-packagedata` task, also saves some +important package metadata. For additional information, see the +:term:`PKGDESTWORK` variable and the +":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" +section in the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-package_qa: + +``do_package_qa`` +----------------- + +Runs QA checks on packaged files. For more information on these checks, +see the :ref:`insane ` class. + +.. _ref-tasks-package_write_deb: + +``do_package_write_deb`` +------------------------ + +Creates Debian packages (i.e. ``*.deb`` files) and places them in the +``${``\ :term:`DEPLOY_DIR_DEB`\ ``}`` directory in +the package feeds area. For more information, see the +":ref:`package-feeds-dev-environment`" section in +the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-package_write_ipk: + +``do_package_write_ipk`` +------------------------ + +Creates IPK packages (i.e. ``*.ipk`` files) and places them in the +``${``\ :term:`DEPLOY_DIR_IPK`\ ``}`` directory in +the package feeds area. For more information, see the +":ref:`package-feeds-dev-environment`" section in +the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-package_write_rpm: + +``do_package_write_rpm`` +------------------------ + +Creates RPM packages (i.e. ``*.rpm`` files) and places them in the +``${``\ :term:`DEPLOY_DIR_RPM`\ ``}`` directory in +the package feeds area. For more information, see the +":ref:`package-feeds-dev-environment`" section in +the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-package_write_tar: + +``do_package_write_tar`` +------------------------ + +Creates tarballs and places them in the +``${``\ :term:`DEPLOY_DIR_TAR`\ ``}`` directory in +the package feeds area. For more information, see the +":ref:`package-feeds-dev-environment`" section in +the Yocto Project Overview and Concepts Manual. + +.. _ref-tasks-packagedata: + +``do_packagedata`` +------------------ + +Saves package metadata generated by the +:ref:`ref-tasks-package` task in +:term:`PKGDATA_DIR` to make it available globally. + +.. 
_ref-tasks-patch: + +``do_patch`` +------------ + +Locates patch files and applies them to the source code. + +After fetching and unpacking source files, the build system uses the +recipe's :term:`SRC_URI` statements +to locate and apply patch files to the source code. + +.. note:: + + The build system uses the + FILESPATH + variable to determine the default set of directories when searching + for patches. + +Patch files, by default, are ``*.patch`` and ``*.diff`` files created +and kept in a subdirectory of the directory holding the recipe file. For +example, consider the +:yocto_git:`bluez5 ` +recipe from the OE-Core layer (i.e. ``poky/meta``): +:: + + poky/meta/recipes-connectivity/bluez5 + +This recipe has two patch files located here: +:: + + poky/meta/recipes-connectivity/bluez5/bluez5 + +In the ``bluez5`` recipe, the ``SRC_URI`` statements point to the source +and patch files needed to build the package. + +.. note:: + + In the case for the + bluez5_5.48.bb + recipe, the + SRC_URI + statements are from an include file + bluez5.inc + . + +As mentioned earlier, the build system treats files whose file types are +``.patch`` and ``.diff`` as patch files. However, you can use the +"apply=yes" parameter with the ``SRC_URI`` statement to indicate any +file as a patch file: +:: + + SRC_URI = " \\ + git://path_to_repo/some_package \\ + file://file;apply=yes \\ + " + +Conversely, if you have a directory full of patch files and you want to +exclude some so that the ``do_patch`` task does not apply them during +the patch phase, you can use the "apply=no" parameter with the +``SRC_URI`` statement: +:: + + SRC_URI = " \ + git://path_to_repo/some_package \ + file://path_to_lots_of_patch_files \ + file://path_to_lots_of_patch_files/patch_file5;apply=no \ + " + +In the +previous example, assuming all the files in the directory holding the +patch files end with either ``.patch`` or ``.diff``, every file would be +applied as a patch by default except for the patch_file5 patch. + +You can find out more about the patching process in the +":ref:`patching-dev-environment`" section in +the Yocto Project Overview and Concepts Manual and the +":ref:`new-recipe-patching-code`" section in the +Yocto Project Development Tasks Manual. + +.. _ref-tasks-populate_lic: + +``do_populate_lic`` +------------------- + +Writes license information for the recipe that is collected later when +the image is constructed. + +.. _ref-tasks-populate_sdk: + +``do_populate_sdk`` +------------------- + +Creates the file and directory structure for an installable SDK. See the +":ref:`sdk-generation-dev-environment`" +section in the Yocto Project Overview and Concepts Manual for more +information. + +.. _ref-tasks-populate_sdk_ext: + +``do_populate_sdk_ext`` +----------------------- + +Creates the file and directory structure for an installable extensible +SDK (eSDK). See the ":ref:`sdk-generation-dev-environment`" +section in the Yocto Project Overview and Concepts Manual for more +information. + + +.. _ref-tasks-populate_sysroot: + +``do_populate_sysroot`` +----------------------- + +Stages (copies) a subset of the files installed by the +:ref:`ref-tasks-install` task into the appropriate +sysroot. For information on how to access these files from other +recipes, see the :term:`STAGING_DIR* ` variables. +Directories that would typically not be needed by other recipes at build +time (e.g. ``/etc``) are not copied by default. + +For information on what directories are copied by default, see the +:term:`SYSROOT_DIRS* ` variables. 
You can change +these variables inside your recipe if you need to make additional (or +fewer) directories available to other recipes at build time. + +The ``do_populate_sysroot`` task is a shared state (sstate) task, which +means that the task can be accelerated through sstate use. Realize also +that if the task is re-executed, any previous output is removed (i.e. +"cleaned"). + +.. _ref-tasks-prepare_recipe_sysroot: + +``do_prepare_recipe_sysroot`` +----------------------------- + +Installs the files into the individual recipe specific sysroots (i.e. +``recipe-sysroot`` and ``recipe-sysroot-native`` under +``${``\ :term:`WORKDIR`\ ``}`` based upon the +dependencies specified by :term:`DEPENDS`). See the +":ref:`staging `" class for more information. + +.. _ref-tasks-rm_work: + +``do_rm_work`` +-------------- + +Removes work files after the OpenEmbedded build system has finished with +them. You can learn more by looking at the +":ref:`rm_work.bbclass `" section. + +.. _ref-tasks-unpack: + +``do_unpack`` +------------- + +Unpacks the source code into a working directory pointed to by +``${``\ :term:`WORKDIR`\ ``}``. The :term:`S` +variable also plays a role in where unpacked source files ultimately +reside. For more information on how source files are unpacked, see the +":ref:`source-fetching-dev-environment`" +section in the Yocto Project Overview and Concepts Manual and also see +the ``WORKDIR`` and ``S`` variable descriptions. + +Manually Called Tasks +===================== + +These tasks are typically manually triggered (e.g. by using the +``bitbake -c`` command-line option): + +.. _ref-tasks-checkpkg: + +``do_checkpkg`` +--------------- + +Provides information about the recipe including its upstream version and +status. The upstream version and status reveals whether or not a version +of the recipe exists upstream and a status of not updated, updated, or +unknown. + +To check the upstream version and status of a recipe, use the following +devtool commands: +:: + + $ devtool latest-version + $ devtool check-upgrade-status + +See the ":ref:`ref-manual/ref-devtool-reference:\`\`devtool\`\` quick reference`" +chapter for more information on +``devtool``. See the ":ref:`devtool-checking-on-the-upgrade-status-of-a-recipe`" +section for information on checking the upgrade status of a recipe. + +To build the ``checkpkg`` task, use the ``bitbake`` command with the +"-c" option and task name: +:: + + $ bitbake core-image-minimal -c checkpkg + +By default, the results are stored in :term:`$LOG_DIR ` (e.g. +``$BUILD_DIR/tmp/log``). + +.. _ref-tasks-checkuri: + +``do_checkuri`` +--------------- + +Validates the :term:`SRC_URI` value. + +.. _ref-tasks-clean: + +``do_clean`` +------------ + +Removes all output files for a target from the +:ref:`ref-tasks-unpack` task forward (i.e. ``do_unpack``, +:ref:`ref-tasks-configure`, +:ref:`ref-tasks-compile`, +:ref:`ref-tasks-install`, and +:ref:`ref-tasks-package`). + +You can run this task using BitBake as follows: +:: + + $ bitbake -c clean recipe + +Running this task does not remove the +:ref:`sstate ` cache files. +Consequently, if no changes have been made and the recipe is rebuilt +after cleaning, output files are simply restored from the sstate cache. +If you want to remove the sstate cache files for the recipe, you need to +use the :ref:`ref-tasks-cleansstate` task instead +(i.e. ``bitbake -c cleansstate`` recipe). + +.. 
_ref-tasks-cleanall:
+
+``do_cleanall``
+---------------
+
+Removes all output files, shared state
+(:ref:`sstate `) cache, and
+downloaded source files for a target (i.e. the contents of
+:term:`DL_DIR`). Essentially, the ``do_cleanall`` task is
+identical to the :ref:`ref-tasks-cleansstate` task
+with the added removal of downloaded source files.
+
+You can run this task using BitBake as follows:
+::
+
+   $ bitbake -c cleanall recipe
+
+You would not normally use the ``cleanall`` task. Do so only
+if you want to start fresh with the :ref:`ref-tasks-fetch`
+task.
+
+.. _ref-tasks-cleansstate:
+
+``do_cleansstate``
+------------------
+
+Removes all output files and shared state
+(:ref:`sstate `) cache for a
+target. Essentially, the ``do_cleansstate`` task is identical to the
+:ref:`ref-tasks-clean` task with the added removal of
+shared state (:ref:`sstate `)
+cache.
+
+You can run this task using BitBake as follows:
+::
+
+   $ bitbake -c cleansstate recipe
+
+When you run the ``do_cleansstate`` task, the OpenEmbedded build system
+no longer uses any sstate. Consequently, building the recipe from
+scratch is guaranteed.
+
+.. note::
+
+   The ``do_cleansstate`` task cannot remove sstate from a remote
+   sstate mirror. If you need to build a target from scratch using
+   remote mirrors, use the "-f" option as follows:
+   ::
+
+      $ bitbake -f -c do_cleansstate target
+
+
+.. _ref-tasks-devpyshell:
+
+``do_devpyshell``
+-----------------
+
+Starts a shell in which an interactive Python interpreter allows you to
+interact with the BitBake build environment. From within this shell, you
+can directly examine and set bits from the data store and execute
+functions as if within the BitBake environment. See the ":ref:`platdev-appdev-devpyshell`" section in
+the Yocto Project Development Tasks Manual for more information about
+using ``devpyshell``.
+
+.. _ref-tasks-devshell:
+
+``do_devshell``
+---------------
+
+Starts a shell whose environment is set up for development, debugging,
+or both. See the ":ref:`platdev-appdev-devshell`" section in the
+Yocto Project Development Tasks Manual for more information about using
+``devshell``.
+
+.. _ref-tasks-listtasks:
+
+``do_listtasks``
+----------------
+
+Lists all defined tasks for a target.
+
+.. _ref-tasks-package_index:
+
+``do_package_index``
+--------------------
+
+Creates or updates the index in the :ref:`package-feeds-dev-environment` area.
+
+.. note::
+
+   This task is not triggered with the ``bitbake -c`` command-line
+   option as are the other tasks in this section. Because this task is
+   specifically for the ``package-index`` recipe, you run it using
+   ``bitbake package-index``.
+
+Image-Related Tasks
+===================
+
+The following tasks are applicable to image recipes.
+
+.. _ref-tasks-bootimg:
+
+``do_bootimg``
+--------------
+
+Creates a bootable live image. See the
+:term:`IMAGE_FSTYPES` variable for additional
+information on live image types.
+
+.. _ref-tasks-bundle_initramfs:
+
+``do_bundle_initramfs``
+-----------------------
+
+Combines an initial RAM disk (initramfs) image and kernel together to
+form a single image. The
+:term:`CONFIG_INITRAMFS_SOURCE` variable
+has some more information about these types of images.
+
+.. _ref-tasks-rootfs:
+
+``do_rootfs``
+-------------
+
+Creates the root filesystem (file and directory structure) for an image.
+See the ":ref:`image-generation-dev-environment`"
+section in the Yocto Project Overview and Concepts Manual for more
+information on how the root filesystem is created.
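+
+As with other tasks, you can run ``do_rootfs`` explicitly against an
+image recipe. The following is an illustrative sketch that assumes the
+``core-image-minimal`` image recipe:
+::
+
+   $ bitbake core-image-minimal -c rootfs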
+ +.. _ref-tasks-testimage: + +``do_testimage`` +---------------- + +Boots an image and performs runtime tests within the image. For +information on automatically testing images, see the +":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" +section in the Yocto Project Development Tasks Manual. + +.. _ref-tasks-testimage_auto: + +``do_testimage_auto`` +--------------------- + +Boots an image and performs runtime tests within the image immediately +after it has been built. This task is enabled when you set +:term:`TESTIMAGE_AUTO` equal to "1". + +For information on automatically testing images, see the +":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" +section in the Yocto Project Development Tasks Manual. + +Kernel-Related Tasks +==================== + +The following tasks are applicable to kernel recipes. Some of these +tasks (e.g. the :ref:`ref-tasks-menuconfig` task) are +also applicable to recipes that use Linux kernel style configuration +such as the BusyBox recipe. + +.. _ref-tasks-compile_kernelmodules: + +``do_compile_kernelmodules`` +---------------------------- + +Runs the step that builds the kernel modules (if needed). Building a +kernel consists of two steps: 1) the kernel (``vmlinux``) is built, and +2) the modules are built (i.e. ``make modules``). + +.. _ref-tasks-diffconfig: + +``do_diffconfig`` +----------------- + +When invoked by the user, this task creates a file containing the +differences between the original config as produced by +:ref:`ref-tasks-kernel_configme` task and the +changes made by the user with other methods (i.e. using +(:ref:`ref-tasks-kernel_menuconfig`). Once the +file of differences is created, it can be used to create a config +fragment that only contains the differences. You can invoke this task +from the command line as follows: +:: + + $ bitbake linux-yocto -c diffconfig + +For more information, see the +":ref:`kernel-dev/kernel-dev-common:creating configuration fragments`" +section in the Yocto Project Linux Kernel Development Manual. + +.. _ref-tasks-kernel_checkout: + +``do_kernel_checkout`` +---------------------- + +Converts the newly unpacked kernel source into a form with which the +OpenEmbedded build system can work. Because the kernel source can be +fetched in several different ways, the ``do_kernel_checkout`` task makes +sure that subsequent tasks are given a clean working tree copy of the +kernel with the correct branches checked out. + +.. _ref-tasks-kernel_configcheck: + +``do_kernel_configcheck`` +------------------------- + +Validates the configuration produced by the +:ref:`ref-tasks-kernel_menuconfig` task. The +``do_kernel_configcheck`` task produces warnings when a requested +configuration does not appear in the final ``.config`` file or when you +override a policy configuration in a hardware configuration fragment. +You can run this task explicitly and view the output by using the +following command: +:: + + $ bitbake linux-yocto -c kernel_configcheck -f + +For more information, see the +":ref:`kernel-dev/kernel-dev-common:validating configuration`" +section in the Yocto Project Linux Kernel Development Manual. + +.. _ref-tasks-kernel_configme: + +``do_kernel_configme`` +---------------------- + +After the kernel is patched by the :ref:`ref-tasks-patch` +task, the ``do_kernel_configme`` task assembles and merges all the +kernel config fragments into a merged configuration that can then be +passed to the kernel configuration phase proper. 
This is also the time +during which user-specified defconfigs are applied if present, and where +configuration modes such as ``--allnoconfig`` are applied. + +.. _ref-tasks-kernel_menuconfig: + +``do_kernel_menuconfig`` +------------------------ + +Invoked by the user to manipulate the ``.config`` file used to build a +linux-yocto recipe. This task starts the Linux kernel configuration +tool, which you then use to modify the kernel configuration. + +.. note:: + + You can also invoke this tool from the command line as follows: + :: + + $ bitbake linux-yocto -c menuconfig + + +See the ":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``" +section in the Yocto Project Linux Kernel Development Manual for more +information on this configuration tool. + +.. _ref-tasks-kernel_metadata: + +``do_kernel_metadata`` +---------------------- + +Collects all the features required for a given kernel build, whether the +features come from :term:`SRC_URI` or from Git +repositories. After collection, the ``do_kernel_metadata`` task +processes the features into a series of config fragments and patches, +which can then be applied by subsequent tasks such as +:ref:`ref-tasks-patch` and +:ref:`ref-tasks-kernel_configme`. + +.. _ref-tasks-menuconfig: + +``do_menuconfig`` +----------------- + +Runs ``make menuconfig`` for the kernel. For information on +``menuconfig``, see the +":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``" +section in the Yocto Project Linux Kernel Development Manual. + +.. _ref-tasks-savedefconfig: + +``do_savedefconfig`` +-------------------- + +When invoked by the user, creates a defconfig file that can be used +instead of the default defconfig. The saved defconfig contains the +differences between the default defconfig and the changes made by the +user using other methods (i.e. the +:ref:`ref-tasks-kernel_menuconfig` task. You +can invoke the task using the following command: +:: + + $ bitbake linux-yocto -c savedefconfig + +.. _ref-tasks-shared_workdir: + +``do_shared_workdir`` +--------------------- + +After the kernel has been compiled but before the kernel modules have +been compiled, this task copies files required for module builds and +which are generated from the kernel build into the shared work +directory. With these copies successfully copied, the +:ref:`ref-tasks-compile_kernelmodules` task +can successfully build the kernel modules in the next step of the build. + +.. _ref-tasks-sizecheck: + +``do_sizecheck`` +---------------- + +After the kernel has been built, this task checks the size of the +stripped kernel image against +:term:`KERNEL_IMAGE_MAXSIZE`. If that +variable was set and the size of the stripped kernel exceeds that size, +the kernel build produces a warning to that effect. + +.. _ref-tasks-strip: + +``do_strip`` +------------ + +If ``KERNEL_IMAGE_STRIP_EXTRA_SECTIONS`` is defined, this task strips +the sections named in that variable from ``vmlinux``. This stripping is +typically used to remove nonessential sections such as ``.comment`` +sections from a size-sensitive configuration. + +.. _ref-tasks-validate_branches: + +``do_validate_branches`` +------------------------ + +After the kernel is unpacked but before it is patched, this task makes +sure that the machine and metadata branches as specified by the +:term:`SRCREV` variables actually exist on the specified +branches. If these branches do not exist and +:term:`AUTOREV` is not being used, the +``do_validate_branches`` task fails during the build. 
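+
+Like the other kernel-related tasks in this section,
+``do_validate_branches`` can be invoked explicitly with ``bitbake -c``.
+The following is an illustrative sketch that assumes the ``linux-yocto``
+recipe:
+::
+
+   $ bitbake linux-yocto -c validate_branches -f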
+ +Miscellaneous Tasks +=================== + +The following sections describe miscellaneous tasks. + +.. _ref-tasks-spdx: + +``do_spdx`` +----------- + +A build stage that takes the source code and scans it on a remote +FOSSOLOGY server in order to produce an SPDX document. This task applies +only to the :ref:`spdx ` class. diff --git a/poky/documentation/ref-manual/ref-terms.rst b/poky/documentation/ref-manual/ref-terms.rst new file mode 100644 index 000000000..6e7e5169c --- /dev/null +++ b/poky/documentation/ref-manual/ref-terms.rst @@ -0,0 +1,397 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************* +Yocto Project Terms +******************* + +Following is a list of terms and definitions users new to the Yocto Project +development environment might find helpful. While some of these terms are +universal, the list includes them just in case: + +.. glossary:: + + Append Files + Files that append build information to a recipe file. Append files are + known as BitBake append files and ``.bbappend`` files. The OpenEmbedded + build system expects every append file to have a corresponding recipe + (``.bb``) file. Furthermore, the append file and corresponding recipe file + must use the same root filename. The filenames can differ only in the + file type suffix used (e.g. ``formfactor_0.0.bb`` and + ``formfactor_0.0.bbappend``). + + Information in append files extends or overrides the information in the + similarly-named recipe file. For an example of an append file in use, see + the ":ref:`dev-manual/dev-manual-common-tasks:Using .bbappend Files in + Your Layer`" section in the Yocto Project Development Tasks Manual. + + When you name an append file, you can use the "``%``" wildcard character + to allow for matching recipe names. For example, suppose you have an + append file named as follows: + :: + + busybox_1.21.%.bbappend + + That append file + would match any ``busybox_1.21.``\ x\ ``.bb`` version of the recipe. So, + the append file would match any of the following recipe names: + + .. code-block:: shell + + busybox_1.21.1.bb + busybox_1.21.2.bb + busybox_1.21.3.bb + busybox_1.21.10.bb + busybox_1.21.25.bb + + .. note:: + + The use of the " % " character is limited in that it only works + directly in front of the .bbappend portion of the append file's + name. You cannot use the wildcard character in any other location of + the name. + + BitBake + The task executor and scheduler used by the OpenEmbedded build system to + build images. For more information on BitBake, see the :doc:`BitBake User + Manual `. + + Board Support Package (BSP) + A group of drivers, definitions, and other components that provide support + for a specific hardware configuration. For more information on BSPs, see + the :ref:`bsp-guide/bsp-guide:Yocto Project Board Support Package + Developer's Guide`. + + Build Directory + This term refers to the area used by the OpenEmbedded build system for + builds. The area is created when you ``source`` the setup environment + script that is found in the Source Directory + (i.e. :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\``). The + :term:`TOPDIR` variable points to the Build Directory. + + You have a lot of flexibility when creating the Build Directory. + Following are some examples that show how to create the directory. The + examples assume your :term:`Source Directory` is named ``poky``: + + - Create the Build Directory inside your Source Directory and let + the name of the Build Directory default to ``build``: + + .. 
code-block:: shell + + $ cd $HOME/poky + $ source oe-init-build-env + + - Create the Build Directory inside your home directory and + specifically name it ``test-builds``: + + .. code-block:: shell + + $ cd $HOME + $ source poky/oe-init-build-env test-builds + + - Provide a directory path and specifically name the Build + Directory. Any intermediate folders in the pathname must exist. + This next example creates a Build Directory named + ``YP-POKYVERSION`` in your home directory within the existing + directory ``mybuilds``: + + .. code-block:: shell + + $ cd $HOME + $ source $HOME/poky/oe-init-build-env $HOME/mybuilds/YP-POKYVERSION + + .. note:: + + By default, the Build Directory contains :term:`TMPDIR` , which is a + temporary directory the build system uses for its work. TMPDIR cannot + be under NFS. Thus, by default, the Build Directory cannot be under + NFS. However, if you need the Build Directory to be under NFS, you can + set this up by setting TMPDIR in your local.conf file to use a local + drive. Doing so effectively separates TMPDIR from TOPDIR , which is the + Build Directory. + + Build Host + The system used to build images in a Yocto Project Development + environment. The build system is sometimes referred to as the development + host. + + Classes + Files that provide for logic encapsulation and inheritance so that + commonly used patterns can be defined once and then easily used in + multiple recipes. For reference information on the Yocto Project classes, + see the ":ref:`ref-manual/ref-classes:Classes`" chapter. Class files end with the + ``.bbclass`` filename extension. + + Configuration File + Files that hold global definitions of variables, user-defined variables, + and hardware configuration information. These files tell the OpenEmbedded + build system what to build and what to put into the image to support a + particular platform. + + Configuration files end with a ``.conf`` filename extension. The + :file:`conf/local.conf` configuration file in the :term:`Build Directory` + contains user-defined variables that affect every build. The + :file:`meta-poky/conf/distro/poky.conf` configuration file defines Yocto + "distro" configuration variables used only when building with this + policy. Machine configuration files, which are located throughout the + :term:`Source Directory`, define variables for specific hardware and are + only used when building for that target (e.g. the + :file:`machine/beaglebone.conf` configuration file defines variables for + the Texas Instruments ARM Cortex-A8 development board). + + Container Layer + Layers that hold other layers. An example of a container layer is + OpenEmbedded's `meta-openembedded + `_ layer. The + ``meta-openembedded`` layer contains many ``meta-*`` layers. + + Cross-Development Toolchain + In general, a cross-development toolchain is a collection of software + development tools and utilities that run on one architecture and allow you + to develop software for a different, or targeted, architecture. These + toolchains contain cross-compilers, linkers, and debuggers that are + specific to the target architecture. + + The Yocto Project supports two different cross-development toolchains: + + - A toolchain only used by and within BitBake when building an image for a + target architecture. + + - A relocatable toolchain used outside of BitBake by developers when + developing applications that will run on a targeted device. + + Creation of these toolchains is simple and automated. 
For information on + toolchain concepts as they apply to the Yocto Project, see the + ":ref:`overview-manual/overview-manual-concepts:Cross-Development + Toolchain Generation`" section in the Yocto Project Overview and Concepts + Manual. You can also find more information on using the relocatable + toolchain in the :ref:`sdk-manual/sdk-manual:Yocto Project Application + Development and the Extensible Software Development Kit (eSDK)` manual. + + Extensible Software Development Kit (eSDK) + A custom SDK for application developers. This eSDK allows developers to + incorporate their library and programming changes back into the image to + make their code available to other application developers. + + For information on the eSDK, see the :ref:`sdk-manual/sdk-manual:Yocto + Project Application Development and the Extensible Software Development + Kit (eSDK)` manual. + + Image + An image is an artifact of the BitBake build process given a collection of + recipes and related Metadata. Images are the binary output that run on + specific hardware or QEMU and are used for specific use-cases. For a list + of the supported image types that the Yocto Project provides, see the + ":ref:`ref-manual/ref-images:Images`" chapter. + + Layer + A collection of related recipes. Layers allow you to consolidate related + metadata to customize your build. Layers also isolate information used + when building for multiple architectures. Layers are hierarchical in + their ability to override previous specifications. You can include any + number of available layers from the Yocto Project and customize the build + by adding your layers after them. You can search the Layer Index for + layers used within Yocto Project. + + For introductory information on layers, see the + ":ref:`overview-manual/overview-manual-yp-intro:The Yocto Project Layer + Model`" section in the Yocto Project Overview and Concepts Manual. For + more detailed information on layers, see the + ":ref:`dev-manual/dev-manual-common-tasks:Understanding and Creating + Layers`" section in the Yocto Project Development Tasks Manual. For a + discussion specifically on BSP Layers, see the ":ref:`bsp-guide/bsp:BSP + Layers`" section in the Yocto Project Board Support Packages (BSP) + Developer's Guide. + + Metadata + A key element of the Yocto Project is the Metadata that + is used to construct a Linux distribution and is contained in the + files that the :term:`OpenEmbedded Build System` + parses when building an image. In general, Metadata includes recipes, + configuration files, and other information that refers to the build + instructions themselves, as well as the data used to control what + things get built and the effects of the build. Metadata also includes + commands and data used to indicate what versions of software are + used, from where they are obtained, and changes or additions to the + software itself (patches or auxiliary files) that are used to fix + bugs or customize the software for use in a particular situation. + OpenEmbedded-Core is an important set of validated metadata. + + In the context of the kernel ("kernel Metadata"), the term refers to + the kernel config fragments and features contained in the + :yocto_git:`yocto-kernel-cache ` + Git repository. + + OpenEmbedded-Core (OE-Core) + OE-Core is metadata comprised of + foundational recipes, classes, and associated files that are meant to + be common among many different OpenEmbedded-derived systems, + including the Yocto Project. 
OE-Core is a curated subset of an + original repository developed by the OpenEmbedded community that has + been pared down into a smaller, core set of continuously validated + recipes. The result is a tightly controlled and an quality-assured + core set of recipes. + + You can see the Metadata in the ``meta`` directory of the Yocto + Project :yocto_git:`Source Repositories <>`. + + OpenEmbedded Build System + The build system specific to the Yocto + Project. The OpenEmbedded build system is based on another project + known as "Poky", which uses :term:`BitBake` as the task + executor. Throughout the Yocto Project documentation set, the + OpenEmbedded build system is sometimes referred to simply as "the + build system". If other build systems, such as a host or target build + system are referenced, the documentation clearly states the + difference. + + .. note:: + + For some historical information about Poky, see the + Poky + term. + + Package + In the context of the Yocto Project, this term refers to a + recipe's packaged output produced by BitBake (i.e. a "baked recipe"). + A package is generally the compiled binaries produced from the + recipe's sources. You "bake" something by running it through BitBake. + + It is worth noting that the term "package" can, in general, have + subtle meanings. For example, the packages referred to in the + "`Required Packages for the Build + Host <#required-packages-for-the-build-host>`__" section are compiled + binaries that, when installed, add functionality to your Linux + distribution. + + Another point worth noting is that historically within the Yocto + Project, recipes were referred to as packages - thus, the existence + of several BitBake variables that are seemingly mis-named, (e.g. + :term:`PR`, :term:`PV`, and + :term:`PE`). + + Package Groups + Arbitrary groups of software Recipes. You use + package groups to hold recipes that, when built, usually accomplish a + single task. For example, a package group could contain the recipes + for a company's proprietary or value-add software. Or, the package + group could contain the recipes that enable graphics. A package group + is really just another recipe. Because package group files are + recipes, they end with the ``.bb`` filename extension. + + Poky + Poky, which is pronounced *Pock*-ee, is a reference embedded + distribution and a reference test configuration. Poky provides the + following: + + - A base-level functional distro used to illustrate how to customize + a distribution. + + - A means by which to test the Yocto Project components (i.e. Poky + is used to validate the Yocto Project). + + - A vehicle through which you can download the Yocto Project. + + Poky is not a product level distro. Rather, it is a good starting + point for customization. + + .. note:: + + Poky began as an open-source project initially developed by + OpenedHand. OpenedHand developed Poky from the existing + OpenEmbedded build system to create a commercially supportable + build system for embedded Linux. After Intel Corporation acquired + OpenedHand, the poky project became the basis for the Yocto + Project's build system. + + Recipe + A set of instructions for building packages. A recipe + describes where you get source code, which patches to apply, how to + configure the source, how to compile it and so on. Recipes also + describe dependencies for libraries or for other recipes. Recipes + represent the logical unit of execution, the software to build, the + images to build, and use the ``.bb`` file extension. 
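+
+      As an illustrative sketch only (the file and recipe names here are
+      hypothetical, not recipes shipped with the Yocto Project), a very
+      small recipe might look like the following:
+      ::
+
+         SUMMARY = "Example hello application"
+         # A real recipe would set a proper LICENSE and LIC_FILES_CHKSUM;
+         # "CLOSED" is used here only to keep the sketch short.
+         LICENSE = "CLOSED"
+
+         SRC_URI = "file://hello.c"
+         S = "${WORKDIR}"
+
+         do_compile() {
+             ${CC} ${CFLAGS} ${LDFLAGS} hello.c -o hello
+         }
+
+         do_install() {
+             install -d ${D}${bindir}
+             install -m 0755 hello ${D}${bindir}
+         }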
+ + Reference Kit + A working example of a system, which includes a + :term:`BSP` as well as a + :term:`build host` and other components, that can + work on specific hardware. + + Source Directory + This term refers to the directory structure + created as a result of creating a local copy of the ``poky`` Git + repository ``git://git.yoctoproject.org/poky`` or expanding a + released ``poky`` tarball. + + .. note:: + + Creating a local copy of the + poky + Git repository is the recommended method for setting up your + Source Directory. + + Sometimes you might hear the term "poky directory" used to refer to + this directory structure. + + .. note:: + + The OpenEmbedded build system does not support file or directory + names that contain spaces. Be sure that the Source Directory you + use does not contain these types of names. + + The Source Directory contains BitBake, Documentation, Metadata and + other files that all support the Yocto Project. Consequently, you + must have the Source Directory in place on your development system in + order to do any development using the Yocto Project. + + When you create a local copy of the Git repository, you can name the + repository anything you like. Throughout much of the documentation, + "poky" is used as the name of the top-level folder of the local copy + of the poky Git repository. So, for example, cloning the ``poky`` Git + repository results in a local Git repository whose top-level folder + is also named "poky". + + While it is not recommended that you use tarball expansion to set up + the Source Directory, if you do, the top-level directory name of the + Source Directory is derived from the Yocto Project release tarball. + For example, downloading and unpacking + :yocto_dl:`releases/yocto/&DISTRO_REL_TAG;/&YOCTO_POKY;.tar.bz2` + results in a Source Directory whose root folder is named ``poky``. + + It is important to understand the differences between the Source + Directory created by unpacking a released tarball as compared to + cloning ``git://git.yoctoproject.org/poky``. When you unpack a + tarball, you have an exact copy of the files based on the time of + release - a fixed release point. Any changes you make to your local + files in the Source Directory are on top of the release and will + remain local only. On the other hand, when you clone the ``poky`` Git + repository, you have an active development repository with access to + the upstream repository's branches and tags. In this case, any local + changes you make to the local Source Directory can be later applied + to active development branches of the upstream ``poky`` Git + repository. + + For more information on concepts related to Git repositories, + branches, and tags, see the + ":ref:`overview-manual/overview-manual-development-environment:repositories, tags, and branches`" + section in the Yocto Project Overview and Concepts Manual. + + Task + A unit of execution for BitBake (e.g. + :ref:`ref-tasks-compile`, + :ref:`ref-tasks-fetch`, + :ref:`ref-tasks-patch`, and so forth). + + Toaster + A web interface to the Yocto Project's :term:`OpenEmbedded Build System`. + The interface enables you to + configure and run your builds. Information about builds is collected + and stored in a database. For information on Toaster, see the + :doc:`../toaster-manual/toaster-manual`. + + Upstream + A reference to source code or repositories that are not + local to the development system but located in a master area that is + controlled by the maintainer of the source code. 
For example, in + order for a developer to work on a particular piece of code, they + need to first get a copy of it from an "upstream" source. diff --git a/poky/documentation/ref-manual/ref-terms.xml b/poky/documentation/ref-manual/ref-terms.xml index d2605c62a..2a0452bd7 100644 --- a/poky/documentation/ref-manual/ref-terms.xml +++ b/poky/documentation/ref-manual/ref-terms.xml @@ -365,7 +365,7 @@ You use package groups to hold recipes that, when built, usually accomplish a single task. For example, a package group could contain the recipes for a - company’s proprietary or value-add software. + company's proprietary or value-add software. Or, the package group could contain the recipes that enable graphics. A package group is really just another recipe. diff --git a/poky/documentation/ref-manual/ref-variables.rst b/poky/documentation/ref-manual/ref-variables.rst new file mode 100644 index 000000000..625a37c15 --- /dev/null +++ b/poky/documentation/ref-manual/ref-variables.rst @@ -0,0 +1,8899 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************** +Variables Glossary +****************** + +This chapter lists common variables used in the OpenEmbedded build +system and gives an overview of their function and contents. + +`A <#var-ABIEXTENSION>`__ :term:`B` `C <#var-CACHE>`__ +:term:`D` `E <#var-EFI_PROVIDER>`__ `F <#var-FEATURE_PACKAGES>`__ +`G <#var-GCCPIE>`__ `H <#var-HOMEPAGE>`__ `I <#var-ICECC_DISABLED>`__ +`K <#var-KARCH>`__ `L <#var-LABELS>`__ `M <#var-MACHINE>`__ +`N <#var-NATIVELSBSTRING>`__ `O <#var-OBJCOPY>`__ :term:`P` +`R <#var-RANLIB>`__ :term:`S` :term:`T` +`U <#var-UBOOT_CONFIG>`__ `V <#var-VOLATILE_LOG_DIR>`__ +`W <#var-WARN_QA>`__ `X <#var-XSERVER>`__ + +.. glossary:: + + ABIEXTENSION + Extension to the Application Binary Interface (ABI) field of the GNU + canonical architecture name (e.g. "eabi"). + + ABI extensions are set in the machine include files. For example, the + ``meta/conf/machine/include/arm/arch-arm.inc`` file sets the + following extension: + :: + + ABIEXTENSION = "eabi" + + ALLOW_EMPTY + Specifies whether to produce an output package even if it is empty. + By default, BitBake does not produce empty packages. This default + behavior can cause issues when there is an + :term:`RDEPENDS` or some other hard runtime + requirement on the existence of the package. + + Like all package-controlling variables, you must always use them in + conjunction with a package name override, as in: + :: + + ALLOW_EMPTY_${PN} = "1" + ALLOW_EMPTY_${PN}-dev = "1" + ALLOW_EMPTY_${PN}-staticdev = "1" + + ALTERNATIVE + Lists commands in a package that need an alternative binary naming + scheme. Sometimes the same command is provided in multiple packages. + When this occurs, the OpenEmbedded build system needs to use the + alternatives system to create a different binary naming scheme so the + commands can co-exist. + + To use the variable, list out the package's commands that also exist + as part of another package. For example, if the ``busybox`` package + has four commands that also exist as part of another package, you + identify them as follows: + :: + + ALTERNATIVE_busybox = "sh sed test bracket" + + For more information on the alternatives system, see the + ":ref:`update-alternatives.bbclass `" + section. + + ALTERNATIVE_LINK_NAME + Used by the alternatives system to map duplicated commands to actual + locations. 
For example, if the ``bracket`` command provided by the + ``busybox`` package is duplicated through another package, you must + use the ``ALTERNATIVE_LINK_NAME`` variable to specify the actual + location: + :: + + ALTERNATIVE_LINK_NAME[bracket] = "/usr/bin/[" + + In this example, the binary for the ``bracket`` command (i.e. ``[``) + from the ``busybox`` package resides in ``/usr/bin/``. + + .. note:: + + If ALTERNATIVE_LINK_NAME is not defined, it defaults to ${bindir}/ name. + + For more information on the alternatives system, see the + ":ref:`update-alternatives.bbclass `" + section. + + ALTERNATIVE_PRIORITY + Used by the alternatives system to create default priorities for + duplicated commands. You can use the variable to create a single + default regardless of the command name or package, a default for + specific duplicated commands regardless of the package, or a default + for specific commands tied to particular packages. Here are the + available syntax forms: + :: + + ALTERNATIVE_PRIORITY = "priority" + ALTERNATIVE_PRIORITY[name] = "priority" + ALTERNATIVE_PRIORITY_pkg[name] = "priority" + + For more information on the alternatives system, see the + ":ref:`update-alternatives.bbclass `" + section. + + ALTERNATIVE_TARGET + Used by the alternatives system to create default link locations for + duplicated commands. You can use the variable to create a single + default location for all duplicated commands regardless of the + command name or package, a default for specific duplicated commands + regardless of the package, or a default for specific commands tied to + particular packages. Here are the available syntax forms: + :: + + ALTERNATIVE_TARGET = "target" + ALTERNATIVE_TARGET[name] = "target" + ALTERNATIVE_TARGET_pkg[name] = "target" + + .. note:: + + If ``ALTERNATIVE_TARGET`` is not defined, it inherits the value + from the :term:`ALTERNATIVE_LINK_NAME` variable. + + If ``ALTERNATIVE_LINK_NAME`` and ``ALTERNATIVE_TARGET`` are the + same, the target for ``ALTERNATIVE_TARGET`` has "``.{BPN}``" + appended to it. + + Finally, if the file referenced has not been renamed, the + alternatives system will rename it to avoid the need to rename + alternative files in the :ref:`ref-tasks-install` + task while retaining support for the command if necessary. + + For more information on the alternatives system, see the + ":ref:`update-alternatives.bbclass `" + section. + + APPEND + An override list of append strings for each target specified with + :term:`LABELS`. + + See the :ref:`grub-efi ` class for more + information on how this variable is used. + + AR + The minimal command and arguments used to run ``ar``. + + ARCHIVER_MODE + When used with the :ref:`archiver ` class, + determines the type of information used to create a released archive. + You can use this variable to create archives of patched source, + original source, configured source, and so forth by employing the + following variable flags (varflags): + :: + + ARCHIVER_MODE[src] = "original" # Uses original (unpacked) source files. + ARCHIVER_MODE[src] = "patched" # Uses patched source files. This is the default. + ARCHIVER_MODE[src] = "configured" # Uses configured source files. + ARCHIVER_MODE[diff] = "1" # Uses patches between do_unpack and do_patch. + ARCHIVER_MODE[diff-exclude] ?= "file file ..." # Lists files and directories to exclude from diff. + ARCHIVER_MODE[dumpdata] = "1" # Uses environment data. + ARCHIVER_MODE[recipe] = "1" # Uses recipe and include files. + ARCHIVER_MODE[srpm] = "1" # Uses RPM package files. 
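+
+ For illustration, a minimal ``local.conf`` sketch that inherits the
+ class and archives the unpacked (original) source could look like the
+ following; the mode chosen here is just an example:
+ ::
+
+    INHERIT += "archiver"
+    ARCHIVER_MODE[src] = "original"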
+ + For information on how the variable works, see the + ``meta/classes/archiver.bbclass`` file in the :term:`Source Directory`. + + AS + Minimal command and arguments needed to run the assembler. + + ASSUME_PROVIDED + Lists recipe names (:term:`PN` values) BitBake does not + attempt to build. Instead, BitBake assumes these recipes have already + been built. + + In OpenEmbedded-Core, ``ASSUME_PROVIDED`` mostly specifies native + tools that should not be built. An example is ``git-native``, which + when specified, allows for the Git binary from the host to be used + rather than building ``git-native``. + + ASSUME_SHLIBS + Provides additional ``shlibs`` provider mapping information, which + adds to or overwrites the information provided automatically by the + system. Separate multiple entries using spaces. + + As an example, use the following form to add an ``shlib`` provider of + shlibname in packagename with the optional version: + :: + + shlibname:packagename[_version] + + Here is an example that adds a shared library named ``libEGL.so.1`` + as being provided by the ``libegl-implementation`` package: + :: + + ASSUME_SHLIBS = "libEGL.so.1:libegl-implementation" + + AUTHOR + The email address used to contact the original author or authors in + order to send patches and forward bugs. + + AUTO_LIBNAME_PKGS + When the :ref:`debian ` class is inherited, + which is the default behavior, ``AUTO_LIBNAME_PKGS`` specifies which + packages should be checked for libraries and renamed according to + Debian library package naming. + + The default value is "${PACKAGES}", which causes the debian class to + act on all packages that are explicitly generated by the recipe. + + AUTO_SYSLINUXMENU + Enables creating an automatic menu for the syslinux bootloader. You + must set this variable in your recipe. The + :ref:`syslinux ` class checks this variable. + + AUTOREV + When ``SRCREV`` is set to the value of this variable, it specifies to + use the latest source revision in the repository. Here is an example: + :: + + SRCREV = "${AUTOREV}" + + If you use the previous statement to retrieve the latest version of + software, you need to be sure :term:`PV` contains + ``${``\ :term:`SRCPV`\ ``}``. For example, suppose you + have a kernel recipe that inherits the + :ref:`kernel ` class and you use the previous + statement. In this example, ``${SRCPV}`` does not automatically get + into ``PV``. Consequently, you need to change ``PV`` in your recipe + so that it does contain ``${SRCPV}``. + + For more information see the + ":ref:`dev-manual/dev-manual-common-tasks:automatically incrementing a package version number`" + section in the Yocto Project Development Tasks Manual. + + AVAILABLE_LICENSES + List of licenses found in the directories specified by + :term:`COMMON_LICENSE_DIR` and + :term:`LICENSE_PATH`. + + .. note:: + + It is assumed that all changes to + COMMON_LICENSE_DIR + and + LICENSE_PATH + have been done before + AVAILABLE_LICENSES + is defined (in + license.bbclass + ). + + AVAILTUNES + The list of defined CPU and Application Binary Interface (ABI) + tunings (i.e. "tunes") available for use by the OpenEmbedded build + system. + + The list simply presents the tunes that are available. Not all tunes + may be compatible with a particular machine configuration, or with + each other in a + :ref:`Multilib ` + configuration. + + To add a tune to the list, be sure to append it with spaces using the + "+=" BitBake operator. Do not simply replace the list by using the + "=" operator. 
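+
+ As a sketch (the tune name is hypothetical), a tune or machine include
+ file would extend the list rather than overwrite it:
+ ::
+
+    AVAILTUNES += "mycustomtune"
+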
See the + ":ref:`Basic Syntax `" section in the BitBake + User Manual for more information. + + B + The directory within the :term:`Build Directory` in + which the OpenEmbedded build system places generated objects during a + recipe's build process. By default, this directory is the same as the + :term:`S` directory, which is defined as: + :: + + S = "${WORKDIR}/${BP}" + + You can separate the (``S``) directory and the directory pointed to + by the ``B`` variable. Most Autotools-based recipes support + separating these directories. The build system defaults to using + separate directories for ``gcc`` and some kernel recipes. + + BAD_RECOMMENDATIONS + Lists "recommended-only" packages to not install. Recommended-only + packages are packages installed only through the + :term:`RRECOMMENDS` variable. You can prevent any + of these "recommended" packages from being installed by listing them + with the ``BAD_RECOMMENDATIONS`` variable: + :: + + BAD_RECOMMENDATIONS = "package_name package_name package_name ..." + + You can set this variable globally in your ``local.conf`` file or you + can attach it to a specific image recipe by using the recipe name + override: + :: + + BAD_RECOMMENDATIONS_pn-target_image = "package_name" + + It is important to realize that if you choose to not install packages + using this variable and some other packages are dependent on them + (i.e. listed in a recipe's :term:`RDEPENDS` + variable), the OpenEmbedded build system ignores your request and + will install the packages to avoid dependency errors. + + Support for this variable exists only when using the IPK and RPM + packaging backend. Support does not exist for DEB. + + See the :term:`NO_RECOMMENDATIONS` and the + :term:`PACKAGE_EXCLUDE` variables for related + information. + + BASE_LIB + The library directory name for the CPU or Application Binary + Interface (ABI) tune. The ``BASE_LIB`` applies only in the Multilib + context. See the ":ref:`dev-manual/dev-manual-common-tasks:combining multiple versions of library files into one image`" + section in the Yocto Project Development Tasks Manual for information + on Multilib. + + The ``BASE_LIB`` variable is defined in the machine include files in + the :term:`Source Directory`. If Multilib is not + being used, the value defaults to "lib". + + BASE_WORKDIR + Points to the base of the work directory for all recipes. The default + value is "${TMPDIR}/work". + + BB_ALLOWED_NETWORKS + Specifies a space-delimited list of hosts that the fetcher is allowed + to use to obtain the required source code. Following are + considerations surrounding this variable: + + - This host list is only used if ``BB_NO_NETWORK`` is either not set + or set to "0". + + - Limited support for wildcard matching against the beginning of + host names exists. For example, the following setting matches + ``git.gnu.org``, ``ftp.gnu.org``, and ``foo.git.gnu.org``. + :: + + BB_ALLOWED_NETWORKS = "*.gnu.org" + + .. note:: + + The use of the "``*``" character only works at the beginning of + a host name and it must be isolated from the remainder of the + host name. You cannot use the wildcard character in any other + location of the name or combined with the front part of the + name. + + For example, ``*.foo.bar`` is supported, while ``*aa.foo.bar`` + is not. + + - Mirrors not in the host list are skipped and logged in debug. + + - Attempts to access networks not in the host list cause a failure. + + Using ``BB_ALLOWED_NETWORKS`` in conjunction with + :term:`PREMIRRORS` is very useful. 
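+
+ A possible ``local.conf`` sketch of that combination (the mirror URL
+ shown is illustrative) is:
+ ::
+
+    BB_ALLOWED_NETWORKS = "*.yoctoproject.org"
+    PREMIRRORS_prepend = "git://.*/.* http://downloads.yoctoproject.org/mirror/sources/ \n"
+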
Adding the host + you want to use to ``PREMIRRORS`` results in the source code being + fetched from an allowed location and avoids raising an error when a + host that is not allowed is in a :term:`SRC_URI` + statement. This is because the fetcher does not attempt to use the + host listed in ``SRC_URI`` after a successful fetch from the + ``PREMIRRORS`` occurs. + + BB_DANGLINGAPPENDS_WARNONLY + Defines how BitBake handles situations where an append file + (``.bbappend``) has no corresponding recipe file (``.bb``). This + condition often occurs when layers get out of sync (e.g. ``oe-core`` + bumps a recipe version and the old recipe no longer exists and the + other layer has not been updated to the new version of the recipe + yet). + + The default fatal behavior is safest because it is the sane reaction + given something is out of sync. It is important to realize when your + changes are no longer being applied. + + You can change the default behavior by setting this variable to "1", + "yes", or "true" in your ``local.conf`` file, which is located in the + :term:`Build Directory`: Here is an example: + :: + + BB_DANGLINGAPPENDS_WARNONLY = "1" + + BB_DISKMON_DIRS + Monitors disk space and available inodes during the build and allows + you to control the build based on these parameters. + + Disk space monitoring is disabled by default. To enable monitoring, + add the ``BB_DISKMON_DIRS`` variable to your ``conf/local.conf`` file + found in the :term:`Build Directory`. Use the + following form: + :: + + BB_DISKMON_DIRS = "action,dir,threshold [...]" + + where: + + action is: + ABORT: Immediately abort the build when + a threshold is broken. + STOPTASKS: Stop the build after the currently + executing tasks have finished when + a threshold is broken. + WARN: Issue a warning but continue the + build when a threshold is broken. + Subsequent warnings are issued as + defined by the BB_DISKMON_WARNINTERVAL + variable, which must be defined in + the conf/local.conf file. + + dir is: + Any directory you choose. You can specify one or + more directories to monitor by separating the + groupings with a space. If two directories are + on the same device, only the first directory + is monitored. + + threshold is: + Either the minimum available disk space, + the minimum number of free inodes, or + both. You must specify at least one. To + omit one or the other, simply omit the value. + Specify the threshold using G, M, K for Gbytes, + Mbytes, and Kbytes, respectively. If you do + not specify G, M, or K, Kbytes is assumed by + default. Do not use GB, MB, or KB. + + Here are some examples: + :: + + BB_DISKMON_DIRS = "ABORT,${TMPDIR},1G,100K WARN,${SSTATE_DIR},1G,100K" + BB_DISKMON_DIRS = "STOPTASKS,${TMPDIR},1G" + BB_DISKMON_DIRS = "ABORT,${TMPDIR},,100K" + + The first example works only if you also provide the + :term:`BB_DISKMON_WARNINTERVAL` + variable in the ``conf/local.conf``. This example causes the build + system to immediately abort when either the disk space in + ``${TMPDIR}`` drops below 1 Gbyte or the available free inodes drops + below 100 Kbytes. Because two directories are provided with the + variable, the build system also issue a warning when the disk space + in the ``${SSTATE_DIR}`` directory drops below 1 Gbyte or the number + of free inodes drops below 100 Kbytes. Subsequent warnings are issued + during intervals as defined by the ``BB_DISKMON_WARNINTERVAL`` + variable. 
+ + The second example stops the build after all currently executing + tasks complete when the minimum disk space in the ``${TMPDIR}`` + directory drops below 1 Gbyte. No disk monitoring occurs for the free + inodes in this case. + + The final example immediately aborts the build when the number of + free inodes in the ``${TMPDIR}`` directory drops below 100 Kbytes. No + disk space monitoring for the directory itself occurs in this case. + + BB_DISKMON_WARNINTERVAL + Defines the disk space and free inode warning intervals. To set these + intervals, define the variable in your ``conf/local.conf`` file in + the :term:`Build Directory`. + + If you are going to use the ``BB_DISKMON_WARNINTERVAL`` variable, you + must also use the :term:`BB_DISKMON_DIRS` + variable and define its action as "WARN". During the build, + subsequent warnings are issued each time disk space or number of free + inodes further reduces by the respective interval. + + If you do not provide a ``BB_DISKMON_WARNINTERVAL`` variable and you + do use ``BB_DISKMON_DIRS`` with the "WARN" action, the disk + monitoring interval defaults to the following: + :: + + BB_DISKMON_WARNINTERVAL = "50M,5K" + + When specifying the variable in your configuration file, use the + following form: + :: + + BB_DISKMON_WARNINTERVAL = "disk_space_interval,disk_inode_interval" + + where: + + disk_space_interval is: + An interval of memory expressed in either + G, M, or K for Gbytes, Mbytes, or Kbytes, + respectively. You cannot use GB, MB, or KB. + + disk_inode_interval is: + An interval of free inodes expressed in either + G, M, or K for Gbytes, Mbytes, or Kbytes, + respectively. You cannot use GB, MB, or KB. + + Here is an example: + :: + + BB_DISKMON_DIRS = "WARN,${SSTATE_DIR},1G,100K" + BB_DISKMON_WARNINTERVAL = "50M,5K" + + These variables cause the + OpenEmbedded build system to issue subsequent warnings each time the + available disk space further reduces by 50 Mbytes or the number of + free inodes further reduces by 5 Kbytes in the ``${SSTATE_DIR}`` + directory. Subsequent warnings based on the interval occur each time + a respective interval is reached beyond the initial warning (i.e. 1 + Gbytes and 100 Kbytes). + + BB_GENERATE_MIRROR_TARBALLS + Causes tarballs of the source control repositories (e.g. Git + repositories), including metadata, to be placed in the + :term:`DL_DIR` directory. + + For performance reasons, creating and placing tarballs of these + repositories is not the default action by the OpenEmbedded build + system. + :: + + BB_GENERATE_MIRROR_TARBALLS = "1" + + Set this variable in your + ``local.conf`` file in the :term:`Build Directory`. + + Once you have the tarballs containing your source files, you can + clean up your ``DL_DIR`` directory by deleting any Git or other + source control work directories. + + BB_NUMBER_THREADS + The maximum number of tasks BitBake should run in parallel at any one + time. The OpenEmbedded build system automatically configures this + variable to be equal to the number of cores on the build system. For + example, a system with a dual core processor that also uses + hyper-threading causes the ``BB_NUMBER_THREADS`` variable to default + to "4". + + For single socket systems (i.e. one CPU), you should not have to + override this variable to gain optimal parallelism during builds. + However, if you have very large systems that employ multiple physical + CPUs, you might want to make sure the ``BB_NUMBER_THREADS`` variable + is not set higher than "20". 
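+
+ For example, to cap parallelism explicitly in ``local.conf`` (the value
+ shown is only an illustration; ``PARALLEL_MAKE`` is commonly tuned
+ alongside it):
+ ::
+
+    BB_NUMBER_THREADS = "8"
+    PARALLEL_MAKE = "-j 8"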
+ + For more information on speeding up builds, see the + ":ref:`dev-manual/dev-manual-common-tasks:speeding up a build`" + section in the Yocto Project Development Tasks Manual. + + BB_SERVER_TIMEOUT + Specifies the time (in seconds) after which to unload the BitBake + server due to inactivity. Set ``BB_SERVER_TIMEOUT`` to determine how + long the BitBake server stays resident between invocations. + + For example, the following statement in your ``local.conf`` file + instructs the server to be unloaded after 20 seconds of inactivity: + :: + + BB_SERVER_TIMEOUT = "20" + + If you want the server to never be unloaded, + set ``BB_SERVER_TIMEOUT`` to "-1". + + BBCLASSEXTEND + Allows you to extend a recipe so that it builds variants of the + software. Common variants for recipes exist such as "natives" like + ``quilt-native``, which is a copy of Quilt built to run on the build + system; "crosses" such as ``gcc-cross``, which is a compiler built to + run on the build machine but produces binaries that run on the target + :term:`MACHINE`; "nativesdk", which targets the SDK + machine instead of ``MACHINE``; and "mulitlibs" in the form + "``multilib:``\ multilib_name". + + To build a different variant of the recipe with a minimal amount of + code, it usually is as simple as adding the following to your recipe: + :: + + BBCLASSEXTEND =+ "native nativesdk" + BBCLASSEXTEND =+ "multilib:multilib_name" + + .. note:: + + Internally, the ``BBCLASSEXTEND`` mechanism generates recipe + variants by rewriting variable values and applying overrides such + as ``_class-native``. For example, to generate a native version of + a recipe, a :term:`DEPENDS` on "foo" is rewritten + to a ``DEPENDS`` on "foo-native". + + Even when using ``BBCLASSEXTEND``, the recipe is only parsed once. + Parsing once adds some limitations. For example, it is not + possible to include a different file depending on the variant, + since ``include`` statements are processed when the recipe is + parsed. + + BBFILE_COLLECTIONS + Lists the names of configured layers. These names are used to find + the other ``BBFILE_*`` variables. Typically, each layer will append + its name to this variable in its ``conf/layer.conf`` file. + + BBFILE_PATTERN + Variable that expands to match files from + :term:`BBFILES` in a particular layer. This variable + is used in the ``conf/layer.conf`` file and must be suffixed with the + name of the specific layer (e.g. ``BBFILE_PATTERN_emenlow``). + + BBFILE_PRIORITY + Assigns the priority for recipe files in each layer. + + This variable is useful in situations where the same recipe appears + in more than one layer. Setting this variable allows you to + prioritize a layer against other layers that contain the same recipe + - effectively letting you control the precedence for the multiple + layers. The precedence established through this variable stands + regardless of a recipe's version (:term:`PV` variable). For + example, a layer that has a recipe with a higher ``PV`` value but for + which the ``BBFILE_PRIORITY`` is set to have a lower precedence still + has a lower precedence. + + A larger value for the ``BBFILE_PRIORITY`` variable results in a + higher precedence. For example, the value 6 has a higher precedence + than the value 5. If not specified, the ``BBFILE_PRIORITY`` variable + is set based on layer dependencies (see the ``LAYERDEPENDS`` variable + for more information. 
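+
+ As an illustration, a hypothetical layer named ``meta-mylayer`` would
+ typically set the ``BBFILE_*`` variables, together with ``BBFILES``, in
+ its ``conf/layer.conf`` along these lines:
+ ::
+
+    BBFILES += "${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend"
+    BBFILE_COLLECTIONS += "mylayer"
+    BBFILE_PATTERN_mylayer = "^${LAYERDIR}/"
+    BBFILE_PRIORITY_mylayer = "6"
+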
The default priority, if unspecified for a + layer with no dependencies, is the lowest defined priority + 1 (or 1 + if no priorities are defined). + + .. tip:: + + You can use the command + bitbake-layers show-layers + to list all configured layers along with their priorities. + + BBFILES + A space-separated list of recipe files BitBake uses to build + software. + + When specifying recipe files, you can pattern match using Python's + `glob `_ syntax. + For details on the syntax, see the documentation by following the + previous link. + + BBFILES_DYNAMIC + Activates content when identified layers are present. You identify + the layers by the collections that the layers define. + + Use the ``BBFILES_DYNAMIC`` variable to avoid ``.bbappend`` files + whose corresponding ``.bb`` file is in a layer that attempts to + modify other layers through ``.bbappend`` but does not want to + introduce a hard dependency on those other layers. + + Use the following form for ``BBFILES_DYNAMIC``: + collection_name:filename_pattern The following example identifies two + collection names and two filename patterns: + :: + + BBFILES_DYNAMIC += " \ + clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \ + core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \ + " + + This next example shows an error message that occurs because invalid + entries are found, which cause parsing to abort: + :: + + ERROR: BBFILES_DYNAMIC entries must be of the form :, not: + /work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend + /work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend + + BBINCLUDELOGS + Variable that controls how BitBake displays logs on build failure. + + BBINCLUDELOGS_LINES + If :term:`BBINCLUDELOGS` is set, specifies the + maximum number of lines from the task log file to print when + reporting a failed task. If you do not set ``BBINCLUDELOGS_LINES``, + the entire log is printed. + + BBLAYERS + Lists the layers to enable during the build. This variable is defined + in the ``bblayers.conf`` configuration file in the :term:`Build Directory`. + Here is an example: + :: + + BBLAYERS = " \ + /home/scottrif/poky/meta \ /home/scottrif/poky/meta-poky \ + /home/scottrif/poky/meta-yocto-bsp \ + /home/scottrif/poky/meta-mykernel \ + " + + This example enables four layers, one of which is a custom, + user-defined layer named ``meta-mykernel``. + + BBMASK + Prevents BitBake from processing recipes and recipe append files. + + You can use the ``BBMASK`` variable to "hide" these ``.bb`` and + ``.bbappend`` files. BitBake ignores any recipe or recipe append + files that match any of the expressions. It is as if BitBake does not + see them at all. Consequently, matching files are not parsed or + otherwise used by BitBake. + + The values you provide are passed to Python's regular expression + compiler. Consequently, the syntax follows Python's Regular + Expression (re) syntax. The expressions are compared against the full + paths to the files. For complete syntax information, see Python's + documentation at http://docs.python.org/3/library/re.html#re. + + The following example uses a complete regular expression to tell + BitBake to ignore all recipe and recipe append files in the + ``meta-ti/recipes-misc/`` directory: + :: + + BBMASK = "meta-ti/recipes-misc/" + + If you want to mask out multiple directories or recipes, you can + specify multiple regular expression fragments. 
This next example + masks out multiple directories and individual recipes: :: + + BBMASK += "/meta-ti/recipes-misc/ meta-ti/recipes-ti/packagegroup/" + BBMASK += "/meta-oe/recipes-support/" + BBMASK += "/meta-foo/.*/openldap" + BBMASK += "opencv.*\.bbappend" + BBMASK += "lzma" + + .. note:: + + When specifying a directory name, use the trailing slash character + to ensure you match just that directory name. + + BBMULTICONFIG + Specifies each additional separate configuration when you are + building targets with multiple configurations. Use this variable in + your ``conf/local.conf`` configuration file. Specify a + multiconfigname for each configuration file you are using. For + example, the following line specifies three configuration files: + :: + + BBMULTICONFIG = "configA configB configC" + + Each configuration file you + use must reside in the :term:`Build Directory` + ``conf/multiconfig`` directory (e.g. + build_directory\ ``/conf/multiconfig/configA.conf``). + + For information on how to use ``BBMULTICONFIG`` in an environment + that supports building targets with multiple configurations, see the + ":ref:`dev-building-images-for-multiple-targets-using-multiple-configurations`" + section in the Yocto Project Development Tasks Manual. + + BBPATH + Used by BitBake to locate ``.bbclass`` and configuration files. This + variable is analogous to the ``PATH`` variable. + + .. note:: + + If you run BitBake from a directory outside of the + Build Directory + , you must be sure to set + BBPATH + to point to the Build Directory. Set the variable as you would any + environment variable and then run BitBake: + :: + + $ BBPATH = "build_directory" + $ export BBPATH + $ bitbake target + + + BBSERVER + If defined in the BitBake environment, ``BBSERVER`` points to the + BitBake remote server. + + Use the following format to export the variable to the BitBake + environment: + :: + + export BBSERVER=localhost:$port + + By default, ``BBSERVER`` also appears in + :term:`bitbake:BB_HASHBASE_WHITELIST`. + Consequently, ``BBSERVER`` is excluded from checksum and dependency + data. + + BINCONFIG + When inheriting the + :ref:`binconfig-disabled ` class, + this variable specifies binary configuration scripts to disable in + favor of using ``pkg-config`` to query the information. The + ``binconfig-disabled`` class will modify the specified scripts to + return an error so that calls to them can be easily found and + replaced. + + To add multiple scripts, separate them by spaces. Here is an example + from the ``libpng`` recipe: + :: + + BINCONFIG = "${bindir}/libpng-config ${bindir}/libpng16-config" + + BINCONFIG_GLOB + When inheriting the :ref:`binconfig ` class, + this variable specifies a wildcard for configuration scripts that + need editing. The scripts are edited to correct any paths that have + been set up during compilation so that they are correct for use when + installed into the sysroot and called by the build processes of other + recipes. + + .. note:: + + The + BINCONFIG_GLOB + variable uses + shell globbing + , which is recognition and expansion of wildcards during pattern + matching. Shell globbing is very similar to + fnmatch + and + glob + . + + For more information on how this variable works, see + ``meta/classes/binconfig.bbclass`` in the :term:`Source Directory`. + You can also find general + information on the class in the + ":ref:`binconfig.bbclass `" section. + + BP + The base recipe name and version but without any special recipe name + suffix (i.e. ``-native``, ``lib64-``, and so forth). 
``BP`` is + comprised of the following: + :: + + ${BPN}-${PV} + + BPN + This variable is a version of the :term:`PN` variable with + common prefixes and suffixes removed, such as ``nativesdk-``, + ``-cross``, ``-native``, and multilib's ``lib64-`` and ``lib32-``. + The exact lists of prefixes and suffixes removed are specified by the + :term:`MLPREFIX` and + :term:`SPECIAL_PKGSUFFIX` variables, + respectively. + + BUGTRACKER + Specifies a URL for an upstream bug tracking website for a recipe. + The OpenEmbedded build system does not use this variable. Rather, the + variable is a useful pointer in case a bug in the software being + built needs to be manually reported. + + BUILD_ARCH + Specifies the architecture of the build host (e.g. ``i686``). The + OpenEmbedded build system sets the value of ``BUILD_ARCH`` from the + machine name reported by the ``uname`` command. + + BUILD_AS_ARCH + Specifies the architecture-specific assembler flags for the build + host. By default, the value of ``BUILD_AS_ARCH`` is empty. + + BUILD_CC_ARCH + Specifies the architecture-specific C compiler flags for the build + host. By default, the value of ``BUILD_CC_ARCH`` is empty. + + BUILD_CCLD + Specifies the linker command to be used for the build host when the C + compiler is being used as the linker. By default, ``BUILD_CCLD`` + points to GCC and passes as arguments the value of + :term:`BUILD_CC_ARCH`, assuming + ``BUILD_CC_ARCH`` is set. + + BUILD_CFLAGS + Specifies the flags to pass to the C compiler when building for the + build host. When building in the ``-native`` context, + :term:`CFLAGS` is set to the value of this variable by + default. + + BUILD_CPPFLAGS + Specifies the flags to pass to the C preprocessor (i.e. to both the C + and the C++ compilers) when building for the build host. When + building in the ``-native`` context, :term:`CPPFLAGS` + is set to the value of this variable by default. + + BUILD_CXXFLAGS + Specifies the flags to pass to the C++ compiler when building for the + build host. When building in the ``-native`` context, + :term:`CXXFLAGS` is set to the value of this variable + by default. + + BUILD_FC + Specifies the Fortran compiler command for the build host. By + default, ``BUILD_FC`` points to Gfortran and passes as arguments the + value of :term:`BUILD_CC_ARCH`, assuming + ``BUILD_CC_ARCH`` is set. + + BUILD_LD + Specifies the linker command for the build host. By default, + ``BUILD_LD`` points to the GNU linker (ld) and passes as arguments + the value of :term:`BUILD_LD_ARCH`, assuming + ``BUILD_LD_ARCH`` is set. + + BUILD_LD_ARCH + Specifies architecture-specific linker flags for the build host. By + default, the value of ``BUILD_LD_ARCH`` is empty. + + BUILD_LDFLAGS + Specifies the flags to pass to the linker when building for the build + host. When building in the ``-native`` context, + :term:`LDFLAGS` is set to the value of this variable + by default. + + BUILD_OPTIMIZATION + Specifies the optimization flags passed to the C compiler when + building for the build host or the SDK. The flags are passed through + the :term:`BUILD_CFLAGS` and + :term:`BUILDSDK_CFLAGS` default values. + + The default value of the ``BUILD_OPTIMIZATION`` variable is "-O2 + -pipe". + + BUILD_OS + Specifies the operating system in use on the build host (e.g. + "linux"). The OpenEmbedded build system sets the value of + ``BUILD_OS`` from the OS reported by the ``uname`` command - the + first word, converted to lower-case characters. 
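+
+ As a quick sketch of that derivation on a typical Linux build host:
+ ::
+
+    $ uname
+    Linux
+
+ which the build system converts to lower-case, giving ``BUILD_OS = "linux"``.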
+ + BUILD_PREFIX + The toolchain binary prefix used for native recipes. The OpenEmbedded + build system uses the ``BUILD_PREFIX`` value to set the + :term:`TARGET_PREFIX` when building for + ``native`` recipes. + + BUILD_STRIP + Specifies the command to be used to strip debugging symbols from + binaries produced for the build host. By default, ``BUILD_STRIP`` + points to + ``${``\ :term:`BUILD_PREFIX`\ ``}strip``. + + BUILD_SYS + Specifies the system, including the architecture and the operating + system, to use when building for the build host (i.e. when building + ``native`` recipes). + + The OpenEmbedded build system automatically sets this variable based + on :term:`BUILD_ARCH`, + :term:`BUILD_VENDOR`, and + :term:`BUILD_OS`. You do not need to set the + ``BUILD_SYS`` variable yourself. + + BUILD_VENDOR + Specifies the vendor name to use when building for the build host. + The default value is an empty string (""). + + BUILDDIR + Points to the location of the :term:`Build Directory`. + You can define this directory indirectly through the + ````` <#structure-core-script>`__ script by passing in a Build + Directory path when you run the script. If you run the script and do + not provide a Build Directory path, the ``BUILDDIR`` defaults to + ``build`` in the current directory. + + BUILDHISTORY_COMMIT + When inheriting the :ref:`buildhistory ` + class, this variable specifies whether or not to commit the build + history output in a local Git repository. If set to "1", this local + repository will be maintained automatically by the ``buildhistory`` + class and a commit will be created on every build for changes to each + top-level subdirectory of the build history output (images, packages, + and sdk). If you want to track changes to build history over time, + you should set this value to "1". + + By default, the ``buildhistory`` class does not commit the build + history output in a local Git repository: + :: + + BUILDHISTORY_COMMIT ?= "0" + + BUILDHISTORY_COMMIT_AUTHOR + When inheriting the :ref:`buildhistory ` + class, this variable specifies the author to use for each Git commit. + In order for the ``BUILDHISTORY_COMMIT_AUTHOR`` variable to work, the + :term:`BUILDHISTORY_COMMIT` variable must + be set to "1". + + Git requires that the value you provide for the + ``BUILDHISTORY_COMMIT_AUTHOR`` variable takes the form of "name + email@host". Providing an email address or host that is not valid + does not produce an error. + + By default, the ``buildhistory`` class sets the variable as follows: + :: + + BUILDHISTORY_COMMIT_AUTHOR ?= "buildhistory " + + BUILDHISTORY_DIR + When inheriting the :ref:`buildhistory ` + class, this variable specifies the directory in which build history + information is kept. For more information on how the variable works, + see the ``buildhistory.class``. + + By default, the ``buildhistory`` class sets the directory as follows: + :: + + BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory" + + BUILDHISTORY_FEATURES + When inheriting the :ref:`buildhistory ` + class, this variable specifies the build history features to be + enabled. For more information on how build history works, see the + ":ref:`dev-manual/dev-manual-common-tasks:maintaining build output quality`" + section in the Yocto Project Development Tasks Manual. + + You can specify these features in the form of a space-separated list: + + - *image:* Analysis of the contents of images, which includes the + list of installed packages among other things. 
+ + - *package:* Analysis of the contents of individual packages. + + - *sdk:* Analysis of the contents of the software development kit + (SDK). + + - *task:* Save output file signatures for + :ref:`shared state ` + (sstate) tasks. + This saves one file per task and lists the SHA-256 checksums for + each file staged (i.e. the output of the task). + + By default, the ``buildhistory`` class enables the following + features: + :: + + BUILDHISTORY_FEATURES ?= "image package sdk" + + BUILDHISTORY_IMAGE_FILES + When inheriting the :ref:`buildhistory ` + class, this variable specifies a list of paths to files copied from + the image contents into the build history directory under an + "image-files" directory in the directory for the image, so that you + can track the contents of each file. The default is to copy + ``/etc/passwd`` and ``/etc/group``, which allows you to monitor for + changes in user and group entries. You can modify the list to include + any file. Specifying an invalid path does not produce an error. + Consequently, you can include files that might not always be present. + + By default, the ``buildhistory`` class provides paths to the + following files: + :: + + BUILDHISTORY_IMAGE_FILES ?= "/etc/passwd /etc/group" + + BUILDHISTORY_PUSH_REPO + When inheriting the :ref:`buildhistory ` + class, this variable optionally specifies a remote repository to + which build history pushes Git changes. In order for + ``BUILDHISTORY_PUSH_REPO`` to work, + :term:`BUILDHISTORY_COMMIT` must be set to + "1". + + The repository should correspond to a remote address that specifies a + repository as understood by Git, or alternatively to a remote name + that you have set up manually using ``git remote`` within the local + repository. + + By default, the ``buildhistory`` class sets the variable as follows: + :: + + BUILDHISTORY_PUSH_REPO ?= "" + + BUILDSDK_CFLAGS + Specifies the flags to pass to the C compiler when building for the + SDK. When building in the ``nativesdk-`` context, + :term:`CFLAGS` is set to the value of this variable by + default. + + BUILDSDK_CPPFLAGS + Specifies the flags to pass to the C pre-processor (i.e. to both the + C and the C++ compilers) when building for the SDK. When building in + the ``nativesdk-`` context, :term:`CPPFLAGS` is set + to the value of this variable by default. + + BUILDSDK_CXXFLAGS + Specifies the flags to pass to the C++ compiler when building for the + SDK. When building in the ``nativesdk-`` context, + :term:`CXXFLAGS` is set to the value of this variable + by default. + + BUILDSDK_LDFLAGS + Specifies the flags to pass to the linker when building for the SDK. + When building in the ``nativesdk-`` context, + :term:`LDFLAGS` is set to the value of this variable + by default. + + BUILDSTATS_BASE + Points to the location of the directory that holds build statistics + when you use and enable the + :ref:`buildstats ` class. The + ``BUILDSTATS_BASE`` directory defaults to + ``${``\ :term:`TMPDIR`\ ``}/buildstats/``. + + BUSYBOX_SPLIT_SUID + For the BusyBox recipe, specifies whether to split the output + executable file into two parts: one for features that require + ``setuid root``, and one for the remaining features (i.e. those that + do not require ``setuid root``). + + The ``BUSYBOX_SPLIT_SUID`` variable defaults to "1", which results in + splitting the output executable file. Set the variable to "0" to get + a single output executable file. 
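+
+ A minimal sketch, assuming a ``busybox`` bbappend in your own layer,
+ that requests a single (non-split) executable:
+ ::
+
+    BUSYBOX_SPLIT_SUID = "0"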
+ + CACHE + Specifies the directory BitBake uses to store a cache of the + :term:`Metadata` so it does not need to be parsed every time + BitBake is started. + + CC + The minimal command and arguments used to run the C compiler. + + CFLAGS + Specifies the flags to pass to the C compiler. This variable is + exported to an environment variable and thus made visible to the + software being built during the compilation step. + + Default initialization for ``CFLAGS`` varies depending on what is + being built: + + - :term:`TARGET_CFLAGS` when building for the + target + + - :term:`BUILD_CFLAGS` when building for the + build host (i.e. ``-native``) + + - :term:`BUILDSDK_CFLAGS` when building for + an SDK (i.e. ``nativesdk-``) + + CLASSOVERRIDE + An internal variable specifying the special class override that + should currently apply (e.g. "class-target", "class-native", and so + forth). The classes that use this variable (e.g. + :ref:`native `, + :ref:`nativesdk `, and so forth) set the + variable to appropriate values. + + .. note:: + + CLASSOVERRIDE + gets its default "class-target" value from the + bitbake.conf + file. + + As an example, the following override allows you to install extra + files, but only when building for the target: + :: + + do_install_append_class-target() { + install my-extra-file ${D}${sysconfdir} + } + + Here is an example where ``FOO`` is set to + "native" when building for the build host, and to "other" when not + building for the build host: + :: + + FOO_class-native = "native" + FOO = "other" + + The underlying mechanism behind ``CLASSOVERRIDE`` is simply + that it is included in the default value of + :term:`OVERRIDES`. + + CLEANBROKEN + If set to "1" within a recipe, ``CLEANBROKEN`` specifies that the + ``make clean`` command does not work for the software being built. + Consequently, the OpenEmbedded build system will not try to run + ``make clean`` during the :ref:`ref-tasks-configure` + task, which is the default behavior. + + COMBINED_FEATURES + Provides a list of hardware features that are enabled in both + :term:`MACHINE_FEATURES` and + :term:`DISTRO_FEATURES`. This select list of + features contains features that make sense to be controlled both at + the machine and distribution configuration level. For example, the + "bluetooth" feature requires hardware support but should also be + optional at the distribution level, in case the hardware supports + Bluetooth but you do not ever intend to use it. + + COMMON_LICENSE_DIR + Points to ``meta/files/common-licenses`` in the + :term:`Source Directory`, which is where generic license + files reside. + + COMPATIBLE_HOST + A regular expression that resolves to one or more hosts (when the + recipe is native) or one or more targets (when the recipe is + non-native) with which a recipe is compatible. The regular expression + is matched against :term:`HOST_SYS`. You can use the + variable to stop recipes from being built for classes of systems with + which the recipes are not compatible. Stopping these builds is + particularly useful with kernels. The variable also helps to increase + parsing speed since the build system skips parsing recipes not + compatible with the current system. + + COMPATIBLE_MACHINE + A regular expression that resolves to one or more target machines + with which a recipe is compatible. The regular expression is matched + against :term:`MACHINEOVERRIDES`. You can use + the variable to stop recipes from being built for machines with which + the recipes are not compatible. 
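+
+ For instance, a recipe might restrict itself to a set of machines like
+ this (the machine names are illustrative):
+ ::
+
+    COMPATIBLE_MACHINE = "(qemux86|qemux86-64|myboard)"
+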
Stopping these builds is particularly + useful with kernels. The variable also helps to increase parsing + speed since the build system skips parsing recipes not compatible + with the current machine. + + COMPLEMENTARY_GLOB + Defines wildcards to match when installing a list of complementary + packages for all the packages explicitly (or implicitly) installed in + an image. + + .. note:: + + The + COMPLEMENTARY_GLOB + variable uses Unix filename pattern matching ( + fnmatch + ), which is similar to the Unix style pathname pattern expansion ( + glob + ). + + The resulting list of complementary packages is associated with an + item that can be added to + :term:`IMAGE_FEATURES`. An example usage of + this is the "dev-pkgs" item that when added to ``IMAGE_FEATURES`` + will install -dev packages (containing headers and other development + files) for every package in the image. + + To add a new feature item pointing to a wildcard, use a variable flag + to specify the feature item name and use the value to specify the + wildcard. Here is an example: + :: + + COMPLEMENTARY_GLOB[dev-pkgs] = '*-dev' + + COMPONENTS_DIR + Stores sysroot components for each recipe. The OpenEmbedded build + system uses ``COMPONENTS_DIR`` when constructing recipe-specific + sysroots for other recipes. + + The default is + "``${``\ :term:`STAGING_DIR`\ ``}-components``." + (i.e. + "``${``\ :term:`TMPDIR`\ ``}/sysroots-components``"). + + CONF_VERSION + Tracks the version of the local configuration file (i.e. + ``local.conf``). The value for ``CONF_VERSION`` increments each time + ``build/conf/`` compatibility changes. + + CONFFILES + Identifies editable or configurable files that are part of a package. + If the Package Management System (PMS) is being used to update + packages on the target system, it is possible that configuration + files you have changed after the original installation and that you + now want to remain unchanged are overwritten. In other words, + editable files might exist in the package that you do not want reset + as part of the package update process. You can use the ``CONFFILES`` + variable to list the files in the package that you wish to prevent + the PMS from overwriting during this update process. + + To use the ``CONFFILES`` variable, provide a package name override + that identifies the resulting package. Then, provide a + space-separated list of files. Here is an example: + :: + + CONFFILES_${PN} += "${sysconfdir}/file1 \ + ${sysconfdir}/file2 ${sysconfdir}/file3" + + A relationship exists between the ``CONFFILES`` and ``FILES`` + variables. The files listed within ``CONFFILES`` must be a subset of + the files listed within ``FILES``. Because the configuration files + you provide with ``CONFFILES`` are simply being identified so that + the PMS will not overwrite them, it makes sense that the files must + already be included as part of the package through the ``FILES`` + variable. + + .. note:: + + When specifying paths as part of the + CONFFILES + variable, it is good practice to use appropriate path variables. + For example, + ${sysconfdir} + rather than + /etc + or + ${bindir} + rather than + /usr/bin + . You can find a list of these variables at the top of the + meta/conf/bitbake.conf + file in the + Source Directory + . + + CONFIG_INITRAMFS_SOURCE + Identifies the initial RAM filesystem (initramfs) source files. The + OpenEmbedded build system receives and uses this kernel Kconfig + variable as an environment variable. By default, the variable is set + to null (""). 
+ + The ``CONFIG_INITRAMFS_SOURCE`` can be either a single cpio archive + with a ``.cpio`` suffix or a space-separated list of directories and + files for building the initramfs image. A cpio archive should contain + a filesystem archive to be used as an initramfs image. Directories + should contain a filesystem layout to be included in the initramfs + image. Files should contain entries according to the format described + by the ``usr/gen_init_cpio`` program in the kernel tree. + + If you specify multiple directories and files, the initramfs image + will be the aggregate of all of them. + + For information on creating an initramfs, see the + ":ref:`building-an-initramfs-image`" section + in the Yocto Project Development Tasks Manual. + + CONFIG_SITE + A list of files that contains ``autoconf`` test results relevant to + the current build. This variable is used by the Autotools utilities + when running ``configure``. + + CONFIGURE_FLAGS + The minimal arguments for GNU configure. + + CONFLICT_DISTRO_FEATURES + When inheriting the + :ref:`distro_features_check ` + class, this variable identifies distribution features that would be + in conflict should the recipe be built. In other words, if the + ``CONFLICT_DISTRO_FEATURES`` variable lists a feature that also + appears in ``DISTRO_FEATURES`` within the current configuration, an + error occurs and the build stops. + + COPYLEFT_LICENSE_EXCLUDE + A space-separated list of licenses to exclude from the source + archived by the :ref:`archiver ` class. In + other words, if a license in a recipe's + :term:`LICENSE` value is in the value of + ``COPYLEFT_LICENSE_EXCLUDE``, then its source is not archived by the + class. + + .. note:: + + The + COPYLEFT_LICENSE_EXCLUDE + variable takes precedence over the + COPYLEFT_LICENSE_INCLUDE + variable. + + The default value, which is "CLOSED Proprietary", for + ``COPYLEFT_LICENSE_EXCLUDE`` is set by the + :ref:`copyleft_filter ` class, which + is inherited by the ``archiver`` class. + + COPYLEFT_LICENSE_INCLUDE + A space-separated list of licenses to include in the source archived + by the :ref:`archiver ` class. In other + words, if a license in a recipe's :term:`LICENSE` + value is in the value of ``COPYLEFT_LICENSE_INCLUDE``, then its + source is archived by the class. + + The default value is set by the + :ref:`copyleft_filter ` class, which + is inherited by the ``archiver`` class. The default value includes + "GPL*", "LGPL*", and "AGPL*". + + COPYLEFT_PN_EXCLUDE + A list of recipes to exclude in the source archived by the + :ref:`archiver ` class. The + ``COPYLEFT_PN_EXCLUDE`` variable overrides the license inclusion and + exclusion caused through the + :term:`COPYLEFT_LICENSE_INCLUDE` and + :term:`COPYLEFT_LICENSE_EXCLUDE` + variables, respectively. + + The default value, which is "" indicating to not explicitly exclude + any recipes by name, for ``COPYLEFT_PN_EXCLUDE`` is set by the + :ref:`copyleft_filter ` class, which + is inherited by the ``archiver`` class. + + COPYLEFT_PN_INCLUDE + A list of recipes to include in the source archived by the + :ref:`archiver ` class. The + ``COPYLEFT_PN_INCLUDE`` variable overrides the license inclusion and + exclusion caused through the + :term:`COPYLEFT_LICENSE_INCLUDE` and + :term:`COPYLEFT_LICENSE_EXCLUDE` + variables, respectively. + + The default value, which is "" indicating to not explicitly include + any recipes by name, for ``COPYLEFT_PN_INCLUDE`` is set by the + :ref:`copyleft_filter ` class, which + is inherited by the ``archiver`` class. 
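+
+ Putting these together, a ``local.conf`` sketch that archives only
+ GPL- and LGPL-licensed source might look like this (the values are
+ illustrative rather than the exact defaults):
+ ::
+
+    INHERIT += "archiver"
+    COPYLEFT_LICENSE_INCLUDE = "GPL* LGPL*"
+    COPYLEFT_LICENSE_EXCLUDE = "CLOSED Proprietary"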
+ + COPYLEFT_RECIPE_TYPES + A space-separated list of recipe types to include in the source + archived by the :ref:`archiver ` class. + Recipe types are ``target``, ``native``, ``nativesdk``, ``cross``, + ``crosssdk``, and ``cross-canadian``. + + The default value, which is "target*", for ``COPYLEFT_RECIPE_TYPES`` + is set by the :ref:`copyleft_filter ` + class, which is inherited by the ``archiver`` class. + + COPY_LIC_DIRS + If set to "1" along with the + :term:`COPY_LIC_MANIFEST` variable, the + OpenEmbedded build system copies into the image the license files, + which are located in ``/usr/share/common-licenses``, for each + package. The license files are placed in directories within the image + itself during build time. + + .. note:: + + The + COPY_LIC_DIRS + does not offer a path for adding licenses for newly installed + packages to an image, which might be most suitable for read-only + filesystems that cannot be upgraded. See the + LICENSE_CREATE_PACKAGE + variable for additional information. You can also reference the " + Providing License Text + " section in the Yocto Project Development Tasks Manual for + information on providing license text. + + COPY_LIC_MANIFEST + If set to "1", the OpenEmbedded build system copies the license + manifest for the image to + ``/usr/share/common-licenses/license.manifest`` within the image + itself during build time. + + .. note:: + + The + COPY_LIC_MANIFEST + does not offer a path for adding licenses for newly installed + packages to an image, which might be most suitable for read-only + filesystems that cannot be upgraded. See the + LICENSE_CREATE_PACKAGE + variable for additional information. You can also reference the " + Providing License Text + " section in the Yocto Project Development Tasks Manual for + information on providing license text. + + CORE_IMAGE_EXTRA_INSTALL + Specifies the list of packages to be added to the image. You should + only set this variable in the ``local.conf`` configuration file found + in the :term:`Build Directory`. + + This variable replaces ``POKY_EXTRA_INSTALL``, which is no longer + supported. + + COREBASE + Specifies the parent directory of the OpenEmbedded-Core Metadata + layer (i.e. ``meta``). + + It is an important distinction that ``COREBASE`` points to the parent + of this layer and not the layer itself. Consider an example where you + have cloned the Poky Git repository and retained the ``poky`` name + for your local copy of the repository. In this case, ``COREBASE`` + points to the ``poky`` folder because it is the parent directory of + the ``poky/meta`` layer. + + COREBASE_FILES + Lists files from the :term:`COREBASE` directory that + should be copied other than the layers listed in the + ``bblayers.conf`` file. The ``COREBASE_FILES`` variable exists for + the purpose of copying metadata from the OpenEmbedded build system + into the extensible SDK. + + Explicitly listing files in ``COREBASE`` is needed because it + typically contains build directories and other files that should not + normally be copied into the extensible SDK. Consequently, the value + of ``COREBASE_FILES`` is used in order to only copy the files that + are actually needed. + + CPP + The minimal command and arguments used to run the C preprocessor. + + CPPFLAGS + Specifies the flags to pass to the C pre-processor (i.e. to both the + C and the C++ compilers). This variable is exported to an environment + variable and thus made visible to the software being built during the + compilation step. 
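+
+ For example, a recipe could append an extra preprocessor define (the
+ macro name is hypothetical):
+ ::
+
+    CPPFLAGS_append = " -DENABLE_EXTRA_LOGGING"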
+ + Default initialization for ``CPPFLAGS`` varies depending on what is + being built: + + - :term:`TARGET_CPPFLAGS` when building for + the target + + - :term:`BUILD_CPPFLAGS` when building for the + build host (i.e. ``-native``) + + - :term:`BUILDSDK_CPPFLAGS` when building + for an SDK (i.e. ``nativesdk-``) + + CROSS_COMPILE + The toolchain binary prefix for the target tools. The + ``CROSS_COMPILE`` variable is the same as the + :term:`TARGET_PREFIX` variable. + + .. note:: + + The OpenEmbedded build system sets the + CROSS_COMPILE + variable only in certain contexts (e.g. when building for kernel + and kernel module recipes). + + CVSDIR + The directory in which files checked out under the CVS system are + stored. + + CXX + The minimal command and arguments used to run the C++ compiler. + + CXXFLAGS + Specifies the flags to pass to the C++ compiler. This variable is + exported to an environment variable and thus made visible to the + software being built during the compilation step. + + Default initialization for ``CXXFLAGS`` varies depending on what is + being built: + + - :term:`TARGET_CXXFLAGS` when building for + the target + + - :term:`BUILD_CXXFLAGS` when building for the + build host (i.e. ``-native``) + + - :term:`BUILDSDK_CXXFLAGS` when building + for an SDK (i.e. ``nativesdk-``) + + D + The destination directory. The location in the :term:`Build Directory` + where components are installed by the + :ref:`ref-tasks-install` task. This location defaults + to: + :: + + ${WORKDIR}/image + + .. note:: + + Tasks that read from or write to this directory should run under + fakeroot + . + + DATE + The date the build was started. Dates appear using the year, month, + and day (YMD) format (e.g. "20150209" for February 9th, 2015). + + DATETIME + The date and time on which the current build started. The format is + suitable for timestamps. + + DEBIAN_NOAUTONAME + When the :ref:`debian ` class is inherited, + which is the default behavior, ``DEBIAN_NOAUTONAME`` specifies a + particular package should not be renamed according to Debian library + package naming. You must use the package name as an override when you + set this variable. Here is an example from the ``fontconfig`` recipe: + :: + + DEBIAN_NOAUTONAME_fontconfig-utils = "1" + + DEBIANNAME + When the :ref:`debian ` class is inherited, + which is the default behavior, ``DEBIANNAME`` allows you to override + the library name for an individual package. Overriding the library + name in these cases is rare. You must use the package name as an + override when you set this variable. Here is an example from the + ``dbus`` recipe: + :: + + DEBIANNAME_${PN} = "dbus-1" + + DEBUG_BUILD + Specifies to build packages with debugging information. This + influences the value of the ``SELECTED_OPTIMIZATION`` variable. + + DEBUG_OPTIMIZATION + The options to pass in ``TARGET_CFLAGS`` and ``CFLAGS`` when + compiling a system for debugging. This variable defaults to "-O + -fno-omit-frame-pointer ${DEBUG_FLAGS} -pipe". + + DEFAULT_PREFERENCE + Specifies a weak bias for recipe selection priority. + + The most common usage of this is variable is to set it to "-1" within + a recipe for a development version of a piece of software. Using the + variable in this way causes the stable version of the recipe to build + by default in the absence of ``PREFERRED_VERSION`` being used to + build the development version. + + .. 
note:: + + The bias provided by + DEFAULT_PREFERENCE + is weak and is overridden by + BBFILE_PRIORITY + if that variable is different between two layers that contain + different versions of the same recipe. + + DEFAULTTUNE + The default CPU and Application Binary Interface (ABI) tunings (i.e. + the "tune") used by the OpenEmbedded build system. The + ``DEFAULTTUNE`` helps define + :term:`TUNE_FEATURES`. + + The default tune is either implicitly or explicitly set by the + machine (:term:`MACHINE`). However, you can override + the setting using available tunes as defined with + :term:`AVAILTUNES`. + + DEPENDS + Lists a recipe's build-time dependencies. These are dependencies on + other recipes whose contents (e.g. headers and shared libraries) are + needed by the recipe at build time. + + As an example, consider a recipe ``foo`` that contains the following + assignment: + :: + + DEPENDS = "bar" + + The practical effect of the previous + assignment is that all files installed by bar will be available in + the appropriate staging sysroot, given by the + :term:`STAGING_DIR* ` variables, by the time the + :ref:`ref-tasks-configure` task for ``foo`` runs. + This mechanism is implemented by having ``do_configure`` depend on + the :ref:`ref-tasks-populate_sysroot` task of + each recipe listed in ``DEPENDS``, through a + ``[``\ :ref:`deptask `\ ``]`` + declaration in the :ref:`base ` class. + + .. note:: + + It seldom is necessary to reference, for example, + STAGING_DIR_HOST + explicitly. The standard classes and build-related variables are + configured to automatically use the appropriate staging sysroots. + + As another example, ``DEPENDS`` can also be used to add utilities + that run on the build machine during the build. For example, a recipe + that makes use of a code generator built by the recipe ``codegen`` + might have the following: + :: + + DEPENDS = "codegen-native" + + For more + information, see the :ref:`native ` class and + the :term:`EXTRANATIVEPATH` variable. + + .. note:: + + - ``DEPENDS`` is a list of recipe names. Or, to be more precise, + it is a list of :term:`PROVIDES` names, which + usually match recipe names. Putting a package name such as + "foo-dev" in ``DEPENDS`` does not make sense. Use "foo" + instead, as this will put files from all the packages that make + up ``foo``, which includes those from ``foo-dev``, into the + sysroot. + + - One recipe having another recipe in ``DEPENDS`` does not by + itself add any runtime dependencies between the packages + produced by the two recipes. However, as explained in the + ":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" + section in the Yocto Project Overview and Concepts Manual, + runtime dependencies will often be added automatically, meaning + ``DEPENDS`` alone is sufficient for most recipes. + + - Counterintuitively, ``DEPENDS`` is often necessary even for + recipes that install precompiled components. For example, if + ``libfoo`` is a precompiled library that links against + ``libbar``, then linking against ``libfoo`` requires both + ``libfoo`` and ``libbar`` to be available in the sysroot. + Without a ``DEPENDS`` from the recipe that installs ``libfoo`` + to the recipe that installs ``libbar``, other recipes might + fail to link against ``libfoo``. + + For information on runtime dependencies, see the + :term:`RDEPENDS` variable. 
You can also see the + ":ref:`Tasks `" and + ":ref:`Dependencies `" sections in the + BitBake User Manual for additional information on tasks and + dependencies. + + DEPLOY_DIR + Points to the general area that the OpenEmbedded build system uses to + place images, packages, SDKs, and other output files that are ready + to be used outside of the build system. By default, this directory + resides within the :term:`Build Directory` as + ``${TMPDIR}/deploy``. + + For more information on the structure of the Build Directory, see + ":ref:`ref-manual/ref-structure:the build directory - \`\`build/\`\``" section. + For more detail on the contents of the ``deploy`` directory, see the + ":ref:`Images `", ":ref:`Package + Feeds `", and + ":ref:`sdk-dev-environment`" sections all in the + Yocto Project Overview and Concepts Manual. + + DEPLOY_DIR_DEB + Points to the area that the OpenEmbedded build system uses to place + Debian packages that are ready to be used outside of the build + system. This variable applies only when + :term:`PACKAGE_CLASSES` contains + "package_deb". + + The BitBake configuration file initially defines the + ``DEPLOY_DIR_DEB`` variable as a sub-folder of + :term:`DEPLOY_DIR`: + :: + + DEPLOY_DIR_DEB = "${DEPLOY_DIR}/deb" + + The :ref:`package_deb ` class uses the + ``DEPLOY_DIR_DEB`` variable to make sure the + :ref:`ref-tasks-package_write_deb` task + writes Debian packages into the appropriate folder. For more + information on how packaging works, see the ":ref:`Package + Feeds `" section + in the Yocto Project Overview and Concepts Manual. + + DEPLOY_DIR_IMAGE + Points to the area that the OpenEmbedded build system uses to place + images and other associated output files that are ready to be + deployed onto the target machine. The directory is machine-specific + as it contains the ``${MACHINE}`` name. By default, this directory + resides within the :term:`Build Directory` as + ``${DEPLOY_DIR}/images/${MACHINE}/``. + + For more information on the structure of the Build Directory, see + ":ref:`ref-manual/ref-structure:the build directory - \`\`build/\`\``" section. + For more detail on the contents of the ``deploy`` directory, see the + ":ref:`Images `" and + ":ref:`sdk-dev-environment`" sections both in + the Yocto Project Overview and Concepts Manual. + + DEPLOY_DIR_IPK + Points to the area that the OpenEmbedded build system uses to place + IPK packages that are ready to be used outside of the build system. + This variable applies only when + :term:`PACKAGE_CLASSES` contains + "package_ipk". + + The BitBake configuration file initially defines this variable as a + sub-folder of :term:`DEPLOY_DIR`: + :: + + DEPLOY_DIR_IPK = "${DEPLOY_DIR}/ipk" + + The :ref:`package_ipk ` class uses the + ``DEPLOY_DIR_IPK`` variable to make sure the + :ref:`ref-tasks-package_write_ipk` task + writes IPK packages into the appropriate folder. For more information + on how packaging works, see the ":ref:`Package + Feeds `" section + in the Yocto Project Overview and Concepts Manual. + + DEPLOY_DIR_RPM + Points to the area that the OpenEmbedded build system uses to place + RPM packages that are ready to be used outside of the build system. + This variable applies only when + :term:`PACKAGE_CLASSES` contains + "package_rpm". 
+ + The BitBake configuration file initially defines this variable as a + sub-folder of :term:`DEPLOY_DIR`: + :: + + DEPLOY_DIR_RPM = "${DEPLOY_DIR}/rpm" + + The :ref:`package_rpm ` class uses the + ``DEPLOY_DIR_RPM`` variable to make sure the + :ref:`ref-tasks-package_write_rpm` task + writes RPM packages into the appropriate folder. For more information + on how packaging works, see the ":ref:`Package + Feeds `" section + in the Yocto Project Overview and Concepts Manual. + + DEPLOY_DIR_TAR + Points to the area that the OpenEmbedded build system uses to place + tarballs that are ready to be used outside of the build system. This + variable applies only when + :term:`PACKAGE_CLASSES` contains + "package_tar". + + The BitBake configuration file initially defines this variable as a + sub-folder of :term:`DEPLOY_DIR`: + :: + + DEPLOY_DIR_TAR = "${DEPLOY_DIR}/tar" + + The :ref:`package_tar ` class uses the + ``DEPLOY_DIR_TAR`` variable to make sure the + :ref:`ref-tasks-package_write_tar` task + writes TAR packages into the appropriate folder. For more information + on how packaging works, see the ":ref:`Package + Feeds `" section + in the Yocto Project Overview and Concepts Manual. + + DEPLOYDIR + When inheriting the :ref:`deploy ` class, + ``DEPLOYDIR`` points to a temporary work area for deployed files that + is set in the ``deploy`` class as follows: + :: + + DEPLOYDIR = "${WORKDIR}/deploy-${PN}" + + Recipes inheriting the ``deploy`` class should copy files to be + deployed into ``DEPLOYDIR``, and the class will take care of copying + them into :term:`DEPLOY_DIR_IMAGE` + afterwards. + + DESCRIPTION + The package description used by package managers. If not set, + ``DESCRIPTION`` takes the value of the :term:`SUMMARY` + variable. + + DISTRO + The short name of the distribution. For information on the long name + of the distribution, see the :term:`DISTRO_NAME` + variable. + + The ``DISTRO`` variable corresponds to a distribution configuration + file whose root name is the same as the variable's argument and whose + filename extension is ``.conf``. For example, the distribution + configuration file for the Poky distribution is named ``poky.conf`` + and resides in the ``meta-poky/conf/distro`` directory of the + :term:`Source Directory`. + + Within that ``poky.conf`` file, the ``DISTRO`` variable is set as + follows: + :: + + DISTRO = "poky" + + Distribution configuration files are located in a ``conf/distro`` + directory within the :term:`Metadata` that contains the + distribution configuration. The value for ``DISTRO`` must not contain + spaces, and is typically all lower-case. + + .. note:: + + If the + DISTRO + variable is blank, a set of default configurations is used, which + is specified within + meta/conf/distro/defaultsetup.conf + also in the Source Directory. + + DISTRO_CODENAME + Specifies a codename for the distribution being built. + + DISTRO_EXTRA_RDEPENDS + Specifies a list of distro-specific packages to add to all images. + This variable takes effect through ``packagegroup-base`` so the + variable only really applies to the more full-featured images that + include ``packagegroup-base``. You can use this variable to keep + distro policy out of generic images. As with all other distro + variables, you set this variable in the distro ``.conf`` file. + + DISTRO_EXTRA_RRECOMMENDS + Specifies a list of distro-specific packages to add to all images if + the packages exist. The packages might not exist or be empty (e.g. + kernel modules).
The list of packages are automatically installed but + you can remove them. + + DISTRO_FEATURES + The software support you want in your distribution for various + features. You define your distribution features in the distribution + configuration file. + + In most cases, the presence or absence of a feature in + ``DISTRO_FEATURES`` is translated to the appropriate option supplied + to the configure script during the + :ref:`ref-tasks-configure` task for recipes that + optionally support the feature. For example, specifying "x11" in + ``DISTRO_FEATURES``, causes every piece of software built for the + target that can optionally support X11 to have its X11 support + enabled. + + Two more examples are Bluetooth and NFS support. For a more complete + list of features that ships with the Yocto Project and that you can + provide with this variable, see the "`Distro + Features <#ref-features-distro>`__" section. + + DISTRO_FEATURES_BACKFILL + Features to be added to ``DISTRO_FEATURES`` if not also present in + ``DISTRO_FEATURES_BACKFILL_CONSIDERED``. + + This variable is set in the ``meta/conf/bitbake.conf`` file. It is + not intended to be user-configurable. It is best to just reference + the variable to see which distro features are being backfilled for + all distro configurations. See the "`Feature + Backfilling <#ref-features-backfill>`__" section for more + information. + + DISTRO_FEATURES_BACKFILL_CONSIDERED + Features from ``DISTRO_FEATURES_BACKFILL`` that should not be + backfilled (i.e. added to ``DISTRO_FEATURES``) during the build. See + the "`Feature Backfilling <#ref-features-backfill>`__" section for + more information. + + DISTRO_FEATURES_DEFAULT + A convenience variable that gives you the default list of distro + features with the exception of any features specific to the C library + (``libc``). + + When creating a custom distribution, you might find it useful to be + able to reuse the default + :term:`DISTRO_FEATURES` options without the + need to write out the full set. Here is an example that uses + ``DISTRO_FEATURES_DEFAULT`` from a custom distro configuration file: + :: + + DISTRO_FEATURES ?= "${DISTRO_FEATURES_DEFAULT} myfeature" + + DISTRO_FEATURES_FILTER_NATIVE + Specifies a list of features that if present in the target + :term:`DISTRO_FEATURES` value should be + included in ``DISTRO_FEATURES`` when building native recipes. This + variable is used in addition to the features filtered using the + :term:`DISTRO_FEATURES_NATIVE` + variable. + + DISTRO_FEATURES_FILTER_NATIVESDK + Specifies a list of features that if present in the target + :term:`DISTRO_FEATURES` value should be + included in ``DISTRO_FEATURES`` when building nativesdk recipes. This + variable is used in addition to the features filtered using the + :term:`DISTRO_FEATURES_NATIVESDK` + variable. + + DISTRO_FEATURES_NATIVE + Specifies a list of features that should be included in + :term:`DISTRO_FEATURES` when building native + recipes. This variable is used in addition to the features filtered + using the + :term:`DISTRO_FEATURES_FILTER_NATIVE` + variable. + + DISTRO_FEATURES_NATIVESDK + Specifies a list of features that should be included in + :term:`DISTRO_FEATURES` when building + nativesdk recipes. This variable is used in addition to the features + filtered using the + :term:`DISTRO_FEATURES_FILTER_NATIVESDK` + variable. + + DISTRO_NAME + The long name of the distribution. For information on the short name + of the distribution, see the :term:`DISTRO` variable. 
+ + The ``DISTRO_NAME`` variable corresponds to a distribution + configuration file whose root name is the same as the variable's + argument and whose filename extension is ``.conf``. For example, the + distribution configuration file for the Poky distribution is named + ``poky.conf`` and resides in the ``meta-poky/conf/distro`` directory + of the :term:`Source Directory`. + + Within that ``poky.conf`` file, the ``DISTRO_NAME`` variable is set + as follows: + :: + + DISTRO_NAME = "Poky (Yocto Project Reference Distro)" + + Distribution configuration files are located in a ``conf/distro`` + directory within the :term:`Metadata` that contains the + distribution configuration. + + .. note:: + + If the + DISTRO_NAME + variable is blank, a set of default configurations are used, which + are specified within + meta/conf/distro/defaultsetup.conf + also in the Source Directory. + + DISTRO_VERSION + The version of the distribution. + + DISTROOVERRIDES + A colon-separated list of overrides specific to the current + distribution. By default, this list includes the value of + :term:`DISTRO`. + + You can extend ``DISTROOVERRIDES`` to add extra overrides that should + apply to the distribution. + + The underlying mechanism behind ``DISTROOVERRIDES`` is simply that it + is included in the default value of + :term:`OVERRIDES`. + + DL_DIR + The central download directory used by the build process to store + downloads. By default, ``DL_DIR`` gets files suitable for mirroring + for everything except Git repositories. If you want tarballs of Git + repositories, use the + :term:`BB_GENERATE_MIRROR_TARBALLS` + variable. + + You can set this directory by defining the ``DL_DIR`` variable in the + ``conf/local.conf`` file. This directory is self-maintaining and you + should not have to touch it. By default, the directory is + ``downloads`` in the :term:`Build Directory`. + :: + + #DL_DIR ?= "${TOPDIR}/downloads" + + To specify a different download directory, + simply remove the comment from the line and provide your directory. + + During a first build, the system downloads many different source code + tarballs from various upstream projects. Downloading can take a + while, particularly if your network connection is slow. Tarballs are + all stored in the directory defined by ``DL_DIR`` and the build + system looks there first to find source tarballs. + + .. note:: + + When wiping and rebuilding, you can preserve this directory to + speed up this part of subsequent builds. + + You can safely share this directory between multiple builds on the + same development machine. For additional information on how the build + process gets source files when working behind a firewall or proxy + server, see this specific question in the + "`FAQ <#how-does-the-yocto-project-obtain-source-code-and-will-it-work-behind-my-firewall-or-proxy-server>`__" + chapter. You can also refer to the + ":yocto_wiki:`Working Behind a Network Proxy `" + Wiki page. + + DOC_COMPRESS + When inheriting the :ref:`compress_doc ` + class, this variable sets the compression policy used when the + OpenEmbedded build system compresses man pages and info pages. By + default, the compression method used is gz (gzip). Other policies + available are xz and bz2. + + For information on policies and on how to use this variable, see the + comments in the ``meta/classes/compress_doc.bbclass`` file. + + EFI_PROVIDER + When building bootable images (i.e. 
where ``hddimg``, ``iso``, or + ``wic.vmdk`` is in :term:`IMAGE_FSTYPES`), the + ``EFI_PROVIDER`` variable specifies the EFI bootloader to use. The + default is "grub-efi", but "systemd-boot" can be used instead. + + See the :ref:`systemd-boot ` and + :ref:`image-live ` classes for more + information. + + ENABLE_BINARY_LOCALE_GENERATION + Variable that controls which locales for ``glibc`` are generated + during the build (useful if the target device has 64Mbytes of RAM or + less). + + ERR_REPORT_DIR + When used with the :ref:`report-error ` + class, specifies the path used for storing the debug files created by + the :ref:`error reporting + tool `, which + allows you to submit build errors you encounter to a central + database. By default, the value of this variable is + ``${``\ :term:`LOG_DIR`\ ``}/error-report``. + + You can set ``ERR_REPORT_DIR`` to the path you want the error + reporting tool to store the debug files as follows in your + ``local.conf`` file: + :: + + ERR_REPORT_DIR = "path" + + ERROR_QA + Specifies the quality assurance checks whose failures are reported as + errors by the OpenEmbedded build system. You set this variable in + your distribution configuration file. For a list of the checks you + can control with this variable, see the + ":ref:`insane.bbclass `" section. + + EXCLUDE_FROM_SHLIBS + Triggers the OpenEmbedded build system's shared libraries resolver to + exclude an entire package when scanning for shared libraries. + + .. note:: + + The shared libraries resolver's functionality results in part from + the internal function + package_do_shlibs + , which is part of the + do_package + task. You should be aware that the shared libraries resolver might + implicitly define some dependencies between packages. + + The ``EXCLUDE_FROM_SHLIBS`` variable is similar to the + :term:`PRIVATE_LIBS` variable, which excludes a + package's particular libraries only and not the whole package. + + Use the ``EXCLUDE_FROM_SHLIBS`` variable by setting it to "1" for a + particular package: + :: + + EXCLUDE_FROM_SHLIBS = "1" + + EXCLUDE_FROM_WORLD + Directs BitBake to exclude a recipe from world builds (i.e. + ``bitbake world``). During world builds, BitBake locates, parses and + builds all recipes found in every layer exposed in the + ``bblayers.conf`` configuration file. + + To exclude a recipe from a world build using this variable, set the + variable to "1" in the recipe. + + .. note:: + + Recipes added to + EXCLUDE_FROM_WORLD + may still be built during a world build in order to satisfy + dependencies of other recipes. Adding a recipe to + EXCLUDE_FROM_WORLD + only ensures that the recipe is not explicitly added to the list + of build targets in a world build. + + EXTENDPE + Used with file and pathnames to create a prefix for a recipe's + version based on the recipe's :term:`PE` value. If ``PE`` + is set and greater than zero for a recipe, ``EXTENDPE`` becomes that + value (e.g if ``PE`` is equal to "1" then ``EXTENDPE`` becomes "1"). + If a recipe's ``PE`` is not set (the default) or is equal to zero, + ``EXTENDPE`` becomes "". + + See the :term:`STAMP` variable for an example. + + EXTENDPKGV + The full package version specification as it appears on the final + packages produced by a recipe. 
The variable's value is normally used + to fix a runtime dependency to the exact same version of another + package in the same recipe: + :: + + RDEPENDS_${PN}-additional-module = "${PN} (= ${EXTENDPKGV})" + + The dependency relationships are intended to force the package + manager to upgrade these types of packages in lock-step. + + EXTERNAL_KERNEL_TOOLS + When set, the ``EXTERNAL_KERNEL_TOOLS`` variable indicates that these + tools are not in the source tree. + + When kernel tools are available in the tree, they are preferred over + any externally installed tools. Setting the ``EXTERNAL_KERNEL_TOOLS`` + variable tells the OpenEmbedded build system to prefer the installed + external tools. See the + :ref:`kernel-yocto ` class in + ``meta/classes`` to see how the variable is used. + + EXTERNALSRC + When inheriting the :ref:`externalsrc ` + class, this variable points to the source tree, which is outside of + the OpenEmbedded build system. When set, this variable sets the + :term:`S` variable, which is what the OpenEmbedded build + system uses to locate unpacked recipe source code. + + For more information on ``externalsrc.bbclass``, see the + ":ref:`externalsrc.bbclass `" section. You + can also find information on how to use this variable in the + ":ref:`dev-manual/dev-manual-common-tasks:building software from an external source`" + section in the Yocto Project Development Tasks Manual. + + EXTERNALSRC_BUILD + When inheriting the :ref:`externalsrc ` + class, this variable points to the directory in which the recipe's + source code is built, which is outside of the OpenEmbedded build + system. When set, this variable sets the :term:`B` variable, + which is what the OpenEmbedded build system uses to locate the Build + Directory. + + For more information on ``externalsrc.bbclass``, see the + ":ref:`externalsrc.bbclass `" section. You + can also find information on how to use this variable in the + ":ref:`dev-manual/dev-manual-common-tasks:building software from an external source`" + section in the Yocto Project Development Tasks Manual. + + EXTRA_AUTORECONF + For recipes inheriting the :ref:`autotools ` + class, you can use ``EXTRA_AUTORECONF`` to specify extra options to + pass to the ``autoreconf`` command that is executed during the + :ref:`ref-tasks-configure` task. + + The default value is "--exclude=autopoint". + + EXTRA_IMAGE_FEATURES + A list of additional features to include in an image. When listing + more than one feature, separate them with a space. + + Typically, you configure this variable in your ``local.conf`` file, + which is found in the :term:`Build Directory`. + Although you can use this variable from within a recipe, best + practices dictate that you do not. + + .. note:: + + To enable primary features from within the image recipe, use the + IMAGE_FEATURES + variable. + + Here are some examples of features you can add: + + - "dbg-pkgs" - Adds -dbg packages for all installed packages including + symbol information for debugging and profiling. + + - "debug-tweaks" - Makes an image suitable for debugging. For example, allows root logins without passwords and + enables post-installation logging. See the 'allow-empty-password' and + 'post-install-logging' features in the "`Image + Features <#ref-features-image>`__" section for more information. + - "dev-pkgs" - Adds -dev packages for all installed packages. This is + useful if you want to develop against the libraries in the image. + - "read-only-rootfs" - Creates an image whose root filesystem is + read-only. 
See the + ":ref:`dev-manual/dev-manual-common-tasks:creating a read-only root filesystem`" + section in the Yocto Project Development Tasks Manual for more + information + - "tools-debug" - Adds debugging tools such as gdb and strace. + - "tools-sdk" - Adds development tools such as gcc, make, + pkgconfig and so forth. + - "tools-testapps" - Adds useful testing tools + such as ts_print, aplay, arecord and so forth. + + For a complete list of image features that ships with the Yocto + Project, see the "`Image Features <#ref-features-image>`__" section. + + For an example that shows how to customize your image by using this + variable, see the ":ref:`usingpoky-extend-customimage-imagefeatures`" + section in the Yocto Project Development Tasks Manual. + + EXTRA_IMAGECMD + Specifies additional options for the image creation command that has + been specified in :term:`IMAGE_CMD`. When setting + this variable, use an override for the associated image type. Here is + an example: + :: + + EXTRA_IMAGECMD_ext3 ?= "-i 4096" + + EXTRA_IMAGEDEPENDS + A list of recipes to build that do not provide packages for + installing into the root filesystem. + + Sometimes a recipe is required to build the final image but is not + needed in the root filesystem. You can use the ``EXTRA_IMAGEDEPENDS`` + variable to list these recipes and thus specify the dependencies. A + typical example is a required bootloader in a machine configuration. + + .. note:: + + To add packages to the root filesystem, see the various + \*RDEPENDS and \*RRECOMMENDS + variables. + + EXTRANATIVEPATH + A list of subdirectories of + ``${``\ :term:`STAGING_BINDIR_NATIVE`\ ``}`` + added to the beginning of the environment variable ``PATH``. As an + example, the following prepends + "${STAGING_BINDIR_NATIVE}/foo:${STAGING_BINDIR_NATIVE}/bar:" to + ``PATH``: + :: + + EXTRANATIVEPATH = "foo bar" + + EXTRA_OECMAKE + Additional `CMake `__ options. See the + :ref:`cmake ` class for additional information. + + EXTRA_OECONF + Additional ``configure`` script options. See + :term:`PACKAGECONFIG_CONFARGS` for + additional information on passing configure script options. + + EXTRA_OEMAKE + Additional GNU ``make`` options. + + Because the ``EXTRA_OEMAKE`` defaults to "", you need to set the + variable to specify any required GNU options. + + :term:`PARALLEL_MAKE` and + :term:`PARALLEL_MAKEINST` also make use of + ``EXTRA_OEMAKE`` to pass the required flags. + + EXTRA_OESCONS + When inheriting the :ref:`scons ` class, this + variable specifies additional configuration options you want to pass + to the ``scons`` command line. + + EXTRA_USERS_PARAMS + When inheriting the :ref:`extrausers ` + class, this variable provides image level user and group operations. + This is a more global method of providing user and group + configuration as compared to using the + :ref:`useradd ` class, which ties user and + group configurations to a specific recipe. + + The set list of commands you can configure using the + ``EXTRA_USERS_PARAMS`` is shown in the ``extrausers`` class. These + commands map to the normal Unix commands of the same names: + :: + + # EXTRA_USERS_PARAMS = "\ + # useradd -p '' tester; \ + # groupadd developers; \ + # userdel nobody; \ + # groupdel -g video; \ + # groupmod -g 1020 developers; \ + # usermod -s /bin/sh tester; \ + # " + + FEATURE_PACKAGES + Defines one or more packages to include in an image when a specific + item is included in :term:`IMAGE_FEATURES`. 
+ When setting the value, ``FEATURE_PACKAGES`` should have the name of + the feature item as an override. Here is an example: + :: + + FEATURE_PACKAGES_widget = "package1 package2" + + In this example, if "widget" were added to ``IMAGE_FEATURES``, + package1 and package2 would be included in the image. + + .. note:: + + Packages installed by features defined through + FEATURE_PACKAGES + are often package groups. While similarly named, you should not + confuse the + FEATURE_PACKAGES + variable with package groups, which are discussed elsewhere in the + documentation. + + FEED_DEPLOYDIR_BASE_URI + Points to the base URL of the server and location within the + document-root that provides the metadata and packages required by + OPKG to support runtime package management of IPK packages. You set + this variable in your ``local.conf`` file. + + Consider the following example: + :: + + FEED_DEPLOYDIR_BASE_URI = "http://192.168.7.1/BOARD-dir" + + This example assumes you are serving + your packages over HTTP and your databases are located in a directory + named ``BOARD-dir``, which is underneath your HTTP server's + document-root. In this case, the OpenEmbedded build system generates + a set of configuration files for you in your target that work with + the feed. + + FILES + The list of files and directories that are placed in a package. The + :term:`PACKAGES` variable lists the packages + generated by a recipe. + + To use the ``FILES`` variable, provide a package name override that + identifies the resulting package. Then, provide a space-separated + list of files or paths that identify the files you want included as + part of the resulting package. Here is an example: + :: + + FILES_${PN} += "${bindir}/mydir1 ${bindir}/mydir2/myfile" + + .. note:: + + - When specifying files or paths, you can pattern match using + Python's + `glob `_ + syntax. For details on the syntax, see the documentation by + following the previous link. + + - When specifying paths as part of the ``FILES`` variable, it is + good practice to use appropriate path variables. For example, + use ``${sysconfdir}`` rather than ``/etc``, or ``${bindir}`` + rather than ``/usr/bin``. You can find a list of these + variables at the top of the ``meta/conf/bitbake.conf`` file in + the :term:`Source Directory`. You will also + find the default values of the various ``FILES_*`` variables in + this file. + + If some of the files you provide with the ``FILES`` variable are + editable and you know they should not be overwritten during the + package update process by the Package Management System (PMS), you + can identify these files so that the PMS will not overwrite them. See + the :term:`CONFFILES` variable for information on + how to identify these files to the PMS. + + FILES_SOLIBSDEV + Defines the file specification to match + :term:`SOLIBSDEV`. In other words, + ``FILES_SOLIBSDEV`` defines the full path name of the development + symbolic link (symlink) for shared libraries on the target platform. + + The following statement from the ``bitbake.conf`` shows how it is + set: + :: + + FILES_SOLIBSDEV ?= "${base_libdir}/lib*${SOLIBSDEV} ${libdir}/lib*${SOLIBSDEV}" + + FILESEXTRAPATHS + Extends the search path the OpenEmbedded build system uses when + looking for files and patches as it processes recipes and append + files. The default directories BitBake uses when it processes recipes + are initially defined by the :term:`FILESPATH` + variable. You can extend ``FILESPATH`` variable by using + ``FILESEXTRAPATHS``. 
+ + Best practices dictate that you accomplish this by using + ``FILESEXTRAPATHS`` from within a ``.bbappend`` file and that you + prepend paths as follows: + :: + + FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:" + + In the above example, the build system first + looks for files in a directory that has the same name as the + corresponding append file. + + .. note:: + + When extending ``FILESEXTRAPATHS``, be sure to use the immediate + expansion (``:=``) operator. Immediate expansion makes sure that + BitBake evaluates :term:`THISDIR` at the time the + directive is encountered rather than at some later time when + expansion might result in a directory that does not contain the + files you need. + + Also, include the trailing separating colon character if you are + prepending. The trailing colon character is necessary because you + are directing BitBake to extend the path by prepending directories + to the search path. + + Here is another common use: + :: + + FILESEXTRAPATHS_prepend := "${THISDIR}/files:" + + In this example, the build system extends the + ``FILESPATH`` variable to include a directory named ``files`` that is + in the same directory as the corresponding append file. + + This next example specifically adds three paths: + :: + + FILESEXTRAPATHS_prepend := "path_1:path_2:path_3:" + + A final example shows how you can extend the search path and include + a :term:`MACHINE`-specific override, which is useful + in a BSP layer: + :: + + FILESEXTRAPATHS_prepend_intel-x86-common := "${THISDIR}/${PN}:" + + The previous statement appears in the + ``linux-yocto-dev.bbappend`` file, which is found in the + :ref:`overview-manual/overview-manual-development-environment:yocto project source repositories` in + ``meta-intel/common/recipes-kernel/linux``. Here, the machine + override is a special :term:`PACKAGE_ARCH` + definition for multiple ``meta-intel`` machines. + + .. note:: + + For a layer that supports a single BSP, the override could just be + the value of + MACHINE + . + + By prepending paths in ``.bbappend`` files, you allow multiple append + files that reside in different layers but are used for the same + recipe to correctly extend the path. + + FILESOVERRIDES + A subset of :term:`OVERRIDES` used by the + OpenEmbedded build system for creating + :term:`FILESPATH`. The ``FILESOVERRIDES`` variable + uses overrides to automatically extend the + :term:`FILESPATH` variable. For an example of how + that works, see the :term:`FILESPATH` variable + description. Additionally, you find more information on how overrides + are handled in the + ":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax (overrides)`" + section of the BitBake User Manual. + + By default, the ``FILESOVERRIDES`` variable is defined as: + :: + + FILESOVERRIDES = "${TRANSLATED_TARGET_ARCH}:${MACHINEOVERRIDES}:${DISTROOVERRIDES}" + + .. note:: + + Do not hand-edit the + FILESOVERRIDES + variable. The values match up with expected overrides and are used + in an expected manner by the build system. + + FILESPATH + The default set of directories the OpenEmbedded build system uses + when searching for patches and files. + + During the build process, BitBake searches each directory in + ``FILESPATH`` in the specified order when looking for files and + patches specified by each ``file://`` URI in a recipe's + :term:`SRC_URI` statements. 
+ + The default value for the ``FILESPATH`` variable is defined in the + ``base.bbclass`` class found in ``meta/classes`` in the + :term:`Source Directory`: + :: + + FILESPATH = "${@base_set_filespath(["${FILE_DIRNAME}/${BP}", \ + "${FILE_DIRNAME}/${BPN}", "${FILE_DIRNAME}/files"], d)}" + + The + ``FILESPATH`` variable is automatically extended using the overrides + from the :term:`FILESOVERRIDES` variable. + + .. note:: + + - Do not hand-edit the ``FILESPATH`` variable. If you want the + build system to look in directories other than the defaults, + extend the ``FILESPATH`` variable by using the + :term:`FILESEXTRAPATHS` variable. + + - Be aware that the default ``FILESPATH`` directories do not map + to directories in custom layers where append files + (``.bbappend``) are used. If you want the build system to find + patches or files that reside with your append files, you need + to extend the ``FILESPATH`` variable by using the + ``FILESEXTRAPATHS`` variable. + + You can take advantage of this searching behavior in useful ways. For + example, consider a case where the following directory structure + exists for general and machine-specific configurations: + :: + + files/defconfig + files/MACHINEA/defconfig + files/MACHINEB/defconfig + + Also in the example, the ``SRC_URI`` statement contains + "file://defconfig". Given this scenario, you can set + :term:`MACHINE` to "MACHINEA" and cause the build + system to use files from ``files/MACHINEA``. Set ``MACHINE`` to + "MACHINEB" and the build system uses files from ``files/MACHINEB``. + Finally, for any machine other than "MACHINEA" and "MACHINEB", the + build system uses files from ``files/defconfig``. + + You can find out more about the patching process in the + ":ref:`patching-dev-environment`" section + in the Yocto Project Overview and Concepts Manual and the + ":ref:`new-recipe-patching-code`" section in + the Yocto Project Development Tasks Manual. See the + :ref:`ref-tasks-patch` task as well. + + FILESYSTEM_PERMS_TABLES + Allows you to define your own file permissions settings table as part + of your configuration for the packaging process. For example, suppose + you need a consistent set of custom permissions for a set of groups + and users across an entire work project. It is best to do this in the + packages themselves but this is not always possible. + + By default, the OpenEmbedded build system uses the ``fs-perms.txt``, + which is located in the ``meta/files`` folder in the :term:`Source Directory`. + If you create your own file + permissions setting table, you should place it in your layer or the + distro's layer. + + You define the ``FILESYSTEM_PERMS_TABLES`` variable in the + ``conf/local.conf`` file, which is found in the :term:`Build Directory`, + to point to your custom + ``fs-perms.txt``. You can specify more than a single file permissions + setting table. The paths you specify to these files must be defined + within the :term:`BBPATH` variable. + + For guidance on how to create your own file permissions settings + table file, examine the existing ``fs-perms.txt``. + + FIT_HASH_ALG + Specifies the hash algorithm used in creating the FIT Image. For e.g. sha256. + + FIT_SIGN_ALG + Specifies the signature algorithm used in creating the FIT Image. + For e.g. rsa2048. + + FONT_EXTRA_RDEPENDS + When inheriting the :ref:`fontcache ` class, + this variable specifies the runtime dependencies for font packages. + By default, the ``FONT_EXTRA_RDEPENDS`` is set to "fontconfig-utils". 
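+ + For example, a font recipe that inherits the ``fontcache`` class could clear this default if the target image intentionally does not ship ``fontconfig-utils``. The following is only a sketch; ``ttf-myfont`` is a hypothetical recipe name, not one provided by the Yocto Project: + :: + + # Hypothetical ttf-myfont recipe: generate the font cache but do not + # add fontconfig-utils as a runtime dependency of the font packages. + inherit fontcache + FONT_EXTRA_RDEPENDS = ""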
+ + FONT_PACKAGES + When inheriting the :ref:`fontcache ` class, + this variable identifies packages containing font files that need to + be cached by Fontconfig. By default, the ``fontcache`` class assumes + that fonts are in the recipe's main package (i.e. + ``${``\ :term:`PN`\ ``}``). Use this variable if fonts you + need are in a package other than that main package. + + FORCE_RO_REMOVE + Forces the removal of the packages listed in ``ROOTFS_RO_UNNEEDED`` + during the generation of the root filesystem. + + Set the variable to "1" to force the removal of these packages. + + FULL_OPTIMIZATION + The options to pass in ``TARGET_CFLAGS`` and ``CFLAGS`` when + compiling an optimized system. This variable defaults to "-O2 -pipe + ${DEBUG_FLAGS}". + + GCCPIE + Enables Position Independent Executables (PIE) within the GNU C + Compiler (GCC). Enabling PIE in the GCC makes Return Oriented + Programming (ROP) attacks much more difficult to execute. + + By default, the ``security_flags.inc`` file enables PIE by setting the + variable as follows: + :: + + GCCPIE ?= "--enable-default-pie" + + GCCVERSION + Specifies the default version of the GNU C Compiler (GCC) used for + compilation. By default, ``GCCVERSION`` is set to "8.x" in the + ``meta/conf/distro/include/tcmode-default.inc`` include file: + :: + + GCCVERSION ?= "8.%" + + You can override this value by setting it in a + configuration file such as the ``local.conf``. + + GDB + The minimal command and arguments to run the GNU Debugger. + + GITDIR + The directory in which a local copy of a Git repository is stored + when it is cloned. + + GLIBC_GENERATE_LOCALES + Specifies the list of GLIBC locales to generate should you not wish + to generate all libc locales, which can be time consuming. + + .. note:: + + If you specifically remove the locale + en_US.UTF-8 + , you must set + IMAGE_LINGUAS + appropriately. + + You can set ``GLIBC_GENERATE_LOCALES`` in your ``local.conf`` file. + By default, all locales are generated. + :: + + GLIBC_GENERATE_LOCALES = "en_GB.UTF-8 en_US.UTF-8" + + GROUPADD_PARAM + When inheriting the :ref:`useradd ` class, + this variable specifies for a package what parameters should be + passed to the ``groupadd`` command if you wish to add a group to the + system when the package is installed. + + Here is an example from the ``dbus`` recipe: + :: + + GROUPADD_PARAM_${PN} = "-r netdev" + + For information on the standard Linux shell command + ``groupadd``, see http://linux.die.net/man/8/groupadd. + + GROUPMEMS_PARAM + When inheriting the :ref:`useradd ` class, + this variable specifies for a package what parameters should be + passed to the ``groupmems`` command if you wish to modify the members + of a group when the package is installed. + + For information on the standard Linux shell command ``groupmems``, + see http://linux.die.net/man/8/groupmems. + + GRUB_GFXSERIAL + Configures the GNU GRand Unified Bootloader (GRUB) to have graphics + and serial in the boot menu. Set this variable to "1" in your + ``local.conf`` or distribution configuration file to enable graphics + and serial in the menu. + + See the :ref:`grub-efi ` class for more + information on how this variable is used. + + GRUB_OPTS + Additional options to add to the GNU GRand Unified Bootloader (GRUB) + configuration. Use a semi-colon character (``;``) to separate + multiple options. + + The ``GRUB_OPTS`` variable is optional. See the + :ref:`grub-efi ` class for more information + on how this variable is used.
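+ + For example, a machine or distribution configuration file could pass two semi-colon separated options to enable a serial console in the GRUB configuration. This is only an illustrative sketch; the specific option strings are examples rather than values provided by the ``grub-efi`` class: + :: + + # Two GRUB configuration commands, separated by ";" as described above. + GRUB_OPTS = "serial --unit=0 --speed=115200;terminal_input serial"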
+ + GRUB_TIMEOUT + Specifies the timeout before executing the default ``LABEL`` in the + GNU GRand Unified Bootloader (GRUB). + + The ``GRUB_TIMEOUT`` variable is optional. See the + :ref:`grub-efi ` class for more information + on how this variable is used. + + GTKIMMODULES_PACKAGES + When inheriting the + :ref:`gtk-immodules-cache ` class, + this variable specifies the packages that contain the GTK+ input + method modules being installed when the modules are in packages other + than the main package. + + HOMEPAGE + Website where more information about the software the recipe is + building can be found. + + HOST_ARCH + The name of the target architecture, which is normally the same as + :term:`TARGET_ARCH`. The OpenEmbedded build system + supports many architectures. Here is an example list of architectures + supported. This list is by no means complete as the architecture is + configurable: + + - arm + - i586 + - x86_64 + - powerpc + - powerpc64 + - mips + - mipsel + + HOST_CC_ARCH + Specifies architecture-specific compiler flags that are passed to the + C compiler. + + Default initialization for ``HOST_CC_ARCH`` varies depending on what + is being built: + + - :term:`TARGET_CC_ARCH` when building for the + target + + - ``BUILD_CC_ARCH`` when building for the build host (i.e. + ``-native``) + + - ``BUILDSDK_CC_ARCH`` when building for an SDK (i.e. + ``nativesdk-``) + + HOST_OS + Specifies the name of the target operating system, which is normally + the same as the :term:`TARGET_OS`. The variable can + be set to "linux" for ``glibc``-based systems and to "linux-musl" for + ``musl``. For ARM/EABI targets, there are also "linux-gnueabi" and + "linux-musleabi" values possible. + + HOST_PREFIX + Specifies the prefix for the cross-compile toolchain. ``HOST_PREFIX`` + is normally the same as :term:`TARGET_PREFIX`. + + HOST_SYS + Specifies the system, including the architecture and the operating + system, for which the build is occurring in the context of the + current recipe. + + The OpenEmbedded build system automatically sets this variable based + on :term:`HOST_ARCH`, + :term:`HOST_VENDOR`, and + :term:`HOST_OS` variables. + + .. note:: + + You do not need to set the variable yourself. + + Consider these two examples: + + - Given a native recipe on a 32-bit x86 machine running Linux, the + value is "i686-linux". + + - Given a recipe being built for a little-endian MIPS target running + Linux, the value might be "mipsel-linux". + + HOSTTOOLS + A space-separated list (filter) of tools on the build host that + should be allowed to be called from within build tasks. Using this + filter helps reduce the possibility of host contamination. If a tool + specified in the value of ``HOSTTOOLS`` is not found on the build + host, the OpenEmbedded build system produces an error and the build + is not started. + + For additional information, see + :term:`HOSTTOOLS_NONFATAL`. + + HOSTTOOLS_NONFATAL + A space-separated list (filter) of tools on the build host that + should be allowed to be called from within build tasks. Using this + filter helps reduce the possibility of host contamination. Unlike + :term:`HOSTTOOLS`, the OpenEmbedded build system + does not produce an error if a tool specified in the value of + ``HOSTTOOLS_NONFATAL`` is not found on the build host. Thus, you can + use ``HOSTTOOLS_NONFATAL`` to filter optional host tools. + + HOST_VENDOR + Specifies the name of the vendor. ``HOST_VENDOR`` is normally the + same as :term:`TARGET_VENDOR`. 
+ + ICECC_DISABLED + Disables or enables the ``icecc`` (Icecream) function. For more + information on this function and best practices for using this + variable, see the ":ref:`icecc.bbclass `" + section. + + Setting this variable to "1" in your ``local.conf`` disables the + function: + :: + + ICECC_DISABLED ??= "1" + + To enable the function, set the variable as follows: + :: + + ICECC_DISABLED = "" + + ICECC_ENV_EXEC + Points to the ``icecc-create-env`` script that you provide. This + variable is used by the :ref:`icecc ` class. You + set this variable in your ``local.conf`` file. + + If you do not point to a script that you provide, the OpenEmbedded + build system uses the default script provided by the + ``icecc-create-env.bb`` recipe, which is a modified version and not + the one that comes with ``icecc``. + + ICECC_PARALLEL_MAKE + Extra options passed to the ``make`` command during the + :ref:`ref-tasks-compile` task that specify parallel + compilation. This variable usually takes the form of "-j x", where x + represents the maximum number of parallel threads ``make`` can run. + + .. note:: + + The options passed affect builds on all enabled machines on the + network, which are machines running the + iceccd + daemon. + + If your enabled machines support multiple cores, coming up with the + maximum number of parallel threads that gives you the best + performance could take some experimentation since machine speed, + network lag, available memory, and existing machine loads can all + affect build time. Consequently, unlike the + :term:`PARALLEL_MAKE` variable, there is no + rule-of-thumb for setting ``ICECC_PARALLEL_MAKE`` to achieve optimal + performance. + + If you do not set ``ICECC_PARALLEL_MAKE``, the build system does not + use it (i.e. the system does not detect and assign the number of + cores as is done with ``PARALLEL_MAKE``). + + ICECC_PATH + The location of the ``icecc`` binary. You can set this variable in + your ``local.conf`` file. If your ``local.conf`` file does not define + this variable, the :ref:`icecc ` class attempts + to define it by locating ``icecc`` using ``which``. + + ICECC_USER_CLASS_BL + Identifies user classes that you do not want the Icecream distributed + compile support to consider. This variable is used by the + :ref:`icecc ` class. You set this variable in + your ``local.conf`` file. + + When you list classes using this variable, you are "blacklisting" + them from distributed compilation across remote hosts. Any classes + you list will be distributed and compiled locally. + + ICECC_USER_PACKAGE_BL + Identifies user recipes that you do not want the Icecream distributed + compile support to consider. This variable is used by the + :ref:`icecc ` class. You set this variable in + your ``local.conf`` file. + + When you list packages using this variable, you are "blacklisting" + them from distributed compilation across remote hosts. Any packages + you list will be distributed and compiled locally. + + ICECC_USER_PACKAGE_WL + Identifies user recipes that use an empty + :term:`PARALLEL_MAKE` variable that you want to + force remote distributed compilation on using the Icecream + distributed compile support. This variable is used by the + :ref:`icecc ` class. You set this variable in + your ``local.conf`` file. + + IMAGE_BASENAME + The base name of image output files. This variable defaults to the + recipe name (``${``\ :term:`PN`\ ``}``). 
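+ + For example, a custom image recipe could override the default so that output files carry a product name instead of the recipe name. This is only a sketch; ``my-product-image`` is a hypothetical value: + :: + + # In a custom image recipe, name the output files after the product. + IMAGE_BASENAME = "my-product-image" + + With such a setting, the generated files use "my-product-image" rather than ``${PN}`` as the base of :term:`IMAGE_NAME`.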
+ + IMAGE_BOOT_FILES + A space-separated list of files installed into the boot partition + when preparing an image using the Wic tool with the + ``bootimg-partition`` or ``bootimg-efi`` source plugin. By default, + the files are + installed under the same name as the source files. To change the + installed name, separate it from the original name with a semi-colon + (;). Source files need to be located in + :term:`DEPLOY_DIR_IMAGE`. Here are two + examples: + :: + + IMAGE_BOOT_FILES = "u-boot.img uImage;kernel" + IMAGE_BOOT_FILES = "u-boot.${UBOOT_SUFFIX} ${KERNEL_IMAGETYPE}" + + Alternatively, source files can be picked up using a glob pattern. In + this case, the destination file must have the same name as the base + name of the source file path. To install files into a directory + within the target location, pass its name after a semi-colon (;). + Here are two examples: + :: + + IMAGE_BOOT_FILES = "bcm2835-bootfiles/*" + IMAGE_BOOT_FILES = "bcm2835-bootfiles/*;boot/" + + The first example + installs all files from ``${DEPLOY_DIR_IMAGE}/bcm2835-bootfiles`` + into the root of the target partition. The second example installs + the same files into a ``boot`` directory within the target partition. + + You can find information on how to use the Wic tool in the + ":ref:`dev-manual/dev-manual-common-tasks:creating partitioned images using wic`" + section of the Yocto Project Development Tasks Manual. Reference + material for Wic is located in the + ":doc:`../ref-manual/ref-kickstart`" chapter. + + IMAGE_CLASSES + A list of classes that all images should inherit. You typically use + this variable to specify the list of classes that register the + different types of images the OpenEmbedded build system creates. + + The default value for ``IMAGE_CLASSES`` is ``image_types``. You can + set this variable in your ``local.conf`` or in a distribution + configuration file. + + For more information, see ``meta/classes/image_types.bbclass`` in the + :term:`Source Directory`. + + IMAGE_CMD + Specifies the command to create the image file for a specific image + type, which corresponds to the value set in + :term:`IMAGE_FSTYPES` (e.g. ``ext3``, + ``btrfs``, and so forth). When setting this variable, you should use + an override for the associated type. Here is an example: + :: + + IMAGE_CMD_jffs2 = "mkfs.jffs2 --root=${IMAGE_ROOTFS} \ + --faketime --output=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.jffs2 \ + ${EXTRA_IMAGECMD}" + + You typically do not need to set this variable unless you are adding + support for a new image type. For more examples on how to set this + variable, see the :ref:`image_types ` + class file, which is ``meta/classes/image_types.bbclass``. + + IMAGE_DEVICE_TABLES + Specifies one or more files that contain custom device tables that + are passed to the ``makedevs`` command as part of creating an image. + These files list basic device nodes that should be created under + ``/dev`` within the image. If ``IMAGE_DEVICE_TABLES`` is not set, + ``files/device_table-minimal.txt`` is used, which is located by + :term:`BBPATH`. For details on how you should write + device table files, see ``meta/files/device_table-minimal.txt`` as an + example. + + IMAGE_FEATURES + The primary list of features to include in an image. Typically, you + configure this variable in an image recipe. Although you can use this + variable from your ``local.conf`` file, which is found in the + :term:`Build Directory`, best practices dictate that you do + not. + + ..
note:: + + To enable extra features from outside the image recipe, use the + EXTRA_IMAGE_FEATURES + variable. + + For a list of image features that ships with the Yocto Project, see + the "`Image Features <#ref-features-image>`__" section. + + For an example that shows how to customize your image by using this + variable, see the ":ref:`usingpoky-extend-customimage-imagefeatures`" + section in the Yocto Project Development Tasks Manual. + + IMAGE_FSTYPES + Specifies the formats the OpenEmbedded build system uses during the + build when creating the root filesystem. For example, setting + ``IMAGE_FSTYPES`` as follows causes the build system to create root + filesystems using two formats: ``.ext3`` and ``.tar.bz2``: + :: + + IMAGE_FSTYPES = "ext3 tar.bz2" + + For the complete list of supported image formats from which you can + choose, see :term:`IMAGE_TYPES`. + + .. note:: + + - If an image recipe uses the "inherit image" line and you are + setting ``IMAGE_FSTYPES`` inside the recipe, you must set + ``IMAGE_FSTYPES`` prior to using the "inherit image" line. + + - Due to the way the OpenEmbedded build system processes this + variable, you cannot update its contents by using ``_append`` + or ``_prepend``. You must use the ``+=`` operator to add one or + more options to the ``IMAGE_FSTYPES`` variable. + + IMAGE_INSTALL + Used by recipes to specify the packages to install into an image + through the :ref:`image ` class. Use the + ``IMAGE_INSTALL`` variable with care to avoid ordering issues. + + Image recipes set ``IMAGE_INSTALL`` to specify the packages to + install into an image through ``image.bbclass``. Additionally, + "helper" classes such as the + :ref:`core-image ` class exist that can + take lists used with ``IMAGE_FEATURES`` and turn them into + auto-generated entries in ``IMAGE_INSTALL`` in addition to its + default contents. + + When you use this variable, it is best to use it as follows: + :: + + IMAGE_INSTALL_append = " package-name" + + Be sure to include the space + between the quotation character and the start of the package name or + names. + + .. note:: + + - When working with a + ```core-image-minimal-initramfs`` <#images-core-image-minimal-initramfs>`__ + image, do not use the ``IMAGE_INSTALL`` variable to specify + packages for installation. Instead, use the + :term:`PACKAGE_INSTALL` variable, which + allows the initial RAM filesystem (initramfs) recipe to use a + fixed set of packages and not be affected by ``IMAGE_INSTALL``. + For information on creating an initramfs, see the + ":ref:`building-an-initramfs-image`" + section in the Yocto Project Development Tasks Manual. + + - Using ``IMAGE_INSTALL`` with the + :ref:`+= ` + BitBake operator within the ``/conf/local.conf`` file or from + within an image recipe is not recommended. Use of this operator + in these ways can cause ordering issues. Since + ``core-image.bbclass`` sets ``IMAGE_INSTALL`` to a default + value using the + :ref:`?= ` + operator, using a ``+=`` operation against ``IMAGE_INSTALL`` + results in unexpected behavior when used within + ``conf/local.conf``. Furthermore, the same operation from + within an image recipe may or may not succeed depending on the + specific situation. In both these cases, the behavior is + contrary to how most users expect the ``+=`` operator to work. + + IMAGE_LINGUAS + Specifies the list of locales to install into the image during the + root filesystem construction process. 
The OpenEmbedded build system + automatically splits locale files, which are used for localization, + into separate packages. Setting the ``IMAGE_LINGUAS`` variable + ensures that any locale packages that correspond to packages already + selected for installation into the image are also installed. Here is + an example: + :: + + IMAGE_LINGUAS = "pt-br de-de" + + In this example, the build system ensures any Brazilian Portuguese + and German locale files that correspond to packages in the image are + installed (i.e. ``*-locale-pt-br`` and ``*-locale-de-de`` as well as + ``*-locale-pt`` and ``*-locale-de``, since some software packages + only provide locale files by language and not by country-specific + language). + + See the :term:`GLIBC_GENERATE_LOCALES` + variable for information on generating GLIBC locales. + + IMAGE_MANIFEST + The manifest file for the image. This file lists all the installed + packages that make up the image. The file contains package + information on a line-per-package basis as follows: + :: + + packagename packagearch version + + The :ref:`image ` class defines the manifest + file as follows: + :: + + IMAGE_MANIFEST ="${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.rootfs.manifest" + + The location is + derived using the :term:`DEPLOY_DIR_IMAGE` + and :term:`IMAGE_NAME` variables. You can find + information on how the image is created in the ":ref:`image-generation-dev-environment`" + section in the Yocto Project Overview and Concepts Manual. + + IMAGE_NAME + The name of the output image files minus the extension. This variable + is derived using the :term:`IMAGE_BASENAME`, + :term:`MACHINE`, and :term:`DATETIME` + variables: + :: + + IMAGE_NAME = "${IMAGE_BASENAME}-${MACHINE}-${DATETIME}" + + IMAGE_OVERHEAD_FACTOR + Defines a multiplier that the build system applies to the initial + image size for cases when the multiplier times the returned disk + usage value for the image is greater than the sum of + ``IMAGE_ROOTFS_SIZE`` and ``IMAGE_ROOTFS_EXTRA_SPACE``. The result of + the multiplier applied to the initial image size creates free disk + space in the image as overhead. By default, the build process uses a + multiplier of 1.3 for this variable. This default value results in + 30% free disk space added to the image when this method is used to + determine the final generated image size. You should be aware that + post install scripts and the package management system uses disk + space inside this overhead area. Consequently, the multiplier does + not produce an image with all the theoretical free disk space. See + ``IMAGE_ROOTFS_SIZE`` for information on how the build system + determines the overall image size. + + The default 30% free disk space typically gives the image enough room + to boot and allows for basic post installs while still leaving a + small amount of free disk space. If 30% free space is inadequate, you + can increase the default value. For example, the following setting + gives you 50% free space added to the image: + :: + + IMAGE_OVERHEAD_FACTOR = "1.5" + + Alternatively, you can ensure a specific amount of free disk space is + added to the image by using the ``IMAGE_ROOTFS_EXTRA_SPACE`` + variable. + + IMAGE_PKGTYPE + Defines the package type (i.e. DEB, RPM, IPK, or TAR) used by the + OpenEmbedded build system. The variable is defined appropriately by + the :ref:`package_deb `, + :ref:`package_rpm `, + :ref:`package_ipk `, or + :ref:`package_tar ` class. + + .. note:: + + The + package_tar + class is broken and is not supported. 
It is recommended that you + do not use it. + + The :ref:`populate_sdk_* ` and + :ref:`image ` classes use the ``IMAGE_PKGTYPE`` + for packaging up images and SDKs. + + You should not set the ``IMAGE_PKGTYPE`` manually. Rather, the + variable is set indirectly through the appropriate + :ref:`package_* ` class using the + :term:`PACKAGE_CLASSES` variable. The + OpenEmbedded build system uses the first package type (e.g. DEB, RPM, + or IPK) that appears with the variable + + .. note:: + + Files using the + .tar + format are never used as a substitute packaging format for DEB, + RPM, and IPK formatted files for your image or SDK. + + IMAGE_POSTPROCESS_COMMAND + Specifies a list of functions to call once the OpenEmbedded build + system creates the final image output files. You can specify + functions separated by semicolons: + :: + + IMAGE_POSTPROCESS_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within the + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + IMAGE_PREPROCESS_COMMAND + Specifies a list of functions to call before the OpenEmbedded build + system creates the final image output files. You can specify + functions separated by semicolons: + :: + + IMAGE_PREPROCESS_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within the + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + IMAGE_ROOTFS + The location of the root filesystem while it is under construction + (i.e. during the :ref:`ref-tasks-rootfs` task). This + variable is not configurable. Do not change it. + + IMAGE_ROOTFS_ALIGNMENT + Specifies the alignment for the output image file in Kbytes. If the + size of the image is not a multiple of this value, then the size is + rounded up to the nearest multiple of the value. The default value is + "1". See :term:`IMAGE_ROOTFS_SIZE` for + additional information. + + IMAGE_ROOTFS_EXTRA_SPACE + Defines additional free disk space created in the image in Kbytes. By + default, this variable is set to "0". This free disk space is added + to the image after the build system determines the image size as + described in ``IMAGE_ROOTFS_SIZE``. + + This variable is particularly useful when you want to ensure that a + specific amount of free disk space is available on a device after an + image is installed and running. For example, to be sure 5 Gbytes of + free disk space is available, set the variable as follows: + :: + + IMAGE_ROOTFS_EXTRA_SPACE = "5242880" + + For example, the Yocto Project Build Appliance specifically requests + 40 Gbytes of extra space with the line: + :: + + IMAGE_ROOTFS_EXTRA_SPACE = "41943040" + + IMAGE_ROOTFS_SIZE + Defines the size in Kbytes for the generated image. The OpenEmbedded + build system determines the final size for the generated image using + an algorithm that takes into account the initial disk space used for + the generated image, a requested size for the image, and requested + additional free disk space to be added to the image. 
Programatically, + the build system determines the final size of the generated image as + follows: + :: + + if (image-du * overhead) < rootfs-size: + internal-rootfs-size = rootfs-size + xspace + else: + internal-rootfs-size = (image-du * overhead) + xspace + where: + image-du = Returned value of the du command on the image. + overhead = IMAGE_OVERHEAD_FACTOR + rootfs-size = IMAGE_ROOTFS_SIZE + internal-rootfs-size = Initial root filesystem size before any modifications. + xspace = IMAGE_ROOTFS_EXTRA_SPACE + + See the :term:`IMAGE_OVERHEAD_FACTOR` + and :term:`IMAGE_ROOTFS_EXTRA_SPACE` + variables for related information. + + IMAGE_TYPEDEP + Specifies a dependency from one image type on another. Here is an + example from the :ref:`image-live ` class: + :: + + IMAGE_TYPEDEP_live = "ext3" + + In the previous example, the variable ensures that when "live" is + listed with the :term:`IMAGE_FSTYPES` variable, + the OpenEmbedded build system produces an ``ext3`` image first since + one of the components of the live image is an ``ext3`` formatted + partition containing the root filesystem. + + IMAGE_TYPES + Specifies the complete list of supported image types by default: + + - btrfs + - container + - cpio + - cpio.gz + - cpio.lz4 + - cpio.lzma + - cpio.xz + - cramfs + - ext2 + - ext2.bz2 + - ext2.gz + - ext2.lzma + - ext3 + - ext3.gz + - ext4 + - ext4.gz + - f2fs + - hddimg + - iso + - jffs2 + - jffs2.sum + - multiubi + - squashfs + - squashfs-lz4 + - squashfs-lzo + - squashfs-xz + - tar + - tar.bz2 + - tar.gz + - tar.lz4 + - tar.xz + - tar.zst + - ubi + - ubifs + - wic + - wic.bz2 + - wic.gz + - wic.lzma + + For more information about these types of images, see + ``meta/classes/image_types*.bbclass`` in the :term:`Source Directory`. + + INC_PR + Helps define the recipe revision for recipes that share a common + ``include`` file. You can think of this variable as part of the + recipe revision as set from within an include file. + + Suppose, for example, you have a set of recipes that are used across + several projects. And, within each of those recipes the revision (its + :term:`PR` value) is set accordingly. In this case, when + the revision of those recipes changes, the burden is on you to find + all those recipes and be sure that they get changed to reflect the + updated version of the recipe. In this scenario, it can get + complicated when recipes that are used in many places and provide + common functionality are upgraded to a new revision. + + A more efficient way of dealing with this situation is to set the + ``INC_PR`` variable inside the ``include`` files that the recipes + share and then expand the ``INC_PR`` variable within the recipes to + help define the recipe revision. + + The following provides an example that shows how to use the + ``INC_PR`` variable given a common ``include`` file that defines the + variable. Once the variable is defined in the ``include`` file, you + can use the variable to set the ``PR`` values in each recipe. You + will notice that when you set a recipe's ``PR`` you can provide more + granular revisioning by appending values to the ``INC_PR`` variable: + :: + + recipes-graphics/xorg-font/xorg-font-common.inc:INC_PR = "r2" + recipes-graphics/xorg-font/encodings_1.0.4.bb:PR = "${INC_PR}.1" + recipes-graphics/xorg-font/font-util_1.3.0.bb:PR = "${INC_PR}.0" + recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3" + + The + first line of the example establishes the baseline revision to be + used for all recipes that use the ``include`` file. 
The remaining + lines in the example are from individual recipes and show how the + ``PR`` value is set. + + INCOMPATIBLE_LICENSE + Specifies a space-separated list of license names (as they would + appear in :term:`LICENSE`) that should be excluded + from the build. Recipes that provide no alternatives to listed + incompatible licenses are not built. Packages that are individually + licensed with the specified incompatible licenses will be deleted. + + .. note:: + + This functionality is only regularly tested using the following + setting: + :: + + INCOMPATIBLE_LICENSE = "GPL-3.0 LGPL-3.0 AGPL-3.0" + + + Although you can use other settings, you might be required to + remove dependencies on or provide alternatives to components that + are required to produce a functional system image. + + .. note:: + + It is possible to define a list of licenses that are allowed to be + used instead of the licenses that are excluded. To do this, define + a variable + COMPATIBLE_LICENSES + with the names of the licences that are allowed. Then define + INCOMPATIBLE_LICENSE + as: + :: + + INCOMPATIBLE_LICENSE = "${@' '.join(sorted(set(d.getVar('AVAILABLE_LICENSES').split()) - set(d.getVar('COMPATIBLE_LICENSES').split())))}" + + + This will result in + INCOMPATIBLE_LICENSE + containing the names of all licences from + AVAILABLE_LICENSES + except the ones specified in + COMPATIBLE_LICENSES + , thus only allowing the latter licences to be used. + + INHERIT + Causes the named class or classes to be inherited globally. Anonymous + functions in the class or classes are not executed for the base + configuration and in each individual recipe. The OpenEmbedded build + system ignores changes to ``INHERIT`` in individual recipes. + + For more information on ``INHERIT``, see the + :ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:\`\`inherit\`\` configuration directive`" + section in the Bitbake User Manual. + + INHERIT_DISTRO + Lists classes that will be inherited at the distribution level. It is + unlikely that you want to edit this variable. + + The default value of the variable is set as follows in the + ``meta/conf/distro/defaultsetup.conf`` file: + :: + + INHERIT_DISTRO ?= "debian devshell sstate license" + + INHIBIT_DEFAULT_DEPS + Prevents the default dependencies, namely the C compiler and standard + C library (libc), from being added to :term:`DEPENDS`. + This variable is usually used within recipes that do not require any + compilation using the C compiler. + + Set the variable to "1" to prevent the default dependencies from + being added. + + INHIBIT_PACKAGE_DEBUG_SPLIT + Prevents the OpenEmbedded build system from splitting out debug + information during packaging. By default, the build system splits out + debugging information during the + :ref:`ref-tasks-package` task. For more information on + how debug information is split out, see the + :term:`PACKAGE_DEBUG_SPLIT_STYLE` + variable. + + To prevent the build system from splitting out debug information + during packaging, set the ``INHIBIT_PACKAGE_DEBUG_SPLIT`` variable as + follows: + :: + + INHIBIT_PACKAGE_DEBUG_SPLIT = "1" + + INHIBIT_PACKAGE_STRIP + If set to "1", causes the build to not strip binaries in resulting + packages and prevents the ``-dbg`` package from containing the source + files. + + By default, the OpenEmbedded build system strips binaries and puts + the debugging symbols into ``${``\ :term:`PN`\ ``}-dbg``. + Consequently, you should not set ``INHIBIT_PACKAGE_STRIP`` when you + plan to debug in general. 
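+
+    As an illustration (the recipe name below is hypothetical and not
+    taken from this manual), a recipe that installs prebuilt binaries
+    which must not be altered by the packaging step might disable both
+    stripping and debug splitting as follows:
+    ::
+
+        # my-prebuilt-tools_1.0.bb -- hypothetical recipe shipping
+        # binaries that must be packaged exactly as provided
+        INHIBIT_PACKAGE_STRIP = "1"
+        INHIBIT_PACKAGE_DEBUG_SPLIT = "1"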
+ + INHIBIT_SYSROOT_STRIP + If set to "1", causes the build to not strip binaries in the + resulting sysroot. + + By default, the OpenEmbedded build system strips binaries in the + resulting sysroot. When you specifically set the + ``INHIBIT_SYSROOT_STRIP`` variable to "1" in your recipe, you inhibit + this stripping. + + If you want to use this variable, include the + :ref:`staging ` class. This class uses a + ``sys_strip()`` function to test for the variable and acts + accordingly. + + .. note:: + + Use of the + INHIBIT_SYSROOT_STRIP + variable occurs in rare and special circumstances. For example, + suppose you are building bare-metal firmware by using an external + GCC toolchain. Furthermore, even if the toolchain's binaries are + strippable, other files exist that are needed for the build that + are not strippable. + + INITRAMFS_FSTYPES + Defines the format for the output image of an initial RAM filesystem + (initramfs), which is used during boot. Supported formats are the + same as those supported by the + :term:`IMAGE_FSTYPES` variable. + + The default value of this variable, which is set in the + ``meta/conf/bitbake.conf`` configuration file in the + :term:`Source Directory`, is "cpio.gz". The Linux kernel's + initramfs mechanism, as opposed to the initial RAM filesystem + `initrd `__ mechanism, expects + an optionally compressed cpio archive. + + INITRAMFS_IMAGE + Specifies the :term:`PROVIDES` name of an image + recipe that is used to build an initial RAM filesystem (initramfs) + image. In other words, the ``INITRAMFS_IMAGE`` variable causes an + additional recipe to be built as a dependency to whatever root + filesystem recipe you might be using (e.g. ``core-image-sato``). The + initramfs image recipe you provide should set + :term:`IMAGE_FSTYPES` to + :term:`INITRAMFS_FSTYPES`. + + An initramfs image provides a temporary root filesystem used for + early system initialization (e.g. loading of modules needed to locate + and mount the "real" root filesystem). + + .. note:: + + See the + meta/recipes-core/images/core-image-minimal-initramfs.bb + recipe in the + Source Directory + for an example initramfs recipe. To select this sample recipe as + the one built to provide the initramfs image, set + INITRAMFS_IMAGE + to "core-image-minimal-initramfs". + + You can also find more information by referencing the + ``meta-poky/conf/local.conf.sample.extended`` configuration file in + the Source Directory, the :ref:`image ` class, + and the :ref:`kernel ` class to see how to use + the ``INITRAMFS_IMAGE`` variable. + + If ``INITRAMFS_IMAGE`` is empty, which is the default, then no + initramfs image is built. + + For more information, you can also see the + :term:`INITRAMFS_IMAGE_BUNDLE` + variable, which allows the generated image to be bundled inside the + kernel image. Additionally, for information on creating an initramfs + image, see the ":ref:`building-an-initramfs-image`" section + in the Yocto Project Development Tasks Manual. + + INITRAMFS_IMAGE_BUNDLE + Controls whether or not the image recipe specified by + :term:`INITRAMFS_IMAGE` is run through an + extra pass + (:ref:`ref-tasks-bundle_initramfs`) during + kernel compilation in order to build a single binary that contains + both the kernel image and the initial RAM filesystem (initramfs) + image. This makes use of the + :term:`CONFIG_INITRAMFS_SOURCE` kernel + feature. + + .. 
note:: + + Using an extra compilation pass to bundle the initramfs avoids a + circular dependency between the kernel recipe and the initramfs + recipe should the initramfs include kernel modules. Should that be + the case, the initramfs recipe depends on the kernel for the + kernel modules, and the kernel depends on the initramfs recipe + since the initramfs is bundled inside the kernel image. + + The combined binary is deposited into the ``tmp/deploy`` directory, + which is part of the :term:`Build Directory`. + + Setting the variable to "1" in a configuration file causes the + OpenEmbedded build system to generate a kernel image with the + initramfs specified in ``INITRAMFS_IMAGE`` bundled within: + :: + + INITRAMFS_IMAGE_BUNDLE = "1" + + By default, the + :ref:`kernel ` class sets this variable to a + null string as follows: + :: + + INITRAMFS_IMAGE_BUNDLE ?= "" + + .. note:: + + You must set the + INITRAMFS_IMAGE_BUNDLE + variable in a configuration file. You cannot set the variable in a + recipe file. + + See the + :yocto_git:`local.conf.sample.extended ` + file for additional information. Also, for information on creating an + initramfs, see the ":ref:`building-an-initramfs-image`" section + in the Yocto Project Development Tasks Manual. + + INITRAMFS_LINK_NAME + The link name of the initial RAM filesystem image. This variable is + set in the ``meta/classes/kernel-artifact-names.bbclass`` file as + follows: + :: + + INITRAMFS_LINK_NAME ?= "initramfs-${KERNEL_ARTIFACT_LINK_NAME}" + + The value of the + ``KERNEL_ARTIFACT_LINK_NAME`` variable, which is set in the same + file, has the following value: + :: + + KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" + + See the :term:`MACHINE` variable for additional + information. + + INITRAMFS_NAME + The base name of the initial RAM filesystem image. This variable is + set in the ``meta/classes/kernel-artifact-names.bbclass`` file as + follows: + :: + + INITRAMFS_NAME ?= "initramfs-${KERNEL_ARTIFACT_NAME}" + + The value of the :term:`KERNEL_ARTIFACT_NAME` + variable, which is set in the same file, has the following value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + INITRD + Indicates list of filesystem images to concatenate and use as an + initial RAM disk (``initrd``). + + The ``INITRD`` variable is an optional variable used with the + :ref:`image-live ` class. + + INITRD_IMAGE + When building a "live" bootable image (i.e. when + :term:`IMAGE_FSTYPES` contains "live"), + ``INITRD_IMAGE`` specifies the image recipe that should be built to + provide the initial RAM disk image. The default value is + "core-image-minimal-initramfs". + + See the :ref:`image-live ` class for more + information. + + INITSCRIPT_NAME + The filename of the initialization script as installed to + ``${sysconfdir}/init.d``. + + This variable is used in recipes when using ``update-rc.d.bbclass``. + The variable is mandatory. + + INITSCRIPT_PACKAGES + A list of the packages that contain initscripts. If multiple packages + are specified, you need to append the package name to the other + ``INITSCRIPT_*`` as an override. + + This variable is used in recipes when using ``update-rc.d.bbclass``. + The variable is optional and defaults to the :term:`PN` + variable. + + INITSCRIPT_PARAMS + Specifies the options to pass to ``update-rc.d``. Here is an example: + :: + + INITSCRIPT_PARAMS = "start 99 5 2 . stop 20 0 1 6 ." 
+ + In this example, the script has a runlevel of 99, starts the script + in initlevels 2 and 5, and stops the script in levels 0, 1 and 6. + + The variable's default value is "defaults", which is set in the + :ref:`update-rc.d ` class. + + The value in ``INITSCRIPT_PARAMS`` is passed through to the + ``update-rc.d`` command. For more information on valid parameters, + please see the ``update-rc.d`` manual page at + http://www.tin.org/bin/man.cgi?section=8&topic=update-rc.d. + + INSANE_SKIP + Specifies the QA checks to skip for a specific package within a + recipe. For example, to skip the check for symbolic link ``.so`` + files in the main package of a recipe, add the following to the + recipe. The package name override must be used, which in this example + is ``${PN}``: + :: + + INSANE_SKIP_${PN} += "dev-so" + + See the ":ref:`insane.bbclass `" section for a + list of the valid QA checks you can specify using this variable. + + INSTALL_TIMEZONE_FILE + By default, the ``tzdata`` recipe packages an ``/etc/timezone`` file. + Set the ``INSTALL_TIMEZONE_FILE`` variable to "0" at the + configuration level to disable this behavior. + + IPK_FEED_URIS + When the IPK backend is in use and package management is enabled on + the target, you can use this variable to set up ``opkg`` in the + target image to point to package feeds on a nominated server. Once + the feed is established, you can perform installations or upgrades + using the package manager at runtime. + + KARCH + Defines the kernel architecture used when assembling the + configuration. Architectures supported for this release are: + + - powerpc + - i386 + - x86_64 + - arm + - qemu + - mips + + You define the ``KARCH`` variable in the :ref:`kernel-dev/kernel-dev-advanced:bsp descriptions`. + + KBRANCH + A regular expression used by the build process to explicitly identify + the kernel branch that is validated, patched, and configured during a + build. You must set this variable to ensure the exact kernel branch + you want is being used by the build process. + + Values for this variable are set in the kernel's recipe file and the + kernel's append file. For example, if you are using the + ``linux-yocto_4.12`` kernel, the kernel recipe file is the + ``meta/recipes-kernel/linux/linux-yocto_4.12.bb`` file. ``KBRANCH`` + is set as follows in that kernel recipe file: + :: + + KBRANCH ?= "standard/base" + + This variable is also used from the kernel's append file to identify + the kernel branch specific to a particular machine or target + hardware. Continuing with the previous kernel example, the kernel's + append file (i.e. ``linux-yocto_4.12.bbappend``) is located in the + BSP layer for a given machine. For example, the append file for the + Beaglebone, EdgeRouter, and generic versions of both 32 and 64-bit IA + machines (``meta-yocto-bsp``) is named + ``meta-yocto-bsp/recipes-kernel/linux/linux-yocto_4.12.bbappend``. + Here are the related statements from that append file: + :: + + KBRANCH_genericx86 = "standard/base" + KBRANCH_genericx86-64 = "standard/base" + KBRANCH_edgerouter = "standard/edgerouter" + KBRANCH_beaglebone = "standard/beaglebone" + + The ``KBRANCH`` statements + identify the kernel branch to use when building for each supported + BSP. + + KBUILD_DEFCONFIG + When used with the :ref:`kernel-yocto ` + class, specifies an "in-tree" kernel configuration file for use + during a kernel build. 
+ + Typically, when using a ``defconfig`` to configure a kernel during a + build, you place the file in your layer in the same manner as you + would place patch files and configuration fragment files (i.e. + "out-of-tree"). However, if you want to use a ``defconfig`` file that + is part of the kernel tree (i.e. "in-tree"), you can use the + ``KBUILD_DEFCONFIG`` variable and append the + :term:`KMACHINE` variable to point to the + ``defconfig`` file. + + To use the variable, set it in the append file for your kernel recipe + using the following form: + :: + + KBUILD_DEFCONFIG_KMACHINE ?= defconfig_file + + Here is an example from a "raspberrypi2" ``KMACHINE`` build that uses + a ``defconfig`` file named "bcm2709_defconfig": + :: + + KBUILD_DEFCONFIG_raspberrypi2 = "bcm2709_defconfig" + + As an alternative, you can use the following within your append file: + :: + + KBUILD_DEFCONFIG_pn-linux-yocto ?= defconfig_file + + For more + information on how to use the ``KBUILD_DEFCONFIG`` variable, see the + ":ref:`kernel-dev/kernel-dev-common:using an "in-tree" \`\`defconfig\`\` file`" + section in the Yocto Project Linux Kernel Development Manual. + + KERNEL_ALT_IMAGETYPE + Specifies an alternate kernel image type for creation in addition to + the kernel image type specified using the + :term:`KERNEL_IMAGETYPE` variable. + + KERNEL_ARTIFACT_NAME + Specifies the name of all of the build artifacts. You can change the + name of the artifacts by changing the ``KERNEL_ARTIFACT_NAME`` + variable. + + The value of ``KERNEL_ARTIFACT_NAME``, which is set in the + ``meta/classes/kernel-artifact-names.bbclass`` file, has the + following default value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + See the :term:`PKGE`, :term:`PKGV`, :term:`PKGR`, and :term:`MACHINE` + variables for additional information. + + .. note:: + + The IMAGE_VERSION_SUFFIX variable is set to DATETIME. + + KERNEL_CLASSES + A list of classes defining kernel image types that the + :ref:`kernel ` class should inherit. You + typically append this variable to enable extended image types. An + example is the "kernel-fitimage", which enables fitImage support and + resides in ``meta/classes/kernel-fitimage.bbclass``. You can register + custom kernel image types with the ``kernel`` class using this + variable. + + KERNEL_DEVICETREE + Specifies the name of the generated Linux kernel device tree (i.e. + the ``.dtb``) file. + + .. note:: + + Legacy support exists for specifying the full path to the device + tree. However, providing just the .dtb file is preferred. + + In order to use this variable, the + :ref:`kernel-devicetree ` class must + be inherited. + + KERNEL_DTB_LINK_NAME + The link name of the kernel device tree binary (DTB). This variable + is set in the ``meta/classes/kernel-artifact-names.bbclass`` file as + follows: + :: + + KERNEL_DTB_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" + + The + value of the ``KERNEL_ARTIFACT_LINK_NAME`` variable, which is set in + the same file, has the following value: + :: + + KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" + + See the :term:`MACHINE` variable for additional + information. + + KERNEL_DTB_NAME + The base name of the kernel device tree binary (DTB). 
This variable + is set in the ``meta/classes/kernel-artifact-names.bbclass`` file as + follows: + :: + + KERNEL_DTB_NAME ?= "${KERNEL_ARTIFACT_NAME}" + + The value of the :term:`KERNEL_ARTIFACT_NAME` + variable, which is set in the same file, has the following value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + KERNEL_EXTRA_ARGS + Specifies additional ``make`` command-line arguments the OpenEmbedded + build system passes on when compiling the kernel. + + KERNEL_FEATURES + Includes additional kernel metadata. In the OpenEmbedded build + system, the default Board Support Packages (BSPs) + :term:`Metadata` is provided through the + :term:`KMACHINE` and :term:`KBRANCH` + variables. You can use the ``KERNEL_FEATURES`` variable from within + the kernel recipe or kernel append file to further add metadata for + all BSPs or specific BSPs. + + The metadata you add through this variable includes config fragments + and features descriptions, which usually includes patches as well as + config fragments. You typically override the ``KERNEL_FEATURES`` + variable for a specific machine. In this way, you can provide + validated, but optional, sets of kernel configurations and features. + + For example, the following example from the ``linux-yocto-rt_4.12`` + kernel recipe adds "netfilter" and "taskstats" features to all BSPs + as well as "virtio" configurations to all QEMU machines. The last two + statements add specific configurations to targeted machine types: + :: + + KERNEL_EXTRA_FEATURES ?= "features/netfilter/netfilter.scc features/taskstats/taskstats.scc" + KERNEL_FEATURES_append = "${KERNEL_EXTRA_FEATURES}" + KERNEL_FEATURES_append_qemuall = "cfg/virtio.scc" + KERNEL_FEATURES_append_qemux86 = " cfg/sound.scc cfg/paravirt_kvm.scc" + KERNEL_FEATURES_append_qemux86-64 = "cfg/sound.scc" + + KERNEL_FIT_LINK_NAME + The link name of the kernel flattened image tree (FIT) image. This + variable is set in the ``meta/classes/kernel-artifact-names.bbclass`` + file as follows: + :: + + KERNEL_FIT_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" + + The value of the + ``KERNEL_ARTIFACT_LINK_NAME`` variable, which is set in the same + file, has the following value: + :: + + KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" + + See the :term:`MACHINE` variable for additional + information. + + KERNEL_FIT_NAME + The base name of the kernel flattened image tree (FIT) image. This + variable is set in the ``meta/classes/kernel-artifact-names.bbclass`` + file as follows: + :: + + KERNEL_FIT_NAME ?= "${KERNEL_ARTIFACT_NAME}" + + The value of the :term:`KERNEL_ARTIFACT_NAME` + variable, which is set in the same file, has the following value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + KERNEL_IMAGE_LINK_NAME + The link name for the kernel image. This variable is set in the + ``meta/classes/kernel-artifact-names.bbclass`` file as follows: + :: + + KERNEL_IMAGE_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" + + The value of + the ``KERNEL_ARTIFACT_LINK_NAME`` variable, which is set in the same + file, has the following value: + :: + + KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" + + See the :term:`MACHINE` variable for additional + information. + + KERNEL_IMAGE_MAXSIZE + Specifies the maximum size of the kernel image file in kilobytes. If + ``KERNEL_IMAGE_MAXSIZE`` is set, the size of the kernel image file is + checked against the set value during the + :ref:`ref-tasks-sizecheck` task. 
The task fails if + the kernel image file is larger than the setting. + + ``KERNEL_IMAGE_MAXSIZE`` is useful for target devices that have a + limited amount of space in which the kernel image must be stored. + + By default, this variable is not set, which means the size of the + kernel image is not checked. + + KERNEL_IMAGE_NAME + The base name of the kernel image. This variable is set in the + ``meta/classes/kernel-artifact-names.bbclass`` file as follows: + :: + + KERNEL_IMAGE_NAME ?= "${KERNEL_ARTIFACT_NAME}" + + The value of the + :term:`KERNEL_ARTIFACT_NAME` variable, + which is set in the same file, has the following value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + KERNEL_IMAGETYPE + The type of kernel to build for a device, usually set by the machine + configuration files and defaults to "zImage". This variable is used + when building the kernel and is passed to ``make`` as the target to + build. + + If you want to build an alternate kernel image type, use the + :term:`KERNEL_ALT_IMAGETYPE` variable. + + KERNEL_MODULE_AUTOLOAD + Lists kernel modules that need to be auto-loaded during boot. + + .. note:: + + This variable replaces the deprecated + module_autoload + variable. + + You can use the ``KERNEL_MODULE_AUTOLOAD`` variable anywhere that it + can be recognized by the kernel recipe or by an out-of-tree kernel + module recipe (e.g. a machine configuration file, a distribution + configuration file, an append file for the recipe, or the recipe + itself). + + Specify it as follows: + :: + + KERNEL_MODULE_AUTOLOAD += "module_name1 module_name2 module_name3" + + Including ``KERNEL_MODULE_AUTOLOAD`` causes the OpenEmbedded build + system to populate the ``/etc/modules-load.d/modname.conf`` file with + the list of modules to be auto-loaded on boot. The modules appear + one-per-line in the file. Here is an example of the most common use + case: + :: + + KERNEL_MODULE_AUTOLOAD += "module_name" + + For information on how to populate the ``modname.conf`` file with + ``modprobe.d`` syntax lines, see the :term:`KERNEL_MODULE_PROBECONF` variable. + + KERNEL_MODULE_PROBECONF + Provides a list of modules for which the OpenEmbedded build system + expects to find ``module_conf_``\ modname values that specify + configuration for each of the modules. For information on how to + provide those module configurations, see the + :term:`module_conf_* ` variable. + + KERNEL_PATH + The location of the kernel sources. This variable is set to the value + of the :term:`STAGING_KERNEL_DIR` within + the :ref:`module ` class. For information on + how this variable is used, see the + ":ref:`kernel-dev/kernel-dev-common:incorporating out-of-tree modules`" + section in the Yocto Project Linux Kernel Development Manual. + + To help maximize compatibility with out-of-tree drivers used to build + modules, the OpenEmbedded build system also recognizes and uses the + :term:`KERNEL_SRC` variable, which is identical to + the ``KERNEL_PATH`` variable. Both variables are common variables + used by external Makefiles to point to the kernel source directory. + + KERNEL_SRC + The location of the kernel sources. This variable is set to the value + of the :term:`STAGING_KERNEL_DIR` within + the :ref:`module ` class. For information on + how this variable is used, see the + ":ref:`kernel-dev/kernel-dev-common:incorporating out-of-tree modules`" + section in the Yocto Project Linux Kernel Development Manual. 
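+
+    For illustration only, a minimal out-of-tree module recipe could be
+    sketched as follows (the recipe name and source files are
+    hypothetical, and license bookkeeping is omitted for brevity):
+    ::
+
+        # hello-mod_0.1.bb -- hypothetical out-of-tree module recipe
+        SUMMARY = "Example out-of-tree Linux kernel module"
+        inherit module
+        SRC_URI = "file://Makefile \
+                   file://hello.c \
+                  "
+        S = "${WORKDIR}"
+        # The module class passes KERNEL_SRC to make, so the module's own
+        # Makefile can build against the kernel tree with a rule such as
+        # "$(MAKE) -C $(KERNEL_SRC) M=$(PWD) modules".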
+ + To help maximize compatibility with out-of-tree drivers used to build + modules, the OpenEmbedded build system also recognizes and uses the + :term:`KERNEL_PATH` variable, which is identical + to the ``KERNEL_SRC`` variable. Both variables are common variables + used by external Makefiles to point to the kernel source directory. + + KERNEL_VERSION + Specifies the version of the kernel as extracted from ``version.h`` + or ``utsrelease.h`` within the kernel sources. Effects of setting + this variable do not take affect until the kernel has been + configured. Consequently, attempting to refer to this variable in + contexts prior to configuration will not work. + + KERNELDEPMODDEPEND + Specifies whether the data referenced through + :term:`PKGDATA_DIR` is needed or not. The + ``KERNELDEPMODDEPEND`` does not control whether or not that data + exists, but simply whether or not it is used. If you do not need to + use the data, set the ``KERNELDEPMODDEPEND`` variable in your + ``initramfs`` recipe. Setting the variable there when the data is not + needed avoids a potential dependency loop. + + KFEATURE_DESCRIPTION + Provides a short description of a configuration fragment. You use + this variable in the ``.scc`` file that describes a configuration + fragment file. Here is the variable used in a file named ``smp.scc`` + to describe SMP being enabled: + :: + + define KFEATURE_DESCRIPTION "Enable SMP" + + KMACHINE + The machine as known by the kernel. Sometimes the machine name used + by the kernel does not match the machine name used by the + OpenEmbedded build system. For example, the machine name that the + OpenEmbedded build system understands as ``core2-32-intel-common`` + goes by a different name in the Linux Yocto kernel. The kernel + understands that machine as ``intel-core2-32``. For cases like these, + the ``KMACHINE`` variable maps the kernel machine name to the + OpenEmbedded build system machine name. + + These mappings between different names occur in the Yocto Linux + Kernel's ``meta`` branch. As an example take a look in the + ``common/recipes-kernel/linux/linux-yocto_3.19.bbappend`` file: + :: + + LINUX_VERSION_core2-32-intel-common = "3.19.0" + COMPATIBLE_MACHINE_core2-32-intel-common = "${MACHINE}" + SRCREV_meta_core2-32-intel-common = "8897ef68b30e7426bc1d39895e71fb155d694974" + SRCREV_machine_core2-32-intel-common = "43b9eced9ba8a57add36af07736344dcc383f711" + KMACHINE_core2-32-intel-common = "intel-core2-32" + KBRANCH_core2-32-intel-common = "standard/base" + KERNEL_FEATURES_append_core2-32-intel-common = "${KERNEL_FEATURES_INTEL_COMMON}" + + The ``KMACHINE`` statement says + that the kernel understands the machine name as "intel-core2-32". + However, the OpenEmbedded build system understands the machine as + "core2-32-intel-common". + + KTYPE + Defines the kernel type to be used in assembling the configuration. + The linux-yocto recipes define "standard", "tiny", and "preempt-rt" + kernel types. See the ":ref:`kernel-dev/kernel-dev-advanced:kernel types`" + section in the + Yocto Project Linux Kernel Development Manual for more information on + kernel types. + + You define the ``KTYPE`` variable in the + :ref:`kernel-dev/kernel-dev-advanced:bsp descriptions`. The + value you use must match the value used for the + :term:`LINUX_KERNEL_TYPE` value used by the + kernel recipe. + + LABELS + Provides a list of targets for automatic configuration. + + See the :ref:`grub-efi ` class for more + information on how this variable is used. 
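+
+    For example, a bootable live image configuration might define two
+    boot menu entries as follows (the values shown are illustrative):
+    ::
+
+        LABELS = "boot install"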
+ + LAYERDEPENDS + Lists the layers, separated by spaces, on which this recipe depends. + Optionally, you can specify a specific layer version for a dependency + by adding it to the end of the layer name. Here is an example: + :: + + LAYERDEPENDS_mylayer = "anotherlayer (=3)" + + In this previous example, + version 3 of "anotherlayer" is compared against + :term:`LAYERVERSION`\ ``_anotherlayer``. + + An error is produced if any dependency is missing or the version + numbers (if specified) do not match exactly. This variable is used in + the ``conf/layer.conf`` file and must be suffixed with the name of + the specific layer (e.g. ``LAYERDEPENDS_mylayer``). + + LAYERDIR + When used inside the ``layer.conf`` configuration file, this variable + provides the path of the current layer. This variable is not + available outside of ``layer.conf`` and references are expanded + immediately when parsing of the file completes. + + LAYERRECOMMENDS + Lists the layers, separated by spaces, recommended for use with this + layer. + + Optionally, you can specify a specific layer version for a + recommendation by adding the version to the end of the layer name. + Here is an example: + :: + + LAYERRECOMMENDS_mylayer = "anotherlayer (=3)" + + In this previous example, version 3 of "anotherlayer" is compared + against ``LAYERVERSION_anotherlayer``. + + This variable is used in the ``conf/layer.conf`` file and must be + suffixed with the name of the specific layer (e.g. + ``LAYERRECOMMENDS_mylayer``). + + LAYERSERIES_COMPAT + Lists the versions of the :term:`OpenEmbedded-Core (OE-Core)` for which + a layer is compatible. Using the ``LAYERSERIES_COMPAT`` variable + allows the layer maintainer to indicate which combinations of the + layer and OE-Core can be expected to work. The variable gives the + system a way to detect when a layer has not been tested with new + releases of OE-Core (e.g. the layer is not maintained). + + To specify the OE-Core versions for which a layer is compatible, use + this variable in your layer's ``conf/layer.conf`` configuration file. + For the list, use the Yocto Project + :yocto_wiki:`Release Name ` (e.g. + DISTRO_NAME_NO_CAP). To specify multiple OE-Core versions for the + layer, use a space-separated list: + :: + + LAYERSERIES_COMPAT_layer_root_name = "DISTRO_NAME_NO_CAP DISTRO_NAME_NO_CAP_MINUS_ONE" + + .. note:: + + Setting + LAYERSERIES_COMPAT + is required by the Yocto Project Compatible version 2 standard. + The OpenEmbedded build system produces a warning if the variable + is not set for any given layer. + + See the ":ref:`dev-manual/dev-manual-common-tasks:creating your own layer`" + section in the Yocto Project Development Tasks Manual. + + LAYERVERSION + Optionally specifies the version of a layer as a single number. You + can use this within :term:`LAYERDEPENDS` for + another layer in order to depend on a specific version of the layer. + This variable is used in the ``conf/layer.conf`` file and must be + suffixed with the name of the specific layer (e.g. + ``LAYERVERSION_mylayer``). + + LD + The minimal command and arguments used to run the linker. + + LDFLAGS + Specifies the flags to pass to the linker. This variable is exported + to an environment variable and thus made visible to the software + being built during the compilation step. + + Default initialization for ``LDFLAGS`` varies depending on what is + being built: + + - :term:`TARGET_LDFLAGS` when building for the + target + + - :term:`BUILD_LDFLAGS` when building for the + build host (i.e. 
``-native``) + + - :term:`BUILDSDK_LDFLAGS` when building for + an SDK (i.e. ``nativesdk-``) + + LEAD_SONAME + Specifies the lead (or primary) compiled library file (i.e. ``.so``) + that the :ref:`debian ` class applies its + naming policy to given a recipe that packages multiple libraries. + + This variable works in conjunction with the ``debian`` class. + + LIC_FILES_CHKSUM + Checksums of the license text in the recipe source code. + + This variable tracks changes in license text of the source code + files. If the license text is changed, it will trigger a build + failure, which gives the developer an opportunity to review any + license change. + + This variable must be defined for all recipes (unless + :term:`LICENSE` is set to "CLOSED"). + + For more information, see the ":ref:`usingpoky-configuring-lic_files_chksum`" + section in the Yocto Project Development Tasks Manual. + + LICENSE + The list of source licenses for the recipe. Follow these rules: + + - Do not use spaces within individual license names. + + - Separate license names using \| (pipe) when there is a choice + between licenses. + + - Separate license names using & (ampersand) when multiple licenses + exist that cover different parts of the source. + + - You can use spaces between license names. + + - For standard licenses, use the names of the files in + ``meta/files/common-licenses/`` or the + :term:`SPDXLICENSEMAP` flag names defined in + ``meta/conf/licenses.conf``. + + Here are some examples: + :: + + LICENSE = "LGPLv2.1 | GPLv3" + LICENSE = "MPL-1 & LGPLv2.1" + LICENSE = "GPLv2+" + + The first example is from the + recipes for Qt, which the user may choose to distribute under either + the LGPL version 2.1 or GPL version 3. The second example is from + Cairo where two licenses cover different parts of the source code. + The final example is from ``sysstat``, which presents a single + license. + + You can also specify licenses on a per-package basis to handle + situations where components of the output have different licenses. + For example, a piece of software whose code is licensed under GPLv2 + but has accompanying documentation licensed under the GNU Free + Documentation License 1.2 could be specified as follows: + :: + + LICENSE = "GFDL-1.2 & GPLv2" + LICENSE_${PN} = "GPLv2" + LICENSE_${PN}-doc = "GFDL-1.2" + + LICENSE_CREATE_PACKAGE + Setting ``LICENSE_CREATE_PACKAGE`` to "1" causes the OpenEmbedded + build system to create an extra package (i.e. + ``${``\ :term:`PN`\ ``}-lic``) for each recipe and to add + those packages to the + :term:`RRECOMMENDS`\ ``_${PN}``. + + The ``${PN}-lic`` package installs a directory in + ``/usr/share/licenses`` named ``${PN}``, which is the recipe's base + name, and installs files in that directory that contain license and + copyright information (i.e. copies of the appropriate license files + from ``meta/common-licenses`` that match the licenses specified in + the :term:`LICENSE` variable of the recipe metadata + and copies of files marked in + :term:`LIC_FILES_CHKSUM` as containing + license text). + + For related information on providing license text, see the + :term:`COPY_LIC_DIRS` variable, the + :term:`COPY_LIC_MANIFEST` variable, and the + ":ref:`dev-manual/dev-manual-common-tasks:providing license text`" + section in the Yocto Project Development Tasks Manual. + + LICENSE_FLAGS + Specifies additional flags for a recipe you must whitelist through + :term:`LICENSE_FLAGS_WHITELIST` in + order to allow the recipe to be built. 
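+
+    For example, a recipe covering a commercially licensed component
+    might carry the following flag (an illustrative value rather than a
+    reference to any specific recipe):
+    ::
+
+        LICENSE_FLAGS = "commercial"
+
+    Such a recipe is then only built once a configuration file,
+    typically ``local.conf``, whitelists the flag:
+    ::
+
+        LICENSE_FLAGS_WHITELIST = "commercial"
+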
When providing multiple flags, + separate them with spaces. + + This value is independent of :term:`LICENSE` and is + typically used to mark recipes that might require additional licenses + in order to be used in a commercial product. For more information, + see the + ":ref:`dev-manual/dev-manual-common-tasks:enabling commercially licensed recipes`" + section in the Yocto Project Development Tasks Manual. + + LICENSE_FLAGS_WHITELIST + Lists license flags that when specified in + :term:`LICENSE_FLAGS` within a recipe should not + prevent that recipe from being built. This practice is otherwise + known as "whitelisting" license flags. For more information, see the + ":ref:`dev-manual/dev-manual-common-tasks:enabling commercially licensed recipes`" + section in the Yocto Project Development Tasks Manual. + + LICENSE_PATH + Path to additional licenses used during the build. By default, the + OpenEmbedded build system uses ``COMMON_LICENSE_DIR`` to define the + directory that holds common license text used during the build. The + ``LICENSE_PATH`` variable allows you to extend that location to other + areas that have additional licenses: + :: + + LICENSE_PATH += "path-to-additional-common-licenses" + + LINUX_KERNEL_TYPE + Defines the kernel type to be used in assembling the configuration. + The linux-yocto recipes define "standard", "tiny", and "preempt-rt" + kernel types. See the ":ref:`kernel-dev/kernel-dev-advanced:kernel types`" + section in the + Yocto Project Linux Kernel Development Manual for more information on + kernel types. + + If you do not specify a ``LINUX_KERNEL_TYPE``, it defaults to + "standard". Together with :term:`KMACHINE`, the + ``LINUX_KERNEL_TYPE`` variable defines the search arguments used by + the kernel tools to find the appropriate description within the + kernel :term:`Metadata` with which to build out the sources + and configuration. + + LINUX_VERSION + The Linux version from ``kernel.org`` on which the Linux kernel image + being built using the OpenEmbedded build system is based. You define + this variable in the kernel recipe. For example, the + ``linux-yocto-3.4.bb`` kernel recipe found in + ``meta/recipes-kernel/linux`` defines the variables as follows: + :: + + LINUX_VERSION ?= "3.4.24" + + The ``LINUX_VERSION`` variable is used to define :term:`PV` + for the recipe: + :: + + PV = "${LINUX_VERSION}+git${SRCPV}" + + LINUX_VERSION_EXTENSION + A string extension compiled into the version string of the Linux + kernel built with the OpenEmbedded build system. You define this + variable in the kernel recipe. For example, the linux-yocto kernel + recipes all define the variable as follows: + :: + + LINUX_VERSION_EXTENSION ?= "-yocto-${LINUX_KERNEL_TYPE}" + + Defining this variable essentially sets the Linux kernel + configuration item ``CONFIG_LOCALVERSION``, which is visible through + the ``uname`` command. Here is an example that shows the extension + assuming it was set as previously shown: + :: + + $ uname -r + 3.7.0-rc8-custom + + LOG_DIR + Specifies the directory to which the OpenEmbedded build system writes + overall log files. The default directory is ``${TMPDIR}/log``. + + For the directory containing logs specific to each task, see the + :term:`T` variable. + + MACHINE + Specifies the target device for which the image is built. You define + ``MACHINE`` in the ``local.conf`` file found in the + :term:`Build Directory`. 
By default, ``MACHINE`` is set to + "qemux86", which is an x86-based architecture machine to be emulated + using QEMU: + :: + + MACHINE ?= "qemux86" + + The variable corresponds to a machine configuration file of the same + name, through which machine-specific configurations are set. Thus, + when ``MACHINE`` is set to "qemux86" there exists the corresponding + ``qemux86.conf`` machine configuration file, which can be found in + the :term:`Source Directory` in + ``meta/conf/machine``. + + The list of machines supported by the Yocto Project as shipped + include the following: + :: + + MACHINE ?= "qemuarm" + MACHINE ?= "qemuarm64" + MACHINE ?= "qemumips" + MACHINE ?= "qemumips64" + MACHINE ?= "qemuppc" + MACHINE ?= "qemux86" + MACHINE ?= "qemux86-64" + MACHINE ?= "genericx86" + MACHINE ?= "genericx86-64" + MACHINE ?= "beaglebone" + MACHINE ?= "edgerouter" + + The last five are Yocto Project reference hardware + boards, which are provided in the ``meta-yocto-bsp`` layer. + + .. note:: + + Adding additional Board Support Package (BSP) layers to your + configuration adds new possible settings for + MACHINE + . + + MACHINE_ARCH + Specifies the name of the machine-specific architecture. This + variable is set automatically from :term:`MACHINE` or + :term:`TUNE_PKGARCH`. You should not hand-edit + the ``MACHINE_ARCH`` variable. + + MACHINE_ESSENTIAL_EXTRA_RDEPENDS + A list of required machine-specific packages to install as part of + the image being built. The build process depends on these packages + being present. Furthermore, because this is a "machine-essential" + variable, the list of packages are essential for the machine to boot. + The impact of this variable affects images based on + ``packagegroup-core-boot``, including the ``core-image-minimal`` + image. + + This variable is similar to the + ``MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS`` variable with the exception + that the image being built has a build dependency on the variable's + list of packages. In other words, the image will not build if a file + in this list is not found. + + As an example, suppose the machine for which you are building + requires ``example-init`` to be run during boot to initialize the + hardware. In this case, you would use the following in the machine's + ``.conf`` configuration file: + :: + + MACHINE_ESSENTIAL_EXTRA_RDEPENDS += "example-init" + + MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS + A list of recommended machine-specific packages to install as part of + the image being built. The build process does not depend on these + packages being present. However, because this is a + "machine-essential" variable, the list of packages are essential for + the machine to boot. The impact of this variable affects images based + on ``packagegroup-core-boot``, including the ``core-image-minimal`` + image. + + This variable is similar to the ``MACHINE_ESSENTIAL_EXTRA_RDEPENDS`` + variable with the exception that the image being built does not have + a build dependency on the variable's list of packages. In other + words, the image will still build if a package in this list is not + found. Typically, this variable is used to handle essential kernel + modules, whose functionality may be selected to be built into the + kernel rather than as a module, in which case a package will not be + produced. + + Consider an example where you have a custom kernel where a specific + touchscreen driver is required for the machine to be usable. However, + the driver can be built as a module or into the kernel depending on + the kernel configuration. 
If the driver is built as a module, you + want it to be installed. But, when the driver is built into the + kernel, you still want the build to succeed. This variable sets up a + "recommends" relationship so that in the latter case, the build will + not fail due to the missing package. To accomplish this, assuming the + package for the module was called ``kernel-module-ab123``, you would + use the following in the machine's ``.conf`` configuration file: + :: + + MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS += "kernel-module-ab123" + + .. note:: + + In this example, the + kernel-module-ab123 + recipe needs to explicitly set its + PACKAGES + variable to ensure that BitBake does not use the kernel recipe's + PACKAGES_DYNAMIC + variable to satisfy the dependency. + + Some examples of these machine essentials are flash, screen, + keyboard, mouse, or touchscreen drivers (depending on the machine). + + MACHINE_EXTRA_RDEPENDS + A list of machine-specific packages to install as part of the image + being built that are not essential for the machine to boot. However, + the build process for more fully-featured images depends on the + packages being present. + + This variable affects all images based on ``packagegroup-base``, + which does not include the ``core-image-minimal`` or + ``core-image-full-cmdline`` images. + + The variable is similar to the ``MACHINE_EXTRA_RRECOMMENDS`` variable + with the exception that the image being built has a build dependency + on the variable's list of packages. In other words, the image will + not build if a file in this list is not found. + + An example is a machine that has WiFi capability but is not essential + for the machine to boot the image. However, if you are building a + more fully-featured image, you want to enable the WiFi. The package + containing the firmware for the WiFi hardware is always expected to + exist, so it is acceptable for the build process to depend upon + finding the package. In this case, assuming the package for the + firmware was called ``wifidriver-firmware``, you would use the + following in the ``.conf`` file for the machine: + :: + + MACHINE_EXTRA_RDEPENDS += "wifidriver-firmware" + + MACHINE_EXTRA_RRECOMMENDS + A list of machine-specific packages to install as part of the image + being built that are not essential for booting the machine. The image + being built has no build dependency on this list of packages. + + This variable affects only images based on ``packagegroup-base``, + which does not include the ``core-image-minimal`` or + ``core-image-full-cmdline`` images. + + This variable is similar to the ``MACHINE_EXTRA_RDEPENDS`` variable + with the exception that the image being built does not have a build + dependency on the variable's list of packages. In other words, the + image will build if a file in this list is not found. + + An example is a machine that has WiFi capability but is not essential + For the machine to boot the image. However, if you are building a + more fully-featured image, you want to enable WiFi. In this case, the + package containing the WiFi kernel module will not be produced if the + WiFi driver is built into the kernel, in which case you still want + the build to succeed instead of failing as a result of the package + not being found. 
To accomplish this, assuming the package for the + module was called ``kernel-module-examplewifi``, you would use the + following in the ``.conf`` file for the machine: + :: + + MACHINE_EXTRA_RRECOMMENDS += "kernel-module-examplewifi" + + MACHINE_FEATURES + Specifies the list of hardware features the + :term:`MACHINE` is capable of supporting. For related + information on enabling features, see the + :term:`DISTRO_FEATURES`, + :term:`COMBINED_FEATURES`, and + :term:`IMAGE_FEATURES` variables. + + For a list of hardware features supported by the Yocto Project as + shipped, see the "`Machine Features <#ref-features-machine>`__" + section. + + MACHINE_FEATURES_BACKFILL + Features to be added to ``MACHINE_FEATURES`` if not also present in + ``MACHINE_FEATURES_BACKFILL_CONSIDERED``. + + This variable is set in the ``meta/conf/bitbake.conf`` file. It is + not intended to be user-configurable. It is best to just reference + the variable to see which machine features are being backfilled for + all machine configurations. See the "`Feature + Backfilling <#ref-features-backfill>`__" section for more + information. + + MACHINE_FEATURES_BACKFILL_CONSIDERED + Features from ``MACHINE_FEATURES_BACKFILL`` that should not be + backfilled (i.e. added to ``MACHINE_FEATURES``) during the build. See + the "`Feature Backfilling <#ref-features-backfill>`__" section for + more information. + + MACHINEOVERRIDES + A colon-separated list of overrides that apply to the current + machine. By default, this list includes the value of + :term:`MACHINE`. + + You can extend ``MACHINEOVERRIDES`` to add extra overrides that + should apply to a machine. For example, all machines emulated in QEMU + (e.g. ``qemuarm``, ``qemux86``, and so forth) include a file named + ``meta/conf/machine/include/qemu.inc`` that prepends the following + override to ``MACHINEOVERRIDES``: + :: + + MACHINEOVERRIDES =. "qemuall:" + + This + override allows variables to be overriden for all machines emulated + in QEMU, like in the following example from the ``connman-conf`` + recipe: + :: + + SRC_URI_append_qemuall = "file://wired.config \ + file://wired-setup \ + " + + The underlying mechanism behind + ``MACHINEOVERRIDES`` is simply that it is included in the default + value of :term:`OVERRIDES`. + + MAINTAINER + The email address of the distribution maintainer. + + MIRRORS + Specifies additional paths from which the OpenEmbedded build system + gets source code. When the build system searches for source code, it + first tries the local download directory. If that location fails, the + build system tries locations defined by + :term:`PREMIRRORS`, the upstream source, and then + locations specified by ``MIRRORS`` in that order. + + Assuming your distribution (:term:`DISTRO`) is "poky", + the default value for ``MIRRORS`` is defined in the + ``conf/distro/poky.conf`` file in the ``meta-poky`` Git repository. + + MLPREFIX + Specifies a prefix has been added to :term:`PN` to create a + special version of a recipe or package (i.e. a Multilib version). The + variable is used in places where the prefix needs to be added to or + removed from a the name (e.g. the :term:`BPN` variable). + ``MLPREFIX`` gets set when a prefix has been added to ``PN``. + + .. note:: + + The "ML" in + MLPREFIX + stands for "MultiLib". This representation is historical and comes + from a time when + nativesdk + was a suffix rather than a prefix on the recipe name. When + nativesdk + was turned into a prefix, it made sense to set + MLPREFIX + for it as well. 
+ + To help understand when ``MLPREFIX`` might be needed, consider when + :term:`BBCLASSEXTEND` is used to provide a + ``nativesdk`` version of a recipe in addition to the target version. + If that recipe declares build-time dependencies on tasks in other + recipes by using :term:`DEPENDS`, then a dependency on + "foo" will automatically get rewritten to a dependency on + "nativesdk-foo". However, dependencies like the following will not + get rewritten automatically: + :: + + do_foo[depends] += "recipe:do_foo" + + If you want such a dependency to also get transformed, you can do the + following: + :: + + do_foo[depends] += "${MLPREFIX}recipe:do_foo" + + module_autoload + This variable has been replaced by the ``KERNEL_MODULE_AUTOLOAD`` + variable. You should replace all occurrences of ``module_autoload`` + with additions to ``KERNEL_MODULE_AUTOLOAD``, for example: + :: + + module_autoload_rfcomm = "rfcomm" + + should now be replaced with: + :: + + KERNEL_MODULE_AUTOLOAD += "rfcomm" + + See the :term:`KERNEL_MODULE_AUTOLOAD` variable for more information. + + module_conf + Specifies `modprobe.d `_ + syntax lines for inclusion in the ``/etc/modprobe.d/modname.conf`` + file. + + You can use this variable anywhere that it can be recognized by the + kernel recipe or out-of-tree kernel module recipe (e.g. a machine + configuration file, a distribution configuration file, an append file + for the recipe, or the recipe itself). If you use this variable, you + must also be sure to list the module name in the + :term:`KERNEL_MODULE_AUTOLOAD` + variable. + + Here is the general syntax: + :: + + module_conf_module_name = "modprobe.d-syntax" + + You must use the kernel module name override. + + Run ``man modprobe.d`` in the shell to find out more information on + the exact syntax you want to provide with ``module_conf``. + + Including ``module_conf`` causes the OpenEmbedded build system to + populate the ``/etc/modprobe.d/modname.conf`` file with + ``modprobe.d`` syntax lines. Here is an example that adds the options + ``arg1`` and ``arg2`` to a module named ``mymodule``: + :: + + module_conf_mymodule = "options mymodule arg1=val1 arg2=val2" + + For information on how to specify kernel modules to auto-load on + boot, see the :term:`KERNEL_MODULE_AUTOLOAD` variable. + + MODULE_TARBALL_DEPLOY + Controls creation of the ``modules-*.tgz`` file. Set this variable to + "0" to disable creation of this file, which contains all of the + kernel modules resulting from a kernel build. + + MODULE_TARBALL_LINK_NAME + The link name of the kernel module tarball. This variable is set in + the ``meta/classes/kernel-artifact-names.bbclass`` file as follows: + :: + + MODULE_TARBALL_LINK_NAME ?= "${KERNEL_ARTIFACT_LINK_NAME}" + + The value + of the ``KERNEL_ARTIFACT_LINK_NAME`` variable, which is set in the + same file, has the following value: + :: + + KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" + + See the :term:`MACHINE` variable for additional information. + + MODULE_TARBALL_NAME + The base name of the kernel module tarball. This variable is set in + the ``meta/classes/kernel-artifact-names.bbclass`` file as follows: + :: + + MODULE_TARBALL_NAME ?= "${KERNEL_ARTIFACT_NAME}" + + The value of the :term:`KERNEL_ARTIFACT_NAME` variable, + which is set in the same file, has the following value: + :: + + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" + + MULTIMACH_TARGET_SYS + Uniquely identifies the type of the target system for which packages + are being built. 
This variable allows output for different types of + target systems to be put into different subdirectories of the same + output directory. + + The default value of this variable is: + :: + + ${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS} + + Some classes (e.g. + :ref:`cross-canadian `) modify the + ``MULTIMACH_TARGET_SYS`` value. + + See the :term:`STAMP` variable for an example. See the + :term:`STAGING_DIR_TARGET` variable for more information. + + NATIVELSBSTRING + A string identifying the host distribution. Strings consist of the + host distributor ID followed by the release, as reported by the + ``lsb_release`` tool or as read from ``/etc/lsb-release``. For + example, when running a build on Ubuntu 12.10, the value is + "Ubuntu-12.10". If this information is unable to be determined, the + value resolves to "Unknown". + + This variable is used by default to isolate native shared state + packages for different distributions (e.g. to avoid problems with + ``glibc`` version incompatibilities). Additionally, the variable is + checked against + :term:`SANITY_TESTED_DISTROS` if that + variable is set. + + NM + The minimal command and arguments to run ``nm``. + + NO_GENERIC_LICENSE + Avoids QA errors when you use a non-common, non-CLOSED license in a + recipe. Packages exist, such as the linux-firmware package, with many + licenses that are not in any way common. Also, new licenses are added + occasionally to avoid introducing a lot of common license files, + which are only applicable to a specific package. + ``NO_GENERIC_LICENSE`` is used to allow copying a license that does + not exist in common licenses. + + The following example shows how to add ``NO_GENERIC_LICENSE`` to a + recipe: + :: + + NO_GENERIC_LICENSE[license_name] = "license_file_in_fetched_source" + + The following is an example that + uses the ``LICENSE.Abilis.txt`` file as the license from the fetched + source: + :: + + NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENSE.Abilis.txt" + + NO_RECOMMENDATIONS + Prevents installation of all "recommended-only" packages. + Recommended-only packages are packages installed only through the + :term:`RRECOMMENDS` variable). Setting the + ``NO_RECOMMENDATIONS`` variable to "1" turns this feature on: :: + + NO_RECOMMENDATIONS = "1" + + You can set this variable globally in your ``local.conf`` file or you + can attach it to a specific image recipe by using the recipe name + override: :: + + NO_RECOMMENDATIONS_pn-target_image = "1" + + It is important to realize that if you choose to not install packages + using this variable and some other packages are dependent on them + (i.e. listed in a recipe's :term:`RDEPENDS` + variable), the OpenEmbedded build system ignores your request and + will install the packages to avoid dependency errors. + + .. note:: + + Some recommended packages might be required for certain system + functionality, such as kernel modules. It is up to you to add + packages with the IMAGE_INSTALL variable. + + Support for this variable exists only when using the IPK and RPM + packaging backend. Support does not exist for DEB. + + See the :term:`BAD_RECOMMENDATIONS` and + the :term:`PACKAGE_EXCLUDE` variables for + related information. + + NOAUTOPACKAGEDEBUG + Disables auto package from splitting ``.debug`` files. If a recipe + requires ``FILES_${PN}-dbg`` to be set manually, the + ``NOAUTOPACKAGEDEBUG`` can be defined allowing you to define the + content of the debug package. 
For example: + :: + + NOAUTOPACKAGEDEBUG = "1" + FILES_${PN}-dev = "${includedir}/${QT_DIR_NAME}/Qt/*" + FILES_${PN}-dbg = "/usr/src/debug/" + FILES_${QT_BASE_NAME}-demos-doc = "${docdir}/${QT_DIR_NAME}/qch/qt.qch" + + OBJCOPY + The minimal command and arguments to run ``objcopy``. + + OBJDUMP + The minimal command and arguments to run ``objdump``. + + OE_BINCONFIG_EXTRA_MANGLE + When inheriting the :ref:`binconfig ` class, + this variable specifies additional arguments passed to the "sed" + command. The sed command alters any paths in configuration scripts + that have been set up during compilation. Inheriting this class + results in all paths in these scripts being changed to point into the + ``sysroots/`` directory so that all builds that use the script will + use the correct directories for the cross compiling layout. + + See the ``meta/classes/binconfig.bbclass`` in the + :term:`Source Directory` for details on how this class + applies these additional sed command arguments. For general + information on the ``binconfig`` class, see the + ":ref:`binconfig.bbclass `" section. + + OE_IMPORTS + An internal variable used to tell the OpenEmbedded build system what + Python modules to import for every Python function run by the system. + + .. note:: + + Do not set this variable. It is for internal use only. + + OE_INIT_ENV_SCRIPT + The name of the build environment setup script for the purposes of + setting up the environment within the extensible SDK. The default + value is "oe-init-build-env". + + If you use a custom script to set up your build environment, set the + ``OE_INIT_ENV_SCRIPT`` variable to its name. + + OE_TERMINAL + Controls how the OpenEmbedded build system spawns interactive + terminals on the host development system (e.g. using the BitBake + command with the ``-c devshell`` command-line option). For more + information, see the ":ref:`platdev-appdev-devshell`" section in + the Yocto Project Development Tasks Manual. + + You can use the following values for the ``OE_TERMINAL`` variable: + + - auto + - gnome + - xfce + - rxvt + - screen + - konsole + - none + + OEROOT + The directory from which the top-level build environment setup script + is sourced. The Yocto Project provides a top-level build environment + setup script: ````` <#structure-core-script>`__. When you run this + script, the ``OEROOT`` variable resolves to the directory that + contains the script. + + For additional information on how this variable is used, see the + initialization script. + + OLDEST_KERNEL + Declares the oldest version of the Linux kernel that the produced + binaries must support. This variable is passed into the build of the + Embedded GNU C Library (``glibc``). + + The default for this variable comes from the + ``meta/conf/bitbake.conf`` configuration file. You can override this + default by setting the variable in a custom distribution + configuration file. + + OVERRIDES + A colon-separated list of overrides that currently apply. Overrides + are a BitBake mechanism that allows variables to be selectively + overridden at the end of parsing. The set of overrides in + ``OVERRIDES`` represents the "state" during building, which includes + the current recipe being built, the machine for which it is being + built, and so forth. 
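+
+ As a purely illustrative sketch (the recipe, machine, and distro names here
+ are assumptions, and the exact value is best inspected with ``bitbake -e``),
+ the resolved value for a ``busybox`` build on a ``qemux86``/``poky``
+ configuration might look something like the following:
+ ::
+
+    OVERRIDES="linux:pn-busybox:qemuall:qemux86:poky:class-target:libc-glibc:forcevariable"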
+ + As an example, if the string "an-override" appears as an element in + the colon-separated list in ``OVERRIDES``, then the following + assignment will override ``FOO`` with the value "overridden" at the + end of parsing: + :: + + FOO_an-override = "overridden" + + See the + ":ref:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata:conditional syntax (overrides)`" + section in the BitBake User Manual for more information on the + overrides mechanism. + + The default value of ``OVERRIDES`` includes the values of the + :term:`CLASSOVERRIDE`, + :term:`MACHINEOVERRIDES`, and + :term:`DISTROOVERRIDES` variables. Another + important override included by default is ``pn-${PN}``. This override + allows variables to be set for a single recipe within configuration + (``.conf``) files. Here is an example: + :: + + FOO_pn-myrecipe = "myrecipe-specific value" + + .. note:: + + An easy way to see what overrides apply is to search for + OVERRIDES + in the output of the + bitbake -e + command. See the " + Viewing Variable Values + " section in the Yocto Project Development Tasks Manual for more + information. + + P + The recipe name and version. ``P`` is comprised of the following: + :: + + ${PN}-${PV} + + PACKAGE_ADD_METADATA + This variable defines additional metadata to add to packages. + + You may find you need to inject additional metadata into packages. + This variable allows you to do that by setting the injected data as + the value. Multiple fields can be added by splitting the content with + the literal separator "\n". + + The suffixes '_IPK', '_DEB', or '_RPM' can be applied to the variable + to make package-type-specific settings. It can also be made package + specific by using the package name as a suffix. + + You can find out more about applying this variable in the + ":ref:`dev-manual/dev-manual-common-tasks:adding custom metadata to packages`" + section in the Yocto Project Development Tasks Manual. + + PACKAGE_ARCH + The architecture of the resulting package or packages. + + By default, the value of this variable is set to + :term:`TUNE_PKGARCH` when building for the + target, :term:`BUILD_ARCH` when building for the + build host, and "${SDK_ARCH}-${SDKPKGSUFFIX}" when building for the + SDK. + + .. note:: + + See + SDK_ARCH + for more information. + + However, if your recipe's output packages are built specific to the + target machine rather than generally for the architecture of the + machine, you should set ``PACKAGE_ARCH`` to the value of + :term:`MACHINE_ARCH` in the recipe as follows: + :: + + PACKAGE_ARCH = "${MACHINE_ARCH}" + + PACKAGE_ARCHS + Specifies a list of architectures compatible with the target machine. + This variable is set automatically and should not normally be + hand-edited. Entries are separated using spaces and listed in order + of priority. The default value for ``PACKAGE_ARCHS`` is "all any + noarch ${PACKAGE_EXTRA_ARCHS} ${MACHINE_ARCH}". + + PACKAGE_BEFORE_PN + Enables easily adding packages to ``PACKAGES`` before ``${PN}`` so + that those added packages can pick up files that would normally be + included in the default package. + + PACKAGE_CLASSES + This variable, which is set in the ``local.conf`` configuration file + found in the ``conf`` folder of the + :term:`Build Directory`, specifies the package manager the + OpenEmbedded build system uses when packaging data. + + You can provide one or more of the following arguments for the + variable: + :: + + PACKAGE_CLASSES ?= "package_rpm package_deb package_ipk package_tar" + + .. 
note:: + + While it is a legal option, the + package_tar + class has limited functionality due to no support for package + dependencies by that backend. Therefore, it is recommended that + you do not use it. + + The build system uses only the first argument in the list as the + package manager when creating your image or SDK. However, packages + will be created using any additional packaging classes you specify. + For example, if you use the following in your ``local.conf`` file: + :: + + PACKAGE_CLASSES ?= "package_ipk" + + The OpenEmbedded build system uses + the IPK package manager to create your image or SDK. + + For information on packaging and build performance effects as a + result of the package manager in use, see the + ":ref:`package.bbclass `" section. + + PACKAGE_DEBUG_SPLIT_STYLE + Determines how to split up the binary and debug information when + creating ``*-dbg`` packages to be used with the GNU Project Debugger + (GDB). + + With the ``PACKAGE_DEBUG_SPLIT_STYLE`` variable, you can control + where debug information, which can include or exclude source files, + is stored: + + - ".debug": Debug symbol files are placed next to the binary in a + ``.debug`` directory on the target. For example, if a binary is + installed into ``/bin``, the corresponding debug symbol files are + installed in ``/bin/.debug``. Source files are placed in + ``/usr/src/debug``. + + - "debug-file-directory": Debug symbol files are placed under + ``/usr/lib/debug`` on the target, and separated by the path from + where the binary is installed. For example, if a binary is + installed in ``/bin``, the corresponding debug symbols are + installed in ``/usr/lib/debug/bin``. Source files are placed in + ``/usr/src/debug``. + + - "debug-without-src": The same behavior as ".debug" previously + described with the exception that no source files are installed. + + - "debug-with-srcpkg": The same behavior as ".debug" previously + described with the exception that all source files are placed in a + separate ``*-src`` pkg. This is the default behavior. + + You can find out more about debugging using GDB by reading the + ":ref:`platdev-gdb-remotedebug`" section + in the Yocto Project Development Tasks Manual. + + PACKAGE_EXCLUDE_COMPLEMENTARY + Prevents specific packages from being installed when you are + installing complementary packages. + + You might find that you want to prevent installing certain packages + when you are installing complementary packages. For example, if you + are using :term:`IMAGE_FEATURES` to install + ``dev-pkgs``, you might not want to install all packages from a + particular multilib. If you find yourself in this situation, you can + use the ``PACKAGE_EXCLUDE_COMPLEMENTARY`` variable to specify regular + expressions to match the packages you want to exclude. + + PACKAGE_EXCLUDE + Lists packages that should not be installed into an image. For + example: + :: + + PACKAGE_EXCLUDE = "package_name package_name package_name ..." + + You can set this variable globally in your ``local.conf`` file or you + can attach it to a specific image recipe by using the recipe name + override: + :: + + PACKAGE_EXCLUDE_pn-target_image = "package_name" + + If you choose to not install a package using this variable and some + other package is dependent on it (i.e. listed in a recipe's + :term:`RDEPENDS` variable), the OpenEmbedded build + system generates a fatal installation error. 
Because the build system + halts the process with a fatal error, you can use the variable with + an iterative development process to remove specific components from a + system. + + Support for this variable exists only when using the IPK and RPM + packaging backend. Support does not exist for DEB. + + See the :term:`NO_RECOMMENDATIONS` and the + :term:`BAD_RECOMMENDATIONS` variables for + related information. + + PACKAGE_EXTRA_ARCHS + Specifies the list of architectures compatible with the device CPU. + This variable is useful when you build for several different devices + that use miscellaneous processors such as XScale and ARM926-EJS. + + PACKAGE_FEED_ARCHS + Optionally specifies the package architectures used as part of the + package feed URIs during the build. When used, the + ``PACKAGE_FEED_ARCHS`` variable is appended to the final package feed + URI, which is constructed using the + :term:`PACKAGE_FEED_URIS` and + :term:`PACKAGE_FEED_BASE_PATHS` + variables. + + .. note:: + + You can use the + PACKAGE_FEEDS_ARCHS + variable to whitelist specific package architectures. If you do + not need to whitelist specific architectures, which is a common + case, you can omit this variable. Omitting the variable results in + all available architectures for the current machine being included + into remote package feeds. + + Consider the following example where the ``PACKAGE_FEED_URIS``, + ``PACKAGE_FEED_BASE_PATHS``, and ``PACKAGE_FEED_ARCHS`` variables are + defined in your ``local.conf`` file: + :: + + PACKAGE_FEED_URIS = "https://example.com/packagerepos/release \ + https://example.com/packagerepos/updates" + PACKAGE_FEED_BASE_PATHS = "rpm rpm-dev" + PACKAGE_FEED_ARCHS = "all core2-64" + + Given these settings, the resulting package feeds are as follows: + :: + + https://example.com/packagerepos/release/rpm/all + https://example.com/packagerepos/release/rpm/core2-64 + https://example.com/packagerepos/release/rpm-dev/all + https://example.com/packagerepos/release/rpm-dev/core2-64 + https://example.com/packagerepos/updates/rpm/all + https://example.com/packagerepos/updates/rpm/core2-64 + https://example.com/packagerepos/updates/rpm-dev/all + https://example.com/packagerepos/updates/rpm-dev/core2-64 + + PACKAGE_FEED_BASE_PATHS + Specifies the base path used when constructing package feed URIs. The + ``PACKAGE_FEED_BASE_PATHS`` variable makes up the middle portion of a + package feed URI used by the OpenEmbedded build system. The base path + lies between the :term:`PACKAGE_FEED_URIS` + and :term:`PACKAGE_FEED_ARCHS` variables. 
+ + Consider the following example where the ``PACKAGE_FEED_URIS``, + ``PACKAGE_FEED_BASE_PATHS``, and ``PACKAGE_FEED_ARCHS`` variables are + defined in your ``local.conf`` file: + :: + + PACKAGE_FEED_URIS = "https://example.com/packagerepos/release \ + https://example.com/packagerepos/updates" + PACKAGE_FEED_BASE_PATHS = "rpm rpm-dev" + PACKAGE_FEED_ARCHS = "all core2-64" + + Given these settings, the resulting package feeds are as follows: + :: + + https://example.com/packagerepos/release/rpm/all + https://example.com/packagerepos/release/rpm/core2-64 + https://example.com/packagerepos/release/rpm-dev/all + https://example.com/packagerepos/release/rpm-dev/core2-64 + https://example.com/packagerepos/updates/rpm/all + https://example.com/packagerepos/updates/rpm/core2-64 + https://example.com/packagerepos/updates/rpm-dev/all + https://example.com/packagerepos/updates/rpm-dev/core2-64 + + PACKAGE_FEED_URIS + Specifies the front portion of the package feed URI used by the + OpenEmbedded build system. Each final package feed URI is comprised + of ``PACKAGE_FEED_URIS``, + :term:`PACKAGE_FEED_BASE_PATHS`, and + :term:`PACKAGE_FEED_ARCHS` variables. + + Consider the following example where the ``PACKAGE_FEED_URIS``, + ``PACKAGE_FEED_BASE_PATHS``, and ``PACKAGE_FEED_ARCHS`` variables are + defined in your ``local.conf`` file: + :: + + PACKAGE_FEED_URIS = "https://example.com/packagerepos/release \ + https://example.com/packagerepos/updates" + PACKAGE_FEED_BASE_PATHS = "rpm rpm-dev" + PACKAGE_FEED_ARCHS = "all core2-64" + + Given these settings, the resulting package feeds are as follows: + :: + + https://example.com/packagerepos/release/rpm/all + https://example.com/packagerepos/release/rpm/core2-64 + https://example.com/packagerepos/release/rpm-dev/all + https://example.com/packagerepos/release/rpm-dev/core2-64 + https://example.com/packagerepos/updates/rpm/all + https://example.com/packagerepos/updates/rpm/core2-64 + https://example.com/packagerepos/updates/rpm-dev/all + https://example.com/packagerepos/updates/rpm-dev/core2-64 + + PACKAGE_INSTALL + The final list of packages passed to the package manager for + installation into the image. + + Because the package manager controls actual installation of all + packages, the list of packages passed using ``PACKAGE_INSTALL`` is + not the final list of packages that are actually installed. This + variable is internal to the image construction code. Consequently, in + general, you should use the + :term:`IMAGE_INSTALL` variable to specify + packages for installation. The exception to this is when working with + the + ```core-image-minimal-initramfs`` <#images-core-image-minimal-initramfs>`__ + image. When working with an initial RAM filesystem (initramfs) image, + use the ``PACKAGE_INSTALL`` variable. For information on creating an + initramfs, see the ":ref:`building-an-initramfs-image`" section + in the Yocto Project Development Tasks Manual. + + PACKAGE_INSTALL_ATTEMPTONLY + Specifies a list of packages the OpenEmbedded build system attempts + to install when creating an image. If a listed package fails to + install, the build system does not generate an error. This variable + is generally not user-defined. + + PACKAGE_PREPROCESS_FUNCS + Specifies a list of functions run to pre-process the + :term:`PKGD` directory prior to splitting the files out + to individual packages. + + PACKAGE_WRITE_DEPS + Specifies a list of dependencies for post-installation and + pre-installation scripts on native/cross tools. 
If your + post-installation or pre-installation script can execute at rootfs + creation time rather than on the target but depends on a native tool + in order to execute, you need to list the tools in + ``PACKAGE_WRITE_DEPS``. + + For information on running post-installation scripts, see the + ":ref:`dev-manual/dev-manual-common-tasks:post-installation scripts`" + section in the Yocto Project Development Tasks Manual. + + PACKAGECONFIG + This variable provides a means of enabling or disabling features of a + recipe on a per-recipe basis. ``PACKAGECONFIG`` blocks are defined in + recipes when you specify features and then arguments that define + feature behaviors. Here is the basic block structure (broken over + multiple lines for readability): + :: + + PACKAGECONFIG ??= "f1 f2 f3 ..." + PACKAGECONFIG[f1] = "\ + --with-f1, \ + --without-f1, \ + build-deps-for-f1, \ + runtime-deps-for-f1, \ + runtime-recommends-for-f1, \ + packageconfig-conflicts-for-f1" + PACKAGECONFIG[f2] = "\ + ... and so on and so on ... + + The ``PACKAGECONFIG`` variable itself specifies a space-separated + list of the features to enable. Following the features, you can + determine the behavior of each feature by providing up to six + order-dependent arguments, which are separated by commas. You can + omit any argument you like but must retain the separating commas. The + order is important and specifies the following: + + 1. Extra arguments that should be added to the configure script + argument list (:term:`EXTRA_OECONF` or + :term:`PACKAGECONFIG_CONFARGS`) if + the feature is enabled. + + 2. Extra arguments that should be added to ``EXTRA_OECONF`` or + ``PACKAGECONFIG_CONFARGS`` if the feature is disabled. + + 3. Additional build dependencies (:term:`DEPENDS`) + that should be added if the feature is enabled. + + 4. Additional runtime dependencies (:term:`RDEPENDS`) + that should be added if the feature is enabled. + + 5. Additional runtime recommendations + (:term:`RRECOMMENDS`) that should be added if + the feature is enabled. + + 6. Any conflicting (that is, mutually exclusive) ``PACKAGECONFIG`` + settings for this feature. + + Consider the following ``PACKAGECONFIG`` block taken from the + ``librsvg`` recipe. In this example the feature is ``gtk``, which has + three arguments that determine the feature's behavior. + :: + + PACKAGECONFIG[gtk] = "--with-gtk3,--without-gtk3,gtk+3" + + The + ``--with-gtk3`` and ``gtk+3`` arguments apply only if the feature is + enabled. In this case, ``--with-gtk3`` is added to the configure + script argument list and ``gtk+3`` is added to ``DEPENDS``. On the + other hand, if the feature is disabled say through a ``.bbappend`` + file in another layer, then the second argument ``--without-gtk3`` is + added to the configure script instead. + + The basic ``PACKAGECONFIG`` structure previously described holds true + regardless of whether you are creating a block or changing a block. + When creating a block, use the structure inside your recipe. + + If you want to change an existing ``PACKAGECONFIG`` block, you can do + so one of two ways: + + - *Append file:* Create an append file named + recipename\ ``.bbappend`` in your layer and override the value of + ``PACKAGECONFIG``. 
You can either completely override the + variable: + :: + + PACKAGECONFIG = "f4 f5" + + Or, you can just append the variable: + :: + + PACKAGECONFIG_append = " f4" + + - *Configuration file:* This method is identical to changing the + block through an append file except you edit your ``local.conf`` + or ``mydistro.conf`` file. As with append files previously + described, you can either completely override the variable: + PACKAGECONFIG_pn-recipename = "f4 f5" Or, you can just amend the + variable: + :: + + PACKAGECONFIG_append_pn-recipename = " f4" + + PACKAGECONFIG_CONFARGS + A space-separated list of configuration options generated from the + :term:`PACKAGECONFIG` setting. + + Classes such as :ref:`autotools ` and + :ref:`cmake ` use ``PACKAGECONFIG_CONFARGS`` to + pass ``PACKAGECONFIG`` options to ``configure`` and ``cmake``, + respectively. If you are using ``PACKAGECONFIG`` but not a class that + handles the ``do_configure`` task, then you need to use + ``PACKAGECONFIG_CONFARGS`` appropriately. + + PACKAGEGROUP_DISABLE_COMPLEMENTARY + For recipes inheriting the + :ref:`packagegroup ` class, setting + ``PACKAGEGROUP_DISABLE_COMPLEMENTARY`` to "1" specifies that the + normal complementary packages (i.e. ``-dev``, ``-dbg``, and so forth) + should not be automatically created by the ``packagegroup`` recipe, + which is the default behavior. + + PACKAGES + The list of packages the recipe creates. The default value is the + following: + :: + + ${PN}-dbg ${PN}-staticdev ${PN}-dev ${PN}-doc ${PN}-locale ${PACKAGE_BEFORE_PN} ${PN} + + During packaging, the :ref:`ref-tasks-package` task + goes through ``PACKAGES`` and uses the :term:`FILES` + variable corresponding to each package to assign files to the + package. If a file matches the ``FILES`` variable for more than one + package in ``PACKAGES``, it will be assigned to the earliest + (leftmost) package. + + Packages in the variable's list that are empty (i.e. where none of + the patterns in ``FILES_``\ pkg match any files installed by the + :ref:`ref-tasks-install` task) are not generated, + unless generation is forced through the + :term:`ALLOW_EMPTY` variable. + + PACKAGES_DYNAMIC + A promise that your recipe satisfies runtime dependencies for + optional modules that are found in other recipes. + ``PACKAGES_DYNAMIC`` does not actually satisfy the dependencies, it + only states that they should be satisfied. For example, if a hard, + runtime dependency (:term:`RDEPENDS`) of another + package is satisfied at build time through the ``PACKAGES_DYNAMIC`` + variable, but a package with the module name is never actually + produced, then the other package will be broken. Thus, if you attempt + to include that package in an image, you will get a dependency + failure from the packaging system during the + :ref:`ref-tasks-rootfs` task. + + Typically, if there is a chance that such a situation can occur and + the package that is not created is valid without the dependency being + satisfied, then you should use :term:`RRECOMMENDS` + (a soft runtime dependency) instead of ``RDEPENDS``. + + For an example of how to use the ``PACKAGES_DYNAMIC`` variable when + you are splitting packages, see the + ":ref:`dev-manual/dev-manual-common-tasks:handling optional module packaging`" + section in the Yocto Project Development Tasks Manual. + + PACKAGESPLITFUNCS + Specifies a list of functions run to perform additional splitting of + files into individual packages. 
Recipes can either prepend to this + variable or prepend to the ``populate_packages`` function in order to + perform additional package splitting. In either case, the function + should set :term:`PACKAGES`, + :term:`FILES`, :term:`RDEPENDS` and + other packaging variables appropriately in order to perform the + desired splitting. + + PARALLEL_MAKE + Extra options passed to the ``make`` command during the + :ref:`ref-tasks-compile` task in order to specify + parallel compilation on the local build host. This variable is + usually in the form "-j x", where x represents the maximum number of + parallel threads ``make`` can run. + + .. note:: + + In order for + PARALLEL_MAKE + to be effective, + make + must be called with + ${ + EXTRA_OEMAKE + } + . An easy way to ensure this is to use the + oe_runmake + function. + + By default, the OpenEmbedded build system automatically sets this + variable to be equal to the number of cores the build system uses. + + .. note:: + + If the software being built experiences dependency issues during + the + do_compile + task that result in race conditions, you can clear the + PARALLEL_MAKE + variable within the recipe as a workaround. For information on + addressing race conditions, see the " + Debugging Parallel Make Races + " section in the Yocto Project Development Tasks Manual. + + For single socket systems (i.e. one CPU), you should not have to + override this variable to gain optimal parallelism during builds. + However, if you have very large systems that employ multiple physical + CPUs, you might want to make sure the ``PARALLEL_MAKE`` variable is + not set higher than "-j 20". + + For more information on speeding up builds, see the + ":ref:`dev-manual/dev-manual-common-tasks:speeding up a build`" + section in the Yocto Project Development Tasks Manual. + + PARALLEL_MAKEINST + Extra options passed to the ``make install`` command during the + :ref:`ref-tasks-install` task in order to specify + parallel installation. This variable defaults to the value of + :term:`PARALLEL_MAKE`. + + .. note:: + + In order for ``PARALLEL_MAKEINST`` to be effective, ``make`` must + be called with + ``${``\ :term:`EXTRA_OEMAKE`\ ``}``. An easy + way to ensure this is to use the ``oe_runmake`` function. + + If the software being built experiences dependency issues during + the ``do_install`` task that result in race conditions, you can + clear the ``PARALLEL_MAKEINST`` variable within the recipe as a + workaround. For information on addressing race conditions, see the + ":ref:`dev-manual/dev-manual-common-tasks:debugging parallel make races`" + section in the Yocto Project Development Tasks Manual. + + PATCHRESOLVE + Determines the action to take when a patch fails. You can set this + variable to one of two values: "noop" and "user". + + The default value of "noop" causes the build to simply fail when the + OpenEmbedded build system cannot successfully apply a patch. Setting + the value to "user" causes the build system to launch a shell and + places you in the right location so that you can manually resolve the + conflicts. + + Set this variable in your ``local.conf`` file. + + PATCHTOOL + Specifies the utility used to apply patches for a recipe during the + :ref:`ref-tasks-patch` task. You can specify one of + three utilities: "patch", "quilt", or "git". The default utility used + is "quilt" except for the quilt-native recipe itself. Because the + quilt tool is not available at the time quilt-native is being + patched, it uses "patch". 
+ + If you wish to use an alternative patching tool, set the variable in + the recipe using one of the following: + :: + + PATCHTOOL = "patch" + PATCHTOOL = "quilt" + PATCHTOOL = "git" + + PE + The epoch of the recipe. By default, this variable is unset. The + variable is used to make upgrades possible when the versioning scheme + changes in some backwards incompatible way. + + ``PE`` is the default value of the :term:`PKGE` variable. + + PF + Specifies the recipe or package name and includes all version and + revision numbers (i.e. ``glibc-2.13-r20+svnr15508/`` and + ``bash-4.2-r1/``). This variable is comprised of the following: + ${:term:`PN`}-${:term:`EXTENDPE`}${:term:`PV`}-${:term:`PR`} + + PIXBUF_PACKAGES + When inheriting the :ref:`pixbufcache ` + class, this variable identifies packages that contain the pixbuf + loaders used with ``gdk-pixbuf``. By default, the ``pixbufcache`` + class assumes that the loaders are in the recipe's main package (i.e. + ``${``\ :term:`PN`\ ``}``). Use this variable if the + loaders you need are in a package other than that main package. + + PKG + The name of the resulting package created by the OpenEmbedded build + system. + + .. note:: + + When using the + PKG + variable, you must use a package name override. + + For example, when the :ref:`debian ` class + renames the output package, it does so by setting + ``PKG_packagename``. + + PKG_CONFIG_PATH + The path to ``pkg-config`` files for the current build context. + ``pkg-config`` reads this variable from the environment. + + PKGD + Points to the destination directory for files to be packaged before + they are split into individual packages. This directory defaults to + the following: + :: + + ${WORKDIR}/package + + Do not change this default. + + PKGDATA_DIR + Points to a shared, global-state directory that holds data generated + during the packaging process. During the packaging process, the + :ref:`ref-tasks-packagedata` task packages data + for each recipe and installs it into this temporary, shared area. + This directory defaults to the following, which you should not + change: + :: + + ${STAGING_DIR_HOST}/pkgdata + + For examples of how this data is used, see the + ":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" + section in the Yocto Project Overview and Concepts Manual and the + ":ref:`dev-manual/dev-manual-common-tasks:viewing package information with \`\`oe-pkgdata-util\`\``" + section in the Yocto Project Development Tasks Manual. For more + information on the shared, global-state directory, see + :term:`STAGING_DIR_HOST`. + + PKGDEST + Points to the parent directory for files to be packaged after they + have been split into individual packages. This directory defaults to + the following: + :: + + ${WORKDIR}/packages-split + + Under this directory, the build system creates directories for each + package specified in :term:`PACKAGES`. Do not change + this default. + + PKGDESTWORK + Points to a temporary work area where the + :ref:`ref-tasks-package` task saves package metadata. + The ``PKGDESTWORK`` location defaults to the following: + :: + + ${WORKDIR}/pkgdata + + Do not change this default. + + The :ref:`ref-tasks-packagedata` task copies the + package metadata from ``PKGDESTWORK`` to + :term:`PKGDATA_DIR` to make it available globally. + + PKGE + The epoch of the package(s) built by the recipe. By default, ``PKGE`` + is set to :term:`PE`. + + PKGR + The revision of the package(s) built by the recipe. By default, + ``PKGR`` is set to :term:`PR`. 
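+
+ As a minimal illustration, assume a hypothetical recipe ``foo_1.2.bb`` that
+ sets:
+ ::
+
+    PR = "r3"
+
+ With the default settings, where ``PKGE``, ``PKGV``, and ``PKGR`` simply take
+ their values from ``PE``, ``PV``, and ``PR``, the packages built from the
+ recipe carry the version "1.2-r3" (for instance, an IPK file would be named
+ something like ``foo_1.2-r3_core2-64.ipk``).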
+ + PKGV + The version of the package(s) built by the recipe. By default, + ``PKGV`` is set to :term:`PV`. + + PN + This variable can have two separate functions depending on the + context: a recipe name or a resulting package name. + + ``PN`` refers to a recipe name in the context of a file used by the + OpenEmbedded build system as input to create a package. The name is + normally extracted from the recipe file name. For example, if the + recipe is named ``expat_2.0.1.bb``, then the default value of ``PN`` + will be "expat". + + The variable refers to a package name in the context of a file + created or produced by the OpenEmbedded build system. + + If applicable, the ``PN`` variable also contains any special suffix + or prefix. For example, using ``bash`` to build packages for the + native machine, ``PN`` is ``bash-native``. Using ``bash`` to build + packages for the target and for Multilib, ``PN`` would be ``bash`` + and ``lib64-bash``, respectively. + + PNBLACKLIST + Lists recipes you do not want the OpenEmbedded build system to build. + This variable works in conjunction with the + :ref:`blacklist ` class, which is inherited + globally. + + To prevent a recipe from being built, use the ``PNBLACKLIST`` + variable in your ``local.conf`` file. Here is an example that + prevents ``myrecipe`` from being built: + :: + + PNBLACKLIST[myrecipe] = "Not supported by our organization." + + POPULATE_SDK_POST_HOST_COMMAND + Specifies a list of functions to call once the OpenEmbedded build + system has created the host part of the SDK. You can specify + functions separated by semicolons: + :: + + POPULATE_SDK_POST_HOST_COMMAND += "function; ... " + + If you need to pass the SDK path to a command within a function, you + can use ``${SDK_DIR}``, which points to the parent directory used by + the OpenEmbedded build system when creating SDK output. See the + :term:`SDK_DIR` variable for more information. + + POPULATE_SDK_POST_TARGET_COMMAND + Specifies a list of functions to call once the OpenEmbedded build + system has created the target part of the SDK. You can specify + functions separated by semicolons: + :: + + POPULATE_SDK_POST_TARGET_COMMAND += "function; ... " + + If you need to pass the SDK path to a command within a function, you + can use ``${SDK_DIR}``, which points to the parent directory used by + the OpenEmbedded build system when creating SDK output. See the + :term:`SDK_DIR` variable for more information. + + PR + The revision of the recipe. The default value for this variable is + "r0". Subsequent revisions of the recipe conventionally have the + values "r1", "r2", and so forth. When :term:`PV` increases, + ``PR`` is conventionally reset to "r0". + + .. note:: + + The OpenEmbedded build system does not need the aid of + PR + to know when to rebuild a recipe. The build system uses the task + input checksums + along with the + stamp + and + shared state cache + mechanisms. + + The ``PR`` variable primarily becomes significant when a package + manager dynamically installs packages on an already built image. In + this case, ``PR``, which is the default value of + :term:`PKGR`, helps the package manager distinguish which + package is the most recent one in cases where many packages have the + same ``PV`` (i.e. ``PKGV``). A component having many packages with + the same ``PV`` usually means that the packages all install the same + upstream version, but with later (``PR``) version packages including + packaging fixes. + + .. 
note:: + + PR + does not need to be increased for changes that do not change the + package contents or metadata. + + Because manually managing ``PR`` can be cumbersome and error-prone, + an automated solution exists. See the + ":ref:`dev-manual/dev-manual-common-tasks:working with a pr service`" section + in the Yocto Project Development Tasks Manual for more information. + + PREFERRED_PROVIDER + If multiple recipes provide the same item, this variable determines + which recipe is preferred and thus provides the item (i.e. the + preferred provider). You should always suffix this variable with the + name of the provided item. And, you should define the variable using + the preferred recipe's name (:term:`PN`). Here is a common + example: + :: + + PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" + + In the previous example, multiple recipes are providing "virtual/kernel". + The ``PREFERRED_PROVIDER`` variable is set with the name (``PN``) of + the recipe you prefer to provide "virtual/kernel". + + Following are more examples: + :: + + PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86" + PREFERRED_PROVIDER_virtual/libgl ?= "mesa" + + For more + information, see the ":ref:`metadata-virtual-providers`" + section in the Yocto Project Development Tasks Manual. + + .. note:: + + If you use a + virtual/\* + item with + PREFERRED_PROVIDER + , then any recipe that + PROVIDES + that item but is not selected (defined) by + PREFERRED_PROVIDER + is prevented from building, which is usually desirable since this + mechanism is designed to select between mutually exclusive + alternative providers. + + PREFERRED_VERSION + If multiple versions of recipes exist, this variable determines which + version is given preference. You must always suffix the variable with + the :term:`PN` you want to select, and you should set the + :term:`PV` accordingly for precedence. + + The ``PREFERRED_VERSION`` variable supports limited wildcard use + through the "``%``" character. You can use the character to match any + number of characters, which can be useful when specifying versions + that contain long revision numbers that potentially change. Here are + two examples: + :: + + PREFERRED_VERSION_python = "3.4.0" + PREFERRED_VERSION_linux-yocto = "5.0%" + + .. note:: + + The use of the "%" character is limited in that it only works at the end of the + string. You cannot use the wildcard character in any other + location of the string. + + The specified version is matched against :term:`PV`, which + does not necessarily match the version part of the recipe's filename. + For example, consider two recipes ``foo_1.2.bb`` and ``foo_git.bb`` + where ``foo_git.bb`` contains the following assignment: + :: + + PV = "1.1+git${SRCPV}" + + In this case, the correct way to select + ``foo_git.bb`` is by using an assignment such as the following: + :: + + PREFERRED_VERSION_foo = "1.1+git%" + + Compare that previous example + against the following incorrect example, which does not work: + :: + + PREFERRED_VERSION_foo = "git" + + Sometimes the ``PREFERRED_VERSION`` variable can be set by + configuration files in a way that is hard to change. You can use + :term:`OVERRIDES` to set a machine-specific + override. Here is an example: + :: + + PREFERRED_VERSION_linux-yocto_qemux86 = "5.0%" + + Although not recommended, worst case, you can also use the + "forcevariable" override, which is the strongest override possible. + Here is an example: + :: + + PREFERRED_VERSION_linux-yocto_forcevariable = "5.0%" + + .. 
note:: + + The \_forcevariable override is not handled specially. This override + only works because the default value of OVERRIDES includes "forcevariable". + + PREMIRRORS + Specifies additional paths from which the OpenEmbedded build system + gets source code. When the build system searches for source code, it + first tries the local download directory. If that location fails, the + build system tries locations defined by ``PREMIRRORS``, the upstream + source, and then locations specified by + :term:`MIRRORS` in that order. + + Assuming your distribution (:term:`DISTRO`) is "poky", + the default value for ``PREMIRRORS`` is defined in the + ``conf/distro/poky.conf`` file in the ``meta-poky`` Git repository. + + Typically, you could add a specific server for the build system to + attempt before any others by adding something like the following to + the ``local.conf`` configuration file in the + :term:`Build Directory`: + :: + + PREMIRRORS_prepend = "\ + git://.*/.* http://www.yoctoproject.org/sources/ \n \ + ftp://.*/.* http://www.yoctoproject.org/sources/ \n \ + http://.*/.* http://www.yoctoproject.org/sources/ \n \ + https://.*/.* http://www.yoctoproject.org/sources/ \n" + + These changes cause the + build system to intercept Git, FTP, HTTP, and HTTPS requests and + direct them to the ``http://`` sources mirror. You can use + ``file://`` URLs to point to local directories or network shares as + well. + + PRIORITY + Indicates the importance of a package. + + ``PRIORITY`` is considered to be part of the distribution policy + because the importance of any given recipe depends on the purpose for + which the distribution is being produced. Thus, ``PRIORITY`` is not + normally set within recipes. + + You can set ``PRIORITY`` to "required", "standard", "extra", and + "optional", which is the default. + + PRIVATE_LIBS + Specifies libraries installed within a recipe that should be ignored + by the OpenEmbedded build system's shared library resolver. This + variable is typically used when software being built by a recipe has + its own private versions of a library normally provided by another + recipe. In this case, you would not want the package containing the + private libraries to be set as a dependency on other unrelated + packages that should instead depend on the package providing the + standard version of the library. + + Libraries specified in this variable should be specified by their + file name. For example, from the Firefox recipe in meta-browser: + :: + + PRIVATE_LIBS = "libmozjs.so \ + libxpcom.so \ + libnspr4.so \ + libxul.so \ + libmozalloc.so \ + libplc4.so \ + libplds4.so" + + For more information, see the + ":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" + section in the Yocto Project Overview and Concepts Manual. + + PROVIDES + A list of aliases by which a particular recipe can be known. By + default, a recipe's own ``PN`` is implicitly already in its + ``PROVIDES`` list and therefore does not need to mention that it + provides itself. If a recipe uses ``PROVIDES``, the additional + aliases are synonyms for the recipe and can be useful for satisfying + dependencies of other recipes during the build as specified by + ``DEPENDS``. + + Consider the following example ``PROVIDES`` statement from the recipe + file ``eudev_3.2.9.bb``: + :: + + PROVIDES = "udev" + + The ``PROVIDES`` statement + results in the "eudev" recipe also being available as simply "udev". + + .. 
note:: + + Given that a recipe's own recipe name is already implicitly in its + own + PROVIDES + list, it is unnecessary to add aliases with the "+=" operator; + using a simple assignment will be sufficient. In other words, + while you could write: + :: + + PROVIDES += "udev" + + + in the above, the "+=" is overkill and unnecessary. + + In addition to providing recipes under alternate names, the + ``PROVIDES`` mechanism is also used to implement virtual targets. A + virtual target is a name that corresponds to some particular + functionality (e.g. a Linux kernel). Recipes that provide the + functionality in question list the virtual target in ``PROVIDES``. + Recipes that depend on the functionality in question can include the + virtual target in ``DEPENDS`` to leave the choice of provider open. + + Conventionally, virtual targets have names on the form + "virtual/function" (e.g. "virtual/kernel"). The slash is simply part + of the name and has no syntactical significance. + + The :term:`PREFERRED_PROVIDER` variable is + used to select which particular recipe provides a virtual target. + + .. note:: + + A corresponding mechanism for virtual runtime dependencies + (packages) exists. However, the mechanism does not depend on any + special functionality beyond ordinary variable assignments. For + example, ``VIRTUAL-RUNTIME_dev_manager`` refers to the package of + the component that manages the ``/dev`` directory. + + Setting the "preferred provider" for runtime dependencies is as + simple as using the following assignment in a configuration file: + :: + + VIRTUAL-RUNTIME_dev_manager = "udev" + + + PRSERV_HOST + The network based :term:`PR` service host and port. + + The ``conf/local.conf.sample.extended`` configuration file in the + :term:`Source Directory` shows how the + ``PRSERV_HOST`` variable is set: + :: + + PRSERV_HOST = "localhost:0" + + You must + set the variable if you want to automatically start a local :ref:`PR + service `. You can + set ``PRSERV_HOST`` to other values to use a remote PR service. + + PTEST_ENABLED + Specifies whether or not :ref:`Package + Test ` (ptest) + functionality is enabled when building a recipe. You should not set + this variable directly. Enabling and disabling building Package Tests + at build time should be done by adding "ptest" to (or removing it + from) :term:`DISTRO_FEATURES`. + + PV + The version of the recipe. The version is normally extracted from the + recipe filename. For example, if the recipe is named + ``expat_2.0.1.bb``, then the default value of ``PV`` will be "2.0.1". + ``PV`` is generally not overridden within a recipe unless it is + building an unstable (i.e. development) version from a source code + repository (e.g. Git or Subversion). + + ``PV`` is the default value of the :term:`PKGV` variable. + + PYTHON_ABI + When used by recipes that inherit the + :ref:`distutils3 `, + :ref:`setuptools3 `, + :ref:`distutils `, or + :ref:`setuptools ` classes, denotes the + Application Binary Interface (ABI) currently in use for Python. By + default, the ABI is "m". You do not have to set this variable as the + OpenEmbedded build system sets it for you. + + The OpenEmbedded build system uses the ABI to construct directory + names used when installing the Python headers and libraries in + sysroot (e.g. ``.../python3.3m/...``). + + Recipes that inherit the ``distutils`` class during cross-builds also + use this variable to locate the headers and libraries of the + appropriate Python that the extension is targeting. 
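+
+ As a purely illustrative sketch (the version component shown is an
+ assumption that depends on the Python recipe in use), with the default ABI
+ of "m" and Python 3.3 the interpreter's ``Python.h`` header would be staged
+ at a path such as:
+ ::
+
+    ${STAGING_INCDIR}/python3.3m/Python.h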
+ + PYTHON_PN + When used by recipes that inherit the + `distutils3 `, + :ref:`setuptools3 `, + :ref:`distutils `, or + :ref:`setuptools ` classes, specifies the + major Python version being built. For Python 3.x, ``PYTHON_PN`` would + be "python3". You do not have to set this variable as the + OpenEmbedded build system automatically sets it for you. + + The variable allows recipes to use common infrastructure such as the + following: + :: + + DEPENDS += "${PYTHON_PN}-native" + + In the previous example, + the version of the dependency is ``PYTHON_PN``. + + RANLIB + The minimal command and arguments to run ``ranlib``. + + RCONFLICTS + The list of packages that conflict with packages. Note that packages + will not be installed if conflicting packages are not first removed. + + Like all package-controlling variables, you must always use them in + conjunction with a package name override. Here is an example: + :: + + RCONFLICTS_${PN} = "another_conflicting_package_name" + + BitBake, which the OpenEmbedded build system uses, supports + specifying versioned dependencies. Although the syntax varies + depending on the packaging format, BitBake hides these differences + from you. Here is the general syntax to specify versions with the + ``RCONFLICTS`` variable: + :: + + RCONFLICTS_${PN} = "package (operator version)" + + For ``operator``, you can specify the following: = < > <= + >= For example, the following sets up a dependency on version 1.2 or + greater of the package ``foo``: + :: + + RCONFLICTS_${PN} = "foo (>= 1.2)" + + RDEPENDS + Lists runtime dependencies of a package. These dependencies are other + packages that must be installed in order for the package to function + correctly. As an example, the following assignment declares that the + package ``foo`` needs the packages ``bar`` and ``baz`` to be + installed: + :: + + RDEPENDS_foo = "bar baz" + + The most common types of package + runtime dependencies are automatically detected and added. Therefore, + most recipes do not need to set ``RDEPENDS``. For more information, + see the + ":ref:`overview-manual/overview-manual-concepts:automatically added runtime dependencies`" + section in the Yocto Project Overview and Concepts Manual. + + The practical effect of the above ``RDEPENDS`` assignment is that + ``bar`` and ``baz`` will be declared as dependencies inside the + package ``foo`` when it is written out by one of the + ```do_package_write_*`` <#ref-tasks-package_write_deb>`__ tasks. + Exactly how this is done depends on which package format is used, + which is determined by + :term:`PACKAGE_CLASSES`. When the + corresponding package manager installs the package, it will know to + also install the packages on which it depends. + + To ensure that the packages ``bar`` and ``baz`` get built, the + previous ``RDEPENDS`` assignment also causes a task dependency to be + added. This dependency is from the recipe's + :ref:`ref-tasks-build` (not to be confused with + :ref:`ref-tasks-compile`) task to the + ``do_package_write_*`` task of the recipes that build ``bar`` and + ``baz``. + + The names of the packages you list within ``RDEPENDS`` must be the + names of other packages - they cannot be recipe names. Although + package names and recipe names usually match, the important point + here is that you are providing package names within the ``RDEPENDS`` + variable. For an example of the default list of packages created from + a recipe, see the :term:`PACKAGES` variable. 
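+
+ To illustrate with hypothetical package names: if a ``foo`` recipe splits
+ its client tools into a ``foo-client`` package, then a consuming recipe
+ declares its runtime dependency on that package name, not on the recipe
+ name:
+ ::
+
+    RDEPENDS_${PN} = "foo-client"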
+ + Because the ``RDEPENDS`` variable applies to packages being built, + you should always use the variable in a form with an attached package + name (remember that a single recipe can build multiple packages). For + example, suppose you are building a development package that depends + on the ``perl`` package. In this case, you would use the following + ``RDEPENDS`` statement: + :: + + RDEPENDS_${PN}-dev += "perl" + + In the example, + the development package depends on the ``perl`` package. Thus, the + ``RDEPENDS`` variable has the ``${PN}-dev`` package name as part of + the variable. + + .. note:: + + RDEPENDS_${PN}-dev + includes + ${ + PN + } + by default. This default is set in the BitBake configuration file + ( + meta/conf/bitbake.conf + ). Be careful not to accidentally remove + ${PN} + when modifying + RDEPENDS_${PN}-dev + . Use the "+=" operator rather than the "=" operator. + + The package names you use with ``RDEPENDS`` must appear as they would + in the ``PACKAGES`` variable. The :term:`PKG` variable + allows a different name to be used for the final package (e.g. the + :ref:`debian ` class uses this to rename + packages), but this final package name cannot be used with + ``RDEPENDS``, which makes sense as ``RDEPENDS`` is meant to be + independent of the package format used. + + BitBake, which the OpenEmbedded build system uses, supports + specifying versioned dependencies. Although the syntax varies + depending on the packaging format, BitBake hides these differences + from you. Here is the general syntax to specify versions with the + ``RDEPENDS`` variable: + :: + + RDEPENDS_${PN} = "package (operator version)" + + For operator, you can specify the following: = < > <= >= For version, + provide the version number. + + .. note:: + + You can use + EXTENDPKGV + to provide a full package version specification. + + For example, the following sets up a dependency on version 1.2 or + greater of the package ``foo``: + :: + + RDEPENDS_${PN} = "foo (>= 1.2)" + + For information on build-time dependencies, see the + :term:`DEPENDS` variable. You can also see the + ":ref:`Tasks `" and + ":ref:`Dependencies `" sections in the + BitBake User Manual for additional information on tasks and + dependencies. + + REQUIRED_DISTRO_FEATURES + When inheriting the + :ref:`distro_features_check ` + class, this variable identifies distribution features that must exist + in the current configuration in order for the OpenEmbedded build + system to build the recipe. In other words, if the + ``REQUIRED_DISTRO_FEATURES`` variable lists a feature that does not + appear in ``DISTRO_FEATURES`` within the current configuration, an + error occurs and the build stops. + + RM_WORK_EXCLUDE + With ``rm_work`` enabled, this variable specifies a list of recipes + whose work directories should not be removed. See the + ":ref:`rm_work.bbclass `" section for more + details. + + ROOT_HOME + Defines the root home directory. By default, this directory is set as + follows in the BitBake configuration file: + :: + + ROOT_HOME ??= "/home/root" + + .. note:: + + This default value is likely used because some embedded solutions + prefer to have a read-only root filesystem and prefer to keep + writeable data in one place. + + You can override the default by setting the variable in any layer or + in the ``local.conf`` file. Because the default is set using a "weak" + assignment (i.e. 
"??="), you can use either of the following forms to + define your override: + :: + + ROOT_HOME = "/root" + ROOT_HOME ?= "/root" + + These + override examples use ``/root``, which is probably the most commonly + used override. + + ROOTFS + Indicates a filesystem image to include as the root filesystem. + + The ``ROOTFS`` variable is an optional variable used with the + :ref:`image-live ` class. + + ROOTFS_POSTINSTALL_COMMAND + Specifies a list of functions to call after the OpenEmbedded build + system has installed packages. You can specify functions separated by + semicolons: + :: + + ROOTFS_POSTINSTALL_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within a + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + ROOTFS_POSTPROCESS_COMMAND + Specifies a list of functions to call once the OpenEmbedded build + system has created the root filesystem. You can specify functions + separated by semicolons: + :: + + ROOTFS_POSTPROCESS_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within a + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + ROOTFS_POSTUNINSTALL_COMMAND + Specifies a list of functions to call after the OpenEmbedded build + system has removed unnecessary packages. When runtime package + management is disabled in the image, several packages are removed + including ``base-passwd``, ``shadow``, and ``update-alternatives``. + You can specify functions separated by semicolons: + :: + + ROOTFS_POSTUNINSTALL_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within a + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + ROOTFS_PREPROCESS_COMMAND + Specifies a list of functions to call before the OpenEmbedded build + system has created the root filesystem. You can specify functions + separated by semicolons: + :: + + ROOTFS_PREPROCESS_COMMAND += "function; ... " + + If you need to pass the root filesystem path to a command within a + function, you can use ``${IMAGE_ROOTFS}``, which points to the + directory that becomes the root filesystem image. See the + :term:`IMAGE_ROOTFS` variable for more + information. + + RPROVIDES + A list of package name aliases that a package also provides. These + aliases are useful for satisfying runtime dependencies of other + packages both during the build and on the target (as specified by + ``RDEPENDS``). + + .. note:: + + A package's own name is implicitly already in its + RPROVIDES + list. + + As with all package-controlling variables, you must always use the + variable in conjunction with a package name override. Here is an + example: + :: + + RPROVIDES_${PN} = "widget-abi-2" + + RRECOMMENDS + A list of packages that extends the usability of a package being + built. The package being built does not depend on this list of + packages in order to successfully build, but rather uses them for + extended usability. To specify runtime dependencies for packages, see + the ``RDEPENDS`` variable. + + The package manager will automatically install the ``RRECOMMENDS`` + list of packages when installing the built package. 
However, you can + prevent listed packages from being installed by using the + :term:`BAD_RECOMMENDATIONS`, + :term:`NO_RECOMMENDATIONS`, and + :term:`PACKAGE_EXCLUDE` variables. + + Packages specified in ``RRECOMMENDS`` need not actually be produced. + However, a recipe must exist that provides each package, either + through the :term:`PACKAGES` or + :term:`PACKAGES_DYNAMIC` variables or the + :term:`RPROVIDES` variable, or an error will occur + during the build. If such a recipe does exist and the package is not + produced, the build continues without error. + + Because the ``RRECOMMENDS`` variable applies to packages being built, + you should always attach an override to the variable to specify the + particular package whose usability is being extended. For example, + suppose you are building a development package that is extended to + support wireless functionality. In this case, you would use the + following: + :: + + RRECOMMENDS_${PN}-dev += "wireless_package_name" + + In the + example, the package name (``${PN}-dev``) must appear as it would in + the ``PACKAGES`` namespace before any renaming of the output package + by classes such as ``debian.bbclass``. + + BitBake, which the OpenEmbedded build system uses, supports + specifying versioned recommends. Although the syntax varies depending + on the packaging format, BitBake hides these differences from you. + Here is the general syntax to specify versions with the + ``RRECOMMENDS`` variable: + :: + + RRECOMMENDS_${PN} = "package (operator version)" + + For ``operator``, you can specify the following: + + - = + - < + - > + - <= + - >= + + For example, the following sets up a recommend on version 1.2 or + greater of the package ``foo``: + :: + + RRECOMMENDS_${PN} = "foo (>= 1.2)" + + RREPLACES + A list of packages replaced by a package. The package manager uses + this variable to determine which package should be installed to + replace other package(s) during an upgrade. In order to also have the + other package(s) removed at the same time, you must add the name of + the other package to the ``RCONFLICTS`` variable. + + As with all package-controlling variables, you must use this variable + in conjunction with a package name override. Here is an example: + :: + + RREPLACES_${PN} = "other_package_being_replaced" + + BitBake, which the OpenEmbedded build system uses, supports + specifying versioned replacements. Although the syntax varies + depending on the packaging format, BitBake hides these differences + from you. Here is the general syntax to specify versions with the + ``RREPLACES`` variable: + :: + + RREPLACES_${PN} = "package (operator version)" + + For ``operator``, you can specify the following: + + - = + - < + - > + - <= + - >= + + For example, the following sets up a replacement using version 1.2 + or greater of the package ``foo``: + :: + + RREPLACES_${PN} = "foo (>= 1.2)" + + RSUGGESTS + A list of additional packages that you can suggest for installation + by the package manager at the time a package is installed. Not all + package managers support this functionality. + + As with all package-controlling variables, you must always use this + variable in conjunction with a package name override. Here is an + example: + :: + + RSUGGESTS_${PN} = "useful_package another_package" + + S + The location in the :term:`Build Directory` where + unpacked recipe source code resides. 
By default, this directory is + ``${``\ :term:`WORKDIR`\ ``}/${``\ :term:`BPN`\ ``}-${``\ :term:`PV`\ ``}``, + where ``${BPN}`` is the base recipe name and ``${PV}`` is the recipe + version. If the source tarball extracts the code to a directory named + anything other than ``${BPN}-${PV}``, or if the source code is + fetched from an SCM such as Git or Subversion, then you must set + ``S`` in the recipe so that the OpenEmbedded build system knows where + to find the unpacked source. + + As an example, assume a :term:`Source Directory` + top-level folder named ``poky`` and a default Build Directory at + ``poky/build``. In this case, the work directory the build system + uses to keep the unpacked recipe for ``db`` is the following: + :: + + poky/build/tmp/work/qemux86-poky-linux/db/5.1.19-r3/db-5.1.19 + + The unpacked source code resides in the ``db-5.1.19`` folder. + + This next example assumes a Git repository. By default, Git + repositories are cloned to ``${WORKDIR}/git`` during + :ref:`ref-tasks-fetch`. Since this path is different + from the default value of ``S``, you must set it specifically so the + source can be located: + :: + + SRC_URI = "git://path/to/repo.git" + S = "${WORKDIR}/git" + + SANITY_REQUIRED_UTILITIES + Specifies a list of command-line utilities that should be checked for + during the initial sanity checking process when running BitBake. If + any of the utilities are not installed on the build host, then + BitBake immediately exits with an error. + + SANITY_TESTED_DISTROS + A list of the host distribution identifiers that the build system has + been tested against. Identifiers consist of the host distributor ID + followed by the release, as reported by the ``lsb_release`` tool or + as read from ``/etc/lsb-release``. Separate the list items with + explicit newline characters (``\n``). If ``SANITY_TESTED_DISTROS`` is + not empty and the current value of + :term:`NATIVELSBSTRING` does not appear in the + list, then the build system reports a warning that indicates the + current host distribution has not been tested as a build host. + + SDK_ARCH + The target architecture for the SDK. Typically, you do not directly + set this variable. Instead, use :term:`SDKMACHINE`. + + SDK_DEPLOY + The directory set up and used by the + :ref:`populate_sdk_base ` class to which + the SDK is deployed. The ``populate_sdk_base`` class defines + ``SDK_DEPLOY`` as follows: + :: + + SDK_DEPLOY = "${TMPDIR}/deploy/sdk" + + SDK_DIR + The parent directory used by the OpenEmbedded build system when + creating SDK output. The + :ref:`populate_sdk_base ` class defines + the variable as follows: + :: + + SDK_DIR = "${WORKDIR}/sdk" + + .. note:: + + The + SDK_DIR + directory is a temporary directory as it is part of + WORKDIR + . The final output directory is + SDK_DEPLOY + . + + SDK_EXT_TYPE + Controls whether or not shared state artifacts are copied into the + extensible SDK. The default value of "full" copies all of the + required shared state artifacts into the extensible SDK. The value + "minimal" leaves these artifacts out of the SDK. + + .. note:: + + If you set the variable to "minimal", you need to ensure + SSTATE_MIRRORS + is set in the SDK's configuration to enable the artifacts to be + fetched as needed. + + SDK_HOST_MANIFEST + The manifest file for the host part of the SDK. This file lists all + the installed packages that make up the host part of the SDK. 
The + file contains package information on a line-per-package basis as + follows: + :: + + packagename packagearch version + + The :ref:`populate_sdk_base ` class + defines the manifest file as follows: + :: + + SDK_HOST_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.host.manifest" + + The location is derived using the :term:`SDK_DEPLOY` and + :term:`TOOLCHAIN_OUTPUTNAME` variables. + + SDK_INCLUDE_PKGDATA + When set to "1", specifies to include the packagedata for all recipes + in the "world" target in the extensible SDK. Including this data + allows the ``devtool search`` command to find these recipes in search + results, as well as allows the ``devtool add`` command to map + dependencies more effectively. + + .. note:: + + Enabling the + SDK_INCLUDE_PKGDATA + variable significantly increases build time because all of world + needs to be built. Enabling the variable also slightly increases + the size of the extensible SDK. + + SDK_INCLUDE_TOOLCHAIN + When set to "1", specifies to include the toolchain in the extensible + SDK. Including the toolchain is useful particularly when + :term:`SDK_EXT_TYPE` is set to "minimal" to keep + the SDK reasonably small but you still want to provide a usable + toolchain. For example, suppose you want to use the toolchain from an + IDE or from other tools and you do not want to perform additional + steps to install the toolchain. + + The ``SDK_INCLUDE_TOOLCHAIN`` variable defaults to "0" if + ``SDK_EXT_TYPE`` is set to "minimal", and defaults to "1" if + ``SDK_EXT_TYPE`` is set to "full". + + SDK_INHERIT_BLACKLIST + A list of classes to remove from the :term:`INHERIT` + value globally within the extensible SDK configuration. The + :ref:`populate-sdk-ext ` class sets the + default value: + :: + + SDK_INHERIT_BLACKLIST ?= "buildhistory icecc" + + Some classes are not generally applicable within the extensible SDK + context. You can use this variable to disable those classes. + + For additional information on how to customize the extensible SDK's + configuration, see the + ":ref:`sdk-manual/sdk-appendix-customizing:configuring the extensible sdk`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + SDK_LOCAL_CONF_BLACKLIST + A list of variables not allowed through from the OpenEmbedded build + system configuration into the extensible SDK configuration. Usually, + these are variables that are specific to the machine on which the + build system is running and thus would be potentially problematic + within the extensible SDK. + + By default, ``SDK_LOCAL_CONF_BLACKLIST`` is set in the + :ref:`populate-sdk-ext ` class and + excludes the following variables: + + - :term:`CONF_VERSION` + - :term:`BB_NUMBER_THREADS` + - :term:`bitbake:BB_NUMBER_PARSE_THREADS` + - :term:`PARALLEL_MAKE` + - :term:`PRSERV_HOST` + - :term:`SSTATE_MIRRORS` :term:`DL_DIR` + - :term:`SSTATE_DIR` :term:`TMPDIR` + - :term:`BB_SERVER_TIMEOUT` + + For additional information on how to customize the extensible SDK's + configuration, see the + ":ref:`sdk-manual/sdk-appendix-customizing:configuring the extensible sdk`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + SDK_LOCAL_CONF_WHITELIST + A list of variables allowed through from the OpenEmbedded build + system configuration into the extensible SDK configuration. By + default, the list of variables is empty and is set in the + :ref:`populate-sdk-ext ` class. 
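+ + For example, to let one additional variable through, you could add a + line such as the following to your configuration before building the + extensible SDK (the variable chosen here is only illustrative): + :: + + SDK_LOCAL_CONF_WHITELIST += "SOURCE_MIRROR_URL"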
+ + This list overrides the variables specified using the + :term:`SDK_LOCAL_CONF_BLACKLIST` + variable as well as any variables identified by automatic + blacklisting due to the "/" character being found at the start of the + value, which is usually indicative of being a path and thus might not + be valid on the system where the SDK is installed. + + For additional information on how to customize the extensible SDK's + configuration, see the + ":ref:`sdk-manual/sdk-appendix-customizing:configuring the extensible sdk`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + SDK_NAME + The base name for SDK output files. The name is derived from the + :term:`DISTRO`, :term:`TCLIBC`, + :term:`SDK_ARCH`, + :term:`IMAGE_BASENAME`, and + :term:`TUNE_PKGARCH` variables: + :: + + SDK_NAME = "${DISTRO}-${TCLIBC}-${SDK_ARCH}-${IMAGE_BASENAME}-${TUNE_PKGARCH}" + + SDK_OS + Specifies the operating system for which the SDK will be built. The + default value is the value of :term:`BUILD_OS`. + + SDK_OUTPUT + The location used by the OpenEmbedded build system when creating SDK + output. The :ref:`populate_sdk_base ` + class defines the variable as follows: + :: + + SDK_DIR = "${WORKDIR}/sdk" + SDK_OUTPUT = "${SDK_DIR}/image" + SDK_DEPLOY = "${DEPLOY_DIR}/sdk" + + .. note:: + + The SDK_OUTPUT directory is a temporary directory as it is part of + WORKDIR by way of SDK_DIR. The final output directory is + SDK_DEPLOY. + + SDK_PACKAGE_ARCHS + Specifies a list of architectures compatible with the SDK machine. + This variable is set automatically and should not normally be + hand-edited. Entries are separated using spaces and listed in order + of priority. The default value for ``SDK_PACKAGE_ARCHS`` is "all any + noarch ${SDK_ARCH}-${SDKPKGSUFFIX}". + + SDK_POSTPROCESS_COMMAND + Specifies a list of functions to call once the OpenEmbedded build + system creates the SDK. You can specify functions separated by + semicolons: SDK_POSTPROCESS_COMMAND += "function; ... " + + If you need to pass an SDK path to a command within a function, you + can use ``${SDK_DIR}``, which points to the parent directory used by + the OpenEmbedded build system when creating SDK output. See the + :term:`SDK_DIR` variable for more information. + + SDK_PREFIX + The toolchain binary prefix used for ``nativesdk`` recipes. The + OpenEmbedded build system uses the ``SDK_PREFIX`` value to set the + :term:`TARGET_PREFIX` when building + ``nativesdk`` recipes. The default value is "${SDK_SYS}-". + + SDK_RECRDEP_TASKS + A list of shared state tasks added to the extensible SDK. By default, + the following tasks are added: + + - do_populate_lic + - do_package_qa + - do_populate_sysroot + - do_deploy + + Despite the default value of "" for the + ``SDK_RECRDEP_TASKS`` variable, the above four tasks are always added + to the SDK. To specify tasks beyond these four, you need to use the + ``SDK_RECRDEP_TASKS`` variable (e.g. you are defining additional + tasks that are needed in order to build + :term:`SDK_TARGETS`). + + SDK_SYS + Specifies the system, including the architecture and the operating + system, for which the SDK will be built. + + The OpenEmbedded build system automatically sets this variable based + on :term:`SDK_ARCH`, + :term:`SDK_VENDOR`, and + :term:`SDK_OS`. You do not need to set the ``SDK_SYS`` + variable yourself. + + SDK_TARGET_MANIFEST + The manifest file for the target part of the SDK. 
This file lists all + the installed packages that make up the target part of the SDK. The + file contains package information on a line-per-package basis as + follows: + :: + + packagename packagearch version + + The :ref:`populate_sdk_base ` class + defines the manifest file as follows: + :: + + SDK_TARGET_MANIFEST = "${SDK_DEPLOY}/${TOOLCHAIN_OUTPUTNAME}.target.manifest" + + The location is derived using the :term:`SDK_DEPLOY` and + :term:`TOOLCHAIN_OUTPUTNAME` variables. + + SDK_TARGETS + A list of targets to install from shared state as part of the + standard or extensible SDK installation. The default value is "${PN}" + (i.e. the image from which the SDK is built). + + The ``SDK_TARGETS`` variable is an internal variable and typically + would not be changed. + + SDK_TITLE + The title to be printed when running the SDK installer. By default, + this title is based on the :term:`DISTRO_NAME` or + :term:`DISTRO` variable and is set in the + :ref:`populate_sdk_base ` class as + follows: + :: + + SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK" + + For the default distribution "poky", + ``SDK_TITLE`` is set to "Poky (Yocto Project Reference Distro)". + + For information on how to change this default title, see the + ":ref:`sdk-manual/sdk-appendix-customizing:changing the extensible sdk installer title`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + SDK_UPDATE_URL + An optional URL for an update server for the extensible SDK. If set, + the value is used as the default update server when running + ``devtool sdk-update`` within the extensible SDK. + + SDK_VENDOR + Specifies the name of the SDK vendor. + + SDK_VERSION + Specifies the version of the SDK. The distribution configuration file + (e.g. ``/meta-poky/conf/distro/poky.conf``) defines the + ``SDK_VERSION`` as follows: + :: + + SDK_VERSION = "${@d.getVar('DISTRO_VERSION').replace('snapshot-${DATE}','snapshot')}" + + For additional information, see the + :term:`DISTRO_VERSION` and + :term:`DATE` variables. + + SDKEXTPATH + The default installation directory for the Extensible SDK. By + default, this directory is based on the :term:`DISTRO` + variable and is set in the + :ref:`populate_sdk_base ` class as + follows: + :: + + SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk" + + For the + default distribution "poky", the ``SDKEXTPATH`` is set to "poky_sdk". + + For information on how to change this default directory, see the + ":ref:`sdk-manual/sdk-appendix-customizing:changing the default sdk installation directory`" + section in the Yocto Project Application Development and the + Extensible Software Development Kit (eSDK) manual. + + SDKIMAGE_FEATURES + Equivalent to ``IMAGE_FEATURES``. However, this variable applies to + the SDK generated from an image using the following command: + :: + + $ bitbake -c populate_sdk imagename + + SDKMACHINE + The machine for which the SDK is built. In other words, the SDK is + built such that it runs on the target you specify with the + ``SDKMACHINE`` value. The value points to a corresponding ``.conf`` + file under ``conf/machine-sdk/``. + + You can use "i686" and "x86_64" as possible values for this variable. + The variable defaults to "i686" and is set in the ``local.conf`` file in + the Build Directory. + :: + + SDKMACHINE ?= "i686" + + .. note:: + + You cannot set the + SDKMACHINE + variable in your distribution configuration file. If you do, the + configuration will not take effect.
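+ + For example, to produce SDK installers that run on a 64-bit build + host, you could set the following in your ``local.conf`` file (a + minimal sketch using the "x86_64" value mentioned above): + :: + + # Build the SDK so that it runs on 64-bit x86 build hosts + SDKMACHINE = "x86_64"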
+ + SDKPATH + Defines the path offered to the user for installation of the SDK that + is generated by the OpenEmbedded build system. The path appears as + the default location for installing the SDK when you run the SDK's + installation script. You can override the offered path when you run + the script. + + SDKTARGETSYSROOT + The full path to the sysroot used for cross-compilation within an SDK + as it will be when installed into the default + :term:`SDKPATH`. + + SECTION + The section in which packages should be categorized. Package + management utilities can make use of this variable. + + SELECTED_OPTIMIZATION + Specifies the optimization flags passed to the C compiler when + building for the target. The flags are passed through the default + value of the :term:`TARGET_CFLAGS` variable. + + The ``SELECTED_OPTIMIZATION`` variable takes the value of + ``FULL_OPTIMIZATION`` unless ``DEBUG_BUILD`` = "1". If that is the + case, the value of ``DEBUG_OPTIMIZATION`` is used. + + SERIAL_CONSOLE + Defines a serial console (TTY) to enable using + `getty `__. Provide a + value that specifies the baud rate followed by the TTY device name + separated by a space. You cannot specify more than one TTY device: + :: + + SERIAL_CONSOLE = "115200 ttyS0" + + .. note:: + + The + SERIAL_CONSOLE + variable is deprecated. Please use the + SERIAL_CONSOLES + variable. + + SERIAL_CONSOLES + Defines a serial console (TTY) to enable using + `getty `__. Provide a + value that specifies the baud rate followed by the TTY device name + separated by a semicolon. Use spaces to separate multiple devices: + :: + + SERIAL_CONSOLES = "115200;ttyS0 115200;ttyS1" + + SERIAL_CONSOLES_CHECK + Specifies serial consoles, which must be listed in + :term:`SERIAL_CONSOLES`, to check against + ``/proc/console`` before enabling them using getty. This variable + allows aliasing in the format: <device>:<alias>. If a device was + listed as "sclp_line0" in ``/dev/`` and "ttyS0" was listed in + ``/proc/console``, you would do the following: :: + + SERIAL_CONSOLES_CHECK = "sclp_line0:ttyS0" + + This variable is currently only supported with SysVinit (i.e. not + with systemd). + + SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS + A list of recipe dependencies that should not be used to determine + signatures of tasks from one recipe when they depend on tasks from + another recipe. For example: :: + + SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += "intone->mplayer2" + + In the previous example, ``intone`` depends on ``mplayer2``. + + You can use the special token ``"*"`` on the left-hand side of the + dependency to match all recipes except the one on the right-hand + side. Here is an example: :: + + SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += "*->quilt-native" + + In the previous example, all recipes except ``quilt-native`` ignore + task signatures from the ``quilt-native`` recipe when determining + their task signatures. + + Use of this variable is one mechanism to remove dependencies that + affect task signatures and thus force rebuilds when a recipe changes. + + .. note:: + + If you add an inappropriate dependency for a recipe relationship, + the software might break during runtime if the interface of the + second recipe was changed after the first recipe had been built. + + SIGGEN_EXCLUDERECIPES_ABISAFE + A list of recipes that are completely stable and will never change. + The ABI for the recipes in the list is presented by output from the + tasks run to build the recipe.
Use of this variable is one way to + remove dependencies from one recipe on another that affect task + signatures and thus force rebuilds when the recipe changes. + + .. note:: + + If you add an inappropriate variable to this list, the software + might break at runtime if the interface of the recipe was changed + after the other had been built. + + SITEINFO_BITS + Specifies the number of bits for the target system CPU. The value + should be either "32" or "64". + + SITEINFO_ENDIANNESS + Specifies the endian byte order of the target system. The value + should be either "le" for little-endian or "be" for big-endian. + + SKIP_FILEDEPS + Enables removal of all files from the "Provides" section of an RPM + package. Removal of these files is required for packages containing + prebuilt binaries and libraries such as ``libstdc++`` and ``glibc``. + + To enable file removal, set the variable to "1" in your + ``conf/local.conf`` configuration file in your: + :term:`Build Directory`. + :: + + SKIP_FILEDEPS = "1" + + SOC_FAMILY + Groups together machines based upon the same family of SOC (System On + Chip). You typically set this variable in a common ``.inc`` file that + you include in the configuration files of all the machines. + + .. note:: + + You must include + conf/machine/include/soc-family.inc + for this variable to appear in + MACHINEOVERRIDES + . + + SOLIBS + Defines the suffix for shared libraries used on the target platform. + By default, this suffix is ".so.*" for all Linux-based systems and is + defined in the ``meta/conf/bitbake.conf`` configuration file. + + You will see this variable referenced in the default values of + ``FILES_${PN}``. + + SOLIBSDEV + Defines the suffix for the development symbolic link (symlink) for + shared libraries on the target platform. By default, this suffix is + ".so" for Linux-based systems and is defined in the + ``meta/conf/bitbake.conf`` configuration file. + + You will see this variable referenced in the default values of + ``FILES_${PN}-dev``. + + SOURCE_MIRROR_FETCH + When you are fetching files to create a mirror of sources (i.e. + creating a source mirror), setting ``SOURCE_MIRROR_FETCH`` to "1" in + your ``local.conf`` configuration file ensures the source for all + recipes are fetched regardless of whether or not a recipe is + compatible with the configuration. A recipe is considered + incompatible with the currently configured machine when either or + both the :term:`COMPATIBLE_MACHINE` + variable and :term:`COMPATIBLE_HOST` variables + specify compatibility with a machine other than that of the current + machine or host. + + .. note:: + + Do not set the + SOURCE_MIRROR_FETCH + variable unless you are creating a source mirror. In other words, + do not set the variable during a normal build. + + SOURCE_MIRROR_URL + Defines your own :term:`PREMIRRORS` from which to + first fetch source before attempting to fetch from the upstream + specified in :term:`SRC_URI`. + + To use this variable, you must globally inherit the + :ref:`own-mirrors ` class and then provide + the URL to your mirrors. Here is the general syntax: + :: + + INHERIT += "own-mirrors" + SOURCE_MIRROR_URL = "http://example.com/my_source_mirror" + + .. note:: + + You can specify only a single URL in + SOURCE_MIRROR_URL + . + + SPDXLICENSEMAP + Maps commonly used license names to their SPDX counterparts found in + ``meta/files/common-licenses/``. For the default ``SPDXLICENSEMAP`` + mappings, see the ``meta/conf/licenses.conf`` file. 
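+ + Mappings are expressed using variable-flag syntax. For example, an + entry of the following form maps the short license name "GPLv2" to + its SPDX counterpart (see ``licenses.conf`` for the mappings shipped + with your release): + :: + + SPDXLICENSEMAP[GPLv2] = "GPL-2.0"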
+ + For additional information, see the :term:`LICENSE` + variable. + + SPECIAL_PKGSUFFIX + A list of suffixes for :term:`PN` used by the OpenEmbedded + build system to create variants of recipes or packages. The list + specifies the suffixes to strip off during certain circumstances such + as the generation of the :term:`BPN` variable. + + SPL_BINARY + The file type for the Secondary Program Loader (SPL). Some devices + use an SPL from which to boot (e.g. the BeagleBone development + board). For such cases, you can declare the file type of the SPL + binary in the ``u-boot.inc`` include file, which is used in the + U-Boot recipe. + + The SPL file type is set to "null" by default in the ``u-boot.inc`` + file as follows: + :: + + # Some versions of u-boot build an SPL (Second Program Loader) image that + # should be packaged along with the u-boot binary as well as placed in the + # deploy directory. For those versions they can set the following variables + # to allow packaging the SPL. + SPL_BINARY ?= "" + SPL_BINARYNAME ?= "${@os.path.basename(d.getVar("SPL_BINARY"))}" + SPL_IMAGE ?= "${SPL_BINARYNAME}-${MACHINE}-${PV}-${PR}" + SPL_SYMLINK ?= "${SPL_BINARYNAME}-${MACHINE}" + + The ``SPL_BINARY`` variable helps form + various ``SPL_*`` variables used by the OpenEmbedded build system. + + See the BeagleBone machine configuration example in the + ":ref:`dev-manual/dev-manual-common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`" + section in the Yocto Project Development Tasks Manual + for additional information. + + SRC_URI + The list of source files - local or remote. This variable tells the + OpenEmbedded build system which bits to pull in for the build and how + to pull them in. For example, if the recipe or append file only needs + to fetch a tarball from the Internet, the recipe or append file uses + a single ``SRC_URI`` entry. On the other hand, if the recipe or + append file needs to fetch a tarball, apply two patches, and include + a custom file, the recipe or append file would include four instances + of the variable. + + The following list explains the available URI protocols. URI + protocols are highly dependent on particular BitBake Fetcher + submodules. Depending on the fetcher BitBake uses, various URL + parameters are employed. For specifics on the supported Fetchers, see + the ":ref:`Fetchers `" section in the + BitBake User Manual. + + - ``file://`` - Fetches files, which are usually files shipped + with the :term:`Metadata`, from the local machine (e.g. + :ref:`patch ` files). + The path is relative to the :term:`FILESPATH` + variable. Thus, the build system searches, in order, from the + following directories, which are assumed to be subdirectories of + the directory in which the recipe file (``.bb``) or append file + (``.bbappend``) resides: + + - ``${BPN}`` - The base recipe name without any special suffix + or version numbers. + + - ``${BP}`` - ``${BPN}-${PV}``. The base recipe name and + version but without any special package name suffix. + + - ``files`` - Files within a directory, which is named ``files`` + and is also alongside the recipe or append file. + + .. note:: + + If you want the build system to pick up files specified through + a + SRC_URI + statement from your append file, you need to be sure to extend + the + FILESPATH + variable by also using the + FILESEXTRAPATHS + variable from within your append file. + + - ``bzr://`` - Fetches files from a Bazaar revision control + repository.
+ + - ``git://`` - Fetches files from a Git revision control + repository. + + - ``osc://`` - Fetches files from an OSC (OpenSUSE Build service) + revision control repository. + + - ``repo://`` - Fetches files from a repo (Git) repository. + + - ``ccrc://`` - Fetches files from a ClearCase repository. + + - ``http://`` - Fetches files from the Internet using ``http``. + + - ``https://`` - Fetches files from the Internet using ``https``. + + - ``ftp://`` - Fetches files from the Internet using ``ftp``. + + - ``cvs://`` - Fetches files from a CVS revision control + repository. + + - ``hg://`` - Fetches files from a Mercurial (``hg``) revision + control repository. + + - ``p4://`` - Fetches files from a Perforce (``p4``) revision + control repository. + + - ``ssh://`` - Fetches files from a secure shell. + + - ``svn://`` - Fetches files from a Subversion (``svn``) revision + control repository. + + - ``npm://`` - Fetches JavaScript modules from a registry. + + Standard and recipe-specific options for ``SRC_URI`` exist. Here are + standard options: + + - ``apply`` - Whether to apply the patch or not. The default + action is to apply the patch. + + - ``striplevel`` - Which striplevel to use when applying the + patch. The default level is 1. + + - ``patchdir`` - Specifies the directory in which the patch should + be applied. The default is ``${``\ :term:`S`\ ``}``. + + Here are options specific to recipes building code from a revision + control system: + + - ``mindate`` - Apply the patch only if + :term:`SRCDATE` is equal to or greater than + ``mindate``. + + - ``maxdate`` - Apply the patch only if ``SRCDATE`` is not later + than ``maxdate``. + + - ``minrev`` - Apply the patch only if ``SRCREV`` is equal to or + greater than ``minrev``. + + - ``maxrev`` - Apply the patch only if ``SRCREV`` is not later + than ``maxrev``. + + - ``rev`` - Apply the patch only if ``SRCREV`` is equal to + ``rev``. + + - ``notrev`` - Apply the patch only if ``SRCREV`` is not equal to + ``rev``. + + Here are some additional options worth mentioning: + + - ``unpack`` - Controls whether or not to unpack the file if it is + an archive. The default action is to unpack the file. + + - ``destsuffix`` - Places the file (or extracts its contents) into + the specified subdirectory of :term:`WORKDIR` when + the Git fetcher is used. + + - ``subdir`` - Places the file (or extracts its contents) into the + specified subdirectory of ``WORKDIR`` when the local (``file://``) + fetcher is used. + + - ``localdir`` - Places the file (or extracts its contents) into + the specified subdirectory of ``WORKDIR`` when the CVS fetcher is + used. + + - ``subpath`` - Limits the checkout to a specific subpath of the + tree when using the Git fetcher is used. + + - ``name`` - Specifies a name to be used for association with + ``SRC_URI`` checksums when you have more than one file specified + in ``SRC_URI``. + + - ``downloadfilename`` - Specifies the filename used when storing + the downloaded file. + + SRC_URI_OVERRIDES_PACKAGE_ARCH + By default, the OpenEmbedded build system automatically detects + whether ``SRC_URI`` contains files that are machine-specific. If so, + the build system automatically changes ``PACKAGE_ARCH``. Setting this + variable to "0" disables this behavior. + + SRCDATE + The date of the source code used to build the package. This variable + applies only if the source was fetched from a Source Code Manager + (SCM). + + SRCPV + Returns the version string of the current package. 
This string is + used to help define the value of :term:`PV`. + + The ``SRCPV`` variable is defined in the ``meta/conf/bitbake.conf`` + configuration file in the :term:`Source Directory` as + follows: + :: + + SRCPV = "${@bb.fetch2.get_srcrev(d)}" + + Recipes that need to define ``PV`` do so with the help of the + ``SRCPV``. For example, the ``ofono`` recipe (``ofono_git.bb``) + located in ``meta/recipes-connectivity`` in the Source Directory + defines ``PV`` as follows: + :: + + PV = "0.12-git${SRCPV}" + + SRCREV + The revision of the source code used to build the package. This + variable applies to Subversion, Git, Mercurial, and Bazaar only. Note + that if you want to build a fixed revision and you want to avoid + performing a query on the remote repository every time BitBake parses + your recipe, you should specify a ``SRCREV`` that is a full revision + identifier and not just a tag. + + .. note:: + + For information on limitations when inheriting the latest revision + of software using + SRCREV + , see the + AUTOREV + variable description and the " + Automatically Incrementing a Binary Package Revision Number + " section, which is in the Yocto Project Development Tasks Manual. + + SSTATE_DIR + The directory for the shared state cache. + + SSTATE_MIRROR_ALLOW_NETWORK + If set to "1", allows fetches from mirrors that are specified in + :term:`SSTATE_MIRRORS` to work even when + fetching from the network is disabled by setting ``BB_NO_NETWORK`` to + "1". Using the ``SSTATE_MIRROR_ALLOW_NETWORK`` variable is useful if + you have set ``SSTATE_MIRRORS`` to point to an internal server for + your shared state cache, but you want to disable any other fetching + from the network. + + SSTATE_MIRRORS + Configures the OpenEmbedded build system to search other mirror + locations for prebuilt cache data objects before building out the + data. This variable works like fetcher :term:`MIRRORS` + and :term:`PREMIRRORS` and points to the cache + locations to check for the shared state (sstate) objects. + + You can specify a filesystem directory or a remote URL such as HTTP + or FTP. The locations you specify need to contain the shared state + cache (sstate-cache) results from previous builds. The sstate-cache + you point to can also be from builds on other machines. + + When pointing to sstate build artifacts on another machine that uses + a different GCC version for native builds, you must configure + ``SSTATE_MIRRORS`` with a regular expression that maps local search + paths to server paths. The paths need to take into account + :term:`NATIVELSBSTRING` set by the + :ref:`uninative ` class. For example, the + following maps the local search path ``universal-4.9`` to the + server-provided path server_url_sstate_path: + :: + + SSTATE_MIRRORS ?= "file://universal-4.9/(.*) http://server_url_sstate_path/universal-4.8/\1 \n" + + If a mirror uses the same structure as + :term:`SSTATE_DIR`, you need to add "PATH" at the + end as shown in the examples below. The build system substitutes the + correct path within the directory structure. + :: + + SSTATE_MIRRORS ?= "\ + file://.* http://someserver.tld/share/sstate/PATH;downloadfilename=PATH \n \ + file://.* file:///some-local-dir/sstate/PATH" + + SSTATE_SCAN_FILES + Controls the list of files the OpenEmbedded build system scans for + hardcoded installation paths. The variable uses a space-separated + list of filenames (not paths) with standard wildcard characters + allowed. 
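+ + For example, a recipe that installs an extra configuration script + containing build-time paths might extend the list as follows (the + filename pattern shown is hypothetical): + :: + + SSTATE_SCAN_FILES += "*-myconfig"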
+ + During a build, the OpenEmbedded build system creates a shared state + (sstate) object during the first stage of preparing the sysroots. + That object is scanned for hardcoded paths for original installation + locations. The list of files that are scanned for paths is controlled + by the ``SSTATE_SCAN_FILES`` variable. Typically, recipes add files + they want to be scanned to the value of ``SSTATE_SCAN_FILES`` rather + than the variable being comprehensively set. The + :ref:`sstate ` class specifies the default list + of files. + + For details on the process, see the + :ref:`staging ` class. + + STAGING_BASE_LIBDIR_NATIVE + Specifies the path to the ``/lib`` subdirectory of the sysroot + directory for the build host. + + STAGING_BASELIBDIR + Specifies the path to the ``/lib`` subdirectory of the sysroot + directory for the target for which the current recipe is being built + (:term:`STAGING_DIR_HOST`). + + STAGING_BINDIR + Specifies the path to the ``/usr/bin`` subdirectory of the sysroot + directory for the target for which the current recipe is being built + (:term:`STAGING_DIR_HOST`). + + STAGING_BINDIR_CROSS + Specifies the path to the directory containing binary configuration + scripts. These scripts provide configuration information for other + software that wants to make use of libraries or include files + provided by the software associated with the script. + + .. note:: + + This style of build configuration has been largely replaced by + pkg-config + . Consequently, if + pkg-config + is supported by the library to which you are linking, it is + recommended you use + pkg-config + instead of a provided configuration script. + + STAGING_BINDIR_NATIVE + Specifies the path to the ``/usr/bin`` subdirectory of the sysroot + directory for the build host. + + STAGING_DATADIR + Specifies the path to the ``/usr/share`` subdirectory of the sysroot + directory for the target for which the current recipe is being built + (:term:`STAGING_DIR_HOST`). + + STAGING_DATADIR_NATIVE + Specifies the path to the ``/usr/share`` subdirectory of the sysroot + directory for the build host. + + STAGING_DIR + Helps construct the ``recipe-sysroots`` directory, which is used + during packaging. + + For information on how staging for recipe-specific sysroots occurs, + see the :ref:`ref-tasks-populate_sysroot` + task, the ":ref:`sdk-manual/sdk-extensible:sharing files between recipes`" + section in the Yocto Project Development Tasks Manual, the + ":ref:`configuration-compilation-and-staging-dev-environment`" + section in the Yocto Project Overview and Concepts Manual, and the + :term:`SYSROOT_DIRS` variable. + + .. note:: + + Recipes should never write files directly under the + STAGING_DIR + directory because the OpenEmbedded build system manages the + directory automatically. Instead, files should be installed to + ${ + D + } + within your recipe's + do_install + task and then the OpenEmbedded build system will stage a subset of + those files into the sysroot. + + STAGING_DIR_HOST + Specifies the path to the sysroot directory for the system on which + the component is built to run (the system that hosts the component). + For most recipes, this sysroot is the one in which that recipe's + :ref:`ref-tasks-populate_sysroot` task copies + files. Exceptions include ``-native`` recipes, where the + ``do_populate_sysroot`` task instead uses + :term:`STAGING_DIR_NATIVE`. 
Depending on + the type of recipe and the build target, ``STAGING_DIR_HOST`` can + have the following values: + + - For recipes building for the target machine, the value is + "${:term:`STAGING_DIR`}/${:term:`MACHINE`}". + + - For native recipes building for the build host, the value is empty + given the assumption that when building for the build host, the + build host's own directories should be used. + + .. note:: + + ``-native`` recipes are not installed into host paths such + as ``/usr``. Rather, these recipes are installed into + ``STAGING_DIR_NATIVE``. When compiling ``-native`` recipes, + standard build environment variables such as + :term:`CPPFLAGS` and + :term:`CFLAGS` are set up so that both host paths + and ``STAGING_DIR_NATIVE`` are searched for libraries and + headers using, for example, GCC's ``-isystem`` option. + + Thus, the emphasis is that the ``STAGING_DIR*`` variables + should be viewed as input variables by tasks such as + :ref:`ref-tasks-configure`, + :ref:`ref-tasks-compile`, and + :ref:`ref-tasks-install`. Having the real system + root correspond to ``STAGING_DIR_HOST`` makes conceptual sense + for ``-native`` recipes, as they make use of host headers and + libraries. + + STAGING_DIR_NATIVE + Specifies the path to the sysroot directory used when building + components that run on the build host itself. + + STAGING_DIR_TARGET + Specifies the path to the sysroot used for the system for which the + component generates code. For components that do not generate code, + which is the majority, ``STAGING_DIR_TARGET`` is set to match + :term:`STAGING_DIR_HOST`. + + Some recipes build binaries that can run on the target system but + those binaries in turn generate code for a different system + (e.g. cross-canadian recipes). Using terminology from GNU, the + primary system is referred to as the "HOST" and the secondary, or + different, system is referred to as the "TARGET". Thus, the binaries + run on the "HOST" system and generate binaries for the "TARGET" + system. The ``STAGING_DIR_HOST`` variable points to the sysroot used + for the "HOST" system, while ``STAGING_DIR_TARGET`` points to the + sysroot used for the "TARGET" system. + + STAGING_ETCDIR_NATIVE + Specifies the path to the ``/etc`` subdirectory of the sysroot + directory for the build host. + + STAGING_EXECPREFIXDIR + Specifies the path to the ``/usr`` subdirectory of the sysroot + directory for the target for which the current recipe is being built + (:term:`STAGING_DIR_HOST`). + + STAGING_INCDIR + Specifies the path to the ``/usr/include`` subdirectory of the + sysroot directory for the target for which the current recipe is being + built (:term:`STAGING_DIR_HOST`). + + STAGING_INCDIR_NATIVE + Specifies the path to the ``/usr/include`` subdirectory of the + sysroot directory for the build host. + + STAGING_KERNEL_BUILDDIR + Points to the directory containing the kernel build artifacts. + Recipes building software that needs to access kernel build artifacts + (e.g. ``systemtap-uprobes``) can look in the directory specified with + the ``STAGING_KERNEL_BUILDDIR`` variable to find these artifacts + after the kernel has been built. + + STAGING_KERNEL_DIR + The directory with kernel headers that are required to build + out-of-tree modules. + + STAGING_LIBDIR + Specifies the path to the ``/usr/lib`` subdirectory of the sysroot + directory for the target for which the current recipe is being built + (:term:`STAGING_DIR_HOST`).
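+ + For example, a recipe whose build system does not pick up the sysroot + automatically might pass these paths explicitly (a sketch only; the + Make variable names are hypothetical): + :: + + EXTRA_OEMAKE += "'EXTRA_LIBDIR=${STAGING_LIBDIR}' 'EXTRA_INCDIR=${STAGING_INCDIR}'"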
+ + STAGING_LIBDIR_NATIVE + Specifies the path to the ``/usr/lib`` subdirectory of the sysroot + directory for the build host. + + STAMP + Specifies the base path used to create recipe stamp files. The path + to an actual stamp file is constructed by evaluating this string and + then appending additional information. Currently, the default + assignment for ``STAMP`` as set in the ``meta/conf/bitbake.conf`` + file is: + :: + + STAMP = "${STAMPS_DIR}/${MULTIMACH_TARGET_SYS}/${PN}/${EXTENDPE}${PV}-${PR}" + + For information on how BitBake uses stamp files to determine if a + task should be rerun, see the + ":ref:`overview-manual/overview-manual-concepts:stamp files and the rerunning of tasks`" + section in the Yocto Project Overview and Concepts Manual. + + See :term:`STAMPS_DIR`, + :term:`MULTIMACH_TARGET_SYS`, + :term:`PN`, :term:`EXTENDPE`, + :term:`PV`, and :term:`PR` for related variable + information. + + STAMPS_DIR + Specifies the base directory in which the OpenEmbedded build system + places stamps. The default directory is ``${TMPDIR}/stamps``. + + STRIP + The minimal command and arguments to run ``strip``, which is used to + strip symbols. + + SUMMARY + The short (72 characters or less) summary of the binary package for + packaging systems such as ``opkg``, ``rpm``, or ``dpkg``. By default, + ``SUMMARY`` is used to define the + :term:`DESCRIPTION` variable if ``DESCRIPTION`` is + not set in the recipe. + + SVNDIR + The directory in which files checked out of a Subversion system are + stored. + + SYSLINUX_DEFAULT_CONSOLE + Specifies the kernel boot default console. If you want to use a + console other than the default, set this variable in your recipe as + follows where "X" is the console number you want to use: + :: + + SYSLINUX_DEFAULT_CONSOLE = "console=ttyX" + + The :ref:`syslinux ` class initially sets + this variable to null but then checks for a value later. + + SYSLINUX_OPTS + Lists additional options to add to the syslinux file. You need to set + this variable in your recipe. If you want to list multiple options, + separate the options with a semicolon character (``;``). + + The :ref:`syslinux ` class uses this variable + to create a set of options. + + SYSLINUX_SERIAL + Specifies the alternate serial port or turns it off. To turn off + serial, set this variable to an empty string in your recipe. The + variable's default value is set in the + :ref:`syslinux ` class as follows: + :: + + SYSLINUX_SERIAL ?= "0 115200" + + The class checks for and uses the variable as needed. + + SYSLINUX_SPLASH + An ``.LSS`` file used as the background for the VGA boot menu when + you use the boot menu. You need to set this variable in your recipe. + + The :ref:`syslinux ` class checks for this + variable and if found, the OpenEmbedded build system installs the + splash screen. + + SYSLINUX_SERIAL_TTY + Specifies the alternate console=tty... kernel boot argument. The + variable's default value is set in the + :ref:`syslinux ` class as follows: + :: + + SYSLINUX_SERIAL_TTY ?= "console=ttyS0,115200" + + The class checks for and uses the variable as needed. + + SYSROOT_DESTDIR + Points to the temporary directory under the work directory (default + "``${``\ :term:`WORKDIR`\ ``}/sysroot-destdir``") + where the files populated into the sysroot are assembled during the + :ref:`ref-tasks-populate_sysroot` task. + + SYSROOT_DIRS + Directories that are staged into the sysroot by the + :ref:`ref-tasks-populate_sysroot` task. 
By + default, the following directories are staged: + :: + + SYSROOT_DIRS = " \ + ${includedir} \ + ${libdir} \ + ${base_libdir} \ + ${nonarch_base_libdir} \ + ${datadir} \ + " + + SYSROOT_DIRS_BLACKLIST + Directories that are not staged into the sysroot by the + :ref:`ref-tasks-populate_sysroot` task. You + can use this variable to exclude certain subdirectories of + directories listed in :term:`SYSROOT_DIRS` from + staging. By default, the following directories are not staged: + :: + + SYSROOT_DIRS_BLACKLIST = " \ + ${mandir} \ + ${docdir} \ + ${infodir} \ + ${datadir}/locale \ + ${datadir}/applications \ + ${datadir}/fonts \ + ${datadir}/pixmaps \ + " + + SYSROOT_DIRS_NATIVE + Extra directories staged into the sysroot by the + :ref:`ref-tasks-populate_sysroot` task for + ``-native`` recipes, in addition to those specified in + :term:`SYSROOT_DIRS`. By default, the following + extra directories are staged: + :: + + SYSROOT_DIRS_NATIVE = " \ + ${bindir} \ + ${sbindir} \ + ${base_bindir} \ + ${base_sbindir} \ + ${libexecdir} \ + ${sysconfdir} \ + ${localstatedir} \ + " + + .. note:: + + Programs built by + -native + recipes run directly from the sysroot ( + STAGING_DIR_NATIVE + ), which is why additional directories containing program + executables and supporting files need to be staged. + + SYSROOT_PREPROCESS_FUNCS + A list of functions to execute after files are staged into the + sysroot. These functions are usually used to apply additional + processing on the staged files, or to stage additional files. + + SYSTEMD_AUTO_ENABLE + When inheriting the :ref:`systemd ` class, + this variable specifies whether the specified service in + :term:`SYSTEMD_SERVICE` should start + automatically or not. By default, the service is enabled to + automatically start at boot time. The default setting is in the + :ref:`systemd ` class as follows: + :: + + SYSTEMD_AUTO_ENABLE ??= "enable" + + You can disable the service by setting the variable to "disable". + + SYSTEMD_BOOT_CFG + When :term:`EFI_PROVIDER` is set to + "systemd-boot", the ``SYSTEMD_BOOT_CFG`` variable specifies the + configuration file that should be used. By default, the + :ref:`systemd-boot ` class sets the + ``SYSTEMD_BOOT_CFG`` as follows: + :: + + SYSTEMD_BOOT_CFG ?= "${:term:`S`}/loader.conf" + + For information on Systemd-boot, see the `Systemd-boot + documentation `__. + + SYSTEMD_BOOT_ENTRIES + When :term:`EFI_PROVIDER` is set to + "systemd-boot", the ``SYSTEMD_BOOT_ENTRIES`` variable specifies a + list of entry files (``*.conf``) to install that contain one boot + entry per file. By default, the + :ref:`systemd-boot ` class sets the + ``SYSTEMD_BOOT_ENTRIES`` as follows: + :: + + SYSTEMD_BOOT_ENTRIES ?= "" + + For information on Systemd-boot, see the `Systemd-boot + documentation `__. + + SYSTEMD_BOOT_TIMEOUT + When :term:`EFI_PROVIDER` is set to + "systemd-boot", the ``SYSTEMD_BOOT_TIMEOUT`` variable specifies the + boot menu timeout in seconds. By default, the + :ref:`systemd-boot ` class sets the + ``SYSTEMD_BOOT_TIMEOUT`` as follows: + :: + + SYSTEMD_BOOT_TIMEOUT ?= "10" + + For information on Systemd-boot, see the `Systemd-boot + documentation `__. + + SYSTEMD_PACKAGES + When inheriting the :ref:`systemd ` class, + this variable locates the systemd unit files when they are not found + in the main recipe's package. 
By default, the ``SYSTEMD_PACKAGES`` + variable is set such that the systemd unit files are assumed to + reside in the recipe's main package: + :: + + SYSTEMD_PACKAGES ?= "${PN}" + + If these unit files are not in this recipe's main package, you need + to use ``SYSTEMD_PACKAGES`` to list the package or packages in which + the build system can find the systemd unit files. + + SYSTEMD_SERVICE + When inheriting the :ref:`systemd ` class, + this variable specifies the systemd service name for a package. + + When you specify this file in your recipe, use a package name + override to indicate the package to which the value applies. Here is + an example from the connman recipe: + :: + + SYSTEMD_SERVICE_${PN} = "connman.service" + + SYSVINIT_ENABLED_GETTYS + When using + :ref:`SysVinit `, + specifies a space-separated list of the virtual terminals that should + run a `getty `__ + (allowing login), assuming :term:`USE_VT` is not set to + "0". + + The default value for ``SYSVINIT_ENABLED_GETTYS`` is "1" (i.e. only + run a getty on the first virtual terminal). + + T + This variable points to a directory where BitBake places temporary + files, which consist mostly of task logs and scripts, when building a + particular recipe. The variable is typically set as follows: + :: + + T = "${WORKDIR}/temp" + + The :term:`WORKDIR` is the directory into which + BitBake unpacks and builds the recipe. The default ``bitbake.conf`` + file sets this variable. + + The ``T`` variable is not to be confused with the + :term:`TMPDIR` variable, which points to the root of + the directory tree where BitBake places the output of an entire + build. + + TARGET_ARCH + The target machine's architecture. The OpenEmbedded build system + supports many architectures. Here is an example list of supported + architectures. This list is by no means complete as the architecture is + configurable: + + - arm + - i586 + - x86_64 + - powerpc + - powerpc64 + - mips + - mipsel + + For additional information on machine architectures, see the + :term:`TUNE_ARCH` variable. + + TARGET_AS_ARCH + Specifies architecture-specific assembler flags for the target + system. ``TARGET_AS_ARCH`` is initialized from + :term:`TUNE_ASARGS` by default in the BitBake + configuration file (``meta/conf/bitbake.conf``): + :: + + TARGET_AS_ARCH = "${TUNE_ASARGS}" + + TARGET_CC_ARCH + Specifies architecture-specific C compiler flags for the target + system. ``TARGET_CC_ARCH`` is initialized from + :term:`TUNE_CCARGS` by default. + + .. note:: + + It is a common workaround to append + LDFLAGS + to + TARGET_CC_ARCH + in recipes that build software for the target that would not + otherwise respect the exported + LDFLAGS + variable. + + TARGET_CC_KERNEL_ARCH + This is a specific kernel compiler flag for a CPU or Application + Binary Interface (ABI) tune. The flag is used rarely and only for + cases where a userspace :term:`TUNE_CCARGS` is not + compatible with the kernel compilation. The ``TARGET_CC_KERNEL_ARCH`` + variable allows the kernel (and associated modules) to use a + different configuration. See the + ``meta/conf/machine/include/arm/feature-arm-thumb.inc`` file in the + :term:`Source Directory` for an example. + + TARGET_CFLAGS + Specifies the flags to pass to the C compiler when building for the + target. When building in the target context, + :term:`CFLAGS` is set to the value of this variable by + default.
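+ + For example, a distro or local configuration could append an extra + compiler flag for all target builds (a sketch; whether the flag is + appropriate depends on your toolchain and policy): + :: + + # Add stack protection to all target builds + TARGET_CFLAGS_append = " -fstack-protector-strong"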
+ + Additionally, the SDK's environment setup script sets the ``CFLAGS`` + variable in the environment to the ``TARGET_CFLAGS`` value so that + executables built using the SDK also have the flags applied. + + TARGET_CPPFLAGS + Specifies the flags to pass to the C pre-processor (i.e. to both the + C and the C++ compilers) when building for the target. When building + in the target context, :term:`CPPFLAGS` is set to the + value of this variable by default. + + Additionally, the SDK's environment setup script sets the + ``CPPFLAGS`` variable in the environment to the ``TARGET_CPPFLAGS`` + value so that executables built using the SDK also have the flags + applied. + + TARGET_CXXFLAGS + Specifies the flags to pass to the C++ compiler when building for the + target. When building in the target context, + :term:`CXXFLAGS` is set to the value of this variable + by default. + + Additionally, the SDK's environment setup script sets the + ``CXXFLAGS`` variable in the environment to the ``TARGET_CXXFLAGS`` + value so that executables built using the SDK also have the flags + applied. + + TARGET_FPU + Specifies the method for handling FPU code. For FPU-less targets, + which include most ARM CPUs, the variable must be set to "soft". If + not, the kernel emulation gets used, which results in a performance + penalty. + + TARGET_LD_ARCH + Specifies architecture-specific linker flags for the target system. + ``TARGET_LD_ARCH`` is initialized from + :term:`TUNE_LDARGS` by default in the BitBake + configuration file (``meta/conf/bitbake.conf``): + :: + + TARGET_LD_ARCH = "${TUNE_LDARGS}" + + TARGET_LDFLAGS + Specifies the flags to pass to the linker when building for the + target. When building in the target context, + :term:`LDFLAGS` is set to the value of this variable + by default. + + Additionally, the SDK's environment setup script sets the + :term:`LDFLAGS` variable in the environment to the + ``TARGET_LDFLAGS`` value so that executables built using the SDK also + have the flags applied. + + TARGET_OS + Specifies the target's operating system. The variable can be set to + "linux" for glibc-based systems (GNU C Library) and to "linux-musl" + for musl libc. For ARM/EABI targets, "linux-gnueabi" and + "linux-musleabi" possible values exist. + + TARGET_PREFIX + Specifies the prefix used for the toolchain binary target tools. + + Depending on the type of recipe and the build target, + ``TARGET_PREFIX`` is set as follows: + + - For recipes building for the target machine, the value is + "${:term:`TARGET_SYS`}-". + + - For native recipes, the build system sets the variable to the + value of ``BUILD_PREFIX``. + + - For native SDK recipes (``nativesdk``), the build system sets the + variable to the value of ``SDK_PREFIX``. + + TARGET_SYS + Specifies the system, including the architecture and the operating + system, for which the build is occurring in the context of the + current recipe. + + The OpenEmbedded build system automatically sets this variable based + on :term:`TARGET_ARCH`, + :term:`TARGET_VENDOR`, and + :term:`TARGET_OS` variables. + + .. note:: + + You do not need to set the TARGET_SYS variable yourself. + + Consider these two examples: + + - Given a native recipe on a 32-bit, x86 machine running Linux, the + value is "i686-linux". + + - Given a recipe being built for a little-endian, MIPS target + running Linux, the value might be "mipsel-linux". + + TARGET_VENDOR + Specifies the name of the target vendor. 
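+ + The value typically begins with a dash so that it combines cleanly + with :term:`TARGET_ARCH` and :term:`TARGET_OS` when forming + :term:`TARGET_SYS`; for example (the vendor string is illustrative): + :: + + TARGET_VENDOR = "-mydistro"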
+ + TCLIBC + Specifies the GNU standard C library (``libc``) variant to use during + the build process. This variable replaces ``POKYLIBC``, which is no + longer supported. + + You can select "glibc", "musl", "newlib", or "baremetal". + + TCLIBCAPPEND + Specifies a suffix to be appended onto the + :term:`TMPDIR` value. The suffix identifies the + ``libc`` variant for building. When you are building for multiple + variants with the same :term:`Build Directory`, this + mechanism ensures that output for different ``libc`` variants is kept + separate to avoid potential conflicts. + + In the ``defaultsetup.conf`` file, the default value of + ``TCLIBCAPPEND`` is "-${TCLIBC}". However, distros such as poky, + which normally only support one ``libc`` variant, set + ``TCLIBCAPPEND`` to "" in their distro configuration file resulting + in no suffix being applied. + + TCMODE + Specifies the toolchain selector. ``TCMODE`` controls the + characteristics of the generated packages and images by telling the + OpenEmbedded build system which toolchain profile to use. By default, + the OpenEmbedded build system builds its own internal toolchain. The + variable's default value is "default", which uses that internal + toolchain. + + .. note:: + + If + TCMODE + is set to a value other than "default", then it is your + responsibility to ensure that the toolchain is compatible with the + default toolchain. Using older or newer versions of these + components might cause build problems. See the Release Notes for + the Yocto Project release for the specific components with which + the toolchain must be compatible. To access the Release Notes, go + to the + Downloads + page on the Yocto Project website and click on the "RELEASE + INFORMATION" link for the appropriate release. + + The ``TCMODE`` variable is similar to :term:`TCLIBC`, + which controls the variant of the GNU standard C library (``libc``) + used during the build process: ``glibc`` or ``musl``. + + With additional layers, it is possible to use a pre-compiled external + toolchain. One example is the Sourcery G++ Toolchain. The support for + this toolchain resides in the separate Mentor Graphics + ``meta-sourcery`` layer at + http://github.com/MentorEmbedded/meta-sourcery/. + + The layer's ``README`` file contains information on how to use the + Sourcery G++ Toolchain as an external toolchain. In summary, you must + be sure to add the layer to your ``bblayers.conf`` file in front of + the ``meta`` layer and then set the ``EXTERNAL_TOOLCHAIN`` variable + in your ``local.conf`` file to the location in which you installed + the toolchain. + + The fundamentals used for this example apply to any external + toolchain. You can use ``meta-sourcery`` as a template for adding + support for other external toolchains. + + TEST_EXPORT_DIR + The location the OpenEmbedded build system uses to export tests when + the :term:`TEST_EXPORT_ONLY` variable is set + to "1". + + The ``TEST_EXPORT_DIR`` variable defaults to + ``"${TMPDIR}/testimage/${PN}"``. + + TEST_EXPORT_ONLY + Specifies to export the tests only. Set this variable to "1" if you + do not want to run the tests but you want them to be exported in a + manner that allows you to run them outside of the build system. + + TEST_LOG_DIR + Holds the SSH log and the boot log for QEMU machines. The + ``TEST_LOG_DIR`` variable defaults to ``"${WORKDIR}/testimage"``. + + .. note:: + + Actual test results reside in the task log ( + log.do_testimage + ), which is in the + ${WORKDIR}/temp/ + directory.
+ + TEST_POWERCONTROL_CMD + For automated hardware testing, specifies the command to use to + control the power of the target machine under test. Typically, this + command would point to a script that performs the appropriate action + (e.g. interacting with a web-enabled power strip). The specified + command should expect to receive as the last argument "off", "on" or + "cycle" specifying to power off, on, or cycle (power off and then + power on) the device, respectively. + + TEST_POWERCONTROL_EXTRA_ARGS + For automated hardware testing, specifies additional arguments to + pass through to the command specified in + :term:`TEST_POWERCONTROL_CMD`. Setting + ``TEST_POWERCONTROL_EXTRA_ARGS`` is optional. You can use it if you + wish, for example, to separate the machine-specific and + non-machine-specific parts of the arguments. + + TEST_QEMUBOOT_TIMEOUT + The time in seconds allowed for an image to boot before automated + runtime tests begin to run against an image. The default timeout + period to allow the boot process to reach the login prompt is 500 + seconds. You can specify a different value in the ``local.conf`` + file. + + For more information on testing images, see the + ":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" + section in the Yocto Project Development Tasks Manual. + + TEST_SERIALCONTROL_CMD + For automated hardware testing, specifies the command to use to + connect to the serial console of the target machine under test. This + command simply needs to connect to the serial console and forward + that connection to standard input and output as any normal terminal + program does. + + For example, to use the Picocom terminal program on serial device + ``/dev/ttyUSB0`` at 115200bps, you would set the variable as follows: + :: + + TEST_SERIALCONTROL_CMD = "picocom /dev/ttyUSB0 -b 115200" + + TEST_SERIALCONTROL_EXTRA_ARGS + For automated hardware testing, specifies additional arguments to + pass through to the command specified in + :term:`TEST_SERIALCONTROL_CMD`. Setting + ``TEST_SERIALCONTROL_EXTRA_ARGS`` is optional. You can use it if you + wish, for example, to separate the machine-specific and + non-machine-specific parts of the command. + + TEST_SERVER_IP + The IP address of the build machine (host machine). This IP address + is usually automatically detected. However, if detection fails, this + variable needs to be set to the IP address of the build machine (i.e. + where the build is taking place). + + .. note:: + + The + TEST_SERVER_IP + variable is only used for a small number of tests such as the + "dnf" test suite, which needs to download packages from + WORKDIR/oe-rootfs-repo + . + + TEST_TARGET + Specifies the target controller to use when running tests against a + test image. The default controller to use is "qemu": + :: + + TEST_TARGET = "qemu" + + A target controller is a class that defines how an image gets + deployed on a target and how a target is started. A layer can extend + the controllers by adding a module in the layer's + ``/lib/oeqa/controllers`` directory and by inheriting the + ``BaseTarget`` class, which is an abstract class that cannot be used + as a value of ``TEST_TARGET``. + + You can provide the following arguments with ``TEST_TARGET``: + + - *"qemu":* Boots a QEMU image and runs the tests. See the + ":ref:`qemu-image-enabling-tests`" section + in the Yocto Project Development Tasks Manual for more + information. + + - *"simpleremote":* Runs the tests on target hardware that is + already up and running. 
The hardware can be on the network or it + can be a device running an image on QEMU. You must also set + :term:`TEST_TARGET_IP` when you use + "simpleremote". + + .. note:: + + This argument is defined in + meta/lib/oeqa/controllers/simpleremote.py + . + + For information on running tests on hardware, see the + ":ref:`hardware-image-enabling-tests`" + section in the Yocto Project Development Tasks Manual. + + TEST_TARGET_IP + The IP address of your hardware under test. The ``TEST_TARGET_IP`` + variable has no effect when :term:`TEST_TARGET` is + set to "qemu". + + When you specify the IP address, you can also include a port. Here is + an example: + :: + + TEST_TARGET_IP = "192.168.1.4:2201" + + Specifying a port is + useful when SSH is started on a non-standard port or in cases when + your hardware under test is behind a firewall or network that is not + directly accessible from your host and you need to do port address + translation. + + TEST_SUITES + An ordered list of tests (modules) to run against an image when + performing automated runtime testing. + + The OpenEmbedded build system provides a core set of tests that can + be used against images. + + .. note:: + + Currently, there is only support for running these tests under + QEMU. + + Tests include ``ping``, ``ssh``, ``df`` among others. You can add + your own tests to the list of tests by appending ``TEST_SUITES`` as + follows: + :: + + TEST_SUITES_append = " mytest" + + Alternatively, you can + provide the "auto" option to have all applicable tests run against + the image. + :: + + TEST_SUITES_append = " auto" + + Using this option causes the + build system to automatically run tests that are applicable to the + image. Tests that are not applicable are skipped. + + The order in which tests are run is important. Tests that depend on + another test must appear later in the list than the test on which + they depend. For example, if you append the list of tests with two + tests (``test_A`` and ``test_B``) where ``test_B`` is dependent on + ``test_A``, then you must order the tests as follows: + :: + + TEST_SUITES = "test_A test_B" + + For more information on testing images, see the + ":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" + section in the Yocto Project Development Tasks Manual. + + TESTIMAGE_AUTO + Automatically runs the series of automated tests for images when an + image is successfully built. Setting ``TESTIMAGE_AUTO`` to "1" causes + any image that successfully builds to automatically boot under QEMU. + Using the variable also adds in dependencies so that any SDK for + which testing is requested is automatically built first. + + These tests are written in Python making use of the ``unittest`` + module, and the majority of them run commands on the target system + over ``ssh``. You can set this variable to "1" in your ``local.conf`` + file in the :term:`Build Directory` to have the + OpenEmbedded build system automatically run these tests after an + image successfully builds: + + TESTIMAGE_AUTO = "1" + + For more information + on enabling, running, and writing these tests, see the + ":ref:`dev-manual/dev-manual-common-tasks:performing automated runtime testing`" + section in the Yocto Project Development Tasks Manual and the + ":ref:`testimage*.bbclass `" section. + + THISDIR + The directory in which the file BitBake is currently parsing is + located. Do not manually set this variable. + + TIME + The time the build was started. 
Times appear using the hour, minute, + and second (HMS) format (e.g. "140159" for one minute and fifty-nine + seconds past 1400 hours). + + TMPDIR + This variable is the base directory the OpenEmbedded build system + uses for all build output and intermediate files (other than the + shared state cache). By default, the ``TMPDIR`` variable points to + ``tmp`` within the :term:`Build Directory`. + + If you want to establish this directory in a location other than the + default, you can uncomment and edit the following statement in the + ``conf/local.conf`` file in the :term:`Source Directory`: + :: + + #TMPDIR = "${TOPDIR}/tmp" + + An example use for this scenario is to set ``TMPDIR`` to a local disk, + which does not use NFS, while having the Build Directory use NFS. + + The filesystem used by ``TMPDIR`` must have standard filesystem + semantics (i.e. mixed-case files are unique, POSIX file locking, and + persistent inodes). Due to various issues with NFS and bugs in some + implementations, NFS does not meet this minimum requirement. + Consequently, ``TMPDIR`` cannot be on NFS. + + TOOLCHAIN_HOST_TASK + This variable lists packages the OpenEmbedded build system uses when + building an SDK, which contains a cross-development environment. The + packages specified by this variable are part of the toolchain set + that runs on the :term:`SDKMACHINE`, and each + package should usually have the prefix ``nativesdk-``. For example, + consider the following command when building an SDK: + :: + + $ bitbake -c populate_sdk imagename + + In this case, a default list of packages is + set in this variable, but you can add additional packages to the + list. See the + ":ref:`sdk-manual/sdk-appendix-customizing-standard:adding individual packages to the standard sdk`" section + in the Yocto Project Application Development and the Extensible + Software Development Kit (eSDK) manual for more information. + + For background information on cross-development toolchains in the + Yocto Project development environment, see the + ":ref:`sdk-manual/sdk-intro:the cross-development toolchain`" + section in the Yocto Project Overview and Concepts Manual. For + information on setting up a cross-development environment, see the + :doc:`../sdk-manual/sdk-manual` manual. + + TOOLCHAIN_OUTPUTNAME + This variable defines the name used for the toolchain output. The + :ref:`populate_sdk_base ` class sets + the ``TOOLCHAIN_OUTPUTNAME`` variable as follows: + :: + + TOOLCHAIN_OUTPUTNAME ?= "${SDK_NAME}-toolchain-${SDK_VERSION}" + + See + the :term:`SDK_NAME` and + :term:`SDK_VERSION` variables for additional + information. + + TOOLCHAIN_TARGET_TASK + This variable lists packages the OpenEmbedded build system uses when + it creates the target part of an SDK (i.e. the part built for the + target hardware), which includes libraries and headers. Use this + variable to add individual packages to the part of the SDK that runs + on the target. See the + ":ref:`sdk-manual/sdk-appendix-customizing-standard:adding individual packages to the standard sdk`" section + in the Yocto Project Application Development and the Extensible + Software Development Kit (eSDK) manual for more information. + + For background information on cross-development toolchains in the + Yocto Project development environment, see the + ":ref:`sdk-manual/sdk-intro:the cross-development toolchain`" + section in the Yocto Project Overview and Concepts Manual. 
For + information on setting up a cross-development environment, see the + :doc:`../sdk-manual/sdk-manual` manual. + + TOPDIR + The top-level :term:`Build Directory`. BitBake + automatically sets this variable when you initialize your build + environment using ````` <#structure-core-script>`__. + + TRANSLATED_TARGET_ARCH + A sanitized version of :term:`TARGET_ARCH`. This + variable is used where the architecture is needed in a value where + underscores are not allowed, for example within package filenames. In + this case, dash characters replace any underscore characters used in + ``TARGET_ARCH``. + + Do not edit this variable. + + TUNE_ARCH + The GNU canonical architecture for a specific architecture (i.e. + ``arm``, ``armeb``, ``mips``, ``mips64``, and so forth). BitBake uses + this value to setup configuration. + + ``TUNE_ARCH`` definitions are specific to a given architecture. The + definitions can be a single static definition, or can be dynamically + adjusted. You can see details for a given CPU family by looking at + the architecture's ``README`` file. For example, the + ``meta/conf/machine/include/mips/README`` file in the + :term:`Source Directory` provides information for + ``TUNE_ARCH`` specific to the ``mips`` architecture. + + ``TUNE_ARCH`` is tied closely to + :term:`TARGET_ARCH`, which defines the target + machine's architecture. The BitBake configuration file + (``meta/conf/bitbake.conf``) sets ``TARGET_ARCH`` as follows: + :: + + TARGET_ARCH = "${TUNE_ARCH}" + + The following list, which is by no means complete since architectures + are configurable, shows supported machine architectures: + + - arm + - i586 + - x86_64 + - powerpc + - powerpc64 + - mips + - mipsel + + TUNE_ASARGS + Specifies architecture-specific assembler flags for the target + system. The set of flags is based on the selected tune features. + ``TUNE_ASARGS`` is set using the tune include files, which are + typically under ``meta/conf/machine/include/`` and are influenced + through :term:`TUNE_FEATURES`. For example, the + ``meta/conf/machine/include/x86/arch-x86.inc`` file defines the flags + for the x86 architecture as follows: + :: + + TUNE_ASARGS += "${@bb.utils.contains("TUNE_FEATURES", "mx32", "-x32", "", d)}" + + .. note:: + + Board Support Packages (BSPs) select the tune. The selected tune, + in turn, affects the tune variables themselves (i.e. the tune can + supply its own set of flags). + + TUNE_CCARGS + Specifies architecture-specific C compiler flags for the target + system. The set of flags is based on the selected tune features. + ``TUNE_CCARGS`` is set using the tune include files, which are + typically under ``meta/conf/machine/include/`` and are influenced + through :term:`TUNE_FEATURES`. + + .. note:: + + Board Support Packages (BSPs) select the tune. The selected tune, + in turn, affects the tune variables themselves (i.e. the tune can + supply its own set of flags). + + TUNE_LDARGS + Specifies architecture-specific linker flags for the target system. + The set of flags is based on the selected tune features. + ``TUNE_LDARGS`` is set using the tune include files, which are + typically under ``meta/conf/machine/include/`` and are influenced + through :term:`TUNE_FEATURES`. For example, the + ``meta/conf/machine/include/x86/arch-x86.inc`` file defines the flags + for the x86 architecture as follows: + :: + + TUNE_LDARGS += "${@bb.utils.contains("TUNE_FEATURES", "mx32", "-m elf32_x86_64", "", d)}" + + .. note:: + + Board Support Packages (BSPs) select the tune. 
The selected tune, + in turn, affects the tune variables themselves (i.e. the tune can + supply its own set of flags). + + TUNE_FEATURES + Features used to "tune" a compiler for optimal use given a specific + processor. The features are defined within the tune files and allow + arguments (i.e. ``TUNE_*ARGS``) to be dynamically generated based on + the features. + + The OpenEmbedded build system verifies the features to be sure they + are not conflicting and that they are supported. + + The BitBake configuration file (``meta/conf/bitbake.conf``) defines + ``TUNE_FEATURES`` as follows: + :: + + TUNE_FEATURES ??= "${TUNE_FEATURES_tune-${DEFAULTTUNE}}" + + See the :term:`DEFAULTTUNE` variable for more information. + + TUNE_PKGARCH + The package architecture understood by the packaging system to define + the architecture, ABI, and tuning of output packages. The specific + tune is defined using the "_tune" override as follows: + :: + + TUNE_PKGARCH_tune-tune = "tune" + + These tune-specific package architectures are defined in the machine + include files. Here is an example of the "core2-32" tuning as used in + the ``meta/conf/machine/include/tune-core2.inc`` file: + :: + + TUNE_PKGARCH_tune-core2-32 = "core2-32" + + TUNEABI + An underlying Application Binary Interface (ABI) used by a particular + tuning in a given toolchain layer. Providers that use prebuilt + libraries can use the ``TUNEABI``, + :term:`TUNEABI_OVERRIDE`, and + :term:`TUNEABI_WHITELIST` variables to check + compatibility of tunings against their selection of libraries. + + If ``TUNEABI`` is undefined, then every tuning is allowed. See the + :ref:`sanity ` class to see how the variable is + used. + + TUNEABI_OVERRIDE + If set, the OpenEmbedded system ignores the + :term:`TUNEABI_WHITELIST` variable. + Providers that use prebuilt libraries can use the + ``TUNEABI_OVERRIDE``, ``TUNEABI_WHITELIST``, and + :term:`TUNEABI` variables to check compatibility of a + tuning against their selection of libraries. + + See the :ref:`sanity ` class to see how the + variable is used. + + TUNEABI_WHITELIST + A whitelist of permissible :term:`TUNEABI` values. If + ``TUNEABI_WHITELIST`` is not set, all tunes are allowed. Providers + that use prebuilt libraries can use the ``TUNEABI_WHITELIST``, + :term:`TUNEABI_OVERRIDE`, and ``TUNEABI`` + variables to check compatibility of a tuning against their selection + of libraries. + + See the :ref:`sanity ` class to see how the + variable is used. + + TUNECONFLICTS[feature] + Specifies CPU or Application Binary Interface (ABI) tuning features + that conflict with feature. + + Known tuning conflicts are specified in the machine include files in + the :term:`Source Directory`. Here is an example from + the ``meta/conf/machine/include/mips/arch-mips.inc`` include file + that lists the "o32" and "n64" features as conflicting with the "n32" + feature: + :: + + TUNECONFLICTS[n32] = "o32 n64" + + TUNEVALID[feature] + Specifies a valid CPU or Application Binary Interface (ABI) tuning + feature. The specified feature is stored as a flag. Valid features + are specified in the machine include files (e.g. + ``meta/conf/machine/include/arm/arch-arm.inc``). Here is an example + from that file: + :: + + TUNEVALID[bigendian] = "Enable big-endian mode." + + See the machine include files in the :term:`Source Directory` + for these features. + + UBOOT_CONFIG + Configures the :term:`UBOOT_MACHINE` and can + also define :term:`IMAGE_FSTYPES` for individual + cases. + + Following is an example from the ``meta-fsl-arm`` layer. 
:: + + UBOOT_CONFIG ??= "sd" + UBOOT_CONFIG[sd] = "mx6qsabreauto_config,sdcard" + UBOOT_CONFIG[eimnor] = "mx6qsabreauto_eimnor_config" + UBOOT_CONFIG[nand] = "mx6qsabreauto_nand_config,ubifs" + UBOOT_CONFIG[spinor] = "mx6qsabreauto_spinor_config" + + In this example, "sd" is selected as the configuration of the possible four for the + ``UBOOT_MACHINE``. The "sd" configuration defines + "mx6qsabreauto_config" as the value for ``UBOOT_MACHINE``, while the + "sdcard" specifies the ``IMAGE_FSTYPES`` to use for the U-boot image. + + For more information on how the ``UBOOT_CONFIG`` is handled, see the + :ref:`uboot-config ` + class. + + UBOOT_DTB_LOADADDRESS + Specifies the load address for the dtb image used by U-boot. During FIT + image creation, the ``UBOOT_DTB_LOADADDRESS`` variable is used in + :ref:`kernel-fitimage ` class to specify + the load address to be used in + creating the dtb sections of Image Tree Source for the FIT image. + + UBOOT_DTBO_LOADADDRESS + Specifies the load address for the dtbo image used by U-boot. During FIT + image creation, the ``UBOOT_DTBO_LOADADDRESS`` variable is used in + :ref:`kernel-fitimage ` class to specify the load address to be used in + creating the dtbo sections of Image Tree Source for the FIT image. + + UBOOT_ENTRYPOINT + Specifies the entry point for the U-Boot image. During U-Boot image + creation, the ``UBOOT_ENTRYPOINT`` variable is passed as a + command-line parameter to the ``uboot-mkimage`` utility. + + UBOOT_LOADADDRESS + Specifies the load address for the U-Boot image. During U-Boot image + creation, the ``UBOOT_LOADADDRESS`` variable is passed as a + command-line parameter to the ``uboot-mkimage`` utility. + + UBOOT_LOCALVERSION + Appends a string to the name of the local version of the U-Boot + image. For example, assuming the version of the U-Boot image built + was "2013.10", the full version string reported by U-Boot would be + "2013.10-yocto" given the following statement: + :: + + UBOOT_LOCALVERSION = "-yocto" + + UBOOT_MACHINE + Specifies the value passed on the ``make`` command line when building + a U-Boot image. The value indicates the target platform + configuration. You typically set this variable from the machine + configuration file (i.e. ``conf/machine/machine_name.conf``). + + Please see the "Selection of Processor Architecture and Board Type" + section in the U-Boot README for valid values for this variable. + + UBOOT_MAKE_TARGET + Specifies the target called in the ``Makefile``. The default target + is "all". + + UBOOT_MKIMAGE_DTCOPTS + Options for the device tree compiler passed to mkimage '-D' + feature while creating FIT image in :ref:`kernel-fitimage ` class. + + UBOOT_RD_LOADADDRESS + Specifies the load address for the RAM disk image. + During FIT image creation, the + ``UBOOT_RD_LOADADDRESS`` variable is used + in :ref:`kernel-fitimage ` class to specify the + load address to be used in creating the Image Tree Source for + the FIT image. + + UBOOT_RD_ENTRYPOINT + Specifies the entrypoint for the RAM disk image. + During FIT image creation, the + ``UBOOT_RD_ENTRYPOINT`` variable is used + in :ref:`kernel-fitimage ` class to specify the + entrypoint to be used in creating the Image Tree Source for + the FIT image. + + UBOOT_SIGN_ENABLE + Enable signing of FIT image. The default value is "0". + + UBOOT_SIGN_KEYDIR + Location of the directory containing the RSA key and + certificate used for signing FIT image. 
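+
+      As an illustrative sketch only (the key directory shown here is a
+      hypothetical path), the FIT image signing variables described in this
+      glossary might be combined in a machine or local configuration as
+      follows:
+      ::
+
+         UBOOT_SIGN_ENABLE = "1"
+         UBOOT_SIGN_KEYDIR = "/path/to/fit-signing-keys"
+         UBOOT_SIGN_KEYNAME = "dev"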
+ + UBOOT_SIGN_KEYNAME + The name of keys used for signing U-boot FIT image stored in + :term:`UBOOT_SIGN_KEYDIR` directory. For e.g. dev.key key and dev.crt + certificate stored in :term:`UBOOT_SIGN_KEYDIR` directory will have + :term:`UBOOT_SIGN_KEYNAME` set to "dev". + + UBOOT_SUFFIX + Points to the generated U-Boot extension. For example, ``u-boot.sb`` + has a ``.sb`` extension. + + The default U-Boot extension is ``.bin`` + + UBOOT_TARGET + Specifies the target used for building U-Boot. The target is passed + directly as part of the "make" command (e.g. SPL and AIS). If you do + not specifically set this variable, the OpenEmbedded build process + passes and uses "all" for the target during the U-Boot building + process. + + UNKNOWN_CONFIGURE_WHITELIST + Specifies a list of options that, if reported by the configure script + as being invalid, should not generate a warning during the + :ref:`ref-tasks-configure` task. Normally, invalid + configure options are simply not passed to the configure script (e.g. + should be removed from :term:`EXTRA_OECONF` or + :term:`PACKAGECONFIG_CONFARGS`). + However, common options, for example, exist that are passed to all + configure scripts at a class level that might not be valid for some + configure scripts. It follows that no benefit exists in seeing a + warning about these options. For these cases, the options are added + to ``UNKNOWN_CONFIGURE_WHITELIST``. + + The configure arguments check that uses + ``UNKNOWN_CONFIGURE_WHITELIST`` is part of the + :ref:`insane ` class and is only enabled if the + recipe inherits the :ref:`autotools ` class. + + UPDATERCPN + For recipes inheriting the + :ref:`update-rc.d ` class, ``UPDATERCPN`` + specifies the package that contains the initscript that is enabled. + + The default value is "${PN}". Given that almost all recipes that + install initscripts package them in the main package for the recipe, + you rarely need to set this variable in individual recipes. + + UPSTREAM_CHECK_GITTAGREGEX + You can perform a per-recipe check for what the latest upstream + source code version is by calling ``bitbake -c checkpkg`` recipe. If + the recipe source code is provided from Git repositories, the + OpenEmbedded build system determines the latest upstream version by + picking the latest tag from the list of all repository tags. + + You can use the ``UPSTREAM_CHECK_GITTAGREGEX`` variable to provide a + regular expression to filter only the relevant tags should the + default filter not work correctly. + :: + + UPSTREAM_CHECK_GITTAGREGEX = "git_tag_regex" + + UPSTREAM_CHECK_REGEX + Use the ``UPSTREAM_CHECK_REGEX`` variable to specify a different + regular expression instead of the default one when the package + checking system is parsing the page found using + :term:`UPSTREAM_CHECK_URI`. + :: + + UPSTREAM_CHECK_REGEX = "package_regex" + + UPSTREAM_CHECK_URI + You can perform a per-recipe check for what the latest upstream + source code version is by calling ``bitbake -c checkpkg`` recipe. If + the source code is provided from tarballs, the latest version is + determined by fetching the directory listing where the tarball is and + attempting to find a later tarball. When this approach does not work, + you can use ``UPSTREAM_CHECK_URI`` to provide a different URI that + contains the link to the latest tarball. + :: + + UPSTREAM_CHECK_URI = "recipe_url" + + USE_DEVFS + Determines if ``devtmpfs`` is used for ``/dev`` population. The + default value used for ``USE_DEVFS`` is "1" when no value is + specifically set. 
Typically, you would set ``USE_DEVFS`` to "0" for a + statically populated ``/dev`` directory. + + See the ":ref:`selecting-dev-manager`" section in + the Yocto Project Development Tasks Manual for information on how to + use this variable. + + USE_VT + When using + :ref:`SysVinit `, + determines whether or not to run a + `getty `__ on any + virtual terminals in order to enable logging in through those + terminals. + + The default value used for ``USE_VT`` is "1" when no default value is + specifically set. Typically, you would set ``USE_VT`` to "0" in the + machine configuration file for machines that do not have a graphical + display attached and therefore do not need virtual terminal + functionality. + + USER_CLASSES + A list of classes to globally inherit. These classes are used by the + OpenEmbedded build system to enable extra features (e.g. + ``buildstats``, ``image-mklibs``, and so forth). + + The default list is set in your ``local.conf`` file: + :: + + USER_CLASSES ?= "buildstats image-mklibs image-prelink" + + For more information, see + ``meta-poky/conf/local.conf.sample`` in the :term:`Source Directory`. + + USERADD_ERROR_DYNAMIC + If set to ``error``, forces the OpenEmbedded build system to produce + an error if the user identification (``uid``) and group + identification (``gid``) values are not defined in any of the files + listed in :term:`USERADD_UID_TABLES` and + :term:`USERADD_GID_TABLES`. If set to + ``warn``, a warning will be issued instead. + + The default behavior for the build system is to dynamically apply + ``uid`` and ``gid`` values. Consequently, the + ``USERADD_ERROR_DYNAMIC`` variable is by default not set. If you plan + on using statically assigned ``gid`` and ``uid`` values, you should + set the ``USERADD_ERROR_DYNAMIC`` variable in your ``local.conf`` + file as follows: + :: + + USERADD_ERROR_DYNAMIC = "error" + + Overriding the + default behavior implies you are going to also take steps to set + static ``uid`` and ``gid`` values through use of the + :term:`USERADDEXTENSION`, + :term:`USERADD_UID_TABLES`, and + :term:`USERADD_GID_TABLES` variables. + + .. note:: + + There is a difference in behavior between setting + USERADD_ERROR_DYNAMIC + to + error + and setting it to + warn + . When it is set to + warn + , the build system will report a warning for every undefined + uid + and + gid + in any recipe. But when it is set to + error + , it will only report errors for recipes that are actually built. + This saves you from having to add static IDs for recipes that you + know will never be built. + + USERADD_GID_TABLES + Specifies a password file to use for obtaining static group + identification (``gid``) values when the OpenEmbedded build system + adds a group to the system during package installation. + + When applying static group identification (``gid``) values, the + OpenEmbedded build system looks in :term:`BBPATH` for a + ``files/group`` file and then applies those ``uid`` values. Set the + variable as follows in your ``local.conf`` file: + :: + + + USERADD_GID_TABLES = "files/group" + + .. note:: + + Setting the + USERADDEXTENSION + variable to "useradd-staticids" causes the build system to use + static + gid + values. + + USERADD_PACKAGES + When inheriting the :ref:`useradd ` class, + this variable specifies the individual packages within the recipe + that require users and/or groups to be added. + + You must set this variable if the recipe inherits the class. 
For + example, the following enables adding a user for the main package in + a recipe: + :: + + USERADD_PACKAGES = "${PN}" + + .. note:: + + It follows that if you are going to use the + USERADD_PACKAGES + variable, you need to set one or more of the + USERADD_PARAM + , + GROUPADD_PARAM + , or + GROUPMEMS_PARAM + variables. + + USERADD_PARAM + When inheriting the :ref:`useradd ` class, + this variable specifies for a package what parameters should pass to + the ``useradd`` command if you add a user to the system when the + package is installed. + + Here is an example from the ``dbus`` recipe: + :: + + USERADD_PARAM_${PN} = "--system --home ${localstatedir}/lib/dbus \ + --no-create-home --shell /bin/false \ + --user-group messagebus" + + For information on the + standard Linux shell command ``useradd``, see + http://linux.die.net/man/8/useradd. + + USERADD_UID_TABLES + Specifies a password file to use for obtaining static user + identification (``uid``) values when the OpenEmbedded build system + adds a user to the system during package installation. + + When applying static user identification (``uid``) values, the + OpenEmbedded build system looks in :term:`BBPATH` for a + ``files/passwd`` file and then applies those ``uid`` values. Set the + variable as follows in your ``local.conf`` file: + :: + + USERADD_UID_TABLES = "files/passwd" + + .. note:: + + Setting the + USERADDEXTENSION + variable to "useradd-staticids" causes the build system to use + static + uid + values. + + USERADDEXTENSION + When set to "useradd-staticids", causes the OpenEmbedded build system + to base all user and group additions on a static ``passwd`` and + ``group`` files found in :term:`BBPATH`. + + To use static user identification (``uid``) and group identification + (``gid``) values, set the variable as follows in your ``local.conf`` + file: USERADDEXTENSION = "useradd-staticids" + + .. note:: + + Setting this variable to use static + uid + and + gid + values causes the OpenEmbedded build system to employ the + useradd-staticids + class. + + If you use static ``uid`` and ``gid`` information, you must also + specify the ``files/passwd`` and ``files/group`` files by setting the + :term:`USERADD_UID_TABLES` and + :term:`USERADD_GID_TABLES` variables. + Additionally, you should also set the + :term:`USERADD_ERROR_DYNAMIC` variable. + + VOLATILE_LOG_DIR + Specifies the persistence of the target's ``/var/log`` directory, + which is used to house postinstall target log files. + + By default, ``VOLATILE_LOG_DIR`` is set to "yes", which means the + file is not persistent. You can override this setting by setting the + variable to "no" to make the log directory persistent. + + WARN_QA + Specifies the quality assurance checks whose failures are reported as + warnings by the OpenEmbedded build system. You set this variable in + your distribution configuration file. For a list of the checks you + can control with this variable, see the + ":ref:`insane.bbclass `" section. + + WKS_FILE_DEPENDS + When placed in the recipe that builds your image, this variable lists + build-time dependencies. The ``WKS_FILE_DEPENDS`` variable is only + applicable when Wic images are active (i.e. when + :term:`IMAGE_FSTYPES` contains entries related + to Wic). If your recipe does not create Wic images, the variable has + no effect. + + The ``WKS_FILE_DEPENDS`` variable is similar to the + :term:`DEPENDS` variable. 
When you use the variable in + your recipe that builds the Wic image, dependencies you list in the + ``WIC_FILE_DEPENDS`` variable are added to the ``DEPENDS`` variable. + + With the ``WKS_FILE_DEPENDS`` variable, you have the possibility to + specify a list of additional dependencies (e.g. native tools, + bootloaders, and so forth), that are required to build Wic images. + Following is an example: + :: + + WKS_FILE_DEPENDS = "some-native-tool" + + In the + previous example, some-native-tool would be replaced with an actual + native tool on which the build would depend. + + WKS_FILE + Specifies the location of the Wic kickstart file that is used by the + OpenEmbedded build system to create a partitioned image + (image\ ``.wic``). For information on how to create a partitioned + image, see the + ":ref:`dev-manual/dev-manual-common-tasks:creating partitioned images using wic`" + section in the Yocto Project Development Tasks Manual. For details on + the kickstart file format, see the ":doc:`../ref-manual/ref-kickstart`" Chapter. + + WORKDIR + The pathname of the work directory in which the OpenEmbedded build + system builds a recipe. This directory is located within the + :term:`TMPDIR` directory structure and is specific to + the recipe being built and the system for which it is being built. + + The ``WORKDIR`` directory is defined as follows: + :: + + ${TMPDIR}/work/${MULTIMACH_TARGET_SYS}/${PN}/${EXTENDPE}${PV}-${PR} + + The actual directory depends on several things: + + - TMPDIR + : The top-level build output directory + - MULTIMACH_TARGET_SYS + : The target system identifier + - PN + : The recipe name + - EXTENDPE + : The epoch - (if + PE + is not specified, which is usually the case for most recipes, then + EXTENDPE + is blank) + - PV + : The recipe version + - PR + : The recipe revision + + As an example, assume a Source Directory top-level folder name + ``poky``, a default Build Directory at ``poky/build``, and a + ``qemux86-poky-linux`` machine target system. Furthermore, suppose + your recipe is named ``foo_1.3.0-r0.bb``. In this case, the work + directory the build system uses to build the package would be as + follows: + :: + + poky/build/tmp/work/qemux86-poky-linux/foo/1.3.0-r0 + + XSERVER + Specifies the packages that should be installed to provide an X + server and drivers for the current machine, assuming your image + directly includes ``packagegroup-core-x11-xserver`` or, perhaps + indirectly, includes "x11-base" in + :term:`IMAGE_FEATURES`. + + The default value of ``XSERVER``, if not specified in the machine + configuration, is "xserver-xorg xf86-video-fbdev xf86-input-evdev". + diff --git a/poky/documentation/ref-manual/ref-varlocality.rst b/poky/documentation/ref-manual/ref-varlocality.rst new file mode 100644 index 000000000..a95504b57 --- /dev/null +++ b/poky/documentation/ref-manual/ref-varlocality.rst @@ -0,0 +1,166 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +**************** +Variable Context +**************** + +While you can use most variables in almost any context such as +``.conf``, ``.bbclass``, ``.inc``, and ``.bb`` files, some variables are +often associated with a particular locality or context. This chapter +describes some common associations. + +.. _ref-varlocality-configuration: + +Configuration +============= + +The following subsections provide lists of variables whose context is +configuration: distribution, machine, and local. + +.. 
_ref-varlocality-config-distro: + +Distribution (Distro) +--------------------- + +This section lists variables whose configuration context is the +distribution, or distro. + +- :term:`DISTRO` + +- :term:`DISTRO_NAME` + +- :term:`DISTRO_VERSION` + +- :term:`MAINTAINER` + +- :term:`PACKAGE_CLASSES` + +- :term:`TARGET_OS` + +- :term:`TARGET_FPU` + +- :term:`TCMODE` + +- :term:`TCLIBC` + +.. _ref-varlocality-config-machine: + +Machine +------- + +This section lists variables whose configuration context is the machine. + +- :term:`TARGET_ARCH` + +- :term:`SERIAL_CONSOLES` + +- :term:`PACKAGE_EXTRA_ARCHS` + +- :term:`IMAGE_FSTYPES` + +- :term:`MACHINE_FEATURES` + +- :term:`MACHINE_EXTRA_RDEPENDS` + +- :term:`MACHINE_EXTRA_RRECOMMENDS` + +- :term:`MACHINE_ESSENTIAL_EXTRA_RDEPENDS` + +- :term:`MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS` + +.. _ref-varlocality-config-local: + +Local +----- + +This section lists variables whose configuration context is the local +configuration through the ``local.conf`` file. + +- :term:`DISTRO` + +- :term:`MACHINE` + +- :term:`DL_DIR` + +- :term:`BBFILES` + +- :term:`EXTRA_IMAGE_FEATURES` + +- :term:`PACKAGE_CLASSES` + +- :term:`BB_NUMBER_THREADS` + +- :term:`BBINCLUDELOGS` + +- :term:`ENABLE_BINARY_LOCALE_GENERATION` + +.. _ref-varlocality-recipes: + +Recipes +======= + +The following subsections provide lists of variables whose context is +recipes: required, dependencies, path, and extra build information. + +.. _ref-varlocality-recipe-required: + +Required +-------- + +This section lists variables that are required for recipes. + +- :term:`LICENSE` + +- :term:`LIC_FILES_CHKSUM` + +- :term:`SRC_URI` - used in recipes that fetch local or remote files. + +.. _ref-varlocality-recipe-dependencies: + +Dependencies +------------ + +This section lists variables that define recipe dependencies. + +- :term:`DEPENDS` + +- :term:`RDEPENDS` + +- :term:`RRECOMMENDS` + +- :term:`RCONFLICTS` + +- :term:`RREPLACES` + +.. _ref-varlocality-recipe-paths: + +Paths +----- + +This section lists variables that define recipe paths. + +- :term:`WORKDIR` + +- :term:`S` + +- :term:`FILES` + +.. _ref-varlocality-recipe-build: + +Extra Build Information +----------------------- + +This section lists variables that define extra build information for +recipes. + +- :term:`DEFAULT_PREFERENCE` + +- :term:`EXTRA_OECMAKE` + +- :term:`EXTRA_OECONF` + +- :term:`EXTRA_OEMAKE` + +- :term:`PACKAGECONFIG_CONFARGS` + +- :term:`PACKAGES` diff --git a/poky/documentation/ref-manual/resources.rst b/poky/documentation/ref-manual/resources.rst new file mode 100644 index 000000000..2b82b7910 --- /dev/null +++ b/poky/documentation/ref-manual/resources.rst @@ -0,0 +1,197 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +**************************************** +Contributions and Additional Information +**************************************** + +.. _resources-intro: + +Introduction +============ + +The Yocto Project team is happy for people to experiment with the Yocto +Project. A number of places exist to find help if you run into +difficulties or find bugs. This presents information about contributing +and participating in the Yocto Project. + +.. _resources-contributions: + +Contributions +============= + +The Yocto Project gladly accepts contributions. You can submit changes +to the project either by creating and sending pull requests, or by +submitting patches through email. 
For information on how to do both as +well as information on how to identify the maintainer for each area of +code, see the ":ref:`how-to-submit-a-change`" section in the +Yocto Project Development Tasks Manual. + +.. _resources-bugtracker: + +Yocto Project Bugzilla +====================== + +The Yocto Project uses its own implementation of +:yocto_bugs:`Bugzilla <>` to track defects (bugs). +Implementations of Bugzilla work well for group development because they +track bugs and code changes, can be used to communicate changes and +problems with developers, can be used to submit and review patches, and +can be used to manage quality assurance. + +Sometimes it is helpful to submit, investigate, or track a bug against +the Yocto Project itself (e.g. when discovering an issue with some +component of the build system that acts contrary to the documentation or +your expectations). + +A general procedure and guidelines exist for when you use Bugzilla to +submit a bug. For information on how to use Bugzilla to submit a bug +against the Yocto Project, see the following: + +- The ":ref:`dev-manual/dev-manual-common-tasks:submitting a defect against the yocto project`" + section in the Yocto Project Development Tasks Manual. + +- The Yocto Project :yocto_wiki:`Bugzilla wiki page ` + +For information on Bugzilla in general, see http://www.bugzilla.org/about/. + +.. _resources-mailinglist: + +Mailing lists +============= + +A number of mailing lists maintained by the Yocto Project exist as well +as related OpenEmbedded mailing lists for discussion, patch submission +and announcements. To subscribe to one of the following mailing lists, +click on the appropriate URL in the following list and follow the +instructions: + +- https://lists.yoctoproject.org/g/yocto - General Yocto Project + discussion mailing list. + +- https://lists.openembedded.org/g/openembedded-core - Discussion mailing + list about OpenEmbedded-Core (the core metadata). + +- https://lists.openembedded.org/g/openembedded-devel - Discussion + mailing list about OpenEmbedded. + +- https://lists.openembedded.org/g/bitbake-devel - Discussion mailing + list about the :term:`BitBake` build tool. + +- https://lists.yoctoproject.org/g/poky - Discussion mailing list + about `Poky <#poky>`__. + +- https://lists.yoctoproject.org/g/yocto-announce - Mailing list to + receive official Yocto Project release and milestone announcements. + +For more Yocto Project-related mailing lists, see the +Yocto Project Website +. +.. _resources-irc: + +Internet Relay Chat (IRC) +========================= + +Two IRC channels on freenode are available for the Yocto Project and +Poky discussions: + +- ``#yocto`` + +- ``#poky`` + +.. _resources-links-and-related-documentation: + +Links and Related Documentation +=============================== + +Here is a list of resources you might find helpful: + +- :yocto_home:`The Yocto Project Website <>`\ *:* The home site + for the Yocto Project. + +- :yocto_wiki:`The Yocto Project Main Wiki Page `\ *:* The main wiki page for + the Yocto Project. This page contains information about project + planning, release engineering, QA & automation, a reference site map, + and other resources related to the Yocto Project. + +- `OpenEmbedded `__\ *:* The build system used by the + Yocto Project. This project is the upstream, generic, embedded + distribution from which the Yocto Project derives its build system + (Poky) and to which it contributes. + +- `BitBake `__\ *:* The tool + used to process metadata. 
+ +- :doc:`BitBake User Manual `\ *:* A comprehensive + guide to the BitBake tool. If you want information on BitBake, see + this manual. + +- :doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` *:* This + short document lets you experience building an image using the Yocto + Project without having to understand any concepts or details. + +- :doc:`../overview-manual/overview-manual` *:* This manual provides overview + and conceptual information about the Yocto Project. + +- :doc:`../dev-manual/dev-manual` *:* This manual is a "how-to" guide + that presents procedures useful to both application and system + developers who use the Yocto Project. + +- :doc:`../sdk-manual/sdk-manual` *manual :* This + guide provides information that lets you get going with the standard + or extensible SDK. An SDK, with its cross-development toolchains, + allows you to develop projects inside or outside of the Yocto Project + environment. + +- :doc:`../bsp-guide/bsp` *:* This guide defines the structure + for BSP components. Having a commonly understood structure encourages + standardization. + +- :doc:`../kernel-dev/kernel-dev` *:* This manual describes + how to work with Linux Yocto kernels as well as provides a bit of + conceptual information on the construction of the Yocto Linux kernel + tree. + +- :doc:`../ref-manual/ref-manual` *:* This + manual provides reference material such as variable, task, and class + descriptions. + +- `Yocto Project Mega-Manual `__\ *:* This manual + is simply a single HTML file comprised of the bulk of the Yocto + Project manuals. The Mega-Manual primarily exists as a vehicle by + which you can easily search for phrases and terms used in the Yocto + Project documentation set. + +- :doc:`../profile-manual/profile-manual` *:* This manual presents a set of + common and generally useful tracing and profiling schemes along with + their applications (as appropriate) to each tool. + +- :doc:`../toaster-manual/toaster-manual` *:* This manual + introduces and describes how to set up and use Toaster. Toaster is an + Application Programming Interface (API) and web-based interface to + the :term:`OpenEmbedded Build System`, which uses + BitBake, that reports build information. + +- :yocto_wiki:`FAQ `\ *:* A list of commonly asked + questions and their answers. + +- *Release Notes:* Features, updates and known issues for the current + release of the Yocto Project. To access the Release Notes, go to the + :yocto_home:`Downloads ` page on + the Yocto Project website and click on the "RELEASE INFORMATION" link + for the appropriate release. + +- `Bugzilla `__\ *:* The bug tracking application + the Yocto Project uses. If you find problems with the Yocto Project, + you should report them using this application. + +- :yocto_wiki:`Bugzilla Configuration and Bug Tracking Wiki Page `\ *:* + Information on how to get set up and use the Yocto Project + implementation of Bugzilla for logging and tracking Yocto Project + defects. + +- *Internet Relay Chat (IRC):* Two IRC channels on freenode are + available for Yocto Project and Poky discussions: ``#yocto`` and + ``#poky``, respectively. + +- `Quick EMUlator (QEMU) `__\ *:* An + open-source machine emulator and virtualizer. diff --git a/poky/documentation/releases.rst b/poky/documentation/releases.rst new file mode 100644 index 000000000..49c33b3b5 --- /dev/null +++ b/poky/documentation/releases.rst @@ -0,0 +1,188 @@ +.. 
SPDX-License-Identifier: CC-BY-2.0-UK + +========================= + Current Release Manuals +========================= + +**************************** +3.1 'dunfell' Release Series +**************************** + +- :yocto_docs:`3.1 Documentation ` +- :yocto_docs:`3.1.1 Documentation ` +- :yocto_docs:`3.1.2 Documentation ` + +========================== + Previous Release Manuals +========================== + +************************* +3.0 'zeus' Release Series +************************* + +- :yocto_docs:`3.0 Documentation ` +- :yocto_docs:`3.0.1 Documentation ` +- :yocto_docs:`3.0.2 Documentation ` +- :yocto_docs:`3.0.3 Documentation ` + +**************************** +2.7 'warrior' Release Series +**************************** + +- :yocto_docs:`2.7 Documentation ` +- :yocto_docs:`2.7.1 Documentation ` +- :yocto_docs:`2.7.2 Documentation ` +- :yocto_docs:`2.7.3 Documentation ` +- :yocto_docs:`2.7.4 Documentation ` + +************************* +2.6 'thud' Release Series +************************* + +- :yocto_docs:`2.6 Documentation ` +- :yocto_docs:`2.6.1 Documentation ` +- :yocto_docs:`2.6.2 Documentation ` +- :yocto_docs:`2.6.3 Documentation ` +- :yocto_docs:`2.6.4 Documentation ` + +************************* +2.5 'sumo' Release Series +************************* + +- :yocto_docs:`2.5 Documentation ` +- :yocto_docs:`2.5.1 Documentation ` +- :yocto_docs:`2.5.2 Documentation ` +- :yocto_docs:`2.5.3 Documentation ` + +************************** +2.4 'rocko' Release Series +************************** + +- :yocto_docs:`2.4 Documentation ` +- :yocto_docs:`2.4.1 Documentation ` +- :yocto_docs:`2.4.2 Documentation ` +- :yocto_docs:`2.4.3 Documentation ` +- :yocto_docs:`2.4.4 Documentation ` + +************************* +2.3 'pyro' Release Series +************************* + +- :yocto_docs:`2.3 Documentation ` +- :yocto_docs:`2.3.1 Documentation ` +- :yocto_docs:`2.3.2 Documentation ` +- :yocto_docs:`2.3.3 Documentation ` +- :yocto_docs:`2.3.4 Documentation ` + +************************** +2.2 'morty' Release Series +************************** + +- :yocto_docs:`2.2 Documentation ` +- :yocto_docs:`2.2.1 Documentation ` +- :yocto_docs:`2.2.2 Documentation ` +- :yocto_docs:`2.2.3 Documentation ` + +**************************** +2.1 'krogoth' Release Series +**************************** + +- :yocto_docs:`2.1 Documentation ` +- :yocto_docs:`2.1.1 Documentation ` +- :yocto_docs:`2.1.2 Documentation ` +- :yocto_docs:`2.1.3 Documentation ` + +*************************** +2.0 'jethro' Release Series +*************************** + +- :yocto_docs:`1.9 Documentation ` +- :yocto_docs:`2.0 Documentation ` +- :yocto_docs:`2.0.1 Documentation ` +- :yocto_docs:`2.0.2 Documentation ` +- :yocto_docs:`2.0.3 Documentation ` + +************************* +1.8 'fido' Release Series +************************* + +- :yocto_docs:`1.8 Documentation ` +- :yocto_docs:`1.8.1 Documentation ` +- :yocto_docs:`1.8.2 Documentation ` + +************************** +1.7 'dizzy' Release Series +************************** + +- :yocto_docs:`1.7 Documentation ` +- :yocto_docs:`1.7.1 Documentation ` +- :yocto_docs:`1.7.2 Documentation ` +- :yocto_docs:`1.7.3 Documentation ` + +************************** +1.6 'daisy' Release Series +************************** + +- :yocto_docs:`1.6 Documentation ` +- :yocto_docs:`1.6.1 Documentation ` +- :yocto_docs:`1.6.2 Documentation ` +- :yocto_docs:`1.6.3 Documentation ` + +************************* +1.5 'dora' Release Series +************************* + +- :yocto_docs:`1.5 Documentation ` +- 
:yocto_docs:`1.5.1 Documentation ` +- :yocto_docs:`1.5.2 Documentation ` +- :yocto_docs:`1.5.3 Documentation ` +- :yocto_docs:`1.5.4 Documentation ` + +************************** +1.4 'dylan' Release Series +************************** + +- :yocto_docs:`1.4 Documentation ` +- :yocto_docs:`1.4.1 Documentation ` +- :yocto_docs:`1.4.2 Documentation ` +- :yocto_docs:`1.4.3 Documentation ` +- :yocto_docs:`1.4.4 Documentation ` +- :yocto_docs:`1.4.5 Documentation ` + +************************** +1.3 'danny' Release Series +************************** + +- :yocto_docs:`1.3 Documentation ` +- :yocto_docs:`1.3.1 Documentation ` +- :yocto_docs:`1.3.2 Documentation ` + +*************************** +1.2 'denzil' Release Series +*************************** + +- :yocto_docs:`1.2 Documentation ` +- :yocto_docs:`1.2.1 Documentation ` +- :yocto_docs:`1.2.2 Documentation ` + +*************************** +1.1 'edison' Release Series +*************************** + +- :yocto_docs:`1.1 Documentation ` +- :yocto_docs:`1.1.1 Documentation ` +- :yocto_docs:`1.1.2 Documentation ` + +**************************** +1.0 'bernard' Release Series +**************************** + +- :yocto_docs:`1.0 Documentation ` +- :yocto_docs:`1.0.1 Documentation ` +- :yocto_docs:`1.0.2 Documentation ` + +**************************** +0.9 'laverne' Release Series +**************************** + +- :yocto_docs:`0.9 Documentation ` diff --git a/poky/documentation/sdk-manual/history.rst b/poky/documentation/sdk-manual/history.rst new file mode 100644 index 000000000..af027c97f --- /dev/null +++ b/poky/documentation/sdk-manual/history.rst @@ -0,0 +1,40 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 2.1 + - April 2016 + - The initial document released with the Yocto Project 2.1 Release + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. diff --git a/poky/documentation/sdk-manual/sdk-appendix-customizing-standard.rst b/poky/documentation/sdk-manual/sdk-appendix-customizing-standard.rst new file mode 100644 index 000000000..f6f2b6640 --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-appendix-customizing-standard.rst @@ -0,0 +1,34 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +**************************** +Customizing the Standard SDK +**************************** + +This appendix presents customizations you can apply to the standard SDK. + +Adding Individual Packages to the Standard SDK +============================================== + +When you build a standard SDK using the ``bitbake -c populate_sdk``, a +default set of packages is included in the resulting SDK. The +:term:`TOOLCHAIN_HOST_TASK` +and +:term:`TOOLCHAIN_TARGET_TASK` +variables control the set of packages adding to the SDK. + +If you want to add individual packages to the toolchain that runs on the +host, simply add those packages to the ``TOOLCHAIN_HOST_TASK`` variable. 
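+
+For example, a minimal sketch (the package name below is only a
+placeholder) that appends one additional ``nativesdk-`` prefixed host tool
+to the standard SDK from your ``local.conf`` file might look like this:
+::
+
+   TOOLCHAIN_HOST_TASK_append = " nativesdk-mytool"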
+Similarly, if you want to add packages to the default set that is part +of the toolchain that runs on the target, add the packages to the +``TOOLCHAIN_TARGET_TASK`` variable. + +Adding API Documentation to the Standard SDK +============================================ + +You can include API documentation as well as any other documentation +provided by recipes with the standard SDK by adding "api-documentation" +to the +:term:`DISTRO_FEATURES` +variable: DISTRO_FEATURES_append = " api-documentation" Setting this +variable as shown here causes the OpenEmbedded build system to build the +documentation and then include it in the standard SDK. diff --git a/poky/documentation/sdk-manual/sdk-appendix-customizing.rst b/poky/documentation/sdk-manual/sdk-appendix-customizing.rst new file mode 100644 index 000000000..7743e3c00 --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-appendix-customizing.rst @@ -0,0 +1,377 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +****************************** +Customizing the Extensible SDK +****************************** + +This appendix describes customizations you can apply to the extensible +SDK. + +Configuring the Extensible SDK +============================== + +The extensible SDK primarily consists of a pre-configured copy of the +OpenEmbedded build system from which it was produced. Thus, the SDK's +configuration is derived using that build system and the filters shown +in the following list. When these filters are present, the OpenEmbedded +build system applies them against ``local.conf`` and ``auto.conf``: + +- Variables whose values start with "/" are excluded since the + assumption is that those values are paths that are likely to be + specific to the :term:`Build Host`. + +- Variables listed in + :term:`SDK_LOCAL_CONF_BLACKLIST` + are excluded. These variables are not allowed through from the + OpenEmbedded build system configuration into the extensible SDK + configuration. Typically, these variables are specific to the machine + on which the build system is running and could be problematic as part + of the extensible SDK configuration. + + For a list of the variables excluded by default, see the + :term:`SDK_LOCAL_CONF_BLACKLIST` + in the glossary of the Yocto Project Reference Manual. + +- Variables listed in + :term:`SDK_LOCAL_CONF_WHITELIST` + are included. Including a variable in the value of + ``SDK_LOCAL_CONF_WHITELIST`` overrides either of the previous two + filters. The default value is blank. + +- Classes inherited globally with + :term:`INHERIT` that are listed in + :term:`SDK_INHERIT_BLACKLIST` + are disabled. Using ``SDK_INHERIT_BLACKLIST`` to disable these + classes is the typical method to disable classes that are problematic + or unnecessary in the SDK context. The default value blacklists the + :ref:`buildhistory ` + and :ref:`icecc ` classes. + +Additionally, the contents of ``conf/sdk-extra.conf``, when present, are +appended to the end of ``conf/local.conf`` within the produced SDK, +without any filtering. The ``sdk-extra.conf`` file is particularly +useful if you want to set a variable value just for the SDK and not the +OpenEmbedded build system used to create the SDK. + +Adjusting the Extensible SDK to Suit Your Build Host's Setup +============================================================ + +In most cases, the extensible SDK defaults should work with your :term:`Build +Host`'s setup. 
+However, some cases exist for which you might consider making +adjustments: + +- If your SDK configuration inherits additional classes using the + :term:`INHERIT` variable and you + do not need or want those classes enabled in the SDK, you can + blacklist them by adding them to the + :term:`SDK_INHERIT_BLACKLIST` + variable as described in the fourth bullet of the previous section. + + .. note:: + + The default value of + SDK_INHERIT_BLACKLIST + is set using the "?=" operator. Consequently, you will need to + either define the entire list by using the "=" operator, or you + will need to append a value using either "_append" or the "+=" + operator. You can learn more about these operators in the " + Basic Syntax + " section of the BitBake User Manual. + + . + +- If you have classes or recipes that add additional tasks to the + standard build flow (i.e. the tasks execute as the recipe builds as + opposed to being called explicitly), then you need to do one of the + following: + + - After ensuring the tasks are :ref:`shared + state ` tasks (i.e. the + output of the task is saved to and can be restored from the shared + state cache) or ensuring the tasks are able to be produced quickly + from a task that is a shared state task, add the task name to the + value of + :term:`SDK_RECRDEP_TASKS`. + + - Disable the tasks if they are added by a class and you do not need + the functionality the class provides in the extensible SDK. To + disable the tasks, add the class to the ``SDK_INHERIT_BLACKLIST`` + variable as described in the previous section. + +- Generally, you want to have a shared state mirror set up so users of + the SDK can add additional items to the SDK after installation + without needing to build the items from source. See the "`Providing + Additional Installable Extensible SDK + Content <#sdk-providing-additional-installable-extensible-sdk-content>`__" + section for information. + +- If you want users of the SDK to be able to easily update the SDK, you + need to set the + :term:`SDK_UPDATE_URL` + variable. For more information, see the "`Providing Updates to the + Extensible SDK After + Installation <#sdk-providing-updates-to-the-extensible-sdk-after-installation>`__" + section. + +- If you have adjusted the list of files and directories that appear in + :term:`COREBASE` (other than + layers that are enabled through ``bblayers.conf``), then you must + list these files in + :term:`COREBASE_FILES` so + that the files are copied into the SDK. + +- If your OpenEmbedded build system setup uses a different environment + setup script other than + :ref:`structure-core-script`, then you must + set + :term:`OE_INIT_ENV_SCRIPT` + to point to the environment setup script you use. + + .. note:: + + You must also reflect this change in the value used for the + COREBASE_FILES + variable as previously described. + +Changing the Extensible SDK Installer Title +=========================================== + +You can change the displayed title for the SDK installer by setting the +:term:`SDK_TITLE` variable and then +rebuilding the the SDK installer. For information on how to build an SDK +installer, see the "`Building an SDK +Installer <#sdk-building-an-sdk-installer>`__" section. + +By default, this title is derived from +:term:`DISTRO_NAME` when it is +set. If the ``DISTRO_NAME`` variable is not set, the title is derived +from the :term:`DISTRO` variable. 
+ +The +:ref:`populate_sdk_base ` +class defines the default value of the ``SDK_TITLE`` variable as +follows: +:: + + SDK_TITLE ??= "${@d.getVar('DISTRO_NAME') or d.getVar('DISTRO')} SDK" + +While several ways exist to change this variable, an efficient method is +to set the variable in your distribution's configuration file. Doing so +creates an SDK installer title that applies across your distribution. As +an example, assume you have your own layer for your distribution named +"meta-mydistro" and you are using the same type of file hierarchy as +does the default "poky" distribution. If so, you could update the +``SDK_TITLE`` variable in the +``~/meta-mydistro/conf/distro/mydistro.conf`` file using the following +form: +:: + + SDK_TITLE = "your_title" + +Providing Updates to the Extensible SDK After Installation +========================================================== + +When you make changes to your configuration or to the metadata and if +you want those changes to be reflected in installed SDKs, you need to +perform additional steps. These steps make it possible for anyone using +the installed SDKs to update the installed SDKs by using the +``devtool sdk-update`` command: + +1. Create a directory that can be shared over HTTP or HTTPS. You can do + this by setting up a web server such as an `Apache HTTP + Server `__ or + `Nginx `__ server in the cloud + to host the directory. This directory must contain the published SDK. + +2. Set the + :term:`SDK_UPDATE_URL` + variable to point to the corresponding HTTP or HTTPS URL. Setting + this variable causes any SDK built to default to that URL and thus, + the user does not have to pass the URL to the ``devtool sdk-update`` + command as described in the "`Applying Updates to an Installed + Extensible + SDK <#sdk-applying-updates-to-an-installed-extensible-sdk>`__" + section. + +3. Build the extensible SDK normally (i.e., use the + ``bitbake -c populate_sdk_ext`` imagename command). + +4. Publish the SDK using the following command: + :: + + $ oe-publish-sdk some_path/sdk-installer.sh path_to_shared_http_directory + + You must + repeat this step each time you rebuild the SDK with changes that you + want to make available through the update mechanism. + +Completing the above steps allows users of the existing installed SDKs +to simply run ``devtool sdk-update`` to retrieve and apply the latest +updates. See the "`Applying Updates to an Installed Extensible +SDK <#sdk-applying-updates-to-an-installed-extensible-sdk>`__" section +for further information. + +Changing the Default SDK Installation Directory +=============================================== + +When you build the installer for the Extensible SDK, the default +installation directory for the SDK is based on the +:term:`DISTRO` and +:term:`SDKEXTPATH` variables from +within the +:ref:`populate_sdk_base ` +class as follows: +:: + + SDKEXTPATH ??= "~/${@d.getVar('DISTRO')}_sdk" + +You can +change this default installation directory by specifically setting the +``SDKEXTPATH`` variable. + +While a number of ways exist through which you can set this variable, +the method that makes the most sense is to set the variable in your +distribution's configuration file. Doing so creates an SDK installer +default directory that applies across your distribution. As an example, +assume you have your own layer for your distribution named +"meta-mydistro" and you are using the same type of file hierarchy as +does the default "poky" distribution. 
If so, you could update the +``SDKEXTPATH`` variable in the +``~/meta-mydistro/conf/distro/mydistro.conf`` file using the following +form: +:: + + SDKEXTPATH = "some_path_for_your_installed_sdk" + +After building your installer, running it prompts the user for +acceptance of the some_path_for_your_installed_sdk directory as the +default location to install the Extensible SDK. + +Providing Additional Installable Extensible SDK Content +======================================================= + +If you want the users of an extensible SDK you build to be able to add +items to the SDK without requiring the users to build the items from +source, you need to do a number of things: + +1. Ensure the additional items you want the user to be able to install + are already built: + + - Build the items explicitly. You could use one or more "meta" + recipes that depend on lists of other recipes. + + - Build the "world" target and set + ``EXCLUDE_FROM_WORLD_pn-``\ recipename for the recipes you do not + want built. See the + :term:`EXCLUDE_FROM_WORLD` + variable for additional information. + +2. Expose the ``sstate-cache`` directory produced by the build. + Typically, you expose this directory by making it available through + an `Apache HTTP + Server `__ or + `Nginx `__ server. + +3. Set the appropriate configuration so that the produced SDK knows how + to find the configuration. The variable you need to set is + :term:`SSTATE_MIRRORS`: + :: + + SSTATE_MIRRORS = "file://.* http://example.com/some_path/sstate-cache/PATH" + + You can set the + ``SSTATE_MIRRORS`` variable in two different places: + + - If the mirror value you are setting is appropriate to be set for + both the OpenEmbedded build system that is actually building the + SDK and the SDK itself (i.e. the mirror is accessible in both + places or it will fail quickly on the OpenEmbedded build system + side, and its contents will not interfere with the build), then + you can set the variable in your ``local.conf`` or custom distro + configuration file. You can then "whitelist" the variable through + to the SDK by adding the following: + :: + + SDK_LOCAL_CONF_WHITELIST = "SSTATE_MIRRORS" + + - Alternatively, if you just want to set the ``SSTATE_MIRRORS`` + variable's value for the SDK alone, create a + ``conf/sdk-extra.conf`` file either in your + :term:`Build Directory` or within any + layer and put your ``SSTATE_MIRRORS`` setting within that file. + + .. note:: + + This second option is the safest option should you have any + doubts as to which method to use when setting + SSTATE_MIRRORS + . + +Minimizing the Size of the Extensible SDK Installer Download +============================================================ + +By default, the extensible SDK bundles the shared state artifacts for +everything needed to reconstruct the image for which the SDK was built. +This bundling can lead to an SDK installer file that is a Gigabyte or +more in size. If the size of this file causes a problem, you can build +an SDK that has just enough in it to install and provide access to the +``devtool command`` by setting the following in your configuration: +:: + + SDK_EXT_TYPE = "minimal" + +Setting +:term:`SDK_EXT_TYPE` to +"minimal" produces an SDK installer that is around 35 Mbytes in size, +which downloads and installs quickly. You need to realize, though, that +the minimal installer does not install any libraries or tools out of the +box. 
These libraries and tools must be installed either "on the fly" or +through actions you perform using ``devtool`` or explicitly with the +``devtool sdk-install`` command. + +In most cases, when building a minimal SDK you need to also enable +bringing in the information on a wider range of packages produced by the +system. Requiring this wider range of information is particularly true +so that ``devtool add`` is able to effectively map dependencies it +discovers in a source tree to the appropriate recipes. Additionally, the +information enables the ``devtool search`` command to return useful +results. + +To facilitate this wider range of information, you would need to set the +following: +:: + + SDK_INCLUDE_PKGDATA = "1" + +See the :term:`SDK_INCLUDE_PKGDATA` variable for additional information. + +Setting the ``SDK_INCLUDE_PKGDATA`` variable as shown causes the "world" +target to be built so that information for all of the recipes included +within it are available. Having these recipes available increases build +time significantly and increases the size of the SDK installer by 30-80 +Mbytes depending on how many recipes are included in your configuration. + +You can use ``EXCLUDE_FROM_WORLD_pn-``\ recipename for recipes you want +to exclude. However, it is assumed that you would need to be building +the "world" target if you want to provide additional items to the SDK. +Consequently, building for "world" should not represent undue overhead +in most cases. + +.. note:: + + If you set + SDK_EXT_TYPE + to "minimal", then providing a shared state mirror is mandatory so + that items can be installed as needed. See the " + Providing Additional Installable Extensible SDK Content + " section for more information. + +You can explicitly control whether or not to include the toolchain when +you build an SDK by setting the +:term:`SDK_INCLUDE_TOOLCHAIN` +variable to "1". In particular, it is useful to include the toolchain +when you have set ``SDK_EXT_TYPE`` to "minimal", which by default, +excludes the toolchain. Also, it is helpful if you are building a small +SDK for use with an IDE or some other tool where you do not want to take +extra steps to install a toolchain. diff --git a/poky/documentation/sdk-manual/sdk-appendix-obtain.rst b/poky/documentation/sdk-manual/sdk-appendix-obtain.rst new file mode 100644 index 000000000..ffaed9dee --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-appendix-obtain.rst @@ -0,0 +1,321 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +***************** +Obtaining the SDK +***************** + +.. _sdk-locating-pre-built-sdk-installers: + +Locating Pre-Built SDK Installers +================================= + +You can use existing, pre-built toolchains by locating and running an +SDK installer script that ships with the Yocto Project. Using this +method, you select and download an architecture-specific SDK installer +and then run the script to hand-install the toolchain. + +Follow these steps to locate and hand-install the toolchain: + +1. *Go to the Installers Directory:* Go to + :yocto_dl:`releases/yocto/yocto-3.1.2/toolchain/` + +2. *Open the Folder for Your Build Host:* Open the folder that matches + your :term:`Build Host` (i.e. + ``i686`` for 32-bit machines or ``x86_64`` for 64-bit machines). + +3. *Locate and Download the SDK Installer:* You need to find and + download the installer appropriate for your build host, target + hardware, and image type. 
+ + The installer files (``*.sh``) follow this naming convention: + :: + + poky-glibc-host_system-core-image-type-arch-toolchain[-ext]-release.sh + + Where: + host_system is a string representing your development system: + "i686" or "x86_64" + + type is a string representing the image: + "sato" or "minimal" + + arch is a string representing the target architecture: + "aarch64", "armv5e", "core2-64", "coretexa8hf-neon", "i586", "mips32r2", + "mips64", or "ppc7400" + + release is the version of Yocto Project. + + NOTE: + The standard SDK installer does not have the "-ext" string as + part of the filename. + + + The toolchains provided by the Yocto + Project are based off of the ``core-image-sato`` and + ``core-image-minimal`` images and contain libraries appropriate for + developing against those images. + + For example, if your build host is a 64-bit x86 system and you need + an extended SDK for a 64-bit core2 target, go into the ``x86_64`` + folder and download the following installer: + :: + + poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-DISTRO.sh + +4. *Run the Installer:* Be sure you have execution privileges and run + the installer. Following is an example from the ``Downloads`` + directory: + :: + + $ ~/Downloads/poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-DISTRO.sh + + During execution of the script, you choose the root location for the + toolchain. See the "`Installed Standard SDK Directory + Structure <#sdk-installed-standard-sdk-directory-structure>`__" + section and the "`Installed Extensible SDK Directory + Structure <#sdk-installed-extensible-sdk-directory-structure>`__" + section for more information. + +Building an SDK Installer +========================= + +As an alternative to locating and downloading an SDK installer, you can +build the SDK installer. Follow these steps: + +1. *Set Up the Build Environment:* Be sure you are set up to use BitBake + in a shell. See the ":ref:`dev-manual/dev-manual-start:preparing the build host`" section + in the Yocto Project Development Tasks Manual for information on how + to get a build host ready that is either a native Linux machine or a + machine that uses CROPS. + +2. *Clone the ``poky`` Repository:* You need to have a local copy of the + Yocto Project :term:`Source Directory` + (i.e. a local + ``poky`` repository). See the ":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`" and + possibly the ":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" and + ":ref:`checkout-out-by-tag-in-poky`" sections + all in the Yocto Project Development Tasks Manual for information on + how to clone the ``poky`` repository and check out the appropriate + branch for your work. + +3. *Initialize the Build Environment:* While in the root directory of + the Source Directory (i.e. ``poky``), run the + :ref:`structure-core-script` environment + setup script to define the OpenEmbedded build environment on your + build host. + :: + + $ source oe-init-build-env + + Among other things, the script + creates the :term:`Build Directory`, + which is + ``build`` in this case and is located in the Source Directory. After + the script runs, your current working directory is set to the + ``build`` directory. + +4. *Make Sure You Are Building an Installer for the Correct Machine:* + Check to be sure that your + :term:`MACHINE` variable in the + ``local.conf`` file in your Build Directory matches the architecture + for which you are building. + +5. 
*Make Sure Your SDK Machine is Correctly Set:* If you are building a
+   toolchain designed to run on an architecture that differs from your
+   current development host machine (i.e. the build host), be sure that
+   the :term:`SDKMACHINE` variable
+   in the ``local.conf`` file in your Build Directory is correctly set.
+
+   .. note::
+
+      If you are building an SDK installer for the Extensible SDK, the
+      ``SDKMACHINE`` value must be set for the architecture of the
+      machine you are using to build the installer. If ``SDKMACHINE``
+      is not set appropriately, the build fails and provides an error
+      message similar to the following:
+      ::
+
+         The extensible SDK can currently only be built for the same architecture as the machine being built on - SDK_ARCH is
+         set to i686 (likely via setting SDKMACHINE) which is different from the architecture of the build machine (x86_64).
+         Unable to continue.
+
+6. *Build the SDK Installer:* To build the SDK installer for a standard
+   SDK and populate the SDK image, use the following command form. Be
+   sure to replace image with the name of an image (e.g. "core-image-sato"):
+   ::
+
+      $ bitbake image -c populate_sdk
+
+   You can do the same for the extensible SDK using this command form:
+   ::
+
+      $ bitbake image -c populate_sdk_ext
+
+   These commands produce an SDK installer that contains the sysroot
+   that matches your target root filesystem.
+
+   When the ``bitbake`` command completes, the SDK installer will be in
+   ``tmp/deploy/sdk`` in the Build Directory.
+
+   .. note::
+
+      -  By default, the previous BitBake command does not build static
+         binaries. If you want to use the toolchain to build these types
+         of libraries, you need to be sure your SDK has the appropriate
+         static development libraries. Use the
+         :term:`TOOLCHAIN_TARGET_TASK`
+         variable inside your ``local.conf`` file before building the
+         SDK installer. Doing so ensures that the eventual SDK
+         installation process installs the appropriate library packages
+         as part of the SDK. Following is an example using ``libc``
+         static development libraries:
+         ::
+
+            TOOLCHAIN_TARGET_TASK_append = " libc-staticdev"
+
+7. *Run the Installer:* You can now run the SDK installer from
+   ``tmp/deploy/sdk`` in the Build Directory. Following is an example:
+   ::
+
+      $ cd ~/poky/build/tmp/deploy/sdk
+      $ ./poky-glibc-x86_64-core-image-sato-core2-64-toolchain-ext-DISTRO.sh
+
+   During execution of the script, you choose the root location for the
+   toolchain. See the "`Installed Standard SDK Directory
+   Structure <#sdk-installed-standard-sdk-directory-structure>`__"
+   section and the "`Installed Extensible SDK Directory
+   Structure <#sdk-installed-extensible-sdk-directory-structure>`__"
+   section for more information.
+
+Extracting the Root Filesystem
+==============================
+
+After installing the toolchain, for some use cases you might need to
+separately extract a root filesystem:
+
+-  You want to boot the image using NFS.
+
+-  You want to use the root filesystem as the target sysroot.
+
+-  You want to develop your target application using the root filesystem
+   as the target sysroot.
+
+Follow these steps to extract the root filesystem:
+
+1. *Locate and Download the Tarball for the Pre-Built Root Filesystem
+   Image File:* You need to find and download the root filesystem image
+   file that is appropriate for your target system. These files are kept
+   in machine-specific folders in the
+   :yocto_dl:`Index of Releases `
+   in the "machines" directory.
+
+   The machine-specific folders of the "machines" directory contain
+   tarballs (``*.tar.bz2``) for supported machines. These directories
+   also contain flattened root filesystem image files (``*.ext4``),
+   which you can use with QEMU directly.
+
+   The pre-built root filesystem image files follow these naming
+   conventions:
+   ::
+
+      core-image-profile-arch.tar.bz2
+
+      Where:
+          profile is the filesystem image's profile:
+                   lsb, lsb-dev, lsb-sdk, minimal, minimal-dev, minimal-initramfs,
+                   sato, sato-dev, sato-sdk, sato-sdk-ptest. For information on
+                   these types of image profiles, see the "Images" chapter in
+                   the Yocto Project Reference Manual.
+
+          arch is a string representing the target architecture:
+                   beaglebone-yocto, beaglebone-yocto-lsb, edgerouter, edgerouter-lsb,
+                   genericx86, genericx86-64, genericx86-64-lsb, genericx86-lsb and qemu*.
+
+   The root filesystems
+   provided by the Yocto Project are based off of the
+   ``core-image-sato`` and ``core-image-minimal`` images.
+
+   For example, if you plan on using a BeagleBone device as your target
+   hardware and your image is a ``core-image-sato-sdk`` image, you can
+   download the following file:
+   ::
+
+      core-image-sato-sdk-beaglebone-yocto.tar.bz2
+
+2. *Initialize the Cross-Development Environment:* You must ``source``
+   the cross-development environment setup script to establish necessary
+   environment variables.
+
+   This script is located in the top-level directory in which you
+   installed the toolchain (e.g. ``poky_sdk``).
+
+   Following is an example based on the toolchain installed in the
+   ":ref:`sdk-locating-pre-built-sdk-installers`" section:
+   ::
+
+      $ source ~/poky_sdk/environment-setup-core2-64-poky-linux
+
+3. *Extract the Root Filesystem:* Use the ``runqemu-extract-sdk``
+   command and provide the root filesystem image.
+
+   Following is an example command that extracts the root filesystem
+   from a previously built root filesystem image that was downloaded
+   from the :yocto_dl:`Index of Releases `.
+   This command extracts the root filesystem into the ``beaglebone-sato``
+   directory:
+   ::
+
+      $ runqemu-extract-sdk ~/Downloads/core-image-sato-sdk-beaglebone-yocto.tar.bz2 ~/beaglebone-sato
+
+   You could now point to the target sysroot at ``beaglebone-sato``.
+
+Installed Standard SDK Directory Structure
+==========================================
+
+The following figure shows the resulting directory structure after you
+install the Standard SDK by running the ``*.sh`` SDK installation
+script:
+
+.. image:: figures/sdk-installed-standard-sdk-directory.png
+   :scale: 80%
+   :align: center
+
+The installed SDK consists of an environment setup script for the SDK, a
+configuration file for the target, a version file for the target, and
+the root filesystem (``sysroots``) needed to develop objects for the
+target system.
+
+Within the figure, italicized text is used to indicate replaceable
+portions of the file or directory name. For example, install_dir/version
+is the directory where the SDK is installed. By default, this directory
+is ``/opt/poky/``. And, version represents the specific snapshot of the
+SDK (e.g. 3.1.2). Furthermore, target represents the target architecture
+(e.g. ``i586``) and host represents the development system's
+architecture (e.g. ``x86_64``). Thus, the complete names of the two
+directories within the ``sysroots`` could be ``i586-poky-linux`` and
+``x86_64-pokysdk-linux`` for the target and host, respectively.
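+
+To confirm the layout described above, you can source the installed
+environment setup script and inspect the cross-compiler variables it
+exports. The following is a minimal sketch only; the installation path,
+version, target tuple, and the exact flags shown in the output are
+illustrative and depend on how you ran the installer and which target
+the SDK was built for:
+::
+
+   $ source /opt/poky/3.1.2/environment-setup-i586-poky-linux
+   $ echo $CC
+   i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/3.1.2/sysroots/i586-poky-linux
+   $ $CC -o hello hello.c
+
+Because the setup script points ``CC`` at the target sysroot, a plain
+``$CC`` invocation such as the one above cross-compiles against the
+libraries and headers installed under ``sysroots``.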
+ +Installed Extensible SDK Directory Structure +============================================ + +The following figure shows the resulting directory structure after you +install the Extensible SDK by running the ``*.sh`` SDK installation +script: + +.. image:: figures/sdk-installed-extensible-sdk-directory.png + :scale: 80% + :align: center + +The installed directory structure for the extensible SDK is quite +different than the installed structure for the standard SDK. The +extensible SDK does not separate host and target parts in the same +manner as does the standard SDK. The extensible SDK uses an embedded +copy of the OpenEmbedded build system, which has its own sysroots. + +Of note in the directory structure are an environment setup script for +the SDK, a configuration file for the target, a version file for the +target, and log files for the OpenEmbedded build system preparation +script run by the installer and BitBake. + +Within the figure, italicized text is used to indicate replaceable +portions of the file or directory name. For example, install_dir is the +directory where the SDK is installed, which is ``poky_sdk`` by default, +and target represents the target architecture (e.g. ``i586``). diff --git a/poky/documentation/sdk-manual/sdk-extensible.rst b/poky/documentation/sdk-manual/sdk-extensible.rst new file mode 100644 index 000000000..1ad5c46be --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-extensible.rst @@ -0,0 +1,1356 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************************ +Using the Extensible SDK +************************ + +This chapter describes the extensible SDK and how to install it. +Information covers the pieces of the SDK, how to install it, and +presents a look at using the ``devtool`` functionality. The extensible +SDK makes it easy to add new applications and libraries to an image, +modify the source for an existing component, test changes on the target +hardware, and ease integration into the rest of the +:term:`OpenEmbedded Build System`. + +.. note:: + + For a side-by-side comparison of main features supported for an + extensible SDK as compared to a standard SDK, see the " + Introduction + " section. + +In addition to the functionality available through ``devtool``, you can +alternatively make use of the toolchain directly, for example from +Makefile and Autotools. See the "`Using the SDK Toolchain +Directly <#sdk-working-projects>`__" chapter for more information. + +.. _sdk-extensible-sdk-intro: + +Why use the Extensible SDK and What is in It? +============================================= + +The extensible SDK provides a cross-development toolchain and libraries +tailored to the contents of a specific image. You would use the +Extensible SDK if you want a toolchain experience supplemented with the +powerful set of ``devtool`` commands tailored for the Yocto Project +environment. + +The installed extensible SDK consists of several files and directories. +Basically, it contains an SDK environment setup script, some +configuration files, an internal build system, and the ``devtool`` +functionality. + +.. _sdk-installing-the-extensible-sdk: + +Installing the Extensible SDK +============================= + +The first thing you need to do is install the SDK on your :term:`Build +Host` by running the ``*.sh`` installation script. 
+
+You can download a tarball installer, which includes the pre-built
+toolchain, the ``runqemu`` script, the internal build system,
+``devtool``, and support files from the appropriate
+:yocto_dl:`toolchain ` directory within the Index of
+Releases. Toolchains are available for 32-bit and 64-bit development
+hosts in the ``i686`` and ``x86_64`` directories, respectively. The
+toolchains the Yocto Project provides are based off the
+``core-image-sato`` and ``core-image-minimal`` images and contain
+libraries appropriate for developing against those images.
+
+The names of the tarball installer scripts are such that a string
+representing the host system appears first in the filename and then is
+immediately followed by a string representing the target architecture.
+An extensible SDK has the string "-ext" as part of the name. Following
+is the general form:
+::
+
+   poky-glibc-host_system-image_type-arch-toolchain-ext-release_version.sh
+
+   Where:
+       host_system is a string representing your development system:
+
+          i686 or x86_64.
+
+       image_type is the image for which the SDK was built:
+
+          core-image-sato or core-image-minimal
+
+       arch is a string representing the tuned target architecture:
+
+          aarch64, armv5e, core2-64, i586, mips32r2, mips64, ppc7400, or cortexa8hf-neon
+
+       release_version is a string representing the release number of the Yocto Project:
+
+          3.1.2, 3.1.2+snapshot
+
+For example, the following SDK installer is for a 64-bit
+development host system and an i586-tuned target architecture based off
+the SDK for ``core-image-sato`` and using the current DISTRO snapshot:
+::
+
+   poky-glibc-x86_64-core-image-sato-i586-toolchain-ext-DISTRO.sh
+
+.. note::
+
+   As an alternative to downloading an SDK, you can build the SDK
+   installer. For information on building the installer, see the
+   "Building an SDK Installer" section.
+
+The SDK and toolchains are self-contained and by default are installed
+into the ``poky_sdk`` folder in your home directory. You can choose to
+install the extensible SDK in any location when you run the installer.
+However, because files need to be written under that directory during
+the normal course of operation, the location you choose for installation
+must be writable for whichever users need to use the SDK.
+
+The following command shows how to run the installer given a toolchain
+tarball for a 64-bit x86 development host system and a 64-bit x86 target
+architecture. The example assumes the SDK installer is located in
+``~/Downloads/`` and has execution rights.
+
+.. note::
+
+   If you do not have write permissions for the directory into which you
+   are installing the SDK, the installer notifies you and exits. For
+   that case, set up the proper permissions in the directory and run the
+   installer again.
+
+::
+
+   $ ./Downloads/poky-glibc-x86_64-core-image-minimal-core2-64-toolchain-ext-2.5.sh
+   Poky (Yocto Project Reference Distro) Extensible SDK installer version 2.5
+   ==========================================================================
+   Enter target directory for SDK (default: ~/poky_sdk):
+   You are about to install the SDK to "/home/scottrif/poky_sdk". Proceed [Y/n]? Y
+   Extracting SDK..............done
+   Setting it up...
+   Extracting buildtools...
+   Preparing build system...
+ Parsing recipes: 100% |##################################################################| Time: 0:00:52 + Initialising tasks: 100% |###############################################################| Time: 0:00:00 + Checking sstate mirror object availability: 100% |#######################################| Time: 0:00:00 + Loading cache: 100% |####################################################################| Time: 0:00:00 + Initialising tasks: 100% |###############################################################| Time: 0:00:00 + done + SDK has been successfully set up and is ready to be used. + Each time you wish to use the SDK in a new shell session, you need to source the environment setup script e.g. + $ . /home/scottrif/poky_sdk/environment-setup-core2-64-poky-linux + +.. _sdk-running-the-extensible-sdk-environment-setup-script: + +Running the Extensible SDK Environment Setup Script +=================================================== + +Once you have the SDK installed, you must run the SDK environment setup +script before you can actually use the SDK. This setup script resides in +the directory you chose when you installed the SDK, which is either the +default ``poky_sdk`` directory or the directory you chose during +installation. + +Before running the script, be sure it is the one that matches the +architecture for which you are developing. Environment setup scripts +begin with the string "``environment-setup``" and include as part of +their name the tuned target architecture. As an example, the following +commands set the working directory to where the SDK was installed and +then source the environment setup script. In this example, the setup +script is for an IA-based target machine using i586 tuning: +:: + + $ cd /home/scottrif/poky_sdk + $ source environment-setup-core2-64-poky-linux + SDK environment now set up; additionally you may now run devtool to perform development tasks. + Run devtool --help for further details. + +Running the setup script defines many environment variables needed in +order to use the SDK (e.g. ``PATH``, +:term:`CC`, +:term:`LD`, and so forth). If you want to +see all the environment variables the script exports, examine the +installation file itself. + +Using ``devtool`` in Your SDK Workflow +====================================== + +The cornerstone of the extensible SDK is a command-line tool called +``devtool``. This tool provides a number of features that help you +build, test and package software within the extensible SDK, and +optionally integrate it into an image built by the OpenEmbedded build +system. + +.. note:: + + The use of + devtool + is not limited to the extensible SDK. You can use + devtool + to help you easily develop any project whose build output must be + part of an image built using the build system. + +The ``devtool`` command line is organized similarly to +:ref:`overview-manual/overview-manual-development-environment:git` in that it has a number of +sub-commands for each function. You can run ``devtool --help`` to see +all the commands. + +.. note:: + + See the " + devtool +  Quick Reference + " in the Yocto Project Reference Manual for a + devtool + quick reference. + +Three ``devtool`` subcommands exist that provide entry-points into +development: + +- *devtool add*: Assists in adding new software to be built. + +- *devtool modify*: Sets up an environment to enable you to modify + the source of an existing component. 
+ +- *devtool upgrade*: Updates an existing recipe so that you can + build it for an updated set of source files. + +As with the build system, "recipes" represent software packages within +``devtool``. When you use ``devtool add``, a recipe is automatically +created. When you use ``devtool modify``, the specified existing recipe +is used in order to determine where to get the source code and how to +patch it. In both cases, an environment is set up so that when you build +the recipe a source tree that is under your control is used in order to +allow you to make changes to the source as desired. By default, new +recipes and the source go into a "workspace" directory under the SDK. + +The remainder of this section presents the ``devtool add``, +``devtool modify``, and ``devtool upgrade`` workflows. + +.. _sdk-use-devtool-to-add-an-application: + +Use ``devtool add`` to Add an Application +----------------------------------------- + +The ``devtool add`` command generates a new recipe based on existing +source code. This command takes advantage of the +:ref:`devtool-the-workspace-layer-structure` +layer that many ``devtool`` commands use. The command is flexible enough +to allow you to extract source code into both the workspace or a +separate local Git repository and to use existing code that does not +need to be extracted. + +Depending on your particular scenario, the arguments and options you use +with ``devtool add`` form different combinations. The following diagram +shows common development flows you would use with the ``devtool add`` +command: + +.. image:: figures/sdk-devtool-add-flow.png + :align: center + +1. *Generating the New Recipe*: The top part of the flow shows three + scenarios by which you could use ``devtool add`` to generate a recipe + based on existing source code. + + In a shared development environment, it is typical for other + developers to be responsible for various areas of source code. As a + developer, you are probably interested in using that source code as + part of your development within the Yocto Project. All you need is + access to the code, a recipe, and a controlled area in which to do + your work. + + Within the diagram, three possible scenarios feed into the + ``devtool add`` workflow: + + - *Left*: The left scenario in the figure represents a common + situation where the source code does not exist locally and needs + to be extracted. In this situation, the source code is extracted + to the default workspace - you do not want the files in some + specific location outside of the workspace. Thus, everything you + need will be located in the workspace: + :: + + $ devtool add recipe fetchuri + + With this command, ``devtool`` extracts the upstream + source files into a local Git repository within the ``sources`` + folder. The command then creates a recipe named recipe and a + corresponding append file in the workspace. If you do not provide + recipe, the command makes an attempt to determine the recipe name. + + - *Middle*: The middle scenario in the figure also represents a + situation where the source code does not exist locally. In this + case, the code is again upstream and needs to be extracted to some + local area - this time outside of the default workspace. + + .. note:: + + If required, + devtool + always creates a Git repository locally during the extraction. + + Furthermore, the first positional argument srctree in this case + identifies where the ``devtool add`` command will locate the + extracted code outside of the workspace. 
You need to specify an
+      empty directory:
+      ::
+
+         $ devtool add recipe srctree fetchuri
+
+      In summary,
+      the source code is pulled from fetchuri and extracted into the
+      location defined by srctree as a local Git repository.
+
+      Within workspace, ``devtool`` creates a recipe named recipe along
+      with an associated append file.
+
+   -  *Right*: The right scenario in the figure represents a situation
+      where the srctree has been previously prepared outside of the
+      ``devtool`` workspace.
+
+      The following command provides a new recipe name and identifies
+      the existing source tree location:
+      ::
+
+         $ devtool add recipe srctree
+
+      The command examines the source code and creates a recipe named
+      recipe for the code and places the recipe into the workspace.
+
+      Because the extracted source code already exists, ``devtool`` does
+      not try to relocate the source code into the workspace - only the
+      new recipe is placed in the workspace.
+
+      Aside from a recipe folder, the command also creates an associated
+      append folder and places an initial ``*.bbappend`` file within.
+
+2. *Edit the Recipe*: You can use ``devtool edit-recipe`` to open up the
+   editor as defined by the ``$EDITOR`` environment variable and modify
+   the file:
+   ::
+
+      $ devtool edit-recipe recipe
+
+   From within the editor, you
+   can make modifications to the recipe that take effect when you build
+   it later.
+
+3. *Build the Recipe or Rebuild the Image*: The next step you take
+   depends on what you are going to do with the new code.
+
+   If you need to eventually move the build output to the target
+   hardware, use the following ``devtool`` command:
+   ::
+
+      $ devtool build recipe
+
+   On the other hand, if you want an image to contain the recipe's
+   packages from the workspace for immediate deployment onto a device
+   (e.g. for testing purposes), you can use the ``devtool build-image``
+   command:
+   ::
+
+      $ devtool build-image image
+
+4. *Deploy the Build Output*: When you use the ``devtool build`` command
+   to build out your recipe, you probably want to see if the resulting
+   build output works as expected on the target hardware.
+
+   .. note::
+
+      This step assumes you have a previously built image that is
+      already either running in QEMU or is running on actual hardware.
+      Also, it is assumed that for deployment of the image to the
+      target, SSH is installed in the image and, if the image is running
+      on real hardware, you have network access to and from your
+      development machine.
+
+   You can deploy your build output to that target hardware by using the
+   ``devtool deploy-target`` command:
+   ::
+
+      $ devtool deploy-target recipe target
+
+   The target is a live target machine running as an SSH server.
+
+   You can, of course, also deploy the image you build to actual
+   hardware by using the ``devtool build-image`` command. However,
+   ``devtool`` does not provide a specific command that allows you to
+   deploy the image to actual hardware.
+
+5. *Finish Your Work With the Recipe*: The ``devtool finish`` command
+   creates any patches corresponding to commits in the local Git
+   repository, moves the new recipe to a more permanent layer, and then
+   resets the recipe so that the recipe is built normally rather than
+   from the workspace.
+   ::
+
+      $ devtool finish recipe layer
+
+   .. note::
+
+      Any changes you want to turn into patches must be committed to the
+      Git repository in the source tree.
+
+   As mentioned, the ``devtool finish`` command moves the final recipe
+   to its permanent layer.
+ + As a final process of the ``devtool finish`` command, the state of + the standard layers and the upstream source is restored so that you + can build the recipe from those areas rather than the workspace. + + .. note:: + + You can use the + devtool reset + command to put things back should you decide you do not want to + proceed with your work. If you do use this command, realize that + the source tree is preserved. + +.. _sdk-devtool-use-devtool-modify-to-modify-the-source-of-an-existing-component: + +Use ``devtool modify`` to Modify the Source of an Existing Component +-------------------------------------------------------------------- + +The ``devtool modify`` command prepares the way to work on existing code +that already has a local recipe in place that is used to build the +software. The command is flexible enough to allow you to extract code +from an upstream source, specify the existing recipe, and keep track of +and gather any patch files from other developers that are associated +with the code. + +Depending on your particular scenario, the arguments and options you use +with ``devtool modify`` form different combinations. The following +diagram shows common development flows for the ``devtool modify`` +command: + +.. image:: figures/sdk-devtool-modify-flow.png + :align: center + +1. *Preparing to Modify the Code*: The top part of the flow shows three + scenarios by which you could use ``devtool modify`` to prepare to + work on source files. Each scenario assumes the following: + + - The recipe exists locally in a layer external to the ``devtool`` + workspace. + + - The source files exist either upstream in an un-extracted state or + locally in a previously extracted state. + + The typical situation is where another developer has created a layer + for use with the Yocto Project and their recipe already resides in + that layer. Furthermore, their source code is readily available + either upstream or locally. + + - *Left*: The left scenario in the figure represents a common + situation where the source code does not exist locally and it + needs to be extracted from an upstream source. In this situation, + the source is extracted into the default ``devtool`` workspace + location. The recipe, in this scenario, is in its own layer + outside the workspace (i.e. ``meta-``\ layername). + + The following command identifies the recipe and, by default, + extracts the source files: + :: + + $ devtool modify recipe + + Once + ``devtool``\ locates the recipe, ``devtool`` uses the recipe's + :term:`SRC_URI` statements to + locate the source code and any local patch files from other + developers. + + With this scenario, no srctree argument exists. Consequently, the + default behavior of the ``devtool modify`` command is to extract + the source files pointed to by the ``SRC_URI`` statements into a + local Git structure. Furthermore, the location for the extracted + source is the default area within the ``devtool`` workspace. The + result is that the command sets up both the source code and an + append file within the workspace while the recipe remains in its + original location. + + Additionally, if you have any non-patch local files (i.e. files + referred to with ``file://`` entries in ``SRC_URI`` statement + excluding ``*.patch/`` or ``*.diff``), these files are copied to + an ``oe-local-files`` folder under the newly created source tree. + Copying the files here gives you a convenient area from which you + can modify the files. 
Any changes or additions you make to those + files are incorporated into the build the next time you build the + software just as are other changes you might have made to the + source. + + - *Middle*: The middle scenario in the figure represents a situation + where the source code also does not exist locally. In this case, + the code is again upstream and needs to be extracted to some local + area as a Git repository. The recipe, in this scenario, is again + local and in its own layer outside the workspace. + + The following command tells ``devtool`` the recipe with which to + work and, in this case, identifies a local area for the extracted + source files that exists outside of the default ``devtool`` + workspace: + :: + + $ devtool modify recipe srctree + + .. note:: + + You cannot provide a URL for + srctree + using the + devtool + command. + + As with all extractions, the command uses the recipe's ``SRC_URI`` + statements to locate the source files and any associated patch + files. Non-patch files are copied to an ``oe-local-files`` folder + under the newly created source tree. + + Once the files are located, the command by default extracts them + into srctree. + + Within workspace, ``devtool`` creates an append file for the + recipe. The recipe remains in its original location but the source + files are extracted to the location you provide with srctree. + + - *Right*: The right scenario in the figure represents a situation + where the source tree (srctree) already exists locally as a + previously extracted Git structure outside of the ``devtool`` + workspace. In this example, the recipe also exists elsewhere + locally in its own layer. + + The following command tells ``devtool`` the recipe with which to + work, uses the "-n" option to indicate source does not need to be + extracted, and uses srctree to point to the previously extracted + source files: + :: + + $ devtool modify -n recipe srctree + + If an ``oe-local-files`` subdirectory happens to exist and it + contains non-patch files, the files are used. However, if the + subdirectory does not exist and you run the ``devtool finish`` + command, any non-patch files that might exist next to the recipe + are removed because it appears to ``devtool`` that you have + deleted those files. + + Once the ``devtool modify`` command finishes, it creates only an + append file for the recipe in the ``devtool`` workspace. The + recipe and the source code remain in their original locations. + +2. *Edit the Source*: Once you have used the ``devtool modify`` command, + you are free to make changes to the source files. You can use any + editor you like to make and save your source code modifications. + +3. *Build the Recipe or Rebuild the Image*: The next step you take + depends on what you are going to do with the new code. + + If you need to eventually move the build output to the target + hardware, use the following ``devtool`` command: + :: + + $ devtool build recipe + + On the other hand, if you want an image to contain the recipe's + packages from the workspace for immediate deployment onto a device + (e.g. for testing purposes), you can use the ``devtool build-image`` + command: $ devtool build-image image + +4. *Deploy the Build Output*: When you use the ``devtool build`` command + to build out your recipe, you probably want to see if the resulting + build output works as expected on target hardware. + + .. note:: + + This step assumes you have a previously built image that is + already either running in QEMU or running on actual hardware. 
+ Also, it is assumed that for deployment of the image to the + target, SSH is installed in the image and if the image is running + on real hardware that you have network access to and from your + development machine. + + You can deploy your build output to that target hardware by using the + ``devtool deploy-target`` command: + :: + + $ devtool deploy-target recipe target + + The target is a live target machine running as an SSH server. + + You can, of course, use other methods to deploy the image you built + using the ``devtool build-image`` command to actual hardware. + ``devtool`` does not provide a specific command to deploy the image + to actual hardware. + +5. *Finish Your Work With the Recipe*: The ``devtool finish`` command + creates any patches corresponding to commits in the local Git + repository, updates the recipe to point to them (or creates a + ``.bbappend`` file to do so, depending on the specified destination + layer), and then resets the recipe so that the recipe is built + normally rather than from the workspace. + :: + + $ devtool finish recipe layer + + .. note:: + + Any changes you want to turn into patches must be staged and + committed within the local Git repository before you use the + devtool finish + command. + + Because there is no need to move the recipe, ``devtool finish`` + either updates the original recipe in the original layer or the + command creates a ``.bbappend`` file in a different layer as provided + by layer. Any work you did in the ``oe-local-files`` directory is + preserved in the original files next to the recipe during the + ``devtool finish`` command. + + As a final process of the ``devtool finish`` command, the state of + the standard layers and the upstream source is restored so that you + can build the recipe from those areas rather than from the workspace. + + .. note:: + + You can use the + devtool reset + command to put things back should you decide you do not want to + proceed with your work. If you do use this command, realize that + the source tree is preserved. + +.. _sdk-devtool-use-devtool-upgrade-to-create-a-version-of-the-recipe-that-supports-a-newer-version-of-the-software: + +Use ``devtool upgrade`` to Create a Version of the Recipe that Supports a Newer Version of the Software +------------------------------------------------------------------------------------------------------- + +The ``devtool upgrade`` command upgrades an existing recipe to that of a +more up-to-date version found upstream. Throughout the life of software, +recipes continually undergo version upgrades by their upstream +publishers. You can use the ``devtool upgrade`` workflow to make sure +your recipes you are using for builds are up-to-date with their upstream +counterparts. + +.. note:: + + Several methods exist by which you can upgrade recipes - + devtool upgrade + happens to be one. You can read about all the methods by which you + can upgrade recipes in the " + Upgrading Recipes + " section of the Yocto Project Development Tasks Manual. + +The ``devtool upgrade`` command is flexible enough to allow you to +specify source code revision and versioning schemes, extract code into +or out of the ``devtool`` +:ref:`devtool-the-workspace-layer-structure`, +and work with any source file forms that the +:ref:`fetchers ` support. + +The following diagram shows the common development flow used with the +``devtool upgrade`` command: + +.. image:: figures/sdk-devtool-upgrade-flow.png + :align: center + +1. 
*Initiate the Upgrade*: The top part of the flow shows the typical + scenario by which you use the ``devtool upgrade`` command. The + following conditions exist: + + - The recipe exists in a local layer external to the ``devtool`` + workspace. + + - The source files for the new release exist in the same location + pointed to by :term:`SRC_URI` + in the recipe (e.g. a tarball with the new version number in the + name, or as a different revision in the upstream Git repository). + + A common situation is where third-party software has undergone a + revision so that it has been upgraded. The recipe you have access to + is likely in your own layer. Thus, you need to upgrade the recipe to + use the newer version of the software: + :: + + $ devtool upgrade -V version recipe + + By default, the ``devtool upgrade`` command extracts source + code into the ``sources`` directory in the + :ref:`devtool-the-workspace-layer-structure`. + If you want the code extracted to any other location, you need to + provide the srctree positional argument with the command as follows: + $ devtool upgrade -V version recipe srctree + + .. note:: + + In this example, the "-V" option specifies the new version. If you + don't use "-V", the command upgrades the recipe to the latest + version. + + If the source files pointed to by the ``SRC_URI`` statement in the + recipe are in a Git repository, you must provide the "-S" option and + specify a revision for the software. + + Once ``devtool`` locates the recipe, it uses the ``SRC_URI`` variable + to locate the source code and any local patch files from other + developers. The result is that the command sets up the source code, + the new version of the recipe, and an append file all within the + workspace. + + Additionally, if you have any non-patch local files (i.e. files + referred to with ``file://`` entries in ``SRC_URI`` statement + excluding ``*.patch/`` or ``*.diff``), these files are copied to an + ``oe-local-files`` folder under the newly created source tree. + Copying the files here gives you a convenient area from which you can + modify the files. Any changes or additions you make to those files + are incorporated into the build the next time you build the software + just as are other changes you might have made to the source. + +2. *Resolve any Conflicts created by the Upgrade*: Conflicts could exist + due to the software being upgraded to a new version. Conflicts occur + if your recipe specifies some patch files in ``SRC_URI`` that + conflict with changes made in the new version of the software. For + such cases, you need to resolve the conflicts by editing the source + and following the normal ``git rebase`` conflict resolution process. + + Before moving onto the next step, be sure to resolve any such + conflicts created through use of a newer or different version of the + software. + +3. *Build the Recipe or Rebuild the Image*: The next step you take + depends on what you are going to do with the new code. + + If you need to eventually move the build output to the target + hardware, use the following ``devtool`` command: + :: + + $ devtool build recipe + + On the other hand, if you want an image to contain the recipe's + packages from the workspace for immediate deployment onto a device + (e.g. for testing purposes), you can use the ``devtool build-image`` + command: + :: + + $ devtool build-image image + +4. 
*Deploy the Build Output*: When you use the ``devtool build`` command + or ``bitbake`` to build your recipe, you probably want to see if the + resulting build output works as expected on target hardware. + + .. note:: + + This step assumes you have a previously built image that is + already either running in QEMU or running on actual hardware. + Also, it is assumed that for deployment of the image to the + target, SSH is installed in the image and if the image is running + on real hardware that you have network access to and from your + development machine. + + You can deploy your build output to that target hardware by using the + ``devtool deploy-target`` command: $ devtool deploy-target recipe + target The target is a live target machine running as an SSH server. + + You can, of course, also deploy the image you build using the + ``devtool build-image`` command to actual hardware. However, + ``devtool`` does not provide a specific command that allows you to do + this. + +5. *Finish Your Work With the Recipe*: The ``devtool finish`` command + creates any patches corresponding to commits in the local Git + repository, moves the new recipe to a more permanent layer, and then + resets the recipe so that the recipe is built normally rather than + from the workspace. + + Any work you did in the ``oe-local-files`` directory is preserved in + the original files next to the recipe during the ``devtool finish`` + command. + + If you specify a destination layer that is the same as the original + source, then the old version of the recipe and associated files are + removed prior to adding the new version. + :: + + $ devtool finish recipe layer + + .. note:: + + Any changes you want to turn into patches must be committed to the + Git repository in the source tree. + + As a final process of the ``devtool finish`` command, the state of + the standard layers and the upstream source is restored so that you + can build the recipe from those areas rather than the workspace. + + .. note:: + + You can use the + devtool reset + command to put things back should you decide you do not want to + proceed with your work. If you do use this command, realize that + the source tree is preserved. + +.. _sdk-a-closer-look-at-devtool-add: + +A Closer Look at ``devtool add`` +================================ + +The ``devtool add`` command automatically creates a recipe based on the +source tree you provide with the command. Currently, the command has +support for the following: + +- Autotools (``autoconf`` and ``automake``) + +- CMake + +- Scons + +- ``qmake`` + +- Plain ``Makefile`` + +- Out-of-tree kernel module + +- Binary package (i.e. "-b" option) + +- Node.js module + +- Python modules that use ``setuptools`` or ``distutils`` + +Apart from binary packages, the determination of how a source tree +should be treated is automatic based on the files present within that +source tree. For example, if a ``CMakeLists.txt`` file is found, then +the source tree is assumed to be using CMake and is treated accordingly. + +.. note:: + + In most cases, you need to edit the automatically generated recipe in + order to make it build properly. Typically, you would go through + several edit and build cycles until the recipe successfully builds. + Once the recipe builds, you could use possible further iterations to + test the recipe on the target device. + +The remainder of this section covers specifics regarding how parts of +the recipe are generated. + +.. 
_sdk-name-and-version: + +Name and Version +---------------- + +If you do not specify a name and version on the command line, +``devtool add`` uses various metadata within the source tree in an +attempt to determine the name and version of the software being built. +Based on what the tool determines, ``devtool`` sets the name of the +created recipe file accordingly. + +If ``devtool`` cannot determine the name and version, the command prints +an error. For such cases, you must re-run the command and provide the +name and version, just the name, or just the version as part of the +command line. + +Sometimes the name or version determined from the source tree might be +incorrect. For such a case, you must reset the recipe: +:: + + $ devtool reset -n recipename + +After running the ``devtool reset`` command, you need to +run ``devtool add`` again and provide the name or the version. + +.. _sdk-dependency-detection-and-mapping: + +Dependency Detection and Mapping +-------------------------------- + +The ``devtool add`` command attempts to detect build-time dependencies +and map them to other recipes in the system. During this mapping, the +command fills in the names of those recipes as part of the +:term:`DEPENDS` variable within the +recipe. If a dependency cannot be mapped, ``devtool`` places a comment +in the recipe indicating such. The inability to map a dependency can +result from naming not being recognized or because the dependency simply +is not available. For cases where the dependency is not available, you +must use the ``devtool add`` command to add an additional recipe that +satisfies the dependency. Once you add that recipe, you need to update +the ``DEPENDS`` variable in the original recipe to include the new +recipe. + +If you need to add runtime dependencies, you can do so by adding the +following to your recipe: +:: + + RDEPENDS_${PN} += "dependency1 dependency2 ..." + +.. note:: + + The + devtool add + command often cannot distinguish between mandatory and optional + dependencies. Consequently, some of the detected dependencies might + in fact be optional. When in doubt, consult the documentation or the + configure script for the software the recipe is building for further + details. In some cases, you might find you can substitute the + dependency with an option that disables the associated functionality + passed to the configure script. + +.. _sdk-license-detection: + +License Detection +----------------- + +The ``devtool add`` command attempts to determine if the software you +are adding is able to be distributed under a common, open-source +license. If so, the command sets the +:term:`LICENSE` value accordingly. +You should double-check the value added by the command against the +documentation or source files for the software you are building and, if +necessary, update that ``LICENSE`` value. + +The ``devtool add`` command also sets the +:term:`LIC_FILES_CHKSUM` +value to point to all files that appear to be license-related. Realize +that license statements often appear in comments at the top of source +files or within the documentation. In such cases, the command does not +recognize those license statements. Consequently, you might need to +amend the ``LIC_FILES_CHKSUM`` variable to point to one or more of those +comments if present. Setting ``LIC_FILES_CHKSUM`` is particularly +important for third-party software. The mechanism attempts to ensure +correct licensing should you upgrade the recipe to a newer upstream +version in future. 
Any change in licensing is detected and you receive +an error prompting you to check the license text again. + +If the ``devtool add`` command cannot determine licensing information, +``devtool`` sets the ``LICENSE`` value to "CLOSED" and leaves the +``LIC_FILES_CHKSUM`` value unset. This behavior allows you to continue +with development even though the settings are unlikely to be correct in +all cases. You should check the documentation or source files for the +software you are building to determine the actual license. + +.. _sdk-adding-makefile-only-software: + +Adding Makefile-Only Software +----------------------------- + +The use of Make by itself is very common in both proprietary and +open-source software. Unfortunately, Makefiles are often not written +with cross-compilation in mind. Thus, ``devtool add`` often cannot do +very much to ensure that these Makefiles build correctly. It is very +common, for example, to explicitly call ``gcc`` instead of using the +:term:`CC` variable. Usually, in a +cross-compilation environment, ``gcc`` is the compiler for the build +host and the cross-compiler is named something similar to +``arm-poky-linux-gnueabi-gcc`` and might require arguments (e.g. to +point to the associated sysroot for the target machine). + +When writing a recipe for Makefile-only software, keep the following in +mind: + +- You probably need to patch the Makefile to use variables instead of + hardcoding tools within the toolchain such as ``gcc`` and ``g++``. + +- The environment in which Make runs is set up with various standard + variables for compilation (e.g. ``CC``, ``CXX``, and so forth) in a + similar manner to the environment set up by the SDK's environment + setup script. One easy way to see these variables is to run the + ``devtool build`` command on the recipe and then look in + ``oe-logs/run.do_compile``. Towards the top of this file, a list of + environment variables exists that are being set. You can take + advantage of these variables within the Makefile. + +- If the Makefile sets a default for a variable using "=", that default + overrides the value set in the environment, which is usually not + desirable. For this case, you can either patch the Makefile so it + sets the default using the "?=" operator, or you can alternatively + force the value on the ``make`` command line. To force the value on + the command line, add the variable setting to + :term:`EXTRA_OEMAKE` or + :term:`PACKAGECONFIG_CONFARGS` + within the recipe. Here is an example using ``EXTRA_OEMAKE``: + :: + + EXTRA_OEMAKE += "'CC=${CC}' 'CXX=${CXX}'" + + In the above example, + single quotes are used around the variable settings as the values are + likely to contain spaces because required default options are passed + to the compiler. + +- Hardcoding paths inside Makefiles is often problematic in a + cross-compilation environment. This is particularly true because + those hardcoded paths often point to locations on the build host and + thus will either be read-only or will introduce contamination into + the cross-compilation because they are specific to the build host + rather than the target. Patching the Makefile to use prefix variables + or other path variables is usually the way to handle this situation. + +- Sometimes a Makefile runs target-specific commands such as + ``ldconfig``. For such cases, you might be able to apply patches that + remove these commands from the Makefile. + +.. 
_sdk-adding-native-tools: + +Adding Native Tools +------------------- + +Often, you need to build additional tools that run on the :term:`Build +Host` as opposed to +the target. You should indicate this requirement by using one of the +following methods when you run ``devtool add``: + +- Specify the name of the recipe such that it ends with "-native". + Specifying the name like this produces a recipe that only builds for + the build host. + +- Specify the "DASHDASHalso-native" option with the ``devtool add`` + command. Specifying this option creates a recipe file that still + builds for the target but also creates a variant with a "-native" + suffix that builds for the build host. + +.. note:: + + If you need to add a tool that is shipped as part of a source tree + that builds code for the target, you can typically accomplish this by + building the native and target parts separately rather than within + the same compilation process. Realize though that with the + "DASHDASHalso-native" option, you can add the tool using just one + recipe file. + +.. _sdk-adding-node-js-modules: + +Adding Node.js Modules +---------------------- + +You can use the ``devtool add`` command two different ways to add +Node.js modules: 1) Through ``npm`` and, 2) from a repository or local +source. + +Use the following form to add Node.js modules through ``npm``: +:: + + $ devtool add "npm://registry.npmjs.org;name=forever;version=0.15.1" + +The name and +version parameters are mandatory. Lockdown and shrinkwrap files are +generated and pointed to by the recipe in order to freeze the version +that is fetched for the dependencies according to the first time. This +also saves checksums that are verified on future fetches. Together, +these behaviors ensure the reproducibility and integrity of the build. + +.. note:: + + - You must use quotes around the URL. The ``devtool add`` does not + require the quotes, but the shell considers ";" as a splitter + between multiple commands. Thus, without the quotes, + ``devtool add`` does not receive the other parts, which results in + several "command not found" errors. + + - In order to support adding Node.js modules, a ``nodejs`` recipe + must be part of your SDK. + +As mentioned earlier, you can also add Node.js modules directly from a +repository or local source tree. To add modules this way, use +``devtool add`` in the following form: +:: + + $ devtool add https://github.com/diversario/node-ssdp + +In this example, ``devtool`` +fetches the specified Git repository, detects the code as Node.js code, +fetches dependencies using ``npm``, and sets +:term:`SRC_URI` accordingly. + +.. _sdk-working-with-recipes: + +Working With Recipes +==================== + +When building a recipe using the ``devtool build`` command, the typical +build progresses as follows: + +1. Fetch the source + +2. Unpack the source + +3. Configure the source + +4. Compile the source + +5. Install the build output + +6. Package the installed output + +For recipes in the workspace, fetching and unpacking is disabled as the +source tree has already been prepared and is persistent. Each of these +build steps is defined as a function (task), usually with a "do\_" prefix +(e.g. :ref:`ref-tasks-fetch`, +:ref:`ref-tasks-unpack`, and so +forth). These functions are typically shell scripts but can instead be +written in Python. + +If you look at the contents of a recipe, you will see that the recipe +does not include complete instructions for building the software. 
+Instead, common functionality is encapsulated in classes inherited with +the ``inherit`` directive. This technique leaves the recipe to describe +just the things that are specific to the software being built. A +:ref:`base ` class exists that +is implicitly inherited by all recipes and provides the functionality +that most recipes typically need. + +The remainder of this section presents information useful when working +with recipes. + +.. _sdk-finding-logs-and-work-files: + +Finding Logs and Work Files +--------------------------- + +After the first run of the ``devtool build`` command, recipes that were +previously created using the ``devtool add`` command or whose sources +were modified using the ``devtool modify`` command contain symbolic +links created within the source tree: + +- ``oe-logs``: This link points to the directory in which log files and + run scripts for each build step are created. + +- ``oe-workdir``: This link points to the temporary work area for the + recipe. The following locations under ``oe-workdir`` are particularly + useful: + + - ``image/``: Contains all of the files installed during the + :ref:`ref-tasks-install` stage. + Within a recipe, this directory is referred to by the expression + ``${``\ :term:`D`\ ``}``. + + - ``sysroot-destdir/``: Contains a subset of files installed within + ``do_install`` that have been put into the shared sysroot. For + more information, see the "`Sharing Files Between + Recipes <#sdk-sharing-files-between-recipes>`__" section. + + - ``packages-split/``: Contains subdirectories for each package + produced by the recipe. For more information, see the + "`Packaging <#sdk-packaging>`__" section. + +You can use these links to get more information on what is happening at +each build step. + +.. _sdk-setting-configure-arguments: + +Setting Configure Arguments +--------------------------- + +If the software your recipe is building uses GNU autoconf, then a fixed +set of arguments is passed to it to enable cross-compilation plus any +extras specified by +:term:`EXTRA_OECONF` or +:term:`PACKAGECONFIG_CONFARGS` +set within the recipe. If you wish to pass additional options, add them +to ``EXTRA_OECONF`` or ``PACKAGECONFIG_CONFARGS``. Other supported build +tools have similar variables (e.g. +:term:`EXTRA_OECMAKE` for +CMake, :term:`EXTRA_OESCONS` +for Scons, and so forth). If you need to pass anything on the ``make`` +command line, you can use ``EXTRA_OEMAKE`` or the +:term:`PACKAGECONFIG_CONFARGS` +variables to do so. + +You can use the ``devtool configure-help`` command to help you set the +arguments listed in the previous paragraph. The command determines the +exact options being passed, and shows them to you along with any custom +arguments specified through ``EXTRA_OECONF`` or +``PACKAGECONFIG_CONFARGS``. If applicable, the command also shows you +the output of the configure script's "DASHDASHhelp" option as a +reference. + +.. _sdk-sharing-files-between-recipes: + +Sharing Files Between Recipes +----------------------------- + +Recipes often need to use files provided by other recipes on the +:term:`Build Host`. For example, +an application linking to a common library needs access to the library +itself and its associated headers. The way this access is accomplished +within the extensible SDK is through the sysroot. One sysroot exists per +"machine" for which the SDK is being built. In practical terms, this +means a sysroot exists for the target machine, and a sysroot exists for +the build host. 
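+ +As a minimal sketch of how this works in practice (the library name is +purely illustrative), a recipe that needs to link against ``zlib`` simply +declares a build-time dependency on it: +:: + + DEPENDS = "zlib" + +With that declaration, the headers and libraries that the ``zlib`` recipe +has placed into the sysroot are available to this recipe by the time its +configure step runs, so the build can find them in the usual way.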
+ +Recipes should never write files directly into the sysroot. Instead, +files should be installed into standard locations during the +:ref:`ref-tasks-install` task within +the ``${``\ :term:`D`\ ``}`` directory. A +subset of these files automatically goes into the sysroot. The reason +for this limitation is that almost all files that go into the sysroot +are cataloged in manifests in order to ensure they can be removed later +when a recipe is modified or removed. Thus, the sysroot is able to +remain free from stale files. + +.. _sdk-packaging: + +Packaging +--------- + +Packaging is not always particularly relevant within the extensible SDK. +However, if you examine how build output gets into the final image on +the target device, it is important to understand packaging because the +contents of the image are expressed in terms of packages and not +recipes. + +During the :ref:`ref-tasks-package` +task, files installed during the +:ref:`ref-tasks-install` task are +split into one main package, which is almost always named the same as +the recipe, and into several other packages. This separation exists +because not all of those installed files are useful in every image. For +example, you probably do not need any of the documentation installed in +a production image. Consequently, for each recipe the documentation +files are separated into a ``-doc`` package. Recipes that package +software containing optional modules or plugins might undergo additional +package splitting as well. + +After building a recipe, you can see where files have gone by looking in +the ``oe-workdir/packages-split`` directory, which contains a +subdirectory for each package. Apart from some advanced cases, the +:term:`PACKAGES` and +:term:`FILES` variables controls +splitting. The ``PACKAGES`` variable lists all of the packages to be +produced, while the ``FILES`` variable specifies which files to include +in each package by using an override to specify the package. For +example, ``FILES_${PN}`` specifies the files to go into the main package +(i.e. the main package has the same name as the recipe and +``${``\ :term:`PN`\ ``}`` evaluates to the +recipe name). The order of the ``PACKAGES`` value is significant. For +each installed file, the first package whose ``FILES`` value matches the +file is the package into which the file goes. Defaults exist for both +the ``PACKAGES`` and ``FILES`` variables. Consequently, you might find +you do not even need to set these variables in your recipe unless the +software the recipe is building installs files into non-standard +locations. + +.. _sdk-restoring-the-target-device-to-its-original-state: + +Restoring the Target Device to its Original State +================================================= + +If you use the ``devtool deploy-target`` command to write a recipe's +build output to the target, and you are working on an existing component +of the system, then you might find yourself in a situation where you +need to restore the original files that existed prior to running the +``devtool deploy-target`` command. Because the ``devtool deploy-target`` +command backs up any files it overwrites, you can use the +``devtool undeploy-target`` command to restore those files and remove +any other files the recipe deployed. 
Consider the following example: +:: + + $ devtool undeploy-target lighttpd root@192.168.7.2 + +If you have deployed +multiple applications, you can remove them all using the "-a" option +thus restoring the target device to its original state: +:: + + $ devtool undeploy-target -a root@192.168.7.2 + +Information about files deployed to +the target as well as any backed up files is stored on the target +itself. This storage, of course, requires some additional space on the +target machine. + +.. note:: + + The ``devtool deploy-target`` and ``devtool undeploy-target`` + commands do not currently interact with any package management system + on the target device (e.g. RPM or OPKG). Consequently, you should not + intermingle ``devtool deploy-target`` + and package manager operations on the target device. Doing so could + result in a conflicting set of files. + +.. _sdk-installing-additional-items-into-the-extensible-sdk: + +Installing Additional Items Into the Extensible SDK +=================================================== + +Out of the box, the extensible SDK typically only comes with a small +number of tools and libraries. A minimal SDK starts mostly empty and is +populated on-demand. Sometimes you must explicitly install extra items +into the SDK. If you need these extra items, you can first search for +the items using the ``devtool search`` command. For example, suppose you +need to link to libGL but you are not sure which recipe provides libGL. +You can use the following command to find out: +:: + + $ devtool search libGL + mesa                  A free implementation of the OpenGL API + +Once you know the recipe +(i.e. ``mesa`` in this example), you can install it: +:: + + $ devtool sdk-install mesa + +By default, the ``devtool sdk-install`` command assumes +the item is available in pre-built form from your SDK provider. If the +item is not available and it is acceptable to build the item from +source, you can add the "-s" option as follows: +:: + + $ devtool sdk-install -s mesa + +It is important to remember that building the item from source +takes significantly longer than installing the pre-built artifact. Also, +if no recipe exists for the item you want to add to the SDK, you must +instead add the item using the ``devtool add`` command. + +.. _sdk-applying-updates-to-an-installed-extensible-sdk: + +Applying Updates to an Installed Extensible SDK +=============================================== + +If you are working with an installed extensible SDK that gets +occasionally updated (e.g. a third-party SDK), then you will need to +manually "pull down" the updates into the installed SDK. + +To update your installed SDK, use ``devtool`` as follows: +:: + + $ devtool sdk-update + +The previous command assumes your SDK provider has set the +default update URL for you through the +:term:`SDK_UPDATE_URL` +variable as described in the "`Providing Updates to the Extensible SDK +After +Installation <#sdk-providing-updates-to-the-extensible-sdk-after-installation>`__" +section. If the SDK provider has not set that default URL, you need to +specify it yourself in the command as follows: +:: + + $ devtool sdk-update path_to_update_directory + +.. note:: + + The URL needs to point specifically to a published SDK and not to an + SDK installer that you would download and install. + +.. _sdk-creating-a-derivative-sdk-with-additional-components: + +Creating a Derivative SDK With Additional Components +==================================================== + +You might need to produce an SDK that contains your own custom +libraries.
A good example would be if you were a vendor with customers +that use your SDK to build their own platform-specific software and +those customers need an SDK that has custom libraries. In such a case, +you can produce a derivative SDK based on the currently installed SDK +fairly easily by following these steps: + +1. If necessary, install an extensible SDK that you want to use as a + base for your derivative SDK. + +2. Source the environment script for the SDK. + +3. Add the extra libraries or other components you want by using the + ``devtool add`` command. + +4. Run the ``devtool build-sdk`` command. + +The previous steps take the recipes added to the workspace and construct +a new SDK installer that contains those recipes and the resulting binary +artifacts. The recipes go into their own separate layer in the +constructed derivative SDK, which leaves the workspace clean and ready +for users to add their own recipes. diff --git a/poky/documentation/sdk-manual/sdk-intro.rst b/poky/documentation/sdk-manual/sdk-intro.rst new file mode 100644 index 000000000..2e01cf136 --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-intro.rst @@ -0,0 +1,231 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Introduction +************ + +.. _sdk-manual-intro: + +eSDK Introduction +================= + +Welcome to the Yocto Project Application Development and the Extensible +Software Development Kit (eSDK) manual. This manual provides information +that explains how to use both the Yocto Project extensible and standard +SDKs to develop applications and images. + +.. note:: + + Prior to the 2.0 Release of the Yocto Project, application + development was primarily accomplished through the use of the + Application Development Toolkit (ADT) and the availability of + stand-alone cross-development toolchains and other tools. With the + 2.1 Release of the Yocto Project, application development has + transitioned to within a tool-rich extensible SDK and the more + traditional standard SDK. + +All SDKs consist of the following: + +- *Cross-Development Toolchain*: This toolchain contains a compiler, + debugger, and various miscellaneous tools. + +- *Libraries, Headers, and Symbols*: The libraries, headers, and + symbols are specific to the image (i.e. they match the image). + +- *Environment Setup Script*: This ``*.sh`` file, once run, sets up the + cross-development environment by defining variables and preparing for + SDK use. + +Additionally, an extensible SDK has tools that allow you to easily add +new applications and libraries to an image, modify the source of an +existing component, test changes on the target hardware, and easily +integrate an application into the :term:`OpenEmbedded Build System`. + +You can use an SDK to independently develop and test code that is +destined to run on some target machine. SDKs are completely +self-contained. The binaries are linked against their own copy of +``libc``, which results in no dependencies on the target system. To +achieve this, the pointer to the dynamic loader is configured at install +time since that path cannot be dynamically altered. This is the reason +for a wrapper around the ``populate_sdk`` and ``populate_sdk_ext`` +archives. + +Another feature for the SDKs is that only one set of cross-compiler +toolchain binaries are produced for any given architecture. This feature +takes advantage of the fact that the target hardware can be passed to +``gcc`` as a set of compiler options. 
Those options are set up by the +environment script and contained in variables such as +:term:`CC` and +:term:`LD`. This reduces the space needed +for the tools. Understand, however, that every target still needs a +sysroot because those binaries are target-specific. + +The SDK development environment consists of the following: + +- The self-contained SDK, which is an architecture-specific + cross-toolchain and matching sysroots (target and native) all built + by the OpenEmbedded build system (e.g. the SDK). The toolchain and + sysroots are based on a :term:`Metadata` + configuration and extensions, which allows you to cross-develop on + the host machine for the target hardware. Additionally, the + extensible SDK contains the ``devtool`` functionality. + +- The Quick EMUlator (QEMU), which lets you simulate target hardware. + QEMU is not literally part of the SDK. You must build and include + this emulator separately. However, QEMU plays an important role in + the development process that revolves around use of the SDK. + +In summary, the extensible and standard SDK share many features. +However, the extensible SDK has powerful development tools to help you +more quickly develop applications. Following is a table that summarizes +the primary differences between the standard and extensible SDK types +when considering which to build: + ++-----------------------+-----------------------+-----------------------+ +| *Feature* | *Standard SDK* | *Extensible SDK* | ++=======================+=======================+=======================+ +| Toolchain | Yes | Yes\* | ++-----------------------+-----------------------+-----------------------+ +| Debugger | Yes | Yes\* | ++-----------------------+-----------------------+-----------------------+ +| Size | 100+ MBytes | 1+ GBytes (or 300+ | +| | | MBytes for minimal | +| | | w/toolchain) | ++-----------------------+-----------------------+-----------------------+ +| ``devtool`` | No | Yes | ++-----------------------+-----------------------+-----------------------+ +| Build Images | No | Yes | ++-----------------------+-----------------------+-----------------------+ +| Updateable | No | Yes | ++-----------------------+-----------------------+-----------------------+ +| Managed Sysroot*\* | No | Yes | ++-----------------------+-----------------------+-----------------------+ +| Installed Packages | No**\* | Yes***\* | ++-----------------------+-----------------------+-----------------------+ +| Construction | Packages | Shared State | ++-----------------------+-----------------------+-----------------------+ + +\* Extensible SDK contains the toolchain and debugger if +:term:`SDK_EXT_TYPE` is "full" +or +:term:`SDK_INCLUDE_TOOLCHAIN` +is "1", which is the default. + +\*\* Sysroot is managed through the use of +``devtool``. Thus, it is less likely that you will corrupt your SDK +sysroot when you try to add additional libraries. + +\*\*\* You can add +runtime package management to the standard SDK but it is not supported +by default. + +\*\*\*\* You must build and make the shared state available to +extensible SDK users for "packages" you want to enable users to install. + +The Cross-Development Toolchain +------------------------------- + +The :term:`Cross-Development Toolchain` consists +of a cross-compiler, cross-linker, and cross-debugger that are used to +develop user-space applications for targeted hardware. Additionally, for +an extensible SDK, the toolchain also has built-in ``devtool`` +functionality. 
This toolchain is created by running a SDK installer +script or through a :term:`Build Directory` that is based on +your metadata configuration or extension for your targeted device. The +cross-toolchain works with a matching target sysroot. + +.. _sysroot: + +Sysroots +-------- + +The native and target sysroots contain needed headers and libraries for +generating binaries that run on the target architecture. The target +sysroot is based on the target root filesystem image that is built by +the OpenEmbedded build system and uses the same metadata configuration +used to build the cross-toolchain. + +The QEMU Emulator +----------------- + +The QEMU emulator allows you to simulate your hardware while running +your application or image. QEMU is not part of the SDK but is made +available a number of different ways: + +- If you have cloned the ``poky`` Git repository to create a + :term:`Source Directory` and you have + sourced the environment setup script, QEMU is installed and + automatically available. + +- If you have downloaded a Yocto Project release and unpacked it to + create a Source Directory and you have sourced the environment setup + script, QEMU is installed and automatically available. + +- If you have installed the cross-toolchain tarball and you have + sourced the toolchain's setup environment script, QEMU is also + installed and automatically available. + +SDK Development Model +===================== + +Fundamentally, the SDK fits into the development process as follows: + +.. image:: figures/sdk-environment.png + :align: center + +The SDK is installed on any machine and can be used to develop applications, +images, and kernels. An SDK can even be used by a QA Engineer or Release +Engineer. The fundamental concept is that the machine that has the SDK +installed does not have to be associated with the machine that has the +Yocto Project installed. A developer can independently compile and test +an object on their machine and then, when the object is ready for +integration into an image, they can simply make it available to the +machine that has the Yocto Project. Once the object is available, the +image can be rebuilt using the Yocto Project to produce the modified +image. + +You just need to follow these general steps: + +1. *Install the SDK for your target hardware:* For information on how to + install the SDK, see the "`Installing the + SDK <#sdk-installing-the-sdk>`__" section. + +2. *Download or Build the Target Image:* The Yocto Project supports + several target architectures and has many pre-built kernel images and + root filesystem images. + + If you are going to develop your application on hardware, go to the + :yocto_dl:`machines ` download area and choose a + target machine area from which to download the kernel image and root + filesystem. This download area could have several files in it that + support development using actual hardware. For example, the area + might contain ``.hddimg`` files that combine the kernel image with + the filesystem, boot loaders, and so forth. Be sure to get the files + you need for your particular development process. + + If you are going to develop your application and then run and test it + using the QEMU emulator, go to the + :yocto_dl:`machines/qemu ` download area. From this + area, go down into the directory for your target architecture (e.g. + ``qemux86_64`` for an Intel-based 64-bit architecture). Download the + kernel, root filesystem, and any other files you need for your + process. + + .. 
note:: + + To use the root filesystem in QEMU, you need to extract it. See + the " + Extracting the Root Filesystem + " section for information on how to extract the root filesystem. + +3. *Develop and Test your Application:* At this point, you have the + tools to develop your application. If you need to separately install + and use the QEMU emulator, you can go to `QEMU Home + Page `__ to download and learn about + the emulator. See the ":doc:`../dev-manual/dev-manual-qemu`" chapter in the + Yocto Project Development Tasks Manual for information on using QEMU + within the Yocto Project. + +The remainder of this manual describes how to use the extensible and +standard SDKs. Information also exists in appendix form that describes +how you can build, install, and modify an SDK. diff --git a/poky/documentation/sdk-manual/sdk-manual.rst b/poky/documentation/sdk-manual/sdk-manual.rst new file mode 100644 index 000000000..d7776b7c4 --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-manual.rst @@ -0,0 +1,22 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +======================================================================================== +Yocto Project Application Development and the Extensible Software Development Kit (eSDK) +======================================================================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + sdk-intro + sdk-extensible + sdk-using + sdk-working-projects + sdk-appendix-obtain + sdk-appendix-customizing + sdk-appendix-customizing-standard + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/sdk-manual/sdk-using.rst b/poky/documentation/sdk-manual/sdk-using.rst new file mode 100644 index 000000000..cd57f07ee --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-using.rst @@ -0,0 +1,159 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************** +Using the Standard SDK +********************** + +This chapter describes the standard SDK and how to install it. +Information includes unique installation and setup aspects for the +standard SDK. + +.. note:: + + For a side-by-side comparison of main features supported for a + standard SDK as compared to an extensible SDK, see the " + Introduction + " section. + +You can use a standard SDK to work on Makefile and Autotools-based +projects. See the "`Using the SDK Toolchain +Directly <#sdk-working-projects>`__" chapter for more information. + +.. _sdk-standard-sdk-intro: + +Why use the Standard SDK and What is in It? +=========================================== + +The Standard SDK provides a cross-development toolchain and libraries +tailored to the contents of a specific image. You would use the Standard +SDK if you want a more traditional toolchain experience as compared to +the extensible SDK, which provides an internal build system and the +``devtool`` functionality. + +The installed Standard SDK consists of several files and directories. +Basically, it contains an SDK environment setup script, some +configuration files, and host and target root filesystems to support +usage. You can see the directory structure in the "`Installed Standard +SDK Directory +Structure <#sdk-installed-standard-sdk-directory-structure>`__" section. + +.. _sdk-installing-the-sdk: + +Installing the SDK +================== + +The first thing you need to do is install the SDK on your :term:`Build +Host` by running the ``*.sh`` installation script. 
+ +You can download a tarball installer, which includes the pre-built +toolchain, the ``runqemu`` script, and support files from the +appropriate :yocto_dl:`toolchain ` directory within +the Index of Releases. Toolchains are available for several 32-bit and +64-bit architectures with the ``x86_64`` directories, respectively. The +toolchains the Yocto Project provides are based off the +``core-image-sato`` and ``core-image-minimal`` images and contain +libraries appropriate for developing against that image. + +The names of the tarball installer scripts are such that a string +representing the host system appears first in the filename and then is +immediately followed by a string representing the target architecture. +:: + + poky-glibc-host_system-image_type-arch-toolchain-release_version.sh + + Where: + host_system is a string representing your development system: + + i686 or x86_64. + + image_type is the image for which the SDK was built: + + core-image-minimal or core-image-sato. + + arch is a string representing the tuned target architecture: + + aarch64, armv5e, core2-64, i586, mips32r2, mips64, ppc7400, or cortexa8hf-neon. + + release_version is a string representing the release number of the Yocto Project: + + 3.1.2, 3.1.2+snapshot + +For example, the following SDK installer is for a 64-bit +development host system and a i586-tuned target architecture based off +the SDK for ``core-image-sato`` and using the current DISTRO snapshot: +:: + + poky-glibc-x86_64-core-image-sato-i586-toolchain-DISTRO.sh + +.. note:: + + As an alternative to downloading an SDK, you can build the SDK + installer. For information on building the installer, see the " + Building an SDK Installer + " section. + +The SDK and toolchains are self-contained and by default are installed +into the ``poky_sdk`` folder in your home directory. You can choose to +install the extensible SDK in any location when you run the installer. +However, because files need to be written under that directory during +the normal course of operation, the location you choose for installation +must be writable for whichever users need to use the SDK. + +The following command shows how to run the installer given a toolchain +tarball for a 64-bit x86 development host system and a 64-bit x86 target +architecture. The example assumes the SDK installer is located in +``~/Downloads/`` and has execution rights. + +.. note:: + + If you do not have write permissions for the directory into which you + are installing the SDK, the installer notifies you and exits. For + that case, set up the proper permissions in the directory and run the + installer again. + +:: + + $ ./Downloads/poky-glibc-x86_64-core-image-sato-i586-toolchain-3.1.2.sh + Poky (Yocto Project Reference Distro) SDK installer version 3.1.2 + =============================================================== + Enter target directory for SDK (default: /opt/poky/3.1.2): + You are about to install the SDK to "/opt/poky/3.1.2". Proceed [Y/n]? Y + Extracting SDK........................................ ..............................done + Setting it up...done + SDK has been successfully set up and is ready to be used. + Each time you wish to use the SDK in a new shell session, you need to source the environment setup script e.g. + $ . /opt/poky/3.1.2/environment-setup-i586-poky-linux + +Again, reference the "`Installed Standard SDK Directory +Structure <#sdk-installed-standard-sdk-directory-structure>`__" section +for more details on the resulting directory structure of the installed +SDK. + +.. 
_sdk-running-the-sdk-environment-setup-script: + +Running the SDK Environment Setup Script +======================================== + +Once you have the SDK installed, you must run the SDK environment setup +script before you can actually use the SDK. This setup script resides in +the directory you chose when you installed the SDK, which is either the +default ``/opt/poky/3.1.2`` directory or the directory you chose during +installation. + +Before running the script, be sure it is the one that matches the +architecture for which you are developing. Environment setup scripts +begin with the string "``environment-setup``" and include as part of +their name the tuned target architecture. As an example, the following +commands set the working directory to where the SDK was installed and +then source the environment setup script. In this example, the setup +script is for an IA-based target machine using i586 tuning: +:: + + $ source /opt/poky/3.1.2/environment-setup-i586-poky-linux + +When you run the +setup script, the same environment variables are defined as are when you +run the setup script for an extensible SDK. See the "`Running the +Extensible SDK Environment Setup +Script <#sdk-running-the-extensible-sdk-environment-setup-script>`__" +section for more information. diff --git a/poky/documentation/sdk-manual/sdk-working-projects.rst b/poky/documentation/sdk-manual/sdk-working-projects.rst new file mode 100644 index 000000000..2c20a1ec5 --- /dev/null +++ b/poky/documentation/sdk-manual/sdk-working-projects.rst @@ -0,0 +1,423 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************************** +Using the SDK Toolchain Directly +******************************** + +You can use the SDK toolchain directly with Makefile and Autotools-based +projects. + +Autotools-Based Projects +======================== + +Once you have a suitable :ref:`sdk-manual/sdk-intro:the cross-development toolchain` +installed, it is very easy to develop a project using the `GNU +Autotools-based `__ +workflow, which is outside of the :term:`OpenEmbedded Build System`. + +The following figure presents a simple Autotools workflow. + +.. image:: figures/sdk-autotools-flow.png + :align: center + +Follow these steps to create a simple Autotools-based "Hello World" +project: + +.. note:: + + For more information on the GNU Autotools workflow, see the same + example on the GNOME Developer site. + +1. *Create a Working Directory and Populate It:* Create a clean + directory for your project and then make that directory your working + location. + :: + + $ mkdir $HOME/helloworld + $ cd $HOME/helloworld + + After setting up the directory, populate it with files needed for the flow. + You need a project source file, a file to help with configuration, + a file to help create the Makefile, and a README file: + ``hello.c``, ``configure.ac``, ``Makefile.am``, and ``README``, + respectively. + + Use the following command to create an empty README file, which is + required by GNU Coding Standards: + :: + + $ touch README + + Create the remaining + three files as follows: + + - ``hello.c``: + :: + + #include <stdio.h> + + int main() + { + printf("Hello World!\n"); + } + + - ``configure.ac``: + :: + + AC_INIT(hello,0.1) + AM_INIT_AUTOMAKE([foreign]) + AC_PROG_CC + AC_CONFIG_FILES(Makefile) + AC_OUTPUT + + - ``Makefile.am``: + :: + + bin_PROGRAMS = hello + hello_SOURCES = hello.c + +2.
*Source the Cross-Toolchain Environment Setup File:* As described + earlier in the manual, installing the cross-toolchain creates a + cross-toolchain environment setup script in the directory that the + SDK was installed. Before you can use the tools to develop your + project, you must source this setup script. The script begins with + the string "environment-setup" and contains the machine architecture, + which is followed by the string "poky-linux". For this example, the + command sources a script from the default SDK installation directory + that uses the 32-bit Intel x86 Architecture and the 3.1.2 Yocto + Project release: + :: + + $ source /opt/poky/3.1.2/environment-setup-i586-poky-linux + +3. *Create the configure Script:* Use the ``autoreconf`` command to + generate the ``configure`` script. + :: + + $ autoreconf + + The ``autoreconf`` + tool takes care of running the other Autotools such as ``aclocal``, + ``autoconf``, and ``automake``. + + .. note:: + + If you get errors from + configure.ac + , which + autoreconf + runs, that indicate missing files, you can use the "-i" option, + which ensures missing auxiliary files are copied to the build + host. + +4. *Cross-Compile the Project:* This command compiles the project using + the cross-compiler. The + :term:`CONFIGURE_FLAGS` + environment variable provides the minimal arguments for GNU + configure: + :: + + $ ./configure ${CONFIGURE_FLAGS} + + For an Autotools-based + project, you can use the cross-toolchain by just passing the + appropriate host option to ``configure.sh``. The host option you use + is derived from the name of the environment setup script found in the + directory in which you installed the cross-toolchain. For example, + the host option for an ARM-based target that uses the GNU EABI is + ``armv5te-poky-linux-gnueabi``. You will notice that the name of the + script is ``environment-setup-armv5te-poky-linux-gnueabi``. Thus, the + following command works to update your project and rebuild it using + the appropriate cross-toolchain tools: + :: + + $ ./configure --host=armv5te-poky-linux-gnueabi --with-libtool-sysroot=sysroot_dir + +5. *Make and Install the Project:* These two commands generate and + install the project into the destination directory: + :: + + $ make + $ make install DESTDIR=./tmp + + .. note:: + + To learn about environment variables established when you run the + cross-toolchain environment setup script and how they are used or + overridden when the Makefile, see the " + Makefile-Based Projects + " section. + + This next command is a simple way to verify the installation of your + project. Running the command prints the architecture on which the + binary file can run. This architecture should be the same + architecture that the installed cross-toolchain supports. + :: + + $ file ./tmp/usr/local/bin/hello + +6. *Execute Your Project:* To execute the project, you would need to run + it on your target hardware. If your target hardware happens to be + your build host, you could run the project as follows: + :: + + $ ./tmp/usr/local/bin/hello + + As expected, the project displays the "Hello World!" message. + +Makefile-Based Projects +======================= + +Simple Makefile-based projects use and interact with the cross-toolchain +environment variables established when you run the cross-toolchain +environment setup script. The environment variables are subject to +general ``make`` rules. 
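+ +If you want to see those rules in isolation first, consider the following +tiny, hypothetical Makefile (unrelated to the SDK) that simply echoes a +variable; the command under ``show:`` must be indented with a tab. A +plain "=" assignment in the Makefile overrides the environment, the "-e" +option restores the environment's value, and a command-line assignment +overrides both: +:: + + $ cat Makefile + VALUE = makefile-default + show: + @echo VALUE is $(VALUE) + $ VALUE=environment make show + VALUE is makefile-default + $ VALUE=environment make -e show + VALUE is environment + $ make show VALUE=command-line + VALUE is command-line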
+ +This section presents a simple Makefile development flow and provides an +example that lets you see how you can use cross-toolchain environment +variables and Makefile variables during development. + +.. image:: figures/sdk-makefile-flow.png + :align: center + +The main point of this section is to explain the following three cases +regarding variable behavior: + +- *Case 1 - No Variables Set in the Makefile Map to Equivalent + Environment Variables Set in the SDK Setup Script:* Because matching + variables are not specifically set in the ``Makefile``, the variables + retain their values based on the environment setup script. + +- *Case 2 - Variables Are Set in the Makefile that Map to Equivalent + Environment Variables from the SDK Setup Script:* Specifically + setting matching variables in the ``Makefile`` during the build + results in the environment settings of the variables being + overwritten. In this case, the variables you set in the ``Makefile`` + are used. + +- *Case 3 - Variables Are Set Using the Command Line that Map to + Equivalent Environment Variables from the SDK Setup Script:* + Setting matching variables on the ``make`` command line results in the + environment settings of the variables being overwritten. In this case, + the command-line content is used. + +.. note:: + + Regardless of how you set your variables, if you use the "-e" option + with ``make``, the variables from the SDK setup script take precedence: + :: + + $ make -e target + + +The remainder of this section presents a simple Makefile example that +demonstrates these variable behaviors. + +In a new shell, environment variables are not established for the SDK +until you run the setup script. For example, the following commands show +a null value for the compiler variable (i.e. +:term:`CC`). +:: + + $ echo ${CC} + + $ + +Running the +SDK setup script for a 64-bit build host and an i586-tuned target +architecture for a ``core-image-sato`` image using the current 3.1.2 +Yocto Project release and then echoing that variable shows the value +established through the script: +:: + + $ source /opt/poky/3.1.2/environment-setup-i586-poky-linux + $ echo ${CC} + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/3.1.2/sysroots/i586-poky-linux + +To illustrate variable use, work through this simple "Hello World!" +example: + +1. *Create a Working Directory and Populate It:* Create a clean + directory for your project and then make that directory your working + location. + :: + + $ mkdir $HOME/helloworld + $ cd $HOME/helloworld + + After + setting up the directory, populate it with files needed for the flow. + You need a ``main.c`` file from which you call your function, a + ``module.h`` file to contain headers, and a ``module.c`` that defines + your function. + + Create the three files as follows: + + - ``main.c``: + :: + + #include "module.h" + void sample_func(); + int main() + { + sample_func(); + return 0; + } + + - ``module.h``: + :: + + #include <stdio.h> + void sample_func(); + + - ``module.c``: + :: + + #include "module.h" + void sample_func() + { + printf("Hello World!"); + printf("\n"); + } + +2. *Source the Cross-Toolchain Environment Setup File:* As described + earlier in the manual, installing the cross-toolchain creates a + cross-toolchain environment setup script in the directory in which the + SDK was installed. Before you can use the tools to develop your + project, you must source this setup script.
The script begins with + the string "environment-setup" and contains the machine architecture, + which is followed by the string "poky-linux". For this example, the + command sources a script from the default SDK installation directory + that uses the 32-bit Intel x86 Architecture and the DISTRO_NAME Yocto + Project release: + :: + + $ source /opt/poky/DISTRO/environment-setup-i586-poky-linux + +3. *Create the Makefile:* For this example, the Makefile contains + two lines that can be used to set the ``CC`` variable. One line is + identical to the value that is set when you run the SDK environment + setup script, and the other line sets ``CC`` to "gcc", the default + GNU compiler on the build host: + :: + + # CC=i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux + # CC="gcc" + all: main.o module.o + ${CC} main.o module.o -o target_bin + main.o: main.c module.h + ${CC} -I . -c main.c + module.o: module.c + module.h ${CC} -I . -c module.c + clean: + rm -rf *.o + rm target_bin + +4. *Make the Project:* Use the ``make`` command to create the binary + output file. Because variables are commented out in the Makefile, the + value used for ``CC`` is the value set when the SDK environment setup + file was run: + :: + + $ make + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c main.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c module.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux main.o module.o -o target_bin + + From the results of the previous command, you can see that + the compiler used was the compiler established through the ``CC`` + variable defined in the setup script. + + You can override the ``CC`` environment variable with the same + variable as set from the Makefile by uncommenting the line in the + Makefile and running ``make`` again. + :: + + $ make clean + rm -rf *.o + rm target_bin + # + # Edit the Makefile by uncommenting the line that sets CC to "gcc" + # + $ make + gcc -I . -c main.c + gcc -I . -c module.c + gcc main.o module.o -o target_bin + + As shown in the previous example, the + cross-toolchain compiler is not used. Rather, the default compiler is + used. + + This next case shows how to override a variable by providing the + variable as part of the command line. Go into the Makefile and + re-insert the comment character so that running ``make`` uses the + established SDK compiler. However, when you run ``make``, use a + command-line argument to set ``CC`` to "gcc": + :: + + $ make clean + rm -rf *.o + rm target_bin + # + # Edit the Makefile to comment out the line setting CC to "gcc" + # + $ make + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c main.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c module.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux main.o module.o -o target_bin + $ make clean + rm -rf *.o + rm target_bin + $ make CC="gcc" + gcc -I . -c main.c + gcc -I . -c module.c + gcc main.o module.o -o target_bin + + In the previous case, the command-line argument overrides the SDK + environment variable. + + In this last case, edit Makefile again to use the "gcc" compiler but + then use the "-e" option on the ``make`` command line: + :: + + $ make clean + rm -rf *.o + rm target_bin + # + # Edit the Makefile to use "gcc" + # + $ make + gcc -I . 
-c main.c + gcc -I . -c module.c + gcc main.o module.o -o target_bin + $ make clean + rm -rf *.o + rm target_bin + $ make -e + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c main.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux -I . -c module.c + i586-poky-linux-gcc -m32 -march=i586 --sysroot=/opt/poky/2.5/sysroots/i586-poky-linux main.o module.o -o target_bin + + In the previous case, the "-e" option forces ``make`` to + use the SDK environment variables regardless of the values in the + Makefile. + +5. *Execute Your Project:* To execute the project (i.e. ``target_bin``), + use the following command: + :: + + $ ./target_bin + Hello World! + + .. note:: + + If you used the cross-toolchain compiler to build + target_bin + and your build host differs in architecture from that of the + target machine, you need to run your project on the target device. + + As expected, the project displays the "Hello World!" message. diff --git a/poky/documentation/sphinx-static/YoctoProject_Logo_RGB.jpg b/poky/documentation/sphinx-static/YoctoProject_Logo_RGB.jpg new file mode 100644 index 000000000..8ab47d49f Binary files /dev/null and b/poky/documentation/sphinx-static/YoctoProject_Logo_RGB.jpg differ diff --git a/poky/documentation/sphinx-static/switchers.js b/poky/documentation/sphinx-static/switchers.js new file mode 100644 index 000000000..32113cfa9 --- /dev/null +++ b/poky/documentation/sphinx-static/switchers.js @@ -0,0 +1,233 @@ +(function() { + 'use strict'; + + var all_versions = { + 'dev': 'dev (3.2)', + '3.1.2': '3.1.2', + '3.0.3': '3.0.3', + '2.7.4': '2.7.4', + }; + + var all_doctypes = { + 'single': 'Individual Webpages', + 'mega': "All-in-one 'Mega' Manual", + }; + + // Simple version comparision + // Return 1 if a > b + // Return -1 if a < b + // Return 0 if a == b + function ver_compare(a, b) { + if (a == "dev") { + return 1; + } + + if (a === b) { + return 0; + } + + var a_components = a.split("."); + var b_components = b.split("."); + + var len = Math.min(a_components.length, b_components.length); + + // loop while the components are equal + for (var i = 0; i < len; i++) { + // A bigger than B + if (parseInt(a_components[i]) > parseInt(b_components[i])) { + return 1; + } + + // B bigger than A + if (parseInt(a_components[i]) < parseInt(b_components[i])) { + return -1; + } + } + + // If one's a prefix of the other, the longer one is greater. + if (a_components.length > b_components.length) { + return 1; + } + + if (a_components.length < b_components.length) { + return -1; + } + + // Otherwise they are the same. + return 0; + } + + function build_version_select(current_series, current_version) { + var buf = [''); + return buf.join(''); + } + + function build_doctype_select(current_doctype) { + var buf = [''); + return buf.join(''); + } + + function navigate_to_first_existing(urls) { + // Navigate to the first existing URL in urls. 
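+    // Try each candidate in turn: probe it with an AJAX request, fall back to the next candidate on error, and use the final candidate unconditionally.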
+ var url = urls.shift(); + + // Web browsers won't redirect file:// urls to file urls using ajax but + // its useful for local testing + if (url.startsWith("file://")) { + window.location.href = url; + return; + } + + if (urls.length == 0) { + window.location.href = url; + return; + } + $.ajax({ + url: url, + success: function() { + window.location.href = url; + }, + error: function() { + navigate_to_first_existing(urls); + } + }); + } + + function get_docroot_url() { + var url = window.location.href; + var root = DOCUMENTATION_OPTIONS.URL_ROOT; + + var urlarray = url.split('/'); + // Trim off anything after '/' + urlarray.pop(); + var depth = (root.match(/\.\.\//g) || []).length; + for (var i = 0; i < depth; i++) { + urlarray.pop(); + } + + return urlarray.join('/') + '/'; + } + + function on_version_switch() { + var selected_version = $(this).children('option:selected').attr('value'); + var url = window.location.href; + var current_version = DOCUMENTATION_OPTIONS.VERSION; + var docroot = get_docroot_url() + + var new_versionpath = selected_version + '/'; + if (selected_version == "dev") + new_versionpath = ''; + + // dev versions have no version prefix + if (current_version == "dev") { + var new_url = docroot + new_versionpath + url.replace(docroot, ""); + var fallback_url = docroot + new_versionpath; + } else { + var new_url = url.replace('/' + current_version + '/', '/' + new_versionpath); + var fallback_url = new_url.replace(url.replace(docroot, ""), ""); + } + + console.log(get_docroot_url()) + console.log(url + " to url " + new_url); + console.log(url + " to fallback " + fallback_url); + + if (new_url != url) { + navigate_to_first_existing([ + new_url, + fallback_url, + 'https://www.yoctoproject.org/docs/', + ]); + } + } + + function on_doctype_switch() { + var selected_doctype = $(this).children('option:selected').attr('value'); + var url = window.location.href; + if (selected_doctype == 'mega') { + var docroot = get_docroot_url() + var current_version = DOCUMENTATION_OPTIONS.VERSION; + // Assume manuals before 3.2 are using old docbook mega-manual + if (ver_compare(current_version, "3.2") < 0) { + var new_url = docroot + "mega-manual/mega-manual.html"; + } else { + var new_url = docroot + "singleindex.html"; + } + } else { + var new_url = url.replace("singleindex.html", "index.html") + } + + if (new_url != url) { + navigate_to_first_existing([ + new_url, + 'https://www.yoctoproject.org/docs/', + ]); + } + } + + // Returns the current doctype based upon the url + function doctype_segment_from_url(url) { + if (url.includes("singleindex") || url.includes("mega-manual")) + return "mega"; + return "single"; + } + + $(document).ready(function() { + var release = DOCUMENTATION_OPTIONS.VERSION; + var current_doctype = doctype_segment_from_url(window.location.href); + var current_series = release.substr(0, 3); + var version_select = build_version_select(current_series, release); + + $('.version_switcher_placeholder').html(version_select); + $('.version_switcher_placeholder select').bind('change', on_version_switch); + + var doctype_select = build_doctype_select(current_doctype); + + $('.doctype_switcher_placeholder').html(doctype_select); + $('.doctype_switcher_placeholder select').bind('change', on_doctype_switch); + + if (ver_compare(release, "3.1") < 0) { + $('#outdated-warning').html('Version ' + release + ' of the project is now considered obsolete, please select and use a more recent version'); + $('#outdated-warning').css('padding', '.5em'); + } else if (release != "dev") { + 
$.each(all_versions, function(version, title) { + var series = version.substr(0, 3); + if (series == current_series && version != release) { + $('#outdated-warning').html('This document is for outdated version ' + release + ', you should select the latest release version in this series, ' + version + '.'); + $('#outdated-warning').css('padding', '.5em'); + } + }); + } + }); +})(); diff --git a/poky/documentation/sphinx-static/theme_overrides.css b/poky/documentation/sphinx-static/theme_overrides.css new file mode 100644 index 000000000..c18053398 --- /dev/null +++ b/poky/documentation/sphinx-static/theme_overrides.css @@ -0,0 +1,166 @@ +/* + SPDX-License-Identifier: CC-BY-2.0-UK +*/ + +body { + font-family: Verdana, Sans, sans-serif; + + min-width: 640px; + margin: 0em auto; + color: #333; +} + +h1,h2,h3,h4,h5,h6,h7 { + font-family: Arial, Sans; + color: #00557D; + clear: both; +} + +h1 { + font-size: 2em; + text-align: left; + padding: 0em 0em 0em 0em; + margin: 2em 0em 0em 0em; +} + +h2.subtitle { + margin: 0.10em 0em 3.0em 0em; + padding: 0em 0em 0em 0em; + font-size: 1.8em; + padding-left: 20%; + font-weight: normal; + font-style: italic; +} + +h2 { + margin: 2em 0em 0.66em 0em; + padding: 0.5em 0em 0em 0em; + font-size: 1.5em; + font-weight: bold; +} + +h3.subtitle { + margin: 0em 0em 1em 0em; + padding: 0em 0em 0em 0em; + font-size: 142.14%; + text-align: right; +} + +h3 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 140%; + font-weight: bold; +} + +h4 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 120%; + font-weight: bold; +} + +h5 { + margin: 1em 0em 0.5em 0em; + padding: 1em 0em 0em 0em; + font-size: 110%; + font-weight: bold; +} + +h6 { + margin: 1em 0em 0em 0em; + padding: 1em 0em 0em 0em; + font-size: 110%; + font-weight: bold; +} + +em { + font-weight: bold; +} + +.pre { + font-size: medium; + font-family: Courier, monospace; +} + +.wy-nav-content a { + text-decoration: underline; + color: #444; + background: transparent; +} + +.wy-nav-content a:hover { + text-decoration: underline; + background-color: #dedede; +} + +.wy-nav-content a:visited { + color: #444; +} + +[alt='Permalink'] { color: #eee; } +[alt='Permalink']:hover { color: black; } + +@media screen { + /* content column + * + * RTD theme's default is 800px as max width for the content, but we have + * tables with tons of columns, which need the full width of the view-port. 
+ */ + + .wy-nav-content{max-width: none; } + + /* inline literal: drop the borderbox, padding and red color */ + code, .rst-content tt, .rst-content code { + color: inherit; + border: none; + padding: unset; + background: inherit; + font-size: 85%; + } + + .rst-content tt.literal,.rst-content tt.literal,.rst-content code.literal { + color: inherit; + } + + /* Admonition should be gray, not blue or green */ + .rst-content .note .admonition-title, + .rst-content .tip .admonition-title, + .rst-content .warning .admonition-title, + .rst-content .caution .admonition-title, + .rst-content .admonition-tying-it-together .admonition-title, + .rst-content .important .admonition-title { + background: #f0f0f2; + color: #00557D; + + } + + .rst-content .note, + .rst-content .tip, + .rst-content .important, + .rst-content .warning, + .rst-content .admonition-tying-it-together, + .rst-content .caution { + background: #f0f0f2; + } + + /* Remove the icon in front of note/tip element, and before the logo */ + .icon-home:before, .rst-content .admonition-title:before { + display: none + } + + /* a custom informalexample container is used in some doc */ + .informalexample { + border: 1px solid; + border-color: #aaa; + margin: 1em 0em; + padding: 1em; + page-break-inside: avoid; + } + + /* Remove the blue background in the top left corner, around the logo */ + .wy-side-nav-search { + background: inherit; + } + +} diff --git a/poky/documentation/sphinx/yocto-vars.py b/poky/documentation/sphinx/yocto-vars.py new file mode 100644 index 000000000..568947299 --- /dev/null +++ b/poky/documentation/sphinx/yocto-vars.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +import re +import yaml + +import sphinx +from sphinx.application import Sphinx + +__version__ = '1.0' + +# Variables substitutions. Uses {VAR} subst using variables defined in poky.yaml +# Each .rst file is processed after source-read event (subst_vars_replace runs once per file) +subst_vars = {} + +def subst_vars_replace(app: Sphinx, docname, source): + result = source[0] + for k in subst_vars: + result = result.replace("&"+k+";", subst_vars[k]) + source[0] = result + +PATTERN = re.compile(r'&(.*?);') +def expand(val, src): + return PATTERN.sub(lambda m: expand(src.get(m.group(1), ''), src), val) + +def setup(app: Sphinx): + #FIXME: if poky.yaml changes, files are not reprocessed. + with open("poky.yaml") as file: + subst_vars.update(yaml.load(file, Loader=yaml.FullLoader)) + + for k in subst_vars: + subst_vars[k] = expand(subst_vars[k], subst_vars) + + app.connect('source-read', subst_vars_replace) + + return dict( + version = __version__, + parallel_read_safe = True, + parallel_write_safe = True + ) diff --git a/poky/documentation/test-manual/history.rst b/poky/documentation/test-manual/history.rst new file mode 100644 index 000000000..76d43091a --- /dev/null +++ b/poky/documentation/test-manual/history.rst @@ -0,0 +1,16 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 3.2 + - October 2020 + - The initial document released with the Yocto Project 3.2 Release diff --git a/poky/documentation/test-manual/test-manual-intro.rst b/poky/documentation/test-manual/test-manual-intro.rst new file mode 100644 index 000000000..53ad650b3 --- /dev/null +++ b/poky/documentation/test-manual/test-manual-intro.rst @@ -0,0 +1,550 @@ +.. 
SPDX-License-Identifier: CC-BY-2.0-UK + +***************************************** +The Yocto Project Test Environment Manual +***************************************** + +.. _test-welcome: + +Welcome +======= + +Welcome to the Yocto Project Test Environment Manual! This manual is a +work in progress. The manual contains information about the testing +environment used by the Yocto Project to make sure each major and minor +release works as intended. All the project's testing infrastructure and +processes are publicly visible and available so that the community can +see what testing is being performed, how it's being done and the current +status of the tests and the project at any given time. Other organizations +can leverage the process and testing +environment used by the Yocto Project to create their own automated, +production test environment, building upon the foundations from the +project core. + +Currently, the Yocto Project Test Environment Manual has no projected +release date. This manual is a work-in-progress and is being initially +loaded with information from the README files and notes from key +engineers: + +- *yocto-autobuilder2:* This + :yocto_git:`README.md ` + is the main README which details how to set up the Yocto Project + Autobuilder. The ``yocto-autobuilder2`` repository represents the + Yocto Project's console UI plugin to Buildbot and the configuration + necessary to configure Buildbot to perform the testing the project + requires. + +- *yocto-autobuilder-helper:* This :yocto_git:`README ` + and repository contain Yocto Project Autobuilder Helper scripts and + configuration. The ``yocto-autobuilder-helper`` repository contains + the "glue" logic that defines which tests to run and how to run them. + As a result, it can be used by any Continuous Integration (CI) system + to run builds, support getting the correct code revisions, configure + builds and layers, run builds, and collect results. The code is + independent of any CI system, which means the code can work with `Buildbot `__, + Jenkins, or others. This repository has a branch per release of the + project defining the tests to run on a per-release basis. + +.. _test-yocto-project-autobuilder-overview: + +Yocto Project Autobuilder Overview +================================== + +The Yocto Project Autobuilder collectively refers to the software, +tools, scripts, and procedures used by the Yocto Project to test +released software across supported hardware in an automated and regular +fashion. Basically, during the development of a Yocto Project release, +the Autobuilder tests if things work. The Autobuilder builds all test +targets and runs all the tests. + +The Yocto Project now uses standard upstream +`Buildbot `__ (version 9) to +drive its integration and testing. Buildbot Nine has a plug-in interface +that the Yocto Project customizes using code from the +``yocto-autobuilder2`` repository, adding its own console UI plugin. The +resulting UI plug-in allows you to visualize builds in a way suited to +the project's needs. + +A ``helper`` layer provides configuration and job management through +scripts found in the ``yocto-autobuilder-helper`` repository. The +``helper`` layer contains the bulk of the build configuration +information and is release-specific, which makes it highly customizable +on a per-project basis. The layer is CI system-agnostic and contains a +number of Helper scripts that can generate build configurations from +simple JSON files. + +..
note:: + + The project uses Buildbot for historical reasons but also because + many of the project developers have knowledge of Python. It is + possible to use the outer layers from another Continuous Integration + (CI) system such as + `Jenkins `__ + instead of Buildbot. + +The following figure shows the Yocto Project Autobuilder stack with a +topology that includes a controller and a cluster of workers: + +.. image:: figures/ab-test-cluster.png + :align: center + +.. _test-project-tests: + +Yocto Project Tests - Types of Testing Overview +=============================================== + +The Autobuilder tests different elements of the project by using +the following types of tests: + +- *Build Testing:* Tests whether specific configurations build by + varying :term:`MACHINE`, + :term:`DISTRO`, other configuration + options, and the specific target images being built (or world). Used + to trigger builds of all the different test configurations on the + Autobuilder. Builds usually cover many different targets for + different architectures, machines, and distributions, as well as + different configurations, such as different init systems. The + Autobuilder tests literally hundreds of configurations and targets. + + - *Sanity Checks During the Build Process:* Tests initiated through + the :ref:`insane ` + class. These checks ensure the output of the builds is correct. + For example, does the ELF architecture in the generated binaries + match the target system? ARM binaries would not work in a MIPS + system! + +- *Build Performance Testing:* Tests whether or not commonly used steps + during builds work efficiently and avoid regressions. Tests that time + commonly used usage scenarios are run through ``oe-build-perf-test``. + These tests are run on isolated machines so that the time + measurements of the tests are accurate and no other processes + interfere with the timing results. The project currently tests + performance on two different distributions, Fedora and Ubuntu, to + ensure we have no single point of failure and to verify that the + different distros work effectively. + +- *eSDK Testing:* Image tests initiated through the following command:: + + $ bitbake image -c testsdkext + + The tests utilize the ``testsdkext`` class and the ``do_testsdkext`` task. + +- *Feature Testing:* Various scenario-based tests are run through the + :ref:`OpenEmbedded Self test (oe-selftest) `. We test oe-selftest on each of the main distributions + we support. + +- *Image Testing:* Image tests initiated through the following command:: + + $ bitbake image -c testimage + + The tests utilize the :ref:`testimage* ` + classes and the :ref:`ref-tasks-testimage` task. + +- *Layer Testing:* The Autobuilder can test whether + specific layers work with the rest of the system. The layers tested + may be selected by members of the project. Some key community layers + are also tested periodically. + +- *Package Testing:* A Package Test (ptest) runs tests against packages + built by the OpenEmbedded build system on the target machine. See the + :ref:`Testing Packages With + ptest ` section + in the Yocto Project Development Tasks Manual and the + ":yocto_wiki:`Ptest `" Wiki page for more + information on Ptest. + +- *SDK Testing:* Image tests initiated through the following command:: + + $ bitbake image -c testsdk + + The tests utilize the :ref:`testsdk ` class and + the ``do_testsdk`` task.
+ +- *Unit Testing:* Unit tests on various components of the system run + through :ref:`bitbake-selftest ` and + :ref:`oe-selftest `. + +- *Automatic Upgrade Helper:* This target tests whether new versions of + software are available and whether we can automatically upgrade to + those new versions. If so, this target emails the maintainers with a + patch to let them know this is possible. + +.. _test-test-mapping: + +How Tests Map to Areas of Code +============================== + +Tests map into the codebase as follows: + +- *bitbake-selftest:* + + These tests are self-contained and test BitBake as well as its APIs, + which include the fetchers. The tests are located in + ``bitbake/lib/*/tests``. + + From within the BitBake repository, run the following:: + + $ bitbake-selftest + + To skip tests that access the Internet, use the ``BB_SKIP_NETTEST`` + variable when running "bitbake-selftest" as follows:: + + $ BB_SKIP_NETTEST=yes bitbake-selftest + + Use this option when you wish to skip tests that access the network, + which are mostly necessary to test the fetcher modules. + + The default output is quiet and just prints a summary of what was + run. To see more information, there is a verbose option:: + + $ bitbake-selftest -v + + To specify + individual test modules to run, append the test module name to the + "bitbake-selftest" command. For example, to specify the tests for the + ``bb.tests.data`` module, run:: + + $ bitbake-selftest bb.tests.data + + You can also specify individual tests by defining the full name and module + plus the class path of the test, for example:: + + $ bitbake-selftest bb.tests.data.TestOverrides.test_one_override + + The tests are based on `Python + unittest `__. + +- *oe-selftest:* + + - These tests use OE to test the workflows, which include testing + specific features, behaviors of tasks, and API unit tests. + + - The tests can take advantage of parallelism through the "-j" + option, which can specify a number of threads to spread the tests + across. Note that all tests from a given class of tests will run + in the same thread. To parallelize large numbers of tests you can + split the class into multiple units. + + - The tests are based on Python unittest. + + - The code for the tests resides in + ``meta/lib/oeqa/selftest/cases/``. + + - To run all the tests, enter the following command:: + + $ oe-selftest -a + + - To run a specific test, use the following command form, where + ``testname`` is the name of the specific test:: + + $ oe-selftest -r testname + + For example, the following command would run the tinfoil + getVar API test:: + + $ oe-selftest -r tinfoil.TinfoilTests.test_getvar + + It is also possible to run a set + of tests. For example, the following command will run all of the + tinfoil tests:: + + $ oe-selftest -r tinfoil + +- *testimage:* + + - These tests build an image, boot it, and run tests against the + image's content. + + - The code for these tests resides in ``meta/lib/oeqa/runtime/cases/``. + + - You need to set the :term:`IMAGE_CLASSES` variable as follows:: + + IMAGE_CLASSES += "testimage" + + - Run the tests using the following command form:: + + $ bitbake image -c testimage + +- *testsdk:* + + - These tests build an SDK, install it, and then run tests against + that SDK. + + - The code for these tests resides in ``meta/lib/oeqa/sdk/cases/``. + + - Run the test using the following command form:: + + $ bitbake image -c testsdk + +- *testsdk_ext:* + + - These tests build an extended SDK (eSDK), install that eSDK, and + run tests against the eSDK.
+ + - The code for these tests resides in ``meta/lib/oeqa/esdk``. + + - To run the tests, use the following command form:: + + $ bitbake image -c testsdkext + +- *oe-build-perf-test:* + + - These tests run through commonly used usage scenarios and measure + the performance times. + + - The code for these tests resides in ``meta/lib/oeqa/buildperf``. + + - To run the tests, use the following command form:: + + $ oe-build-perf-test + + The command takes a number of options, + such as where to place the test results. The Autobuilder Helper + Scripts include the ``build-perf-test-wrapper`` script with + examples of how to use oe-build-perf-test from the command + line. + + Use the ``oe-git-archive`` command to store test results into a + Git repository. + + Use the ``oe-build-perf-report`` command to generate text reports + and HTML reports with graphs of the performance data. For + examples, see + :yocto_dl:`/releases/yocto/yocto-2.7/testresults/buildperf-centos7/perf-centos7.yoctoproject.org_warrior_20190414204758_0e39202.html` + and + :yocto_dl:`/releases/yocto/yocto-2.7/testresults/buildperf-centos7/perf-centos7.yoctoproject.org_warrior_20190414204758_0e39202.txt`. + + - The tests are contained in ``lib/oeqa/buildperf/test_basic.py``. + +Test Examples +============= + +This section provides example tests for each of the tests listed in the +:ref:`test-manual/test-manual-intro:How Tests Map to Areas of Code` section. + +For oeqa tests, testcases for each area reside in the main test +directory, ``meta/lib/oeqa/selftest/cases``. + +For bitbake-selftest, the BitBake testcases reside in the ``lib/bb/tests/`` +directory. + +.. _bitbake-selftest-example: + +``bitbake-selftest`` +-------------------- + +A simple test example from ``lib/bb/tests/data.py`` is:: + + class DataExpansions(unittest.TestCase): + def setUp(self): + self.d = bb.data.init() + self.d["foo"] = "value_of_foo" + self.d["bar"] = "value_of_bar" + self.d["value_of_foo"] = "value_of_'value_of_foo'" + + def test_one_var(self): + val = self.d.expand("${foo}") + self.assertEqual(str(val), "value_of_foo") + +In this example, a ``DataExpansions`` class of tests is created, +derived from standard Python unittest. The class has a common ``setUp`` +function which is shared by all the tests in the class. A simple test is +then added to test that when a variable is expanded, the correct value +is found. + +BitBake selftests are straightforward Python unittest test cases. Refer to the +Python unittest documentation at https://docs.python.org/3/library/unittest.html +for additional information on writing these tests. + +.. _oe-selftest-example: + +``oe-selftest`` +--------------- + +These tests are more complex due to the setup required behind the scenes +for full builds. Rather than directly using Python's unittest, the code +wraps most of the standard objects. The tests can be simple, such as +testing a command from within the OE build environment using the +following example:: + + class BitbakeLayers(OESelftestTestCase): + def test_bitbakelayers_showcrossdepends(self): + result = runCmd('bitbake-layers show-cross-depends') + self.assertTrue('aspell' in result.output, msg = "No dependencies were shown.
bitbake-layers show-cross-depends output: %s"% result.output) + +This example, taken from ``meta/lib/oeqa/selftest/cases/bblayers.py``, +creates a testcase from the ``OESelftestTestCase`` class, derived +from ``unittest.TestCase``, which runs the ``bitbake-layers`` command +and checks the output to ensure it contains something we know should be +there. + +The ``oeqa.utils.commands`` module contains helpers which can assist +with common tasks, including: + +- *Obtaining the value of a bitbake variable:* Use + ``oeqa.utils.commands.get_bb_var()`` or use + ``oeqa.utils.commands.get_bb_vars()`` for more than one variable + +- *Running a bitbake invocation for a build:* Use + ``oeqa.utils.commands.bitbake()`` + +- *Running a command:* Use ``oeqa.utils.commands.runCmd()`` + +There is also an ``oeqa.utils.commands.runqemu()`` function for launching +the ``runqemu`` command for testing things within a running, virtualized +image. + +You can run these tests in parallel. Parallelism works per test class, +so tests within a given test class should always run in the same build, +while tests in different classes or modules may be split into different +builds. There is no data store available for these tests since the tests +launch the ``bitbake`` command and exist outside of its context. As a +result, common bitbake library functions (bb.\*) are also unavailable. + +.. _testimage-example: + +``testimage`` +------------- + +These tests are run once an image is up and running, either on target +hardware or under QEMU. As a result, they are assumed to be running in a +target image environment, as opposed to a host build environment. A +simple example from ``meta/lib/oeqa/runtime/cases/python.py`` contains +the following:: + + class PythonTest(OERuntimeTestCase): + @OETestDepends(['ssh.SSHTest.test_ssh']) + @OEHasPackage(['python3-core']) + def test_python3(self): + cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\"" + status, output = self.target.run(cmd) + msg = 'Exit status was not 0. Output: %s' % output + self.assertEqual(status, 0, msg=msg) + +In this example, the ``OERuntimeTestCase`` class wraps +``unittest.TestCase``. Within the test, ``self.target`` represents the +target system, where commands can be run using the ``run()`` +method. + +To ensure certain test or package dependencies are met, you can use the +``OETestDepends`` and ``OEHasPackage`` decorators. For example, the test +shown here would only make sense if python3-core is installed in +the image. + +.. _testsdk_ext-example: + +``testsdk_ext`` +--------------- + +These tests are run against built extensible SDKs (eSDKs). The tests can +assume that the eSDK environment has already been set up.
An example from +``meta/lib/oeqa/sdk/cases/devtool.py`` contains the following:: + + class DevtoolTest(OESDKExtTestCase): + @classmethod + def setUpClass(cls): + myapp_src = os.path.join(cls.tc.esdk_files_dir, "myapp") + cls.myapp_dst = os.path.join(cls.tc.sdk_dir, "myapp") + shutil.copytree(myapp_src, cls.myapp_dst) + subprocess.check_output(['git', 'init', '.'], cwd=cls.myapp_dst) + subprocess.check_output(['git', 'add', '.'], cwd=cls.myapp_dst) + subprocess.check_output(['git', 'commit', '-m', "'test commit'"], cwd=cls.myapp_dst) + + @classmethod + def tearDownClass(cls): + shutil.rmtree(cls.myapp_dst) + + def _test_devtool_build(self, directory): + self._run('devtool add myapp %s' % directory) + try: + self._run('devtool build myapp') + finally: + self._run('devtool reset myapp') + + def test_devtool_build_make(self): + self._test_devtool_build(self.myapp_dst) + +In this example, the ``devtool`` +command is tested to see whether a sample application can be built with +the ``devtool build`` command within the eSDK. + +.. _testsdk-example: + +``testsdk`` +----------- + +These tests are run against built SDKs. The tests can assume that an SDK +has already been extracted and its environment file has been sourced. A +simple example from ``meta/lib/oeqa/sdk/cases/python2.py`` contains the +following:: + + class Python3Test(OESDKTestCase): + def setUp(self): + if not (self.tc.hasHostPackage("nativesdk-python3-core") or + self.tc.hasHostPackage("python3-core-native")): + raise unittest.SkipTest("No python3 package in the SDK") + + def test_python3(self): + cmd = "python3 -c \"import codecs; print(codecs.encode('Uryyb, jbeyq', 'rot13'))\"" + output = self._run(cmd) + self.assertEqual(output, "Hello, world\n") + +In this example, if nativesdk-python3-core has been installed into the SDK, the code runs +the python3 interpreter with a basic command to check that it is working +correctly. The test would only run if python3 is installed in the SDK. + +.. _oe-build-perf-test-example: + +``oe-build-perf-test`` +---------------------- + +The performance tests usually measure how long operations take and the +resource utilisation as that happens. An example from +``meta/lib/oeqa/buildperf/test_basic.py`` contains the following:: + + class Test3(BuildPerfTestCase): + def test3(self): + """Bitbake parsing (bitbake -p)""" + # Drop all caches and parse + self.rm_cache() + oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True) + self.measure_cmd_resources(['bitbake', '-p'], 'parse_1', + 'bitbake -p (no caches)') + # Drop tmp/cache + oe.path.remove(os.path.join(self.bb_vars['TMPDIR'], 'cache'), True) + self.measure_cmd_resources(['bitbake', '-p'], 'parse_2', + 'bitbake -p (no tmp/cache)') + # Parse with fully cached data + self.measure_cmd_resources(['bitbake', '-p'], 'parse_3', + 'bitbake -p (cached)') + +This example shows how three specific parsing timings are +measured, with and without various caches, to show how BitBake's parsing +performance trends over time. + +.. _test-writing-considerations: + +Considerations When Writing Tests +================================= + +When writing good tests, there are several things to keep in mind. Since +things running on the Autobuilder are accessed concurrently by multiple +workers, consider the following: + +**Running "cleanall" is not permitted.** + +This can delete files from DL_DIR, which would potentially break other +builds running in parallel. If this is required, DL_DIR must be set to +an isolated directory.
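+
+If a test really does need "cleanall", it can first point DL_DIR at a
+throwaway location of its own. The following is a minimal sketch of such
+a test, assuming the ``write_config()`` helper and import paths used by
+the oe-selftest cases in OpenEmbedded-Core; the directory and target
+names are only illustrative::
+
+    from oeqa.selftest.case import OESelftestTestCase
+    from oeqa.utils.commands import bitbake
+
+    class CleanallIsolationTest(OESelftestTestCase):
+        def test_cleanall_with_isolated_dl_dir(self):
+            # Redirect downloads into the build directory so the shared
+            # DL_DIR used by parallel builds is untouched.
+            self.write_config('DL_DIR = "${TOPDIR}/test-downloads"')
+            bitbake('quilt-native -c cleanall')
+            bitbake('quilt-native')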
+ +**Running "cleansstate" is not permitted.** + +This can delete files from SSTATE_DIR which would potentially break +other builds running in parallel. If this is required, SSTATE_DIR must +be set to an isolated directory. Alternatively, you can use the "-f" +option with the ``bitbake`` command to "taint" tasks by changing the +sstate checksums to ensure sstate cache items will not be reused. + +**Tests should not change the metadata.** + +This is particularly true for oe-selftests since these can run in +parallel and changing metadata leads to changing checksums, which +confuses BitBake while running in parallel. If this is necessary, copy +layers to a temporary location and modify them. Some tests need to +change metadata, such as the devtool tests. To prevent the metadate from +changes, set up temporary copies of that data first. diff --git a/poky/documentation/test-manual/test-manual-intro.xml b/poky/documentation/test-manual/test-manual-intro.xml index 8e2c7cd87..0cdbee4d8 100644 --- a/poky/documentation/test-manual/test-manual-intro.xml +++ b/poky/documentation/test-manual/test-manual-intro.xml @@ -12,8 +12,8 @@ Welcome to the Yocto Project Test Environment Manual! This manual is a work in progress. The manual contains information about the testing environment used by the Yocto Project to make sure each major and minor release works as intended. All the - project’s testing infrastructure and processes are publicly visible and available so - that the community can see what testing is being performed, how it’s being done and the + project's testing infrastructure and processes are publicly visible and available so + that the community can see what testing is being performed, how it's being done and the current status of the tests and the project at any given time. It is intended that Other organizations can leverage off the process and testing environment used by the Yocto Project to create their own automated, production test environment, building upon the @@ -579,7 +579,7 @@ 'bitbake -p (cached)') This example shows how three specific parsing timings are measured, with and without - various caches, to show how BitBake’s parsing performance trends over time. + various caches, to show how BitBake's parsing performance trends over time.
    diff --git a/poky/documentation/test-manual/test-manual-test-process.rst b/poky/documentation/test-manual/test-manual-test-process.rst new file mode 100644 index 000000000..96e71bf31 --- /dev/null +++ b/poky/documentation/test-manual/test-manual-test-process.rst @@ -0,0 +1,103 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************************** +Project Testing and Release Process +*********************************** + +.. _test-daily-devel: + +Day to Day Development +====================== + +This section details how the project tests changes, through automation +on the Autobuilder or with the assistance of QA teams, through to making +releases. + +The project aims to test changes against our test matrix before those +changes are merged into the master branch. As such, changes are queued +up in batches either in the ``master-next`` branch in the main trees, or +in user trees such as ``ross/mut`` in ``poky-contrib`` (Ross Burton +helps review and test patches and this is his testing tree). + +We have two broad categories of test builds, including "full" and +"quick". On the Autobuilder, these can be seen as "a-quick" and +"a-full", simply for ease of sorting in the UI. Use our Autobuilder +console view to see where we manage most test-related items, available +at: :yocto_ab:`/typhoon/#/console`. + +Builds are triggered manually when the test branches are ready. The +builds are monitored by the SWAT team. For additional information, see +:yocto_wiki:`/wiki/Yocto_Build_Failure_Swat_Team`. +If successful, the changes would usually be merged to the ``master`` +branch. If not successful, someone would respond to the changes on the +mailing list explaining that there was a failure in testing. The choice +of quick or full would depend on the type of changes and the speed with +which the result was required. + +The Autobuilder does build the ``master`` branch once daily for several +reasons, in particular, to ensure the current ``master`` branch does +build, but also to keep ``yocto-testresults`` +(:yocto_git:`/cgit.cgi/yocto-testresults/`), +buildhistory +(:yocto_git:`/cgit.cgi/poky-buildhistory/`), and +our sstate up to date. On the weekend, there is a master-next build +instead to ensure the test results are updated for the less frequently +run targets. + +Performance builds (buildperf-\* targets in the console) are triggered +separately every six hours and automatically push their results to the +buildstats repository at: +:yocto_git:`/cgit.cgi/yocto-buildstats/`. + +The 'quick' targets have been selected to be the ones which catch the +most failures or give the most valuable data. We run 'fast' ptests in +this case, for example, but not the ones which take a long time. The quick +target doesn't include \*-lsb builds for all architectures or some world +builds, and doesn't trigger performance tests or ltp testing. The full +build includes all these things and is slower but more comprehensive. + +Release Builds +============== + +The project typically has two major releases a year with a six-month +cadence in April and October. Between these there would be a number of +milestone releases (usually four), with the final one being stabilization +only, along with point releases of our stable branches.
+ +The build and release process for these project releases is similar to +that in `Day to Day Development <#test-daily-devel>`__, in that the +a-full target of the Autobuilder is used, but in addition the form is +configured to generate and publish artefacts, and the milestone number, +version, release candidate number and other information are entered. The +box to "generate an email to QA" is also checked. + +When the build completes, an email is sent out using the send-qa-email +script in the ``yocto-autobuilder-helper`` repository to the list of +people configured for that release. Release builds are placed into a +directory in https://autobuilder.yocto.io/pub/releases on the +Autobuilder, which is included in the email. The process from here is +more manual and control is effectively passed to release engineering. +The next steps include: + +- QA teams respond to the email saying which tests they plan to run and + when the results will be available. + +- QA teams run their tests and share their results in the + yocto-testresults-contrib repository, along with a summary of their + findings. + +- Release engineering prepares the release as per their process. + +- Test results from the QA teams are included in the release in + separate directories and also uploaded to the yocto-testresults + repository alongside the other test results for the given revision. + +- The QA report in the final release is regenerated using resulttool to + include the new test results and the test summaries from the teams + (as headers to the generated report). + +- The release is checked against the release checklist and release + readiness criteria. + +- A final decision on whether to release is made by the YP TSC, who have + final oversight on release readiness. diff --git a/poky/documentation/test-manual/test-manual-understand-autobuilder.rst b/poky/documentation/test-manual/test-manual-understand-autobuilder.rst new file mode 100644 index 000000000..2fcae5000 --- /dev/null +++ b/poky/documentation/test-manual/test-manual-understand-autobuilder.rst @@ -0,0 +1,305 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +******************************************* +Understanding the Yocto Project Autobuilder +******************************************* + +Execution Flow within the Autobuilder +===================================== + +The "a-full" and "a-quick" targets are the usual entry points into the +Autobuilder and it makes sense to follow the process through the system +starting there. This is best visualised from the Autobuilder Console +view (:yocto_ab:`/typhoon/#/console`). + +Each item along the top of that view represents some "target build" and +these targets are all run in parallel. The 'full' build will trigger the +majority of them, the "quick" build will trigger some subset of them. +The Autobuilder effectively runs whichever configuration is defined for +each of those targets on a separate buildbot worker. To understand the +configuration, you need to look at the entry in the ``config.json`` file +within the ``yocto-autobuilder-helper`` repository.
The targets are +defined in the ‘overrides' section, a quick example could be qemux86-64 +which looks like:: + + "qemux86-64" : { + "MACHINE" : "qemux86-64", + "TEMPLATE" : "arch-qemu", + "step1" : { + "extravars" : [ + "IMAGE_FSTYPES_append = ' wic wic.bmap'" + ] + } + }, + +And to expand that, you need the "arch-qemu" entry from +the "templates" section, which looks like:: + + "arch-qemu" : { + "BUILDINFO" : true, + "BUILDHISTORY" : true, + "step1" : { + "BBTARGETS" : "core-image-sato core-image-sato-dev core-image-sato-sdk core-image-minimal core-image-minimal-dev core-image-sato:do_populate_sdk", + "SANITYTARGETS" : "core-image-minimal:do_testimage core-image-sato:do_testimage core-image-sato-sdk:do_testimage core-image-sato:do_testsdk" + }, + "step2" : { + "SDKMACHINE" : "x86_64", + "BBTARGETS" : "core-image-sato:do_populate_sdk core-image-minimal:do_populate_sdk_ext core-image-sato:do_populate_sdk_ext", + "SANITYTARGETS" : "core-image-sato:do_testsdk core-image-minimal:do_testsdkext core-image-sato:do_testsdkext" + }, + "step3" : { + "BUILDHISTORY" : false, + "EXTRACMDS" : ["${SCRIPTSDIR}/checkvnc; DISPLAY=:1 oe-selftest ${HELPERSTMACHTARGS} -j 15"], + "ADDLAYER" : ["${BUILDDIR}/../meta-selftest"] + } + }, + +Combining these two entries you can see that "qemux86-64" is a three step build where the +``bitbake BBTARGETS`` would be run, then ``bitbake SANITYTARGETS`` for each step; all for +``MACHINE="qemx86-64"`` but with differing SDKMACHINE settings. In step +1 an extra variable is added to the ``auto.conf`` file to enable wic +image generation. + +While not every detail of this is covered here, you can see how the +template mechanism allows quite complex configurations to be built up +yet allows duplication and repetition to be kept to a minimum. + +The different build targets are designed to allow for parallelisation, +so different machines are usually built in parallel, operations using +the same machine and metadata are built sequentially, with the aim of +trying to optimise build efficiency as much as possible. + +The ``config.json`` file is processed by the scripts in the Helper +repository in the ``scripts`` directory. The following section details +how this works. + +.. _test-autobuilder-target-exec-overview: + +Autobuilder Target Execution Overview +===================================== + +For each given target in a build, the Autobuilder executes several +steps. These are configured in ``yocto-autobuilder2/builders.py`` and +roughly consist of: + +#. *Run clobberdir*. + + This cleans out any previous build. Old builds are left around to + allow easier debugging of failed builds. For additional information, + see :ref:`test-manual/test-manual-understand-autobuilder:clobberdir`. + +#. *Obtain yocto-autobuilder-helper* + + This step clones the ``yocto-autobuilder-helper`` git repository. + This is necessary to prevent the requirement to maintain all the + release or project-specific code within Buildbot. The branch chosen + matches the release being built so we can support older releases and + still make changes in newer ones. + +#. *Write layerinfo.json* + + This transfers data in the Buildbot UI when the build was configured + to the Helper. + +#. *Call scripts/shared-repo-unpack* + + This is a call into the Helper scripts to set up a checkout of all + the pieces this build might need. It might clone the BitBake + repository and the OpenEmbedded-Core repository. It may clone the + Poky repository, as well as additional layers. 
It will use the data + from the ``layerinfo.json`` file to help understand the + configuration. It will also use a local cache of repositories to + speed up the clone checkouts. For additional information, see + :ref:`test-manual/test-manual-understand-autobuilder:Autobuilder Clone Cache`. + + This step has two possible modes of operation. If the build is part + of a parent build, it's possible that all the repositories needed may + already be available, ready in a pre-prepared directory. An "a-quick" + or "a-full" build would prepare this before starting the other + sub-target builds. This is done for two reasons: + + - the upstream may change during a build, for example, from a forced + push, and this ensures we have matching content for the whole build + + - if 15 Workers all tried to pull the same data from the same repos, + we can hit resource limits on upstream servers as they can think + they are under some kind of network attack + + This pre-prepared directory is shared among the Workers over NFS. If + the build is an individual build and there is no "shared" directory + available, it would clone from the cache and the upstreams as + necessary. This is considered the fallback mode. + +#. *Call scripts/run-config* + + This is another call into the Helper scripts where it's expected that + the main functionality of this target will be executed. + +.. _test-autobuilder-tech: + +Autobuilder Technology +====================== + +The Autobuilder has Yocto Project-specific functionality to allow builds +to operate with increased efficiency and speed. + +.. _test-clobberdir: + +clobberdir +---------- + +When deleting files, the Autobuilder uses ``clobberdir``, which is a +special script that moves files to a temporary location, rather than +deleting them. Files in this location are deleted by an ``rm`` command, +which is run under ``ionice -c 3``. In other words, the deletion only +happens when there is idle IO capacity on the Worker. The Autobuilder +Worker Janitor runs this deletion. See :ref:`test-manual/test-manual-understand-autobuilder:Autobuilder Worker Janitor`. + +.. _test-autobuilder-clone-cache: + +Autobuilder Clone Cache +----------------------- + +Cloning repositories from scratch each time they are required was slow +on the Autobuilder. We therefore have a stash of commonly used +repositories pre-cloned on the Workers. Data is fetched from these +during clones first, then "topped up" with later revisions from any +upstream when necessary. The cache is maintained by the Autobuilder +Worker Janitor. See :ref:`test-manual/test-manual-understand-autobuilder:Autobuilder Worker Janitor`. + +.. _test-autobuilder-worker-janitor: + +Autobuilder Worker Janitor +-------------------------- + +This is a process running on each Worker that performs two basic +operations: background file deletion at IO idle (see :ref:`test-manual/test-manual-understand-autobuilder:Autobuilder Target Execution Overview`: Run clobberdir) and +maintenance of a cache of cloned repositories to improve the speed +at which the system can check out repositories. + +.. _test-shared-dl-dir: + +Shared DL_DIR +------------- + +The Workers are all connected over NFS, which allows DL_DIR to be shared +between them. This reduces network accesses from the system and allows +the build to be sped up. Usage of the directory within the build system +is designed so that it can be shared over NFS. + +..
_test-shared-sstate-cache: + +Shared SSTATE_DIR +----------------- + +The Workers are all connected over NFS, which allows the ``sstate`` +directory to be shared between them. This means once a Worker has built +an artifact, all the others can benefit from it. Usage of the directory +within the build system is designed for sharing over NFS. + +.. _test-resulttool: + +Resulttool +---------- + +All of the different tests run as part of the build generate output into +``testresults.json`` files. This allows us to determine which tests ran +in a given build and their status. Additional information, such as +failure logs or the time taken to run the tests, may also be included. + +Resulttool is part of OpenEmbedded-Core and is used to manipulate these +JSON results files. It has the ability to merge files together, display +reports of the test results and compare different result files. + +For details, see :yocto_wiki:`/wiki/Resulttool`. + +.. _test-run-config-tgt-execution: + +run-config Target Execution +=========================== + +The ``scripts/run-config`` execution is where most of the work within +the Autobuilder happens. It runs through a number of steps; the first +are general setup steps that are run once and include: + +#. Set up any ``buildtools-tarball`` if configured. + +#. Call "buildhistory-init" if buildhistory is configured. + +For each step that is configured in ``config.json``, it will perform the +following: + +#. Add any layers that are specified using the + ``bitbake-layers add-layer`` command (logging as stepXa) + +#. Call the ``scripts/setup-config`` script to generate the necessary + ``auto.conf`` configuration file for the build + +#. Run the ``bitbake BBTARGETS`` command (logging as stepXb) + +#. Run the ``bitbake SANITYTARGETS`` command (logging as stepXc) + +#. Run the ``EXTRACMDS`` commands, which are run within the BitBake build + environment (logging as stepXd) + +#. Run the ``EXTRAPLAINCMDS`` command(s), which are run outside the + BitBake build environment (logging as stepXd) + +#. Remove any layers added in step + 1 using the ``bitbake-layers remove-layer`` command (logging as stepXa) + +Once the execution steps above complete, ``run-config`` executes a set +of post-build steps, including: + +#. Call ``scripts/publish-artifacts`` to collect any output which is to + be saved from the build. + +#. Call ``scripts/collect-results`` to collect any test results to be + saved from the build. + +#. Call ``scripts/upload-error-reports`` to send any error reports + generated to the remote server. + +#. Clean up the build directory using + :ref:`test-manual/test-manual-understand-autobuilder:clobberdir` if the build was successful, + else rename it to "build-renamed" for potential future debugging. + +.. _test-deploying-yp-autobuilder: + +Deploying Yocto Autobuilder +=========================== + +The most up-to-date information about how to set up and deploy your own +Autobuilder can be found in README.md in the ``yocto-autobuilder2`` +repository. + +We hope that people can use the ``yocto-autobuilder2`` code directly but +it is inevitable that users will end up needing to heavily customise the +``yocto-autobuilder-helper`` repository, particularly the +``config.json`` file as they will want to define their own test matrix. + +The Autobuilder supports two customization options: + +- variable substitution + +- overlaying configuration files + +The standard ``config.json`` minimally attempts to allow substitution of +the paths.
The Helper script repository includes a +``local-example.json`` file to show how you could override these from a +separate configuration file. Pass the following into the environment of +the Autobuilder:: + + $ ABHELPER_JSON="config.json local-example.json" + +As another example, you could also pass the following into the +environment:: + + $ ABHELPER_JSON="config.json /some/location/local.json" + +One issue users often run into is validation of the ``config.json`` files. A +tip for minimizing issues from invalid json files is to use a Git +``pre-commit-hook.sh`` script to verify the JSON file before committing +it. Create a symbolic link as follows:: + + $ ln -s ../../scripts/pre-commit-hook.sh .git/hooks/pre-commit diff --git a/poky/documentation/test-manual/test-manual-understand-autobuilder.xml b/poky/documentation/test-manual/test-manual-understand-autobuilder.xml index a04006605..8600367be 100644 --- a/poky/documentation/test-manual/test-manual-understand-autobuilder.xml +++ b/poky/documentation/test-manual/test-manual-understand-autobuilder.xml @@ -8,18 +8,18 @@ Understanding the Yocto Project Autobuilder
    Execution Flow within the Autobuilder - The “a-full” and “a-quick” targets are the usual entry points into the Autobuilder and + The "a-full" and "a-quick" targets are the usual entry points into the Autobuilder and it makes sense to follow the process through the system starting there. This is best visualised from the Autobuilder Console view (https://autobuilder.yoctoproject.org/typhoon/#/console). - Each item along the top of that view represents some “target build” and these targets - are all run in parallel. The ‘full’ build will trigger the majority of them, the “quick” + Each item along the top of that view represents some "target build" and these targets + are all run in parallel. The 'full' build will trigger the majority of them, the "quick" build will trigger some subset of them. The Autobuilder effectively runs whichever configuration is defined for each of those targets on a seperate buildbot worker. To understand the configuration, you need to look at the entry on config.json file within the yocto-autobuilder-helper repository. The targets are defined in - the ‘overrides’ section, a quick example could be qemux86-64 which looks + the ‘overrides' section, a quick example could be qemux86-64 which looks like: "qemux86-64" : { "MACHINE" : "qemux86-64", @@ -31,7 +31,7 @@ } }, And - to expand that, you need the “arch-qemu” entry from the “templates” section, which looks + to expand that, you need the "arch-qemu" entry from the "templates" section, which looks like: "arch-qemu" : { "BUILDINFO" : true, @@ -52,10 +52,10 @@ } }, Combining - these two entries you can see that “qemux86-64” is a three step build where the + these two entries you can see that "qemux86-64" is a three step build where the bitbake BBTARGETS would be run, then bitbake SANITYTARGETS for each step; all for - MACHINE=”qemx86-64” but with differing SDKMACHINE settings. In + MACHINE="qemx86-64" but with differing SDKMACHINE settings. In step 1 an extra variable is added to the auto.conf file to enable wic image generation. While not every detail of this is covered here, you can see how the templating @@ -260,7 +260,7 @@ Cleanup the build directory using clobberdir if the - build was successful, else rename it to “build-renamed” for potential future + build was successful, else rename it to "build-renamed" for potential future debugging. diff --git a/poky/documentation/test-manual/test-manual.rst b/poky/documentation/test-manual/test-manual.rst new file mode 100644 index 000000000..bd5b1b096 --- /dev/null +++ b/poky/documentation/test-manual/test-manual.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +===================================== +Yocto Project Test Environment Manual +===================================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + test-manual-intro + test-manual-test-process + test-manual-understand-autobuilder + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/toaster-manual/history.rst b/poky/documentation/toaster-manual/history.rst new file mode 100644 index 000000000..027b343d0 --- /dev/null +++ b/poky/documentation/toaster-manual/history.rst @@ -0,0 +1,46 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +*********************** +Manual Revision History +*********************** + +.. 
list-table:: + :widths: 10 15 40 + :header-rows: 1 + + * - Revision + - Date + - Note + * - 1.8 + - April 2015 + - The initial document released with the Yocto Project 1.8 Release + * - 2.0 + - October 2015 + - Released with the Yocto Project 2.0 Release. + * - 2.1 + - April 2016 + - Released with the Yocto Project 2.1 Release. + * - 2.2 + - October 2016 + - Released with the Yocto Project 2.2 Release. + * - 2.3 + - May 2017 + - Released with the Yocto Project 2.3 Release. + * - 2.4 + - October 2017 + - Released with the Yocto Project 2.4 Release. + * - 2.5 + - May 2018 + - Released with the Yocto Project 2.5 Release. + * - 2.6 + - November 2018 + - Released with the Yocto Project 2.6 Release. + * - 2.7 + - May 2019 + - Released with the Yocto Project 2.7 Release. + * - 3.0 + - October 2019 + - Released with the Yocto Project 3.0 Release. + * - 3.1 + - April 2020 + - Released with the Yocto Project 3.1 Release. diff --git a/poky/documentation/toaster-manual/toaster-manual-intro.rst b/poky/documentation/toaster-manual/toaster-manual-intro.rst new file mode 100644 index 000000000..0b7cd41c8 --- /dev/null +++ b/poky/documentation/toaster-manual/toaster-manual-intro.rst @@ -0,0 +1,105 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +************ +Introduction +************ + +Toaster is a web interface to the Yocto Project's +:term:`OpenEmbedded Build System`. The interface +enables you to configure and run your builds. Information about builds +is collected and stored in a database. You can use Toaster to configure +and start builds on multiple remote build servers. + +.. _intro-features: + +Toaster Features +================ + +Toaster allows you to configure and run builds, and it provides +extensive information about the build process. + +- *Configure and Run Builds:* You can use the Toaster web interface to + configure and start your builds. Builds started using the Toaster web + interface are organized into projects. When you create a project, you + are asked to select a release, or version of the build system you + want to use for the project builds. As shipped, Toaster supports + Yocto Project releases 1.8 and beyond. With the Toaster web + interface, you can: + + - Browse layers listed in the various + :ref:`layer sources ` + that are available in your project (e.g. the OpenEmbedded Layer Index at + http://layers.openembedded.org/layerindex/). + + - Browse images, recipes, and machines provided by those layers. + + - Import your own layers for building. + + - Add and remove layers from your configuration. + + - Set configuration variables. + + - Select a target or multiple targets to build. + + - Start your builds. + + Toaster also allows you to configure and run your builds from the + command line, and switch between the command line and the web + interface at any time. Builds started from the command line appear + within a special Toaster project called "Command line builds". + +- *Information About the Build Process:* Toaster also records extensive + information about your builds. Toaster collects data for builds you + start from the web interface and from the command line as long as + Toaster is running. + + .. note:: + + You must start Toaster before the build or it will not collect + build data. + + With Toaster you can: + + - See what was built (recipes and packages) and what packages were + installed into your final image. + + - Browse the directory structure of your image. + + - See the value of all variables in your build configuration, and + which files set each value. 
+ + - Examine error, warning, and trace messages to aid in debugging. + + - See information about the BitBake tasks executed and reused during + your build, including those that used shared state. + + - See dependency relationships between recipes, packages, and tasks. + + - See performance information such as build time, task time, CPU + usage, and disk I/O. + +For an overview of Toaster shipped with the Yocto Project &DISTRO; +Release, see the "`Toaster - Yocto Project +2.2 `__" video. + +.. _toaster-installation-options: + +Installation Options +==================== + +You can set Toaster up to run as a local instance or as a shared hosted +service. + +When Toaster is set up as a local instance, all the components reside on +a single build host. Fundamentally, a local instance of Toaster is +suited for a single user developing on a single build host. + +.. image:: figures/simple-configuration.png + :align: center + +Toaster as a hosted service is suited for multiple users developing +across several build hosts. When Toaster is set up as a hosted service, +its components can be spread across several machines: + +.. image:: figures/hosted-service.png + :align: center diff --git a/poky/documentation/toaster-manual/toaster-manual-reference.rst b/poky/documentation/toaster-manual/toaster-manual-reference.rst new file mode 100644 index 000000000..e95536e05 --- /dev/null +++ b/poky/documentation/toaster-manual/toaster-manual-reference.rst @@ -0,0 +1,662 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +********************** +Concepts and Reference +********************** + +In order to configure and use Toaster, you should understand some +concepts and have some basic command reference material available. This +final chapter provides conceptual information on layer sources, +releases, and JSON configuration files. Also provided is a quick look at +some useful ``manage.py`` commands that are Toaster-specific. +Information on ``manage.py`` commands does exist across the Web and the +information in this manual by no means attempts to provide a command +comprehensive reference. + +Layer Source +============ + +In general, a "layer source" is a source of information about existing +layers. In particular, we are concerned with layers that you can use +with the Yocto Project and Toaster. This chapter describes a particular +type of layer source called a "layer index." + +A layer index is a web application that contains information about a set +of custom layers. A good example of an existing layer index is the +OpenEmbedded Layer Index. A public instance of this layer index exists +at http://layers.openembedded.org. You can find the code for this +layer index's web application at +http://git.yoctoproject.org/cgit/cgit.cgi/layerindex-web/. + +When you tie a layer source into Toaster, it can query the layer source +through a +`REST `__ +API, store the information about the layers in the Toaster database, and +then show the information to users. Users are then able to view that +information and build layers from Toaster itself without worrying about +cloning or editing the BitBake layers configuration file +``bblayers.conf``. + +Tying a layer source into Toaster is convenient when you have many +custom layers that need to be built on a regular basis by a community of +developers. In fact, Toaster comes pre-configured with the OpenEmbedded +Metadata Index. + +.. note:: + + You do not have to use a layer source to use Toaster. Tying into a + layer source is optional. + +.. 
_layer-source-using-with-toaster: + +Setting Up and Using a Layer Source +----------------------------------- + +To use your own layer source, you need to set up the layer source and +then tie it into Toaster. This section describes how to tie into a layer +index in a manner similar to the way Toaster ties into the OpenEmbedded +Metadata Index. + +Understanding Your Layers +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The obvious first step for using a layer index is to have several custom +layers that developers build and access using the Yocto Project on a +regular basis. This set of layers needs to exist and you need to be +familiar with where they reside. You will need that information when you +set up the code for the web application that "hooks" into your set of +layers. + +For general information on layers, see the +":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`" +section in the Yocto Project Overview and Concepts Manual. For information on how +to create layers, see the ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`" +section in the Yocto Project Development Tasks Manual. + +.. _configuring-toaster-to-hook-into-your-layer-source: + +Configuring Toaster to Hook Into Your Layer Index +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want Toaster to use your layer index, you must host the web +application in a server to which Toaster can connect. You also need to +give Toaster the information about your layer index. In other words, you +have to configure Toaster to use your layer index. This section +describes two methods by which you can configure and use your layer +index. + +In the previous section, the code for the OpenEmbedded Metadata Index +(i.e. http://layers.openembedded.org) was referenced. You can use +this code, which is at +http://git.yoctoproject.org/cgit/cgit.cgi/layerindex-web/, as a +base to create your own layer index. + +Use the Administration Interface +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Access the administration interface through a browser by entering the +URL of your Toaster instance and adding "``/admin``" to the end of the +URL. As an example, if you are running Toaster locally, use the +following URL:: + + http://127.0.0.1:8000/admin + +The administration interface has a "Layer sources" section that includes +an "Add layer source" button. Click that button and provide the required +information. Make sure you select "layerindex" as the layer source type. + +Use the Fixture Feature +^^^^^^^^^^^^^^^^^^^^^^^ + +The Django fixture feature overrides the default layer server when you +use it to specify a custom URL. To use the fixture feature, create (or +edit) the file ``bitbake/lib/toaster.orm/fixtures/custom.xml``, and then +set the following Toaster setting to your custom URL: + +.. code-block:: xml + + + + + CUSTOM_LAYERINDEX_SERVER + https://layers.my_organization.org/layerindex/branch/master/layers/ + + + +When you start Toaster for the first time, or +if you delete the file ``toaster.sqlite`` and restart, the database will +populate cleanly from this layer index server. + +Once the information has been updated, verify the new layer information +is available by using the Toaster web interface. To do that, visit the +"All compatible layers" page inside a Toaster project. The layers from +your layer source should be listed there. + +If you change the information in your layer index server, refresh the +Toaster database by running the following command: + +.. 
code-block:: shell + + $ bitbake/lib/toaster/manage.py lsupdates + + +If Toaster can reach the API URL, you should see a message telling you that +Toaster is updating the layer source information. + +.. _toaster-releases: + +Releases +======== + +When you create a Toaster project using the web interface, you are asked +to choose a "Release." In the context of Toaster, the term "Release" +refers to a set of layers and a BitBake version the OpenEmbedded build +system uses to build something. As shipped, Toaster is pre-configured +with releases that correspond to Yocto Project release branches. +However, you can modify, delete, and create new releases according to +your needs. This section provides some background information on +releases. + +.. _toaster-releases-supported: + +Pre-Configured Releases +----------------------- + +As shipped, Toaster is configured to use a specific set of releases. Of +course, you can always configure Toaster to use any release. For +example, you might want your project to build against a specific commit +of any of the "out-of-the-box" releases. Or, you might want your project +to build against different revisions of OpenEmbedded and BitBake. + +As shipped, Toaster is configured to work with the following releases: + +- *Yocto Project &DISTRO; "&DISTRO_NAME;" or OpenEmbedded "&DISTRO_NAME;":* + This release causes your Toaster projects to build against the head + of the &DISTRO_NAME_NO_CAP; branch at + https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=&DISTRO_NAME_NO_CAP; or + http://git.openembedded.org/openembedded-core/commit/?h=&DISTRO_NAME_NO_CAP;. + +- *Yocto Project "Master" or OpenEmbedded "Master":* This release + causes your Toaster Projects to build against the head of the master + branch, which is where active development takes place, at + https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/ or + http://git.openembedded.org/openembedded-core/log/. + +- *Local Yocto Project or Local OpenEmbedded:* This release causes your + Toaster Projects to build against the head of the ``poky`` or + ``openembedded-core`` clone you have local to the machine running + Toaster. + +Configuring Toaster +=================== + +In order to use Toaster, you must configure the database with the +default content. The following subsections describe various aspects of +Toaster configuration. + +Configuring the Workflow +------------------------ + +The ``bldcontrol/management/commands/checksettings.py`` file controls +workflow configuration. The following steps outline the process to +initially populate this database. + +1. The default project settings are set from + ``orm/fixtures/settings.xml``. + +2. The default project distro and layers are added from + ``orm/fixtures/poky.xml`` if poky is installed. If poky is not + installed, they are added from ``orm/fixtures/oe-core.xml``. + +3. If the ``orm/fixtures/custom.xml`` file exists, then its values are + added. + +4. The layer index is then scanned and added to the database. + +Once these steps complete, Toaster is set up and ready to use. + +Customizing Pre-Set Data +------------------------ + +The pre-set data for Toaster is easily customizable. You can create the +``orm/fixtures/custom.xml`` file to customize the values that go into to +the database. Customization is additive, and can either extend or +completely replace the existing values. + +You use the ``orm/fixtures/custom.xml`` file to change the default +project settings for the machine, distro, file images, and layers. 
When +creating a new project, you can use the file to define the offered +alternate project release selections. For example, you can add one or +more additional selections that present custom layer sets or distros, +and any other local or proprietary content. + +Additionally, you can completely disable the content from the +``oe-core.xml`` and ``poky.xml`` files by defining the section shown +below in the ``settings.xml`` file. For example, this option is +particularly useful if your custom configuration defines fewer releases +or layers than the default fixture files. + +The following example sets "name" to "CUSTOM_XML_ONLY" and its value to +"True". + +.. code-block:: xml + + + CUSTOM_XML_ONLY + True + + +Understanding Fixture File Format +--------------------------------- + +The following is an overview of the file format used by the +``oe-core.xml``, ``poky.xml``, and ``custom.xml`` files. + +The following subsections describe each of the sections in the fixture +files, and outline an example section of the XML code. you can use to +help understand this information and create a local ``custom.xml`` file. + +Defining the Default Distro and Other Values +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This section defines the default distro value for new projects. By +default, it reserves the first Toaster Setting record "1". The following +demonstrates how to set the project default value for +:term:`DISTRO`: + +.. code-block:: xml + + + + DEFCONF_DISTRO + poky + + +You can override +other default project values by adding additional Toaster Setting +sections such as any of the settings coming from the ``settings.xml`` +file. Also, you can add custom values that are included in the BitBake +environment. The "pk" values must be unique. By convention, values that +set default project values have a "DEFCONF" prefix. + +Defining BitBake Version +~~~~~~~~~~~~~~~~~~~~~~~~ + +The following defines which version of BitBake is used for the following +release selection: + +.. code-block:: xml + + + + &DISTRO_NAME_NO_CAP; + git://git.yoctoproject.org/poky + &DISTRO_NAME_NO_CAP; + bitbake + + +.. _defining-releases: + +Defining Release +~~~~~~~~~~~~~~~~ + +The following defines the releases when you create a new project: + +.. code-block:: xml + + + + &DISTRO_NAME_NO_CAP; + Yocto Project &DISTRO; "&DISTRO_NAME;" + 1 + &DISTRO_NAME_NO_CAP; + Toaster will run your builds using the tip of the Yocto Project &DISTRO_NAME; branch. + + +The "pk" value must match the above respective BitBake version record. + +Defining the Release Default Layer Names +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following defines the default layers for each release: + +.. code-block:: xml + + + + 1 + openembedded-core + + +The 'pk' values in the example above should start at "1" and increment +uniquely. You can use the same layer name in multiple releases. + +Defining Layer Definitions +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Layer definitions are the most complex. The following defines each of +the layers, and then defines the exact layer version of the layer used +for each respective release. You must have one ``orm.layer`` entry for +each layer. Then, with each entry you need a set of +``orm.layer_version`` entries that connects the layer with each release +that includes the layer. In general all releases include the layer. + +.. 
code-block:: xml + + + openembedded-core + + git://git.yoctoproject.org/poky + http://git.yoctoproject.org/cgit/cgit.cgi/poky + http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch% + http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch% + + + 1 + 0 + 1 + &DISTRO_NAME_NO_CAP; + meta + + 1 + 0 + 2 + HEAD + HEAD + meta + + + 1 + 0 + 3 + master + meta + + +The layer "pk" values above must be unique, and typically start at "1". The +layer version "pk" values must also be unique across all layers, and typically +start at "1". + +Remote Toaster Monitoring +========================= + +Toaster has an API that allows remote management applications to +directly query the state of the Toaster server and its builds in a +machine-to-machine manner. This API uses the +`REST `__ +interface and the transfer of JSON files. For example, you might monitor +a build inside a container through well supported known HTTP ports in +order to easily access a Toaster server inside the container. In this +example, when you use this direct JSON API, you avoid having web page +parsing against the display the user sees. + +Checking Health +--------------- + +Before you use remote Toaster monitoring, you should do a health check. +To do this, ping the Toaster server using the following call to see if +it is still alive:: + + http://host:port/health + +Be sure to provide values for host and port. If the server is alive, you will +get the response HTML: + +.. code-block:: html + + + + Toaster Health + Ok + + +Determining Status of Builds in Progress +---------------------------------------- + +Sometimes it is useful to determine the status of a build in progress. +To get the status of pending builds, use the following call:: + + http://host:port/toastergui/api/building + +Be sure to provide values for host and port. The output is a JSON file that +itemizes all builds in progress. This file includes the time in seconds since +each respective build started as well as the progress of the cloning, parsing, +and task execution. The following is sample output for a build in progress: + +.. code-block:: JSON + + {"count": 1, + "building": [ + {"machine": "beaglebone", + "seconds": "463.869", + "task": "927:2384", + "distro": "poky", + "clone": "1:1", + "id": 2, + "start": "2017-09-22T09:31:44.887Z", + "name": "20170922093200", + "parse": "818:818", + "project": "my_rocko", + "target": "core-image-minimal" + }] + } + +The JSON data for this query is returned in a +single line. In the previous example the line has been artificially +split for readability. + +Checking Status of Builds Completed +----------------------------------- + +Once a build is completed, you get the status when you use the following +call:: + + http://host:port/toastergui/api/builds + +Be sure to provide values for host and port. The output is a JSON file that +itemizes all complete builds, and includes build summary information. The +following is sample output for a completed build: + +.. code-block:: JSON + + {"count": 1, + "builds": [ + {"distro": "poky", + "errors": 0, + "machine": "beaglebone", + "project": "my_rocko", + "stop": "2017-09-22T09:26:36.017Z", + "target": "quilt-native", + "seconds": "78.193", + "outcome": "Succeeded", + "id": 1, + "start": "2017-09-22T09:25:17.824Z", + "warnings": 1, + "name": "20170922092618" + }] + } + +The JSON data for this query is returned in a single line. In the +previous example the line has been artificially split for readability. 
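+
+Because the API returns plain JSON over HTTP, any standard HTTP client
+can consume it. The following sketch assumes ``curl`` is installed and
+that Toaster is listening on the default local address; the
+``python3 -m json.tool`` step only re-flows the single-line response for
+readability:
+
+.. code-block:: shell
+
+   # Fetch the list of completed builds and pretty-print the JSON response.
+   # Adjust the host and port to match your Toaster instance.
+   $ curl -s http://127.0.0.1:8000/toastergui/api/builds | python3 -m json.tool
+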
+ +Determining Status of a Specific Build +-------------------------------------- + +Sometimes it is useful to determine the status of a specific build. To +get the status of a specific build, use the following call:: + + http://host:port/toastergui/api/build/ID + +Be sure to provide values for +host, port, and ID. You can find the value for ID from the Builds +Completed query. See the ":ref:`toaster-manual/toaster-manual-reference:checking status of builds completed`" +section for more information. + +The output is a JSON file that itemizes the specific build and includes +build summary information. The following is sample output for a specific +build: + +.. code-block:: JSON + + {"build": + {"distro": "poky", + "errors": 0, + "machine": "beaglebone", + "project": "my_rocko", + "stop": "2017-09-22T09:26:36.017Z", + "target": "quilt-native", + "seconds": "78.193", + "outcome": "Succeeded", + "id": 1, + "start": "2017-09-22T09:25:17.824Z", + "warnings": 1, + "name": "20170922092618", + "cooker_log": "/opt/user/poky/build-toaster-2/tmp/log/cooker/beaglebone/build_20170922_022607.991.log" + } + } + +The JSON data for this query is returned in a single line. In the +previous example the line has been artificially split for readability. + +.. _toaster-useful-commands: + +Useful Commands +=============== + +In addition to the web user interface and the scripts that start and +stop Toaster, command-line commands exist through the ``manage.py`` +management script. You can find general documentation on ``manage.py`` +at the +`Django `__ +site. However, several ``manage.py`` commands have been created that are +specific to Toaster and are used to control configuration and back-end +tasks. You can locate these commands in the +:term:`Source Directory` (e.g. ``poky``) at +``bitbake/lib/manage.py``. This section documents those commands. + +.. note:: + + - When using ``manage.py`` commands given a default configuration, + you must be sure that your working directory is set to the + :term:`Build Directory`. Using + ``manage.py`` commands from the Build Directory allows Toaster to + find the ``toaster.sqlite`` file, which is located in the Build + Directory. + + - For non-default database configurations, it is possible that you + can use ``manage.py`` commands from a directory other than the + Build Directory. To do so, the ``toastermain/settings.py`` file + must be configured to point to the correct database backend. + +.. _toaster-command-buildslist: + +``buildslist`` +-------------- + +The ``buildslist`` command lists all builds that Toaster has recorded. +Access the command as follows: + +.. code-block:: shell + + $ bitbake/lib/toaster/manage.py buildslist + +The command returns a list, which includes numeric +identifications, of the builds that Toaster has recorded in the current +database. + +You need to run the ``buildslist`` command first to identify existing +builds in the database before using the +:ref:`toaster-manual/toaster-manual-reference:\`\`builddelete\`\`` command. Here is an +example that assumes default repository and build directory names: + +.. code-block:: shell + + $ cd ~/poky/build + $ python ../bitbake/lib/toaster/manage.py buildslist + +If your Toaster database had only one build, the above +:ref:`toaster-manual/toaster-manual-reference:\`\`buildslist\`\`` +command would return something like the following:: + + 1: qemux86 poky core-image-minimal + +.. 
_toaster-command-builddelete: + +``builddelete`` +--------------- + +The ``builddelete`` command deletes data associated with a build. Access +the command as follows: + +.. code-block:: + + $ bitbake/lib/toaster/manage.py builddelete build_id + +The command deletes all the build data for the specified +build_id. This command is useful for removing old and unused data from +the database. + +Prior to running the ``builddelete`` command, you need to get the ID +associated with builds by using the +:ref:`toaster-manual/toaster-manual-reference:\`\`buildslist\`\`` command. + +.. _toaster-command-perf: + +``perf`` +-------- + +The ``perf`` command measures Toaster performance. Access the command as +follows: + +.. code-block:: shell + + $ bitbake/lib/toaster/manage.py perf + +The command is a sanity check that returns page loading times in order to +identify performance problems. + +.. _toaster-command-checksettings: + +``checksettings`` +----------------- + +The ``checksettings`` command verifies existing Toaster settings. Access +the command as follows: + +.. code-block:: shell + + $ bitbake/lib/toaster/manage.py checksettings + +Toaster uses settings that are based on the database to configure the +building tasks. The ``checksettings`` command verifies that the database +settings are valid in the sense that they have the minimal information +needed to start a build. + +In order for the ``checksettings`` command to work, the database must be +correctly set up and not have existing data. To be sure the database is +ready, you can run the following: + +.. code-block:: shell + + $ bitbake/lib/toaster/manage.py syncdb + $ bitbake/lib/toaster/manage.py migrate orm + $ bitbake/lib/toaster/manage.py migrate bldcontrol + +After running these commands, you can run the ``checksettings`` command. + +.. _toaster-command-runbuilds: + +``runbuilds`` +------------- + +The ``runbuilds`` command launches scheduled builds. Access the command +as follows: + +.. code-block:: shell + + $ bitbake/lib/toaster/manage.py runbuilds + +The ``runbuilds`` command checks if scheduled builds exist in the database +and then launches them per schedule. The command returns after the builds +start but before they complete. The Toaster Logging Interface records and +updates the database when the builds complete. diff --git a/poky/documentation/toaster-manual/toaster-manual-setup-and-use.rst b/poky/documentation/toaster-manual/toaster-manual-setup-and-use.rst new file mode 100644 index 000000000..01c0dce41 --- /dev/null +++ b/poky/documentation/toaster-manual/toaster-manual-setup-and-use.rst @@ -0,0 +1,651 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK +.. Set default pygment highlighting to 'shell' for this document +.. highlight:: shell + +**************************** +Setting Up and Using Toaster +**************************** + +Starting Toaster for Local Development +====================================== + +Once you have set up the Yocto Project and installed the Toaster system +dependencies as described in the ":ref:`toaster-manual/toaster-manual-start:Preparing to Use +Toaster`" chapter, you are ready to start +Toaster. + +Navigate to the root of your +:term:`Source Directory` (e.g. ``poky``):: + + $ cd poky + +Once in that directory, source the build environment script:: + + $ source oe-init-build-env + +Next, from the build directory (e.g. 
+``poky/build``), start Toaster using this command:: + + $ source toaster start + +You can now run your builds from the command line, or with Toaster +as explained in section +":ref:`toaster-manual/toaster-manual-setup-and-use:using the toaster web interface`". + +To access the Toaster web interface, open your favorite browser and +enter the following:: + + http://127.0.0.1:8000 + +Setting a Different Port +======================== + +By default, Toaster starts on port 8000. You can use the ``WEBPORT`` +parameter to set a different port. For example, the following command +sets the port to "8400":: + + $ source toaster start webport=8400 + +Setting Up Toaster Without a Web Server +======================================= + +You can start a Toaster environment without starting its web server. +This is useful for the following: + +- Capturing a command-line build's statistics into the Toaster database + for examination later. + +- Capturing a command-line build's statistics when the Toaster server + is already running. + +- Having one instance of the Toaster web server track and capture + multiple command-line builds, where each build is started in its own + "noweb" Toaster environment. + +The following commands show how to start a Toaster environment without +starting its web server, perform BitBake operations, and then shut down +the Toaster environment. Once the build is complete, you can close the +Toaster environment. Before closing the environment, however, you should +allow a few minutes to ensure the complete transfer of its BitBake build +statistics to the Toaster database. If you have a separate Toaster web +server instance running, you can watch this command-line build's +progress and examine the results as soon as they are posted:: + + $ source toaster start noweb + $ bitbake target + $ source toaster stop + +Setting Up Toaster Without a Build Server +========================================= + +You can start a Toaster environment with the "New Projects" feature +disabled. Doing so is useful for the following: + +- Sharing your build results over the web server while blocking others + from starting builds on your host. + +- Allowing only local command-line builds to be captured into the + Toaster database. + +Use the following command to set up Toaster without a build server:: + + $ source toaster start nobuild webport=port + +Setting up External Access +========================== + +By default, Toaster binds to the loop back address (i.e. ``localhost``), +which does not allow access from external hosts. To allow external +access, use the ``WEBPORT`` parameter to open an address that connects +to the network, specifically the IP address that your NIC uses to +connect to the network. You can also bind to all IP addresses the +computer supports by using the shortcut "0.0.0.0:port". + +The following example binds to all IP addresses on the host:: + + $ source toaster start webport=0.0.0.0:8400 + +This example binds to a specific IP address on the host's NIC:: + + $ source toaster start webport=192.168.1.1:8400 + +The Directory for Cloning Layers +================================ + +Toaster creates a ``_toaster_clones`` directory inside your Source +Directory (i.e. ``poky``) to clone any layers needed for your builds. + +Alternatively, if you would like all of your Toaster related files and +directories to be in a particular location other than the default, you +can set the ``TOASTER_DIR`` environment variable, which takes precedence +over your current working directory. 
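+
+For example, the following sketch keeps the Toaster-managed files under
+a dedicated directory by exporting the variable before starting Toaster;
+the ``/srv/toaster`` path is purely illustrative::
+
+   $ export TOASTER_DIR=/srv/toaster
+   $ source toaster start
+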
Setting this environment variable +causes Toaster to create and use ``$TOASTER_DIR./_toaster_clones``. + +.. _toaster-the-build-directory: + +The Build Directory +=================== + +Toaster creates a build directory within your Source Directory (e.g. +``poky``) to execute the builds. + +Alternatively, if you would like all of your Toaster related files and +directories to be in a particular location, you can set the +``TOASTER_DIR`` environment variable, which takes precedence over your +current working directory. Setting this environment variable causes +Toaster to use ``$TOASTER_DIR/build`` as the build directory. + +.. _toaster-creating-a-django-super-user: + +Creating a Django Superuser +=========================== + +Toaster is built on the `Django +framework `__. Django provides an +administration interface you can use to edit Toaster configuration +parameters. + +To access the Django administration interface, you must create a +superuser by following these steps: + +#. If you used ``pip3``, which is recommended, to set up the Toaster + system dependencies, you need be sure the local user path is in your + ``PATH`` list. To append the pip3 local user path, use the following + command:: + + $ export PATH=$PATH:$HOME/.local/bin + +#. From the directory containing the Toaster database, which by default + is the :term:`Build Directory`, + invoke the ``createsuperuser`` command from ``manage.py``:: + + $ cd ~/poky/build + $ ../bitbake/lib/toaster/manage.py createsuperuser + +#. Django prompts you for the username, which you need to provide. + +#. Django prompts you for an email address, which is optional. + +#. Django prompts you for a password, which you must provide. + +#. Django prompts you to re-enter your password for verification. + +After completing these steps, the following confirmation message +appears:: + + Superuser created successfully. + +Creating a superuser allows you to access the Django administration +interface through a browser. The URL for this interface is the same as +the URL used for the Toaster instance with "/admin" on the end. For +example, if you are running Toaster locally, use the following URL:: + + http://127.0.0.1:8000/admin + +You can use the Django administration interface to set Toaster configuration +parameters such as the build directory, layer sources, default variable +values, and BitBake versions. + +.. _toaster-setting-up-a-production-instance-of-toaster: + +Setting Up a Production Instance of Toaster +=========================================== + +You can use a production instance of Toaster to share the Toaster +instance with remote users, multiple users, or both. The production +instance is also the setup that can handle heavier loads on the web +service. Use the instructions in the following sections to set up +Toaster to run builds through the Toaster web interface. + +.. _toaster-production-instance-requirements: + +Requirements +------------ + +Be sure you meet the following requirements: + +.. note:: + + You must comply with all Apache, ``mod-wsgi``, and Mysql requirements. + +- Have all the build requirements as described in the ":ref:`toaster-manual/toaster-manual-start:Preparing to + Use Toaster`" chapter. + +- Have an Apache webserver. + +- Have ``mod-wsgi`` for the Apache webserver. + +- Use the Mysql database server. 
+ +- If you are using Ubuntu, run the following:: + + $ sudo apt-get install apache2 libapache2-mod-wsgi-py3 mysql-server python3-pip libmysqlclient-dev + +- If you are using Fedora or a RedHat distribution, run the + following:: + + $ sudo dnf install httpd python3-mod_wsgi python3-pip mariadb-server mariadb-devel python3-devel + +- If you are using openSUSE, run the following:: + + $ sudo zypper install apache2 apache2-mod_wsgi-python3 python3-pip mariadb mariadb-client python3-devel + +.. _toaster-installation-steps: + +Installation +------------ + +Perform the following steps to install Toaster: + +#. Create toaster user and set its home directory to + ``/var/www/toaster``:: + + $ sudo /usr/sbin/useradd toaster -md /var/www/toaster -s /bin/false + $ sudo su - toaster -s /bin/bash + +#. Checkout a copy of ``poky`` into the web server directory. You will + be using ``/var/www/toaster``:: + + $ git clone git://git.yoctoproject.org/poky + $ git checkout &DISTRO_NAME_NO_CAP; + +#. Install Toaster dependencies using the --user flag which keeps the + Python packages isolated from your system-provided packages:: + + $ cd /var/www/toaster/ + $ pip3 install --user -r ./poky/bitbake/toaster-requirements.txt + $ pip3 install --user mysqlclient + + .. note:: + + Isolating these packages is not required but is recommended. + Alternatively, you can use your operating system's package + manager to install the packages. + +#. Configure Toaster by editing + ``/var/www/toaster/poky/bitbake/lib/toaster/toastermain/settings.py`` + as follows: + + - Edit the + `DATABASES `__ + settings: + + .. code-block:: python + + DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.mysql', + 'NAME': 'toaster_data', + 'USER': 'toaster', + 'PASSWORD': 'yourpasswordhere', + 'HOST': 'localhost', + 'PORT': '3306', + } + } + + - Edit the + `SECRET_KEY `__: + + .. code-block:: python + + SECRET_KEY = 'your_secret_key' + + - Edit the + `STATIC_ROOT `__: + + .. code-block:: python + + STATIC_ROOT = '/var/www/toaster/static_files/' + +#. Add the database and user to the ``mysql`` server defined earlier:: + + $ mysql -u root -p + mysql> CREATE DATABASE toaster_data; + mysql> CREATE USER 'toaster'@'localhost' identified by 'yourpasswordhere'; + mysql> GRANT all on toaster_data.\* to 'toaster'@'localhost'; + mysql> quit + +#. Get Toaster to create the database schema, default data, and gather + the statically-served files:: + + $ cd /var/www/toaster/poky/ + $ ./bitbake/lib/toaster/manage.py migrate + $ TOASTER_DIR=`pwd\` TEMPLATECONF='poky' \ + ./bitbake/lib/toaster/manage.py checksettings + $ ./bitbake/lib/toaster/manage.py collectstatic + + + In the previous + example, from the ``poky`` directory, the ``migrate`` command + ensures the database schema changes have propagated correctly (i.e. + migrations). The next line sets the Toaster root directory + ``TOASTER_DIR`` and the location of the Toaster configuration file + ``TOASTER_CONF``, which is relative to ``TOASTER_DIR``. The + ``TEMPLATECONF`` value reflects the contents of + ``poky/.templateconf``, and by default, should include the string + "poky". For more information on the Toaster configuration file, see + the ":ref:`toaster-manual/toaster-manual-reference:Configuring Toaster`" section. + + This line also runs the ``checksettings`` command, which configures + the location of the Toaster :term:`Build Directory`. + The Toaster + root directory ``TOASTER_DIR`` determines where the Toaster build + directory is created on the file system. 
In the example above, + ``TOASTER_DIR`` is set as follows:: + + /var/www/toaster/poky + + + This setting causes the Toaster build directory to be:: + + /var/www/toaster/poky/build + + Finally, the ``collectstatic`` command is a Django framework command + that collects all the statically served files into a designated + directory to be served up by the Apache web server as defined by + ``STATIC_ROOT``. + +#. Test and/or use the Mysql integration with Toaster's Django web + server. At this point, you can start up the normal Toaster Django + web server with the Toaster database in Mysql. You can use this web + server to confirm that the database migration and data population + from the Layer Index is complete. + + To start the default Toaster Django web server with the Toaster + database now in Mysql, use the standard start commands:: + + $ source oe-init-build-env + $ source toaster start + + Additionally, if Django is sufficient for your requirements, you can use + it for your release system and migrate later to Apache as your + requirements change. + +#. Add an Apache configuration file for Toaster to your Apache web + server's configuration directory. If you are using Ubuntu or Debian, + put the file here:: + + /etc/apache2/conf-available/toaster.conf + + + If you are using Fedora or RedHat, put it here:: + + /etc/httpd/conf.d/toaster.conf + + If you are using OpenSUSE, put it here:: + + /etc/apache2/conf.d/toaster.conf + + Following is a sample Apache configuration for Toaster you can follow: + + .. code-block:: apache + + Alias /static /var/www/toaster/static_files + + + Order allow,deny + Allow from all + + + Require all granted + + + + + + Require all granted + + + + WSGIDaemonProcess toaster_wsgi python-path=/var/www/toaster/poky/bitbake/lib/toaster:/var/www/toaster/.local/lib/python3.4/site-packages + WSGIScriptAlias / "/var/www/toaster/poky/bitbake/lib/toaster/toastermain/wsgi.py" + + WSGIProcessGroup toaster_wsgi + + + + If you are using Ubuntu or Debian, you will need to enable the config and + module for Apache:: + + $ sudo a2enmod wsgi + $ sudo a2enconf toaster + $ chmod +x bitbake/lib/toaster/toastermain/wsgi.py + + Finally, restart Apache to make sure all new configuration is loaded. For Ubuntu, + Debian, and OpenSUSE use:: + + $ sudo service apache2 restart + + For Fedora and RedHat use:: + + $ sudo service httpd restart + +#. Prepare the systemd service to run Toaster builds. Here is a sample + configuration file for the service: + + .. code-block:: ini + + [Unit] + Description=Toaster runbuilds + + [Service] + Type=forking User=toaster + ExecStart=/usr/bin/screen -d -m -S runbuilds /var/www/toaster/poky/bitbake/lib/toaster/runbuilds-service.sh start + ExecStop=/usr/bin/screen -S runbuilds -X quit + WorkingDirectory=/var/www/toaster/poky + + [Install] + WantedBy=multi-user.target + + + Prepare the ``runbuilds-service.sh`` script that you need to place in the + ``/var/www/toaster/poky/bitbake/lib/toaster/`` directory by setting + up executable permissions:: + + #!/bin/bash + + #export http_proxy=http://proxy.host.com:8080 + #export https_proxy=http://proxy.host.com:8080 + #export GIT_PROXY_COMMAND=$HOME/bin/gitproxy + cd ~/poky/ + source ./oe-init-build-env build + source ../bitbake/bin/toaster $1 noweb + [ "$1" == 'start' ] && /bin/bash + +#. 
Run the service:: + + $ sudo service runbuilds start + + Since the service is running in a detached screen session, you can + attach to it using this command:: + + $ sudo su - toaster + $ screen -rS runbuilds + + You can detach from the service again using "Ctrl-a" followed by "d" key + combination. + +You can now open up a browser and start using Toaster. + +Using the Toaster Web Interface +=============================== + +The Toaster web interface allows you to do the following: + +- Browse published layers in the `OpenEmbedded Layer + Index `__ that are available for your + selected version of the build system. + +- Import your own layers for building. + +- Add and remove layers from your configuration. + +- Set configuration variables. + +- Select a target or multiple targets to build. + +- Start your builds. + +- See what was built (recipes and packages) and what packages were + installed into your final image. + +- Browse the directory structure of your image. + +- See the value of all variables in your build configuration, and which + files set each value. + +- Examine error, warning and trace messages to aid in debugging. + +- See information about the BitBake tasks executed and reused during + your build, including those that used shared state. + +- See dependency relationships between recipes, packages and tasks. + +- See performance information such as build time, task time, CPU usage, + and disk I/O. + +.. _web-interface-videos: + +Toaster Web Interface Videos +---------------------------- + +Following are several videos that show how to use the Toaster GUI: + +- *Build Configuration:* This + `video `__ overviews and + demonstrates build configuration for Toaster. + +- *Build Custom Layers:* This + `video `__ shows you how + to build custom layers that are used with Toaster. + +- *Toaster Homepage and Table Controls:* This + `video `__ goes over the + Toaster entry page, and provides an overview of the data manipulation + capabilities of Toaster, which include search, sorting and filtering + by different criteria. + +- *Build Dashboard:* This + `video `__ shows you the + build dashboard, a page providing an overview of the information + available for a selected build. + +- *Image Information:* This + `video `__ walks through + the information Toaster provides about images: packages installed and + root file system. + +- *Configuration:* This + `video `__ provides + Toaster build configuration information. + +- *Tasks:* This `video `__ + shows the information Toaster provides about the tasks run by the + build system. + +- *Recipes and Packages Built:* This + `video `__ shows the + information Toaster provides about recipes and packages built. + +- *Performance Data:* This + `video `__ shows the + build performance data provided by Toaster. + +.. _a-note-on-the-local-yocto-project-release: + +Additional Information About the Local Yocto Project Release +------------------------------------------------------------ + +This section only applies if you have set up Toaster for local +development, as explained in the +":ref:`toaster-manual/toaster-manual-setup-and-use:starting toaster for local development`" +section. + +When you create a project in Toaster, you will be asked to provide a +name and to select a Yocto Project release. One of the release options +you will find is called "Local Yocto Project". + +.. 
image:: figures/new-project.png + :align: center + :scale: 75% + +When you select the "Local Yocto Project" release, Toaster will run your +builds using the local Yocto Project clone you have in your computer: +the same clone you are using to run Toaster. Unless you manually update +this clone, your builds will always use the same Git revision. + +If you select any of the other release options, Toaster will fetch the +tip of your selected release from the upstream `Yocto Project +repository `__ every time you run a build. +Fetching this tip effectively means that if your selected release is +updated upstream, the Git revision you are using for your builds will +change. If you are doing development locally, you might not want this +change to happen. In that case, the "Local Yocto Project" release might +be the right choice. + +However, the "Local Yocto Project" release will not provide you with any +compatible layers, other than the three core layers that come with the +Yocto Project: + +- `openembedded-core `__ + +- `meta-poky `__ + +- `meta-yocto-bsp `__ + +.. image:: figures/compatible-layers.png + :align: center + :scale: 75% + +If you want to build any other layers, you will need to manually import +them into your Toaster project, using the "Import layer" page. + +.. image:: figures/import-layer.png + :align: center + :scale: 75% + +.. _toaster-web-interface-preferred-version: + +Building a Specific Recipe Given Multiple Versions +-------------------------------------------------- + +Occasionally, a layer might provide more than one version of the same +recipe. For example, the ``openembedded-core`` layer provides two +versions of the ``bash`` recipe (i.e. 3.2.48 and 4.3.30-r0) and two +versions of the ``which`` recipe (i.e. 2.21 and 2.18). The following +figure shows this exact scenario: + +.. image:: figures/bash-oecore.png + :align: center + :scale: 75% + +By default, the OpenEmbedded build system builds one of the two recipes. +For the ``bash`` case, version 4.3.30-r0 is built by default. +Unfortunately, Toaster as it exists, is not able to override the default +recipe version. If you would like to build bash 3.2.48, you need to set +the +:term:`PREFERRED_VERSION` +variable. You can do so from Toaster, using the "Add variable" form, +which is available in the "BitBake variables" page of the project +configuration section as shown in the following screen: + +.. image:: figures/add-variable.png + :align: center + :scale: 75% + +To specify ``bash`` 3.2.48 as the version to build, enter +"PREFERRED_VERSION_bash" in the "Variable" field, and "3.2.48" in the +"Value" field. Next, click the "Add variable" button: + +.. image:: figures/set-variable.png + :align: center + :scale: 75% + +After clicking the "Add variable" button, the settings for +``PREFERRED_VERSION`` are added to the bottom of the BitBake variables +list. With these settings, the OpenEmbedded build system builds the +desired version of the recipe rather than the default version: + +.. image:: figures/variable-added.png + :align: center + :scale: 75% diff --git a/poky/documentation/toaster-manual/toaster-manual-setup-and-use.xml b/poky/documentation/toaster-manual/toaster-manual-setup-and-use.xml index d810b9d57..f55574592 100644 --- a/poky/documentation/toaster-manual/toaster-manual-setup-and-use.xml +++ b/poky/documentation/toaster-manual/toaster-manual-setup-and-use.xml @@ -70,17 +70,17 @@ web server. 
This is useful for the following: - Capturing a command-line build’s statistics into + Capturing a command-line build's statistics into the Toaster database for examination later. - Capturing a command-line build’s statistics when + Capturing a command-line build's statistics when the Toaster server is already running. Having one instance of the Toaster web server track and capture multiple command-line builds, - where each build is started in its own “noweb” + where each build is started in its own "noweb" Toaster environment. @@ -92,7 +92,7 @@ minutes to ensure the complete transfer of its BitBake build statistics to the Toaster database. If you have a separate Toaster web server instance running, you - can watch this command-line build’s progress and examine the + can watch this command-line build's progress and examine the results as soon as they are posted: $ source toaster start noweb @@ -107,7 +107,7 @@ You can start a Toaster environment with the - “New Projects” feature disabled. + "New Projects" feature disabled. Doing so is useful for the following: @@ -470,7 +470,7 @@ STATIC_ROOT. - Test and/or use the Mysql integration with Toaster’s + Test and/or use the Mysql integration with Toaster's Django web server. At this point, you can start up the normal Toaster Django web server with the Toaster database in Mysql. diff --git a/poky/documentation/toaster-manual/toaster-manual-start.rst b/poky/documentation/toaster-manual/toaster-manual-start.rst new file mode 100644 index 000000000..2d612b893 --- /dev/null +++ b/poky/documentation/toaster-manual/toaster-manual-start.rst @@ -0,0 +1,57 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK +.. Set default pygments highlighting to shell for this document +.. highlight:: shell + +************************ +Preparing to Use Toaster +************************ + +This chapter describes how you need to prepare your system in order to +use Toaster. + +.. _toaster-setting-up-the-basic-system-requirements: + +Setting Up the Basic System Requirements +======================================== + +Before you can use Toaster, you need to first set up your build system +to run the Yocto Project. To do this, follow the instructions in the +":ref:`dev-manual/dev-manual-start:preparing the build host`" section of +the Yocto Project Development Tasks Manual. For Ubuntu/Debian, you might +also need to do an additional install of pip3. :: + + $ sudo apt-get install python3-pip + +.. _toaster-establishing-toaster-system-dependencies: + +Establishing Toaster System Dependencies +======================================== + +Toaster requires extra Python dependencies in order to run. A Toaster +requirements file named ``toaster-requirements.txt`` defines the Python +dependencies. The requirements file is located in the ``bitbake`` +directory, which is located in the root directory of the +:term:`Source Directory` (e.g. +``poky/bitbake/toaster-requirements.txt``). The dependencies appear in a +``pip``, install-compatible format. + +.. _toaster-load-packages: + +Install Toaster Packages +------------------------ + +You need to install the packages that Toaster requires. Use this +command:: + + $ pip3 install --user -r bitbake/toaster-requirements.txt + +The previous command installs the necessary Toaster modules into a local +python 3 cache in your ``$HOME`` directory. The caches is actually +located in ``$HOME/.local``. 
To see what packages have been installed +into your ``$HOME`` directory, do the following:: + + $ pip3 list installed --local + +If you need to remove something, the following works:: + + $ pip3 uninstall PackageNameToUninstall diff --git a/poky/documentation/toaster-manual/toaster-manual.rst b/poky/documentation/toaster-manual/toaster-manual.rst new file mode 100644 index 000000000..f6f59411b --- /dev/null +++ b/poky/documentation/toaster-manual/toaster-manual.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +=================== +Toaster User Manual +=================== + +| + +.. toctree:: + :caption: Table of Contents + :numbered: + + toaster-manual-intro + toaster-manual-start + toaster-manual-setup-and-use + toaster-manual-reference + history + +.. include:: /boilerplate.rst diff --git a/poky/documentation/transitioning-to-a-custom-environment.rst b/poky/documentation/transitioning-to-a-custom-environment.rst new file mode 100644 index 000000000..160152b09 --- /dev/null +++ b/poky/documentation/transitioning-to-a-custom-environment.rst @@ -0,0 +1,116 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +============================================================= +Transitioning to a custom environment for systems development +============================================================= + +| + +.. note:: + + So you've finished the :doc:`brief-yoctoprojectqs/brief-yoctoprojectqs` and + glanced over the document :doc:`what-i-wish-id-known`, the latter contains + important information learned from other users. You're well prepared. But + now, as you are starting your own project, it isn't exactly straightforward what + to do. And, the documentation is daunting. We've put together a few hints to + get you started. + +#. **Make a list of the processor, target board, technologies, and capabilities + that will be part of your project**. + You will be finding layers with recipes and other metadata that support these + things, and adding them to your configuration. (See #3) + +#. **Set up your board support**. + Even if you're using custom hardware, it might be easier to start with an + existing target board that uses the same processor or at least the same + architecture as your custom hardware. Knowing the board already has a + functioning Board Support Package (BSP) within the project makes it easier + for you to get comfortable with project concepts. + +#. **Find and acquire the best BSP for your target**. + Use the :yocto_home:`Yocto Project curated layer index + ` or even the `OpenEmbedded layer index + `_ to find and acquire the best BSP for your + target board. The Yocto Project layer index BSPs are regularly validated. The + best place to get your first BSP is from your silicon manufacturer or board + vendor – they can point you to their most qualified efforts. In general, for + Intel silicon use meta-intel, for Texas Instruments use meta-ti, and so + forth. Choose a BSP that has been tested with the same Yocto Project release + that you've downloaded. Be aware that some BSPs may not be immediately + supported on the very latest release, but they will be eventually. + + You might want to start with the build specification that Poky provides + (which is reference embedded distribution) and then add your newly chosen + layers to that. Here is the information :ref:`about adding layers + `. + +#. **Based on the layers you've chosen, make needed changes in your + configuration**. + For instance, you've chosen a machine type and added in the corresponding BSP + layer. 
You'll then need to change the value of the ``MACHINE`` variable in your + configuration file (build/local.conf) to point to that same machine + type. There could be other layer-specific settings you need to change as + well. Each layer has a ``README`` document that you can look at for this type of + usage information. + +#. **Add a new layer for any custom recipes and metadata you create**. + Use the ``bitbake-layers create-layer`` tool for Yocto Project 2.4+ + releases. If you are using a Yocto Project release earlier than 2.4, use the + ``yocto-layer create`` tool. The ``bitbake-layers`` tool also provides a number + of other useful layer-related commands. See + :ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the + \`\`bitbake-layers\`\` script` section. + +#. **Create your own layer for the BSP you're going to use**. + It is not common that you would need to create an entire BSP from scratch + unless you have a *really* special device. Even if you are using an existing + BSP, :ref:`create your own layer for the BSP `. For example, given a + 64-bit x86-based machine, copy the conf/intel-corei7-64 definition and give + the machine a relevant name (think board name, not product name). Make sure + the layer configuration is dependent on the meta-intel layer (or at least, + meta-intel remains in your bblayers.conf). Now you can put your custom BSP + settings into your layer and you can re-use it for different applications. + +#. **Write your own recipe to build additional software support that isn't + already available in the form of a recipe**. + Creating your own recipe is especially important for custom application + software that you want to run on your device. Writing new recipes is a + process of refinement. Start by getting each step of the build process + working beginning with fetching all the way through packaging. Next, run the + software on your target and refine further as needed. See :ref:`Writing a New + Recipe ` in the + Yocto Project Development Tasks Manual for more information. + +#. **Now you're ready to create an image recipe**. + There are a number of ways to do this. However, it is strongly recommended + that you have your own image recipe - don't try appending to existing image + recipes. Recipes for images are trivial to create and you usually want to + fully customize their contents. + +#. **Build your image and refine it**. + Add what's missing and fix anything that's broken using your knowledge of the + :ref:`workflow ` to identify where issues might be occurring. + +#. **Consider creating your own distribution**. + When you get to a certain level of customization, consider creating your own + distribution rather than using the default reference distribution. + + Distribution settings define the packaging back-end (e.g. rpm or other) as + well as the package feed and possibly the update solution. You would create + your own distribution in a new layer inheriting from Poky but overriding what + needs to change for your distribution. If you find yourself adding a lot of + configuration to your local.conf file aside from paths and other typical + local settings, it's time to :ref:`consider creating your own distribution + `. + + You can add product specifications that can customize the distribution if + needed in other layers. You can also add other functionality specific to the + product. But to update the distribution, not individual products, you update + the distribution feature through that layer. + +#. **Congratulations! 
You're well on your way.** + Welcome to the Yocto Project community. + +.. include:: /boilerplate.rst diff --git a/poky/documentation/what-i-wish-id-known.rst b/poky/documentation/what-i-wish-id-known.rst new file mode 100644 index 000000000..495ebdc20 --- /dev/null +++ b/poky/documentation/what-i-wish-id-known.rst @@ -0,0 +1,226 @@ +.. SPDX-License-Identifier: CC-BY-2.0-UK + +========================================= +What I wish I'd known about Yocto Project +========================================= + +| + +.. note:: + + Before reading further, make sure you've taken a look at the + :yocto_home:`Software Overview` page which presents the + definitions for many of the terms referenced here. Also, know that some of the + information here won't make sense now, but as you start developing, it is the + information you'll want to keep close at hand. These are best known methods for + working with Yocto Project and they are updated regularly. + +Using the Yocto Project is fairly easy, *until something goes wrong*. Without an +understanding of how the build process works, you'll find yourself trying to +troubleshoot "a black box". Here are a few items that new users wished they had +known before embarking on their first build with Yocto Project. Feel free to +contact us with other suggestions. + +#. **Use Git, not the tarball download:** + If you use git the software will be automatically updated with bug updates + because of how git works. If you download the tarball instead, you will need + to be responsible for your own updates. + +#. **Get to know the layer index:** + All layers can be found in the `layer index + `_. Layers which have applied for Yocto + Project Compatible status (structure continuity assurance and testing) can be + found in the :yocto_home:`Yocto Project Compatible index + `. Generally check the Compatible layer index first, + and if you don't find the necessary layer check the general layer index. The + layer index is an original artifact from the Open Embedded Project. As such, + that index doesn't have the curating and testing that the Yocto Project + provides on Yocto Project Compatible layer list, but the latter has fewer + entries. Know that when you start searching in the layer index that not all + layers have the same level of maturity, validation, or usability. Nor do + searches prioritize displayed results. There is no easy way to help you + through the process of choosing the best layer to suit your needs. + Consequently, it is often trial and error, checking the mailing lists, or + working with other developers through collaboration rooms that can help you + make good choices. + +#. **Use existing BSP layers from silicon vendors when possible:** + Intel, TI, NXP and others have information on what BSP layers to use with + their silicon. These layers have names such as "meta-intel" or "meta-ti". Try + not to build layers from scratch. If you do have custom silicon, use one of + these layers as a guide or template and familiarize yourself with the + :doc:`bsp-guide/bsp-guide`. + +#. **Do not put everything into one layer:** + Use different layers to logically separate information in your build. As an + example, you could have a BSP layer, a GUI layer, a distro configuration, + middleware, or an application (e.g. "meta-filesystems", "meta-python", + "meta-intel", and so forth). Putting your entire build into one layer limits + and complicates future customization and reuse. 
Isolating information into + layers, on the other hand, helps keep simplify future customizations and + reuse. + +#. **Never modify the POKY layer. Never. Ever. When you update to the next + release, you'll lose all of your work. ALL OF IT.** + +#. **Don't be fooled by documentation searching results:** + Yocto Project documentation is always being updated. Unfortunately, when you + use Google to search for Yocto Project concepts or terms, Google consistently + searches and retrieves older versions of Yocto Project manuals. For example, + searching for a particular topic using Google could result in a "hit" on a + Yocto Project manual that is several releases old. To be sure that you are + using the most current Yocto Project documentation, use the drop-down menu at + the top of any of its page. + + Many developers look through the :yocto_docs:`All-in-one 'Mega' Manual ` + for a concept or term by doing a search through the whole page. This manual + is a concatenation of the core set of Yocto Project manual. Thus, a simple + string search using Ctrl-F in this manual produces all the "hits" for a + desired term or concept. Once you find the area in which you are + interested, you can display the actual manual, if desired. It is also + possible to use the search bar in the menu or in the left navigation pane. + +#. **Understand the basic concepts of how the build system works: the workflow:** + Understanding the Yocto Project workflow is important as it can help you both + pinpoint where trouble is occurring and how the build is breaking. The + workflow breaks down into the following steps: + + #. Fetch – get the source code + #. Extract – unpack the sources + #. Patch – apply patches for bug fixes and new capability + #. Configure – set up your environment specifications + #. Build – compile and link + #. Install – copy files to target directories + #. Package – bundle files for installation + + During "fetch", there may be an inability to find code. During "extract", + there is likely an invalid zip or something similar. In other words, the + function of a particular part of the workflow gives you an idea of what might + be going wrong. + + .. image:: figures/yp-how-it-works-new-diagram.png + +#. **Know that you can generate a dependency graph and learn how to do it:** + A dependency graph shows dependencies between recipes, tasks, and targets. + You can use the "-g" option with BitBake to generate this graph. When you + start a build and the build breaks, you could see packages you have no clue + about or have any idea why the build system has included them. The + dependency graph can clarify that confusion. You can learn more about + dependency graphs and how to generate them in the + :ref:`bitbake-user-manual/bitbake-user-manual-intro:generating dependency + graphs` section in the BitBake User Manual. + +#. **Here's how you decode "magic" folder names in tmp/work:** + The build system fetches, unpacks, preprocesses, and builds. If something + goes wrong, the build system reports to you directly the path to a folder + where the temporary (build/tmp) files and packages reside resulting from the + build. For a detailed example of this process, see the :yocto_wiki:`example + `. Unfortunately this + example is on an earlier release of Yocto Project. + + When you perform a build, you can use the "-u" BitBake command-line option to + specify a user interface viewer into the dependency graph (e.g. knotty, + ncurses, or taskexp) that helps you understand the build dependencies better. + +#. 
**You can build more than just images:** + You can build and run a specific task for a specific package (including + devshell) or even a single recipe. When developers first start using the + Yocto Project, the instructions found in the + :doc:`brief-yoctoprojectqs/brief-yoctoprojectqs` show how to create an image + and then run or flash that image. However, you can actually build just a + single recipe. Thus, if some dependency or recipe isn't working, you can just + say "bitbake foo" where "foo" is the name for a specific recipe. As you + become more advanced using the Yocto Project, and if builds are failing, it + can be useful to make sure the fetch itself works as desired. Here are some + valuable links: :ref:`dev-manual/dev-manual-common-tasks:Using a Development + Shell` for information on how to build and run a specific task using + devshell. Also, the :ref:`SDK manual shows how to build out a specific recipe + `. + +#. **An ambiguous definition: Package vs Recipe:** + A recipe contains instructions the build system uses to create + packages. Recipes and Packages are the difference between the front end and + the result of the build process. + + As mentioned, the build system takes the recipe and creates packages from the + recipe's instructions. The resulting packages are related to the one thing + the recipe is building but are different parts (packages) of the build + (i.e. the main package, the doc package, the debug symbols package, the + separate utilities package, and so forth). The build system splits out the + packages so that you don't need to install the packages you don't want or + need, which is advantageous because you are building for small devices when + developing for embedded and IoT. + +#. **You will want to learn about and know what's packaged in rootfs.** + +#. **Create your own image recipe:** + There are a number of ways to create your own image recipe. We suggest you + create your own image recipe as opposed to appending an existing recipe. It + is trivial and easy to write an image recipe. Again, do not try appending to + an existing image recipe. Create your own and do it right from the start. + +#. **Finally, here is a list of the basic skills you will need as a systems + developer. You must be able to:** + + * deal with corporate proxies + * add a package to an image + * understand the difference between a recipe and package + * build a package by itself and why that's useful + * find out what packages are created by a recipe + * find out what files are in a package + * find out what files are in an image + * add an ssh server to an image (enable transferring of files to target) + * know the anatomy of a recipe + * know how to create and use layers + * find recipes (with the `OpenEmbedded Layer index `_) + * understand difference between machine and distro settings + * find and use the right BSP (machine) for your hardware + * find examples of distro features and know where to set them + * understanding the task pipeline and executing individual tasks + * understand devtool and how it simplifies your workflow + * improve build speeds with shared downloads and shared state cache + * generate and understand a dependency graph + * generate and understand bitbake environment + * build an Extensible SDK for applications development + +#. 
**Depending on what you primary interests are with the Yocto Project, you + could consider any of the following reading:** + + * **Look Through the Yocto Project Development Tasks Manual**: This manual + contains procedural information grouped to help you get set up, work with + layers, customize images, write new recipes, work with libraries, and use + QEMU. The information is task-based and spans the breadth of the Yocto + Project. See the :doc:`../dev-manual/dev-manual`. + + * **Look Through the Yocto Project Application Development and the Extensible + Software Development Kit (eSDK) manual**: This manual describes how to use + both the standard SDK and the extensible SDK, which are used primarily for + application development. The :doc:`../sdk-manual/sdk-extensible` also provides + example workflows that use devtool. See the section + :ref:`sdk-manual/sdk-extensible:using \`\`devtool\`\` in your sdk workflow` + for more information. + + * **Learn About Kernel Development**: If you want to see how to work with the + kernel and understand Yocto Linux kernels, see the :doc:`../kernel-dev/kernel-dev`. + This manual provides information on how to patch the kernel, modify kernel + recipes, and configure the kernel. + + * **Learn About Board Support Packages (BSPs)**: If you want to learn about + BSPs, see the :doc:`../bsp-guide/bsp-guide`. This manual also provides an + example BSP creation workflow. See the :doc:`../bsp-guide/bsp` section. + + * **Learn About Toaster**: Toaster is a web interface to the Yocto Project's + OpenEmbedded build system. If you are interested in using this type of + interface to create images, see the :doc:`../toaster-manual/toaster-manual`. + + * **Have Available the Yocto Project Reference Manual**: Unlike the rest of + the Yocto Project manual set, this manual is comprised of material suited + for reference rather than procedures. You can get build details, a closer + look at how the pieces of the Yocto Project development environment work + together, information on various technical details, guidance on migrating + to a newer Yocto Project release, reference material on the directory + structure, classes, and tasks. The :doc:`../ref-manual/ref-manual` also + contains a fairly comprehensive glossary of variables used within the Yocto + Project. + +.. include:: /boilerplate.rst diff --git a/poky/meta-poky/conf/distro/poky-tiny.conf b/poky/meta-poky/conf/distro/poky-tiny.conf index c6d4b88f8..57826037f 100644 --- a/poky/meta-poky/conf/distro/poky-tiny.conf +++ b/poky/meta-poky/conf/distro/poky-tiny.conf @@ -38,7 +38,7 @@ TCLIBC = "musl" # Distro config is evaluated after the machine config, so we have to explicitly # set the kernel provider to override a machine config. 
PREFERRED_PROVIDER_virtual/kernel = "linux-yocto-tiny" -PREFERRED_VERSION_linux-yocto-tiny ?= "5.0%" +PREFERRED_VERSION_linux-yocto-tiny ?= "5.8%" # We can use packagegroup-core-boot, but in the future we may need a new packagegroup-core-tiny #POKY_DEFAULT_EXTRA_RDEPENDS += "packagegroup-core-boot" diff --git a/poky/meta-poky/conf/distro/poky.conf b/poky/meta-poky/conf/distro/poky.conf index f2ee0d187..ed37f4a0b 100644 --- a/poky/meta-poky/conf/distro/poky.conf +++ b/poky/meta-poky/conf/distro/poky.conf @@ -21,7 +21,8 @@ POKY_DEFAULT_EXTRA_RRECOMMENDS = "kernel-module-af-packet" DISTRO_FEATURES ?= "${DISTRO_FEATURES_DEFAULT} ${POKY_DEFAULT_DISTRO_FEATURES}" -PREFERRED_VERSION_linux-yocto ?= "5.4%" +PREFERRED_VERSION_linux-yocto ?= "5.8%" +PREFERRED_VERSION_linux-yocto-rt ?= "5.4%" SDK_NAME = "${DISTRO}-${TCLIBC}-${SDKMACHINE}-${IMAGE_BASENAME}-${TUNE_PKGARCH}-${MACHINE}" SDKPATH = "/opt/${DISTRO}/${SDK_VERSION}" @@ -29,14 +30,6 @@ SDKPATH = "/opt/${DISTRO}/${SDK_VERSION}" DISTRO_EXTRA_RDEPENDS += " ${POKY_DEFAULT_EXTRA_RDEPENDS}" DISTRO_EXTRA_RRECOMMENDS += " ${POKY_DEFAULT_EXTRA_RRECOMMENDS}" -POKYQEMUDEPS = "${@bb.utils.contains("INCOMPATIBLE_LICENSE", "GPL-3.0", "", "packagegroup-core-device-devel",d)}" -DISTRO_EXTRA_RDEPENDS_append_qemuarm = " ${POKYQEMUDEPS}" -DISTRO_EXTRA_RDEPENDS_append_qemuarm64 = " ${POKYQEMUDEPS}" -DISTRO_EXTRA_RDEPENDS_append_qemumips = " ${POKYQEMUDEPS}" -DISTRO_EXTRA_RDEPENDS_append_qemuppc = " ${POKYQEMUDEPS}" -DISTRO_EXTRA_RDEPENDS_append_qemux86 = " ${POKYQEMUDEPS}" -DISTRO_EXTRA_RDEPENDS_append_qemux86-64 = " ${POKYQEMUDEPS}" - TCLIBCAPPEND = "" PREMIRRORS ??= "\ diff --git a/poky/meta-poky/conf/local.conf.sample b/poky/meta-poky/conf/local.conf.sample index b555f1d21..032f2623e 100644 --- a/poky/meta-poky/conf/local.conf.sample +++ b/poky/meta-poky/conf/local.conf.sample @@ -261,6 +261,16 @@ PACKAGECONFIG_append_pn-qemu-system-native = " sdl" #BB_HASHSERVE = "auto" #BB_SIGNATURE_HANDLER = "OEEquivHash" +# +# Memory Resident Bitbake +# +# Bitbake's server component can stay in memory after the UI for the current command +# has completed. This means subsequent commands can run faster since there is no need +# for bitbake to reload cache files and so on. Number is in seconds, after which the +# server will shut down. +# +#BB_SERVER_TIMEOUT = "60" + # CONF_VERSION is increased each time build/conf/ changes incompatibly and is used to # track the version of this file when it was generated. This can safely be ignored if # this doesn't mean anything to you. diff --git a/poky/meta-poky/conf/local.conf.sample.extended b/poky/meta-poky/conf/local.conf.sample.extended index dc92a16f6..44a01690e 100644 --- a/poky/meta-poky/conf/local.conf.sample.extended +++ b/poky/meta-poky/conf/local.conf.sample.extended @@ -124,9 +124,6 @@ DISTRO_FEATURES_remove = "x11" #This enable mklibs library size optimization will be for all the images. #MKLIBS_OPTIMIZED_IMAGES ?= "all" -# Uncomment this if your host distribution provides the help2man tool. -#ASSUME_PROVIDED += "help2man-native" - # This value is currently used by pseudo to determine if the recipe should # build both the 32-bit and 64-bit wrapper libraries on a 64-bit build system. 
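As a usage sketch of the memory-resident server option documented in the local.conf.sample hunk above: enabling it in a build's conf/local.conf amounts to uncommenting the setting; the 60-second value is only the suggested default, and any number of seconds works.

    # Keep the BitBake server process alive for 60 seconds after each command,
    # so follow-up invocations skip reloading the cache files.
    BB_SERVER_TIMEOUT = "60"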
# diff --git a/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py b/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py index c0abfd1b1..892c19c84 100644 --- a/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py +++ b/poky/meta-selftest/lib/oeqa/runtime/cases/virgl.py @@ -9,7 +9,7 @@ class VirglTest(OERuntimeTestCase): def test_kernel_driver(self): status, output = self.target.run('dmesg|grep virgl') self.assertEqual(status, 0, "Checking for virgl driver in dmesg returned non-zero: %d\n%s" % (status, output)) - self.assertIn("virgl 3d acceleration enabled", output, "virgl acceleration seems to be disabled:\n%s" %(output)) + self.assertIn("features: +virgl", output, "virgl acceleration seems to be disabled:\n%s" %(output)) @OETestDepends(['virgl.VirglTest.test_kernel_driver']) def test_kmscube(self): diff --git a/poky/meta-selftest/recipes-test/recipetool/files/selftest-replaceme-src-globfile b/poky/meta-selftest/recipes-test/recipetool/files/selftest-replaceme-src-globfile deleted file mode 100644 index 1e20a2b03..000000000 --- a/poky/meta-selftest/recipes-test/recipetool/files/selftest-replaceme-src-globfile +++ /dev/null @@ -1 +0,0 @@ -A file matched by a glob in SRC_URI diff --git a/poky/meta-selftest/recipes-test/recipetool/selftest-recipetool-appendfile.bb b/poky/meta-selftest/recipes-test/recipetool/selftest-recipetool-appendfile.bb index 7375c4793..b5f976708 100644 --- a/poky/meta-selftest/recipes-test/recipetool/selftest-recipetool-appendfile.bb +++ b/poky/meta-selftest/recipes-test/recipetool/selftest-recipetool-appendfile.bb @@ -10,7 +10,6 @@ SRC_URI = "file://installscript.sh \ file://file1 \ file://add-file.patch \ file://subdir \ - file://selftest-replaceme-src-glob* \ file://selftest-replaceme-inst-globfile \ file://selftest-replaceme-inst-todir-globfile \ file://selftest-replaceme-inst-func" @@ -27,7 +26,6 @@ do_install() { install -m 0644 ${WORKDIR}/selftest-replaceme-todir ${D}${datadir} install -m 0644 ${WORKDIR}/file1 ${D}${datadir}/selftest-replaceme-renamed install -m 0644 ${WORKDIR}/subdir/fileinsubdir ${D}${datadir}/selftest-replaceme-subdir - install -m 0644 ${WORKDIR}/selftest-replaceme-src-globfile ${D}${datadir}/selftest-replaceme-src-globfile cp ${WORKDIR}/selftest-replaceme-inst-glob* ${D}${datadir}/selftest-replaceme-inst-globfile cp ${WORKDIR}/selftest-replaceme-inst-todir-glob* ${D}${datadir} install -d ${D}${sysconfdir} diff --git a/poky/meta-yocto-bsp/conf/machine/beaglebone-yocto.conf b/poky/meta-yocto-bsp/conf/machine/beaglebone-yocto.conf index 5b481434b..b7defb0d0 100644 --- a/poky/meta-yocto-bsp/conf/machine/beaglebone-yocto.conf +++ b/poky/meta-yocto-bsp/conf/machine/beaglebone-yocto.conf @@ -24,7 +24,7 @@ SERIAL_CONSOLES ?= "115200;ttyS0 115200;ttyO0 115200;ttyAMA0" SERIAL_CONSOLES_CHECK = "${SERIAL_CONSOLES}" PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" -PREFERRED_VERSION_linux-yocto ?= "5.4%" +PREFERRED_VERSION_linux-yocto ?= "5.8%" KERNEL_IMAGETYPE = "zImage" KERNEL_DEVICETREE = "am335x-bone.dtb am335x-boneblack.dtb am335x-bonegreen.dtb" diff --git a/poky/meta-yocto-bsp/conf/machine/edgerouter.conf b/poky/meta-yocto-bsp/conf/machine/edgerouter.conf index d6fd934fa..75bc5e5f2 100644 --- a/poky/meta-yocto-bsp/conf/machine/edgerouter.conf +++ b/poky/meta-yocto-bsp/conf/machine/edgerouter.conf @@ -11,7 +11,7 @@ KERNEL_ALT_IMAGETYPE = "vmlinux.bin" KERNEL_IMAGE_STRIP_EXTRA_SECTIONS = ".comment" PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto" -PREFERRED_VERSION_linux-yocto ?= "5.4%" +PREFERRED_VERSION_linux-yocto ?= "5.8%" SERIAL_CONSOLES = 
"115200;ttyS0" USE_VT ?= "0" diff --git a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend index 2c73eb2f6..9c37f91bc 100644 --- a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend +++ b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.4.bbappend @@ -7,17 +7,17 @@ KMACHINE_genericx86 ?= "common-pc" KMACHINE_genericx86-64 ?= "common-pc-64" KMACHINE_beaglebone-yocto ?= "beaglebone" -SRCREV_machine_genericx86 ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" -SRCREV_machine_genericx86-64 ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" -SRCREV_machine_edgerouter ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" -SRCREV_machine_beaglebone-yocto ?= "9fc2fb2e73466a520ee9a3c48b3ca2f5b21415dc" +SRCREV_machine_genericx86 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_genericx86-64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_edgerouter ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" +SRCREV_machine_beaglebone-yocto ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" COMPATIBLE_MACHINE_genericx86 = "genericx86" COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" COMPATIBLE_MACHINE_edgerouter = "edgerouter" COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto" -LINUX_VERSION_genericx86 = "5.4.54" -LINUX_VERSION_genericx86-64 = "5.4.54" -LINUX_VERSION_edgerouter = "5.4.54" -LINUX_VERSION_beaglebone-yocto = "5.4.54" +LINUX_VERSION_genericx86 = "5.4.58" +LINUX_VERSION_genericx86-64 = "5.4.58" +LINUX_VERSION_edgerouter = "5.4.58" +LINUX_VERSION_beaglebone-yocto = "5.4.58" diff --git a/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.8.bbappend b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.8.bbappend new file mode 100644 index 000000000..5d844423a --- /dev/null +++ b/poky/meta-yocto-bsp/recipes-kernel/linux/linux-yocto_5.8.bbappend @@ -0,0 +1,23 @@ +KBRANCH_genericx86 = "v5.8/standard/base" +KBRANCH_genericx86-64 = "v5.8/standard/base" +KBRANCH_edgerouter = "v5.8/standard/edgerouter" +KBRANCH_beaglebone-yocto = "v5.8/standard/beaglebone" + +KMACHINE_genericx86 ?= "common-pc" +KMACHINE_genericx86-64 ?= "common-pc-64" +KMACHINE_beaglebone-yocto ?= "beaglebone" + +SRCREV_machine_genericx86 ?= "912adf166eb0688e011154048f5fa0e5863249c3" +SRCREV_machine_genericx86-64 ?= "912adf166eb0688e011154048f5fa0e5863249c3" +SRCREV_machine_edgerouter ?= "912adf166eb0688e011154048f5fa0e5863249c3" +SRCREV_machine_beaglebone-yocto ?= "912adf166eb0688e011154048f5fa0e5863249c3" + +COMPATIBLE_MACHINE_genericx86 = "genericx86" +COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64" +COMPATIBLE_MACHINE_edgerouter = "edgerouter" +COMPATIBLE_MACHINE_beaglebone-yocto = "beaglebone-yocto" + +LINUX_VERSION_genericx86 = "5.8.2" +LINUX_VERSION_genericx86-64 = "5.8.2" +LINUX_VERSION_edgerouter = "5.8.2" +LINUX_VERSION_beaglebone-yocto = "5.8.2" diff --git a/poky/meta/classes/buildhistory.bbclass b/poky/meta/classes/buildhistory.bbclass index 805e976ac..0f26c3c07 100644 --- a/poky/meta/classes/buildhistory.bbclass +++ b/poky/meta/classes/buildhistory.bbclass @@ -7,6 +7,8 @@ # Copyright (C) 2007-2011 Koen Kooi # +inherit image-artifact-names + BUILDHISTORY_FEATURES ?= "image package sdk" BUILDHISTORY_DIR ?= "${TOPDIR}/buildhistory" BUILDHISTORY_DIR_IMAGE = "${BUILDHISTORY_DIR}/images/${MACHINE_ARCH}/${TCLIBC}/${IMAGE_BASENAME}" @@ -258,20 +260,15 @@ python buildhistory_emit_pkghistory() { rcpinfo.config = sortlist(oe.utils.squashspaces(d.getVar('PACKAGECONFIG') or "")) write_recipehistory(rcpinfo, d) - 
pkgdest = d.getVar('PKGDEST') + bb.build.exec_func("read_subpackage_metadata", d) + for pkg in packagelist: - pkgdata = {} - with open(os.path.join(pkgdata_dir, 'runtime', pkg)) as f: - for line in f.readlines(): - item = line.rstrip('\n').split(': ', 1) - key = item[0] - if key.endswith('_' + pkg): - key = key[:-len(pkg)-1] - pkgdata[key] = item[1].encode('latin-1').decode('unicode_escape') - - pkge = pkgdata.get('PKGE', '0') - pkgv = pkgdata['PKGV'] - pkgr = pkgdata['PKGR'] + localdata = d.createCopy() + localdata.setVar('OVERRIDES', d.getVar("OVERRIDES", False) + ":" + pkg) + + pkge = localdata.getVar("PKGE") or '0' + pkgv = localdata.getVar("PKGV") + pkgr = localdata.getVar("PKGR") # # Find out what the last version was # Make sure the version did not decrease @@ -288,31 +285,31 @@ python buildhistory_emit_pkghistory() { pkginfo = PackageInfo(pkg) # Apparently the version can be different on a per-package basis (see Python) - pkginfo.pe = pkgdata.get('PE', '0') - pkginfo.pv = pkgdata['PV'] - pkginfo.pr = pkgdata['PR'] - pkginfo.pkg = pkgdata['PKG'] + pkginfo.pe = localdata.getVar("PE") or '0' + pkginfo.pv = localdata.getVar("PV") + pkginfo.pr = localdata.getVar("PR") + pkginfo.pkg = localdata.getVar("PKG") pkginfo.pkge = pkge pkginfo.pkgv = pkgv pkginfo.pkgr = pkgr - pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(pkgdata.get('RPROVIDES', ""))) - pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RDEPENDS', ""))) - pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(pkgdata.get('RRECOMMENDS', ""))) - pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(pkgdata.get('RSUGGESTS', ""))) - pkginfo.rreplaces = sortpkglist(oe.utils.squashspaces(pkgdata.get('RREPLACES', ""))) - pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(pkgdata.get('RCONFLICTS', ""))) - pkginfo.files = oe.utils.squashspaces(pkgdata.get('FILES', "")) + pkginfo.rprovides = sortpkglist(oe.utils.squashspaces(localdata.getVar("RPROVIDES") or "")) + pkginfo.rdepends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RDEPENDS") or "")) + pkginfo.rrecommends = sortpkglist(oe.utils.squashspaces(localdata.getVar("RRECOMMENDS") or "")) + pkginfo.rsuggests = sortpkglist(oe.utils.squashspaces(localdata.getVar("RSUGGESTS") or "")) + pkginfo.replaces = sortpkglist(oe.utils.squashspaces(localdata.getVar("RREPLACES") or "")) + pkginfo.rconflicts = sortpkglist(oe.utils.squashspaces(localdata.getVar("RCONFLICTS") or "")) + pkginfo.files = oe.utils.squashspaces(localdata.getVar("FILES") or "") for filevar in pkginfo.filevars: - pkginfo.filevars[filevar] = pkgdata.get(filevar, "") + pkginfo.filevars[filevar] = localdata.getVar(filevar) or "" # Gather information about packaged files - val = pkgdata.get('FILES_INFO', '') + val = localdata.getVar('FILES_INFO') or '' dictval = json.loads(val) filelist = list(dictval.keys()) filelist.sort() pkginfo.filelist = " ".join([shlex.quote(x) for x in filelist]) - pkginfo.size = int(pkgdata['PKGSIZE']) + pkginfo.size = int(localdata.getVar('PKGSIZE') or '0') write_pkghistory(pkginfo, d) diff --git a/poky/meta/classes/cve-check.bbclass b/poky/meta/classes/cve-check.bbclass index 0889e7544..02fef7c20 100644 --- a/poky/meta/classes/cve-check.bbclass +++ b/poky/meta/classes/cve-check.bbclass @@ -27,6 +27,7 @@ CVE_VERSION ??= "${PV}" CVE_CHECK_DB_DIR ?= "${DL_DIR}/CVE_CHECK" CVE_CHECK_DB_FILE ?= "${CVE_CHECK_DB_DIR}/nvdcve_1.1.db" +CVE_CHECK_DB_FILE_LOCK ?= "${CVE_CHECK_DB_FILE}.lock" CVE_CHECK_LOG ?= "${T}/cve.log" CVE_CHECK_TMP_FILE ?= "${TMPDIR}/cve_check" @@ -62,14 
+63,15 @@ python cve_save_summary_handler () { timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S') cve_summary_file = os.path.join(cvelogpath, "%s-%s.txt" % (cve_summary_name, timestamp)) - shutil.copyfile(cve_tmp_file, cve_summary_file) + if os.path.exists(cve_tmp_file): + shutil.copyfile(cve_tmp_file, cve_summary_file) - if cve_summary_file and os.path.exists(cve_summary_file): - cvefile_link = os.path.join(cvelogpath, cve_summary_name) + if cve_summary_file and os.path.exists(cve_summary_file): + cvefile_link = os.path.join(cvelogpath, cve_summary_name) - if os.path.exists(os.path.realpath(cvefile_link)): - os.remove(cvefile_link) - os.symlink(os.path.basename(cve_summary_file), cvefile_link) + if os.path.exists(os.path.realpath(cvefile_link)): + os.remove(cvefile_link) + os.symlink(os.path.basename(cve_summary_file), cvefile_link) } addhandler cve_save_summary_handler @@ -95,7 +97,7 @@ python do_cve_check () { } addtask cve_check before do_build after do_fetch -do_cve_check[depends] = "cve-update-db-native:do_populate_cve_db" +do_cve_check[depends] = "cve-update-db-native:do_fetch" do_cve_check[nostamp] = "1" python cve_check_cleanup () { diff --git a/poky/meta/classes/image-artifact-names.bbclass b/poky/meta/classes/image-artifact-names.bbclass new file mode 100644 index 000000000..5ab8f1b7a --- /dev/null +++ b/poky/meta/classes/image-artifact-names.bbclass @@ -0,0 +1,15 @@ +################################################################## +# Specific image creation and rootfs population info. +################################################################## + +IMAGE_BASENAME = "${PN}" +IMAGE_VERSION_SUFFIX = "-${DATETIME}" +IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME" +IMAGE_NAME = "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}" +IMAGE_LINK_NAME = "${IMAGE_BASENAME}-${MACHINE}" + +# IMAGE_NAME is the base name for everything produced when building images. +# The actual image that contains the rootfs has an additional suffix (.rootfs +# by default) followed by additional suffices which describe the format (.ext4, +# .ext4.xz, etc.). 
+IMAGE_NAME_SUFFIX ??= ".rootfs" diff --git a/poky/meta/classes/image-live.bbclass b/poky/meta/classes/image-live.bbclass index 54058b350..9ea5ddc31 100644 --- a/poky/meta/classes/image-live.bbclass +++ b/poky/meta/classes/image-live.bbclass @@ -22,7 +22,7 @@ # ${HDDIMG_ID} - FAT image volume-id # ${ROOTFS} - indicates a filesystem image to include as the root filesystem (optional) -inherit live-vm-common +inherit live-vm-common image-artifact-names do_bootimg[depends] += "dosfstools-native:do_populate_sysroot \ mtools-native:do_populate_sysroot \ diff --git a/poky/meta/classes/image.bbclass b/poky/meta/classes/image.bbclass index 3b5600e55..730c843c1 100644 --- a/poky/meta/classes/image.bbclass +++ b/poky/meta/classes/image.bbclass @@ -651,7 +651,7 @@ reproducible_final_image_task () { if [ "${BUILD_REPRODUCIBLE_BINARIES}" = "1" ]; then if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then REPRODUCIBLE_TIMESTAMP_ROOTFS=`git -C "${COREBASE}" log -1 --pretty=%ct 2>/dev/null` || true - if [ "${REPRODUCIBLE_TIMESTAMP_ROOTFS}" = "" ]; then + if [ "$REPRODUCIBLE_TIMESTAMP_ROOTFS" = "" ]; then REPRODUCIBLE_TIMESTAMP_ROOTFS=`stat -c%Y ${@bb.utils.which(d.getVar("BBPATH"), "conf/bitbake.conf")}` fi fi diff --git a/poky/meta/classes/image_types.bbclass b/poky/meta/classes/image_types.bbclass index ab05cc90f..66884af8e 100644 --- a/poky/meta/classes/image_types.bbclass +++ b/poky/meta/classes/image_types.bbclass @@ -1,9 +1,3 @@ -# IMAGE_NAME is the base name for everything produced when building images. -# The actual image that contains the rootfs has an additional suffix (.rootfs -# by default) followed by additional suffices which describe the format (.ext4, -# .ext4.xz, etc.). -IMAGE_NAME_SUFFIX ??= ".rootfs" - # The default aligment of the size of the rootfs is set to 1KiB. In case # you're using the SD card emulation of a QEMU system simulator you may # set this value to 2048 (2MiB alignment). 
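To make the naming scheme concrete that the new image-artifact-names.bbclass centralises: for a hypothetical build of an image recipe named core-image-minimal for MACHINE "qemux86-64" with the defaults above, the variables would expand roughly as follows (the timestamp is build-specific and shown only for illustration).

    IMAGE_BASENAME  = "core-image-minimal"
    IMAGE_NAME      = "core-image-minimal-qemux86-64-20200605180041"
    IMAGE_LINK_NAME = "core-image-minimal-qemux86-64"
    # deployed rootfs artifact: core-image-minimal-qemux86-64-20200605180041.rootfs.ext4
    # convenience symlink:      core-image-minimal-qemux86-64.ext4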
@@ -231,7 +225,8 @@ IMAGE_CMD_f2fs () { EXTRA_IMAGECMD = "" -inherit siteinfo kernel-arch +inherit siteinfo kernel-arch image-artifact-names + JFFS2_ENDIANNESS ?= "${@oe.utils.conditional('SITEINFO_ENDIANNESS', 'le', '-l', '-b', d)}" JFFS2_ERASEBLOCK ?= "0x40000" EXTRA_IMAGECMD_jffs2 ?= "--pad ${JFFS2_ENDIANNESS} --eraseblock=${JFFS2_ERASEBLOCK} --no-cleanmarkers" diff --git a/poky/meta/classes/image_types_wic.bbclass b/poky/meta/classes/image_types_wic.bbclass index 7b1db50a2..4f888ef6e 100644 --- a/poky/meta/classes/image_types_wic.bbclass +++ b/poky/meta/classes/image_types_wic.bbclass @@ -1,10 +1,11 @@ # The WICVARS variable is used to define list of bitbake variables used in wic code # variables from this list is written to .env file WICVARS ?= "\ - BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_BOOT_FILES \ + BBLAYERS IMGDEPLOYDIR DEPLOY_DIR_IMAGE FAKEROOTCMD IMAGE_BASENAME IMAGE_EFI_BOOT_FILES IMAGE_BOOT_FILES \ IMAGE_LINK_NAME IMAGE_ROOTFS INITRAMFS_FSTYPES INITRD INITRD_LIVE ISODIR RECIPE_SYSROOT_NATIVE \ ROOTFS_SIZE STAGING_DATADIR STAGING_DIR STAGING_LIBDIR TARGET_SYS \ - KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND" + KERNEL_IMAGETYPE MACHINE INITRAMFS_IMAGE INITRAMFS_IMAGE_BUNDLE INITRAMFS_LINK_NAME APPEND \ + ASSUME_PROVIDED" inherit ${@bb.utils.contains('INITRAMFS_IMAGE_BUNDLE', '1', 'kernel-artifact-names', '', d)} diff --git a/poky/meta/classes/insane.bbclass b/poky/meta/classes/insane.bbclass index ee19ef673..c6dff9659 100644 --- a/poky/meta/classes/insane.bbclass +++ b/poky/meta/classes/insane.bbclass @@ -27,6 +27,7 @@ WARN_QA ?= " libdir xorg-driver-abi \ infodir build-deps src-uri-bad symlink-to-sysroot multilib \ invalid-packageconfig host-user-contaminated uppercase-pn patch-fuzz \ mime mime-xdg unlisted-pkg-lics unhandled-features-check \ + missing-update-alternatives \ " ERROR_QA ?= "dev-so debug-deps dev-deps debug-files arch pkgconfig la \ perms dep-cmp pkgvarcheck perm-config perm-line perm-link \ @@ -438,12 +439,13 @@ def package_qa_hash_style(path, name, d, elf, messages): for line in phdrs.split("\n"): if "SYMTAB" in line: has_syms = True - if "GNU_HASH" or "DT_MIPS_XHASH" in line: + if "GNU_HASH" in line or "DT_MIPS_XHASH" in line: sane = True if ("[mips32]" in line or "[mips64]" in line) and d.getVar('TCLIBC') == "musl": sane = True if has_syms and not sane: - package_qa_add_message(messages, "ldflags", "No GNU_HASH in the ELF binary %s, didn't pass LDFLAGS?" 
% path) + path = package_qa_clean_path(path, d, name) + package_qa_add_message(messages, "ldflags", "File %s in package %s doesn't have GNU_HASH (didn't pass LDFLAGS?)" % (path, name)) QAPATHTEST[buildpaths] = "package_qa_check_buildpaths" @@ -708,12 +710,13 @@ def package_qa_walk(warnfuncs, errorfuncs, package, d): warnings = {} errors = {} for path in pkgfiles[package]: - elf = oe.qa.ELFFile(path) - try: - elf.open() - except (IOError, oe.qa.NotELFFileError): - # IOError can happen if the packaging control files disappear, - elf = None + elf = None + if os.path.isfile(path): + elf = oe.qa.ELFFile(path) + try: + elf.open() + except oe.qa.NotELFFileError: + elf = None for func in warnfuncs: func(path, package, d, elf, warnings) for func in errorfuncs: @@ -987,6 +990,14 @@ def package_qa_check_unhandled_features_check(pn, d, messages): if var_set: package_qa_handle_error("unhandled-features-check", "%s: recipe doesn't inherit features_check" % pn, d) +QARECIPETEST[missing-update-alternatives] = "package_qa_check_missing_update_alternatives" +def package_qa_check_missing_update_alternatives(pn, d, messages): + # Look at all packages and find out if any of those sets ALTERNATIVE variable + # without inheriting update-alternatives class + for pkg in (d.getVar('PACKAGES') or '').split(): + if d.getVar('ALTERNATIVE_%s' % pkg) and not bb.data.inherits_class('update-alternatives', d): + package_qa_handle_error("missing-update-alternatives", "%s: recipe defines ALTERNATIVE_%s but doesn't inherit update-alternatives. This might fail during do_rootfs later!" % (pn, pkg), d) + # The PACKAGE FUNC to scan each package python do_package_qa () { import subprocess diff --git a/poky/meta/classes/kernel-artifact-names.bbclass b/poky/meta/classes/kernel-artifact-names.bbclass index bbeecba7b..a65cdddb3 100644 --- a/poky/meta/classes/kernel-artifact-names.bbclass +++ b/poky/meta/classes/kernel-artifact-names.bbclass @@ -1,3 +1,11 @@ +################################################################## +# Specific kernel creation info +# for recipes/bbclasses which need to reuse some of the kernel +# artifacts, but aren't kernel recipes themselves +################################################################## + +inherit image-artifact-names + KERNEL_ARTIFACT_NAME ?= "${PKGE}-${PKGV}-${PKGR}-${MACHINE}${IMAGE_VERSION_SUFFIX}" KERNEL_ARTIFACT_LINK_NAME ?= "${MACHINE}" diff --git a/poky/meta/classes/kernel-yocto.bbclass b/poky/meta/classes/kernel-yocto.bbclass index 96ea61225..a35c5923d 100644 --- a/poky/meta/classes/kernel-yocto.bbclass +++ b/poky/meta/classes/kernel-yocto.bbclass @@ -18,6 +18,7 @@ SRCREV_FORMAT ?= "meta_machine" KCONF_AUDIT_LEVEL ?= "1" KCONF_BSP_AUDIT_LEVEL ?= "0" KMETA_AUDIT ?= "yes" +KMETA_AUDIT_WERROR ?= "" # returns local (absolute) path names for all valid patches in the # src_uri @@ -85,6 +86,21 @@ def get_machine_branch(d, default): return default +# returns a list of all directories that are on FILESEXTRAPATHS (and +# hence available to the build) that contain .scc or .cfg files +def get_dirs_with_fragments(d): + extrapaths = [] + extrafiles = [] + extrapathsvalue = (d.getVar("FILESEXTRAPATHS") or "") + # Remove default flag which was used for checking + extrapathsvalue = extrapathsvalue.replace("__default:", "") + extrapaths = extrapathsvalue.split(":") + for path in extrapaths: + if path + ":True" not in extrafiles: + extrafiles.append(path + ":" + str(os.path.exists(path))) + + return " ".join(extrafiles) + do_kernel_metadata() { set +e @@ -225,7 +241,7 @@ do_kernel_metadata() { 
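Relatedly, the missing-update-alternatives QA test added to insane.bbclass above warns when a recipe sets an ALTERNATIVE_* variable without inheriting update-alternatives. A minimal recipe fragment that keeps the check happy might look like the following sketch; the "vi" alternative name, paths, and priority are purely illustrative, not taken from this patch.

    inherit update-alternatives

    # Register the packaged tool with update-alternatives so do_rootfs can
    # select between providers of the same command.
    ALTERNATIVE_${PN} = "vi"
    ALTERNATIVE_LINK_NAME[vi] = "${base_bindir}/vi"
    ALTERNATIVE_TARGET[vi] = "${base_bindir}/vi.${BPN}"
    ALTERNATIVE_PRIORITY = "100"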
for feature in ${KERNEL_FEATURES}; do feature_found=f for d in $includes; do - path_to_check=$(echo $d | sed 's/-I//g') + path_to_check=$(echo $d | sed 's/^-I//') if [ "$feature_found" = "f" ] && [ -e "$path_to_check/$feature" ]; then feature_found=t fi @@ -367,6 +383,7 @@ do_kernel_checkout[dirs] = "${S}" addtask kernel_checkout before do_kernel_metadata after do_symlink_kernsrc addtask kernel_metadata after do_validate_branches do_unpack before do_patch do_kernel_metadata[depends] = "kern-tools-native:do_populate_sysroot" +do_kernel_metadata[file-checksums] = " ${@get_dirs_with_fragments(d)}" do_validate_branches[depends] = "kern-tools-native:do_populate_sysroot" do_kernel_configme[depends] += "virtual/${TARGET_PREFIX}binutils:do_populate_sysroot" @@ -507,6 +524,8 @@ python do_kernel_configcheck() { config_check_visibility = int(d.getVar("KCONF_AUDIT_LEVEL") or 0) bsp_check_visibility = int(d.getVar("KCONF_BSP_AUDIT_LEVEL") or 0) + kmeta_audit_werror = d.getVar("KMETA_AUDIT_WERROR") or "" + warnings_detected = False # if config check visibility is "1", that's the lowest level of audit. So # we add the --classify option to the run, since classification will @@ -533,6 +552,7 @@ python do_kernel_configcheck() { with open (outfile, "r") as myfile: results = myfile.read() bb.warn( "[kernel config]: specified values did not make it into the kernel's final configuration:\n\n%s" % results) + warnings_detected = True # category #2: invalid fragment elements extra_params = "" @@ -552,8 +572,9 @@ python do_kernel_configcheck() { if bsp_check_visibility and os.stat(outfile).st_size > 0: with open (outfile, "r") as myfile: - results = myfile.read() - bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results) + results = myfile.read() + bb.warn( "[kernel config]: This BSP contains fragments with warnings:\n\n%s" % results) + warnings_detected = True # category #3: redefined options (this is pretty verbose and is debug only) try: @@ -574,6 +595,10 @@ python do_kernel_configcheck() { with open (outfile, "r") as myfile: results = myfile.read() bb.warn( "[kernel config]: This BSP has configuration options defined in more than one config, with differing values:\n\n%s" % results) + warnings_detected = True + + if warnings_detected and kmeta_audit_werror: + bb.fatal( "configuration warnings detected, werror is set, promoting to fatal" ) } # Ensure that the branches (BSP and meta) are on the locations specified by diff --git a/poky/meta/classes/kernel.bbclass b/poky/meta/classes/kernel.bbclass index e2ceb6a33..48135b3d4 100644 --- a/poky/meta/classes/kernel.bbclass +++ b/poky/meta/classes/kernel.bbclass @@ -172,7 +172,7 @@ python do_symlink_kernsrc () { shutil.move(s, kernsrc) os.symlink(kernsrc, s) } -addtask symlink_kernsrc before do_configure after do_unpack +addtask symlink_kernsrc before do_patch after do_unpack inherit kernel-arch deploy @@ -407,7 +407,7 @@ kernel_do_install() { install -d ${D}/${KERNEL_IMAGEDEST} install -d ${D}/boot for imageType in ${KERNEL_IMAGETYPES} ; do - install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} ${D}/${KERNEL_IMAGEDEST}/${imageType}-${KERNEL_VERSION} + install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType ${D}/${KERNEL_IMAGEDEST}/$imageType-${KERNEL_VERSION} done install -m 0644 System.map ${D}/boot/System.map-${KERNEL_VERSION} install -m 0644 .config ${D}/boot/config-${KERNEL_VERSION} @@ -416,7 +416,6 @@ kernel_do_install() { install -d ${D}${sysconfdir}/modules-load.d install -d ${D}${sysconfdir}/modprobe.d } -do_install[prefuncs] += 
"package_get_auto_pr" # Must be ran no earlier than after do_kernel_checkout or else Makefile won't be in ${S}/Makefile do_kernel_version_sanity_check() { @@ -717,11 +716,10 @@ kernel_do_deploy() { fi for imageType in ${KERNEL_IMAGETYPES} ; do - base_name=${imageType}-${KERNEL_IMAGE_NAME} - install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType} $deployDir/${base_name}.bin - symlink_name=${imageType}-${KERNEL_IMAGE_LINK_NAME} - ln -sf ${base_name}.bin $deployDir/${symlink_name}.bin - ln -sf ${base_name}.bin $deployDir/${imageType} + baseName=$imageType-${KERNEL_IMAGE_NAME} + install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType $deployDir/$baseName.bin + ln -sf $baseName.bin $deployDir/$imageType-${KERNEL_IMAGE_LINK_NAME}.bin + ln -sf $baseName.bin $deployDir/$imageType done if [ ${MODULE_TARBALL_DEPLOY} = "1" ] && (grep -q -i -e '^CONFIG_MODULES=y$' .config); then @@ -742,14 +740,16 @@ kernel_do_deploy() { if [ "$imageType" = "fitImage" ] ; then continue fi - initramfs_base_name=${imageType}-${INITRAMFS_NAME} - initramfs_symlink_name=${imageType}-${INITRAMFS_LINK_NAME} - install -m 0644 ${KERNEL_OUTPUT_DIR}/${imageType}.initramfs $deployDir/${initramfs_base_name}.bin - ln -sf ${initramfs_base_name}.bin $deployDir/${initramfs_symlink_name}.bin + initramfsBaseName=$imageType-${INITRAMFS_NAME} + install -m 0644 ${KERNEL_OUTPUT_DIR}/$imageType.initramfs $deployDir/$initramfsBaseName.bin + ln -sf $initramfsBaseName.bin $deployDir/$imageType-${INITRAMFS_LINK_NAME}.bin done fi } -do_deploy[prefuncs] += "package_get_auto_pr" + +# We deploy to filenames that include PKGV and PKGR, read the saved data to +# ensure we get the right values for both +do_deploy[prefuncs] += "read_subpackage_metadata" addtask deploy after do_populate_sysroot do_packagedata diff --git a/poky/meta/classes/license_image.bbclass b/poky/meta/classes/license_image.bbclass index a8c72da3c..702e9f9c5 100644 --- a/poky/meta/classes/license_image.bbclass +++ b/poky/meta/classes/license_image.bbclass @@ -200,6 +200,17 @@ def license_deployed_manifest(d): image_license_manifest = os.path.join(lic_manifest_dir, 'image_license.manifest') write_license_files(d, image_license_manifest, man_dic, rootfs=False) + link_name = d.getVar('IMAGE_LINK_NAME') + if link_name: + lic_manifest_symlink_dir = os.path.join(d.getVar('LICENSE_DIRECTORY'), + link_name) + # remove old symlink + if os.path.islink(lic_manifest_symlink_dir): + os.unlink(lic_manifest_symlink_dir) + + # create the image dir symlink + os.symlink(lic_manifest_dir, lic_manifest_symlink_dir) + def get_deployed_dependencies(d): """ Get all the deployed dependencies of an image diff --git a/poky/meta/classes/linuxloader.bbclass b/poky/meta/classes/linuxloader.bbclass index ec0e0556d..176fd89fd 100644 --- a/poky/meta/classes/linuxloader.bbclass +++ b/poky/meta/classes/linuxloader.bbclass @@ -21,6 +21,8 @@ def get_musl_loader_arch(d): ldso_arch = "aarch64${ARMPKGSFX_ENDIAN_64}" elif targetarch.startswith("riscv64"): ldso_arch = "riscv64${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" + elif targetarch.startswith("riscv32"): + ldso_arch = "riscv32${@['', '-sf'][d.getVar('TARGET_FPU') == 'soft']}" return ldso_arch def get_musl_loader(d): diff --git a/poky/meta/classes/nopackages.bbclass b/poky/meta/classes/nopackages.bbclass index 559f5078b..7a4f632d7 100644 --- a/poky/meta/classes/nopackages.bbclass +++ b/poky/meta/classes/nopackages.bbclass @@ -2,6 +2,7 @@ deltask do_package deltask do_package_write_rpm deltask do_package_write_ipk deltask do_package_write_deb +deltask do_package_write_tar 
deltask do_package_qa deltask do_packagedata deltask do_package_setscene diff --git a/poky/meta/classes/package.bbclass b/poky/meta/classes/package.bbclass index 7a36262eb..e6236c0bb 100644 --- a/poky/meta/classes/package.bbclass +++ b/poky/meta/classes/package.bbclass @@ -7,7 +7,7 @@ # # There are the following default steps but PACKAGEFUNCS can be extended: # -# a) package_get_auto_pr - get PRAUTO from remote PR service +# a) package_convert_pr_autoinc - convert AUTOINC in PKGV to ${PRSERV_PV_AUTOINC} # # b) perform_packagecopy - Copy D into PKGD # @@ -664,12 +664,20 @@ def runtime_mapping_rename (varname, pkg, d): #bb.note("%s after: %s" % (varname, d.getVar(varname))) # -# Package functions suitable for inclusion in PACKAGEFUNCS +# Used by do_packagedata (and possibly other routines post do_package) # +package_get_auto_pr[vardepsexclude] = "BB_TASKDEPDATA" python package_get_auto_pr() { import oe.prservice - import re + + def get_do_package_hash(pn): + if d.getVar("BB_RUNTASK") != "do_package": + taskdepdata = d.getVar("BB_TASKDEPDATA", False) + for dep in taskdepdata: + if taskdepdata[dep][1] == "do_package" and taskdepdata[dep][0] == pn: + return taskdepdata[dep][6] + return None # Support per recipe PRSERV_HOST pn = d.getVar('PN') @@ -681,15 +689,22 @@ python package_get_auto_pr() { # PR Server not active, handle AUTOINC if not d.getVar('PRSERV_HOST'): - if 'AUTOINC' in pkgv: - d.setVar("PKGV", pkgv.replace("AUTOINC", "0")) + d.setVar("PRSERV_PV_AUTOINC", "0") return auto_pr = None pv = d.getVar("PV") version = d.getVar("PRAUTOINX") pkgarch = d.getVar("PACKAGE_ARCH") - checksum = d.getVar("BB_TASKHASH") + checksum = get_do_package_hash(pn) + + # If do_package isn't in the dependencies, we can't get the checksum... + if not checksum: + bb.warn('Task %s requested do_package unihash, but it was not available.' % d.getVar('BB_RUNTASK')) + #taskdepdata = d.getVar("BB_TASKDEPDATA", False) + #for dep in taskdepdata: + # bb.warn('%s:%s = %s' % (taskdepdata[dep][0], taskdepdata[dep][1], taskdepdata[dep][6])) + return if d.getVar('PRSERV_LOCKDOWN'): auto_pr = d.getVar('PRAUTO_' + version + '_' + pkgarch) or d.getVar('PRAUTO_' + version) or None @@ -707,7 +722,7 @@ python package_get_auto_pr() { srcpv = bb.fetch2.get_srcrev(d) base_ver = "AUTOINC-%s" % version[:version.find(srcpv)] value = conn.getPR(base_ver, pkgarch, srcpv) - d.setVar("PKGV", pkgv.replace("AUTOINC", str(value))) + d.setVar("PRSERV_PV_AUTOINC", str(value)) auto_pr = conn.getPR(version, pkgarch, checksum) except Exception as e: @@ -717,6 +732,22 @@ python package_get_auto_pr() { d.setVar('PRAUTO',str(auto_pr)) } +# +# Package functions suitable for inclusion in PACKAGEFUNCS +# + +python package_convert_pr_autoinc() { + pkgv = d.getVar("PKGV") + + # Adjust pkgv as necessary... 
+ if 'AUTOINC' in pkgv: + d.setVar("PKGV", pkgv.replace("AUTOINC", "${PRSERV_PV_AUTOINC}")) + + # Change PRSERV_PV_AUTOINC and EXTENDPRAUTO usage to special values + d.setVar('PRSERV_PV_AUTOINC', '@PRSERV_PV_AUTOINC@') + d.setVar('EXTENDPRAUTO', '@EXTENDPRAUTO@') +} + LOCALEBASEPN ??= "${PN}" python package_do_split_locales() { @@ -1638,7 +1669,7 @@ fi # Symlinks needed for rprovides lookup rprov = d.getVar('RPROVIDES_%s' % pkg) or d.getVar('RPROVIDES') if rprov: - for p in rprov.strip().split(): + for p in bb.utils.explode_deps(rprov): subdata_sym = pkgdatadir + "/runtime-rprovides/%s/%s" % (p, pkg) bb.utils.mkdirhier(os.path.dirname(subdata_sym)) oe.path.symlink("../../runtime/%s" % pkg, subdata_sym, True) @@ -2335,7 +2366,7 @@ python do_package () { package_qa_handle_error("var-undefined", msg, d) return - bb.build.exec_func("package_get_auto_pr", d) + bb.build.exec_func("package_convert_pr_autoinc", d) ########################################################################### # Optimisations @@ -2407,9 +2438,20 @@ addtask do_package_setscene # Copy from PKGDESTWORK to tempdirectory as tempdirectory can be cleaned at both # do_package_setscene and do_packagedata_setscene leading to races python do_packagedata () { + bb.build.exec_func("package_get_auto_pr", d) + src = d.expand("${PKGDESTWORK}") dest = d.expand("${WORKDIR}/pkgdata-pdata-input") oe.path.copyhardlinktree(src, dest) + + bb.build.exec_func("packagedata_translate_pr_autoinc", d) +} + +# Translate the EXTENDPRAUTO and AUTOINC to the final values +packagedata_translate_pr_autoinc() { + find ${WORKDIR}/pkgdata-pdata-input -type f | xargs --no-run-if-empty \ + sed -e 's,@PRSERV_PV_AUTOINC@,${PRSERV_PV_AUTOINC},g' \ + -e 's,@EXTENDPRAUTO@,${EXTENDPRAUTO},g' -i } addtask packagedata before do_build after do_package diff --git a/poky/meta/classes/package_rpm.bbclass b/poky/meta/classes/package_rpm.bbclass index 519c22be4..53b4700cd 100644 --- a/poky/meta/classes/package_rpm.bbclass +++ b/poky/meta/classes/package_rpm.bbclass @@ -557,7 +557,7 @@ python write_specfile () { print_deps(srcrrecommends, "Recommends", spec_preamble_top, d) print_deps(srcrsuggests, "Suggests", spec_preamble_top, d) - print_deps(srcrprovides + (" /bin/sh" if srcname.startswith("nativesdk-") else ""), "Provides", spec_preamble_top, d) + print_deps(srcrprovides, "Provides", spec_preamble_top, d) print_deps(srcrobsoletes, "Obsoletes", spec_preamble_top, d) print_deps(srcrconflicts, "Conflicts", spec_preamble_top, d) diff --git a/poky/meta/classes/package_tar.bbclass b/poky/meta/classes/package_tar.bbclass index ce3ab4c8e..d6c1b306f 100644 --- a/poky/meta/classes/package_tar.bbclass +++ b/poky/meta/classes/package_tar.bbclass @@ -57,10 +57,8 @@ python do_package_tar () { python () { if d.getVar('PACKAGES') != '': - deps = (d.getVarFlag('do_package_write_tar', 'depends') or "").split() - deps.append('tar-native:do_populate_sysroot') - deps.append('virtual/fakeroot-native:do_populate_sysroot') - d.setVarFlag('do_package_write_tar', 'depends', " ".join(deps)) + deps = ' tar-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot' + d.appendVarFlag('do_package_write_tar', 'depends', deps) d.setVarFlag('do_package_write_tar', 'fakeroot', "1") } diff --git a/poky/meta/classes/populate_sdk_ext.bbclass b/poky/meta/classes/populate_sdk_ext.bbclass index 44d99cfb9..d659b6940 100644 --- a/poky/meta/classes/populate_sdk_ext.bbclass +++ b/poky/meta/classes/populate_sdk_ext.bbclass @@ -310,8 +310,9 @@ python copy_buildsystem () { if os.path.exists(builddir + 
'/conf/auto.conf'): with open(builddir + '/conf/auto.conf', 'r') as f: oldlines += f.readlines() - with open(builddir + '/conf/local.conf', 'r') as f: - oldlines += f.readlines() + if os.path.exists(builddir + '/conf/local.conf'): + with open(builddir + '/conf/local.conf', 'r') as f: + oldlines += f.readlines() (updated, newlines) = bb.utils.edit_metadata(oldlines, varlist, handle_var) with open(baseoutpath + '/conf/local.conf', 'w') as f: diff --git a/poky/meta/classes/qemuboot.bbclass b/poky/meta/classes/qemuboot.bbclass index 4162c4e79..d8f62ef6e 100644 --- a/poky/meta/classes/qemuboot.bbclass +++ b/poky/meta/classes/qemuboot.bbclass @@ -86,6 +86,8 @@ QB_ROOTFS_EXTRA_OPT ?= "" # This should be kept align with ROOT_VM QB_DRIVE_TYPE ?= "/dev/sd" +inherit image-artifact-names + # Create qemuboot.conf addtask do_write_qemuboot_conf after do_rootfs before do_image diff --git a/poky/meta/classes/rootfs-postcommands.bbclass b/poky/meta/classes/rootfs-postcommands.bbclass index 984730ebe..1f27a3d07 100644 --- a/poky/meta/classes/rootfs-postcommands.bbclass +++ b/poky/meta/classes/rootfs-postcommands.bbclass @@ -39,6 +39,8 @@ ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd" ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;' +inherit image-artifact-names + # Sort the user and group entries in /etc by ID in order to make the content # deterministic. Package installs are not deterministic, causing the ordering # of entries to change between builds. In case that this isn't desired, diff --git a/poky/meta/classes/testimage.bbclass b/poky/meta/classes/testimage.bbclass index 00f0c2983..6c8bedcd6 100644 --- a/poky/meta/classes/testimage.bbclass +++ b/poky/meta/classes/testimage.bbclass @@ -3,6 +3,8 @@ # Released under the MIT license (see COPYING.MIT) inherit metadata_scm +inherit image-artifact-names + # testimage.bbclass enables testing of qemu images using python unittests. # Most of the tests are commands run on target image over ssh. # To use it add testimage to global inherit and call your target image with -c testimage diff --git a/poky/meta/conf/bitbake.conf b/poky/meta/conf/bitbake.conf index 353caacef..a318d1ca5 100644 --- a/poky/meta/conf/bitbake.conf +++ b/poky/meta/conf/bitbake.conf @@ -208,6 +208,7 @@ PF = "${PN}-${EXTENDPE}${PV}-${PR}" EXTENDPE = "${@['','${PE}_'][int(d.getVar('PE') or 0) > 0]}" P = "${PN}-${PV}" +PRSERV_PV_AUTOINC = "AUTOINC" PRAUTO = "" EXTENDPRAUTO = "${@['.${PRAUTO}', ''][not d.getVar('PRAUTO')]}" PRAUTOINX = "${PF}" @@ -356,8 +357,11 @@ FILESYSTEM_PERMS_TABLES ?= "${@'files/fs-perms.txt' if oe.types.boolean(d.getVar # General work and output directories for the build system. 
################################################################## +TCMODE ?= "default" +TCLIBC ?= "glibc" TMPDIR ?= "${TOPDIR}/tmp" -CACHE = "${TMPDIR}/cache${@['', '/' + str(d.getVar('MACHINE'))][bool(d.getVar('MACHINE'))]}${@['', '/' + str(d.getVar('SDKMACHINE'))][bool(d.getVar('SDKMACHINE'))]}" + +CACHE = "${TMPDIR}/cache/${TCMODE}-${TCLIBC}${@['', '/' + str(d.getVar('MACHINE'))][bool(d.getVar('MACHINE'))]}${@['', '/' + str(d.getVar('SDKMACHINE'))][bool(d.getVar('SDKMACHINE'))]}" # The persistent cache should be shared by all builds PERSISTENT_DIR = "${TOPDIR}/cache" LOG_DIR = "${TMPDIR}/log" @@ -451,11 +455,6 @@ STAGING_KERNEL_BUILDDIR = "${TMPDIR}/work-shared/${MACHINE}/kernel-build-artifac ################################################################## IMAGE_ROOTFS = "${WORKDIR}/rootfs" -IMAGE_BASENAME = "${PN}" -IMAGE_VERSION_SUFFIX = "-${DATETIME}" -IMAGE_VERSION_SUFFIX[vardepsexclude] += "DATETIME" -IMAGE_NAME = "${IMAGE_BASENAME}-${MACHINE}${IMAGE_VERSION_SUFFIX}" -IMAGE_LINK_NAME = "${IMAGE_BASENAME}-${MACHINE}" # This option allows for a percentage overage of the actual image size rather than a # fixed extra space, this is space needed for initial startup and basic operations. diff --git a/poky/meta/conf/distro/defaultsetup.conf b/poky/meta/conf/distro/defaultsetup.conf index 66fd24652..b36a4ffff 100644 --- a/poky/meta/conf/distro/defaultsetup.conf +++ b/poky/meta/conf/distro/defaultsetup.conf @@ -3,10 +3,7 @@ include conf/distro/include/default-versions.inc include conf/distro/include/default-distrovars.inc include conf/distro/include/maintainers.inc -TCMODE ?= "default" require conf/distro/include/tcmode-${TCMODE}.inc - -TCLIBC ?= "glibc" require conf/distro/include/tclibc-${TCLIBC}.inc require conf/distro/include/uninative-flags.inc @@ -15,8 +12,6 @@ require conf/distro/include/uninative-flags.inc TCLIBCAPPEND ?= "-${TCLIBC}" TMPDIR .= "${TCLIBCAPPEND}" -CACHE = "${TMPDIR}/cache/${TCMODE}-${TCLIBC}${@['', '/' + str(d.getVar('MACHINE'))][bool(d.getVar('MACHINE'))]}${@['', '/' + str(d.getVar('SDKMACHINE'))][bool(d.getVar('SDKMACHINE'))]}" - USER_CLASSES ?= "" PACKAGE_CLASSES ?= "package_ipk" INHERIT_BLACKLIST = "blacklist" diff --git a/poky/meta/conf/distro/include/distro_alias.inc b/poky/meta/conf/distro/include/distro_alias.inc index 56055f779..b4dfcfc5f 100644 --- a/poky/meta/conf/distro/include/distro_alias.inc +++ b/poky/meta/conf/distro/include/distro_alias.inc @@ -81,10 +81,6 @@ DISTRO_PN_ALIAS_pn-encodings = "Ubuntu=xfonts-encodings Mandriva=x11-font-encodi DISTRO_PN_ALIAS_pn-font-alias = "Fedora=xorg-x11-fonts-base Mandriva=x11-font-alias Meego=xorg-x11-fonts" DISTRO_PN_ALIAS_pn-font-util = "Meego=xorg-x11-font-utils Fedora=xorg-x11-font-utils Ubuntu=xfonts-utils Mandriva=x11-font-util Debian=xfonts-utils" DISTRO_PN_ALIAS_pn-formfactor = "OE-Core" -DISTRO_PN_ALIAS_pn-gcc-cross-initial = "OE-Core" -DISTRO_PN_ALIAS_pn-gcc-cross-initial-i586 = "OE-Core" -DISTRO_PN_ALIAS_pn-gcc-crosssdk-initial = "OE-Core" -DISTRO_PN_ALIAS_pn-gcc-crosssdk-initial-x86_64 = "OE-Core" DISTRO_PN_ALIAS_pn-gccmakedep = "Mandriva=gccmakedep Ubuntu=xutils-dev" DISTRO_PN_ALIAS_pn-gcc-runtime = "Ubuntu=gcc Fedora=gcc" DISTRO_PN_ALIAS_pn-gcc-sanitizers = "Ubuntu=gcc Fedora=gcc" diff --git a/poky/meta/conf/distro/include/maintainers.inc b/poky/meta/conf/distro/include/maintainers.inc index c3a1f2733..a094b39b2 100644 --- a/poky/meta/conf/distro/include/maintainers.inc +++ b/poky/meta/conf/distro/include/maintainers.inc @@ -143,7 +143,7 @@ RECIPE_MAINTAINER_pn-debianutils = "Yi Zhao " 
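For orientation on the CACHE relocation in bitbake.conf above: with the new TCMODE/TCLIBC defaults folded into the path, a stock glibc build for a hypothetical qemux86-64 machine would keep its parse cache under something like the path below (with an additional SDKMACHINE component when one is set); under poky, TCLIBCAPPEND is empty, so TMPDIR stays plain "tmp".

    # illustrative expansion of the new CACHE definition
    ${TOPDIR}/tmp/cache/default-glibc/qemux86-64/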
RECIPE_MAINTAINER_pn-dejagnu = "Nathan Rossi " RECIPE_MAINTAINER_pn-depmodwrapper-cross = "Unassigned " RECIPE_MAINTAINER_pn-desktop-file-utils = "Alexander Kanavin " -RECIPE_MAINTAINER_pn-dhcp = "Hongxu Jia " +RECIPE_MAINTAINER_pn-dhcpcd = "Armin Kuster " RECIPE_MAINTAINER_pn-diffoscope = "Joshua Watt " RECIPE_MAINTAINER_pn-diffstat = "Chen Qi " RECIPE_MAINTAINER_pn-diffutils = "Chen Qi " @@ -255,7 +255,7 @@ RECIPE_MAINTAINER_pn-gtk-doc = "Alexander Kanavin " RECIPE_MAINTAINER_pn-gzip = "Denys Dmytriyenko " RECIPE_MAINTAINER_pn-harfbuzz = "Anuj Mittal " RECIPE_MAINTAINER_pn-hdparm = "Denys Dmytriyenko " -RECIPE_MAINTAINER_pn-help2man-native = "Hongxu Jia " +RECIPE_MAINTAINER_pn-help2man = "Hongxu Jia " RECIPE_MAINTAINER_pn-hicolor-icon-theme = "Anuj Mittal " RECIPE_MAINTAINER_pn-hwlatdetect = "Alexander Kanavin " RECIPE_MAINTAINER_pn-i2c-tools = "Anuj Mittal " @@ -287,6 +287,7 @@ RECIPE_MAINTAINER_pn-json-c = "Yi Zhao " RECIPE_MAINTAINER_pn-json-glib = "Yi Zhao " RECIPE_MAINTAINER_pn-jquery = "Joshua Watt " RECIPE_MAINTAINER_pn-kbd = "Alexander Kanavin " +RECIPE_MAINTAINER_pn-kea = "Armin Kuster " RECIPE_MAINTAINER_pn-kern-tools-native = "Bruce Ashfield " RECIPE_MAINTAINER_pn-kernel-devsrc = "Bruce Ashfield " RECIPE_MAINTAINER_pn-kexec-tools = "Armin Kuster " @@ -450,6 +451,7 @@ RECIPE_MAINTAINER_pn-linux-yocto-rt = "Bruce Ashfield RECIPE_MAINTAINER_pn-linux-yocto-tiny = "Bruce Ashfield " RECIPE_MAINTAINER_pn-llvm = "Khem Raj " RECIPE_MAINTAINER_pn-logrotate = "Yi Zhao " +RECIPE_MAINTAINER_pn-log4cplus = "Armin Kuster " RECIPE_MAINTAINER_pn-lrzsz = "Anuj Mittal " RECIPE_MAINTAINER_pn-lsb-release = "Hongxu Jia " RECIPE_MAINTAINER_pn-lsof = "Ross Burton " diff --git a/poky/meta/conf/distro/include/tcmode-default.inc b/poky/meta/conf/distro/include/tcmode-default.inc index 4f29d00e9..50e8a20f3 100644 --- a/poky/meta/conf/distro/include/tcmode-default.inc +++ b/poky/meta/conf/distro/include/tcmode-default.inc @@ -23,16 +23,14 @@ GDBVERSION ?= "9.%" GLIBCVERSION ?= "2.32" LINUXLIBCVERSION ?= "5.8%" QEMUVERSION ?= "5.1%" -GOVERSION ?= "1.14%" +GOVERSION ?= "1.15%" # This can not use wildcards like 8.0.% since it is also used in mesa to denote # llvm version being used, so always bump it with llvm recipe version bump LLVMVERSION ?= "10.0.1" PREFERRED_VERSION_gcc ?= "${GCCVERSION}" PREFERRED_VERSION_gcc-cross-${TARGET_ARCH} ?= "${GCCVERSION}" -PREFERRED_VERSION_gcc-cross-initial-${TARGET_ARCH} ?= "${GCCVERSION}" PREFERRED_VERSION_gcc-crosssdk-${SDK_SYS} ?= "${SDKGCCVERSION}" -PREFERRED_VERSION_gcc-crosssdk-initial-${SDK_SYS} ?= "${SDKGCCVERSION}" PREFERRED_VERSION_gcc-cross-canadian-${TRANSLATED_TARGET_ARCH} ?= "${GCCVERSION}" PREFERRED_VERSION_gcc-runtime ?= "${GCCVERSION}" PREFERRED_VERSION_gcc-sanitizers ?= "${GCCVERSION}" diff --git a/poky/meta/conf/distro/include/yocto-uninative.inc b/poky/meta/conf/distro/include/yocto-uninative.inc index 889695eae..69b6edee5 100644 --- a/poky/meta/conf/distro/include/yocto-uninative.inc +++ b/poky/meta/conf/distro/include/yocto-uninative.inc @@ -6,9 +6,9 @@ # to the distro running on the build machine. 
# -UNINATIVE_MAXGLIBCVERSION = "2.31" +UNINATIVE_MAXGLIBCVERSION = "2.32" -UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.8/" -UNINATIVE_CHECKSUM[aarch64] ?= "989187344bf9539b464fb7ed9c223e51f4bdb4c7a677d2c314e6fed393176efe" -UNINATIVE_CHECKSUM[i686] ?= "cc3e45bc8594488b407363e3fa9af5a099279dab2703c64342098719bd674990" -UNINATIVE_CHECKSUM[x86_64] ?= "a09922172c3a439105e0ae6b943daad2d83505b17da0aba97961ff433b8c21ab" +UNINATIVE_URL ?= "http://downloads.yoctoproject.org/releases/uninative/2.9/" +UNINATIVE_CHECKSUM[aarch64] ?= "9f25a667aee225b1dd65c4aea73e01983e825b1cb9b56937932a1ee328b45f81" +UNINATIVE_CHECKSUM[i686] ?= "cae5d73245d95b07cf133b780ba3f6c8d0adca3ffc4e7e7fab999961d5e24d36" +UNINATIVE_CHECKSUM[x86_64] ?= "d07916b95c419c81541a19c8ef0ed8cbd78ae18437ff28a4c8a60ef40518e423" diff --git a/poky/meta/conf/layer.conf b/poky/meta/conf/layer.conf index 1a01d02fe..38df0f3af 100644 --- a/poky/meta/conf/layer.conf +++ b/poky/meta/conf/layer.conf @@ -91,7 +91,7 @@ SIGGEN_EXCLUDE_SAFE_RECIPE_DEPS += " \ # dependency incidentally. This improves determinism and avoids build # failures when people switch to external toolchains. SSTATE_EXCLUDEDEPS_SYSROOT += ".*->bison-native" -# Nothing needs to depend on libc-initial/gcc-cross-initial +# Nothing needs to depend on libc-initial # base-passwd/shadow-sysroot don't need their dependencies SSTATE_EXCLUDEDEPS_SYSROOT += "\ .*->.*-initial.* \ diff --git a/poky/meta/conf/machine/include/tune-cortex-m0plus.inc b/poky/meta/conf/machine/include/tune-cortex-m0plus.inc old mode 100755 new mode 100644 diff --git a/poky/meta/conf/machine/include/tune-cortexa32.inc b/poky/meta/conf/machine/include/tune-cortexa32.inc index 3ab1addd9..0ffb3e068 100644 --- a/poky/meta/conf/machine/include/tune-cortexa32.inc +++ b/poky/meta/conf/machine/include/tune-cortexa32.inc @@ -11,7 +11,7 @@ AVAILTUNES += "cortexa32 cortexa32-crypto" ARMPKGARCH_tune-cortexa32 = "cortexa32" ARMPKGARCH_tune-cortexa32-crypto = "cortexa32" TUNE_FEATURES_tune-cortexa32 = "armv8a cortexa32 crc" -TUNE_FEATURES_tune-cortexa32-crypto = "armv8a cortexa32 crc crypto" +TUNE_FEATURES_tune-cortexa32-crypto = "${TUNE_FEATURES_tune-cortexa32} crypto" PACKAGE_EXTRA_ARCHS_tune-cortexa32 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa32" PACKAGE_EXTRA_ARCHS_tune-cortexa32-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa32 cortexa32-crypto" BASE_LIB_tune-cortexa32 = "lib" diff --git a/poky/meta/conf/machine/include/tune-cortexa35.inc b/poky/meta/conf/machine/include/tune-cortexa35.inc index d6df6cd6b..61696da54 100644 --- a/poky/meta/conf/machine/include/tune-cortexa35.inc +++ b/poky/meta/conf/machine/include/tune-cortexa35.inc @@ -10,7 +10,7 @@ AVAILTUNES += "cortexa35 cortexa35-crypto" ARMPKGARCH_tune-cortexa35 = "cortexa35" ARMPKGARCH_tune-cortexa35-crypto = "cortexa35" TUNE_FEATURES_tune-cortexa35 = "aarch64 cortexa35 crc" -TUNE_FEATURES_tune-cortexa35-crypto = "aarch64 cortexa35 crc crypto" +TUNE_FEATURES_tune-cortexa35-crypto = "${TUNE_FEATURES_tune-cortexa35} crypto" PACKAGE_EXTRA_ARCHS_tune-cortexa35 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa35" PACKAGE_EXTRA_ARCHS_tune-cortexa35-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa35 cortexa35-crypto" BASE_LIB_tune-cortexa35 = "lib64" diff --git a/poky/meta/conf/machine/include/tune-cortexa53.inc b/poky/meta/conf/machine/include/tune-cortexa53.inc index c0bb28a19..79ce7c4b1 100644 --- a/poky/meta/conf/machine/include/tune-cortexa53.inc +++ b/poky/meta/conf/machine/include/tune-cortexa53.inc @@ 
-10,7 +10,7 @@ AVAILTUNES += "cortexa53 cortexa53-crypto" ARMPKGARCH_tune-cortexa53 = "cortexa53" ARMPKGARCH_tune-cortexa53-crypto = "cortexa53-crypto" TUNE_FEATURES_tune-cortexa53 = "aarch64 cortexa53 crc" -TUNE_FEATURES_tune-cortexa53-crypto = "aarch64 cortexa53 crc crypto" +TUNE_FEATURES_tune-cortexa53-crypto = "${TUNE_FEATURES_tune-cortexa53} crypto" PACKAGE_EXTRA_ARCHS_tune-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa53" PACKAGE_EXTRA_ARCHS_tune-cortexa53-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa53 cortexa53-crypto" diff --git a/poky/meta/conf/machine/include/tune-cortexa55.inc b/poky/meta/conf/machine/include/tune-cortexa55.inc index b383eb733..66a5d0c43 100644 --- a/poky/meta/conf/machine/include/tune-cortexa55.inc +++ b/poky/meta/conf/machine/include/tune-cortexa55.inc @@ -8,6 +8,6 @@ require conf/machine/include/arm/arch-armv8-2a.inc # Little Endian base configs AVAILTUNES += "cortexa55" ARMPKGARCH_tune-cortexa55 = "cortexa55" -TUNE_FEATURES_tune-cortexa55 = "aarch64 cortexa55 crc crypto" -PACKAGE_EXTRA_ARCHS_tune-cortexa55 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa55" +TUNE_FEATURES_tune-cortexa55 = "aarch64 cortexa55 crypto" +PACKAGE_EXTRA_ARCHS_tune-cortexa55 = "${PACKAGE_EXTRA_ARCHS_tune-armv8-2a-crypto} cortexa55" BASE_LIB_tune-cortexa55 = "lib64" diff --git a/poky/meta/conf/machine/include/tune-cortexa57-cortexa53.inc b/poky/meta/conf/machine/include/tune-cortexa57-cortexa53.inc index ba4b07385..5880bf203 100644 --- a/poky/meta/conf/machine/include/tune-cortexa57-cortexa53.inc +++ b/poky/meta/conf/machine/include/tune-cortexa57-cortexa53.inc @@ -10,6 +10,6 @@ require conf/machine/include/arm/arch-armv8a.inc # Little Endian base configs AVAILTUNES += "cortexa57-cortexa53" ARMPKGARCH_tune-cortexa57-cortexa53 = "cortexa57-cortexa53" -TUNE_FEATURES_tune-cortexa57-cortexa53 = "aarch64 cortexa57-cortexa53" -PACKAGE_EXTRA_ARCHS_tune-cortexa57-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-aarch64} cortexa57-cortexa53" +TUNE_FEATURES_tune-cortexa57-cortexa53 = "aarch64 crc cortexa57-cortexa53" +PACKAGE_EXTRA_ARCHS_tune-cortexa57-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa57-cortexa53" BASE_LIB_tune-cortexa57-cortexa53 = "lib64" diff --git a/poky/meta/conf/machine/include/tune-cortexa57.inc b/poky/meta/conf/machine/include/tune-cortexa57.inc index 0811d503c..3206ce75a 100644 --- a/poky/meta/conf/machine/include/tune-cortexa57.inc +++ b/poky/meta/conf/machine/include/tune-cortexa57.inc @@ -10,7 +10,7 @@ AVAILTUNES += "cortexa57 cortexa57-crypto" ARMPKGARCH_tune-cortexa57 = "cortexa57" ARMPKGARCH_tune-cortexa57-crypto = "cortexa57-crypto" TUNE_FEATURES_tune-cortexa57 = "aarch64 cortexa57 crc" -TUNE_FEATURES_tune-cortexa57-crypto = "aarch64 cortexa57 crc crypto" +TUNE_FEATURES_tune-cortexa57-crypto = "${TUNE_FEATURES_tune-cortexa57} crypto" PACKAGE_EXTRA_ARCHS_tune-cortexa57 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa57" PACKAGE_EXTRA_ARCHS_tune-cortexa57-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa57 cortexa57-crypto" BASE_LIB_tune-cortexa57 = "lib64" diff --git a/poky/meta/conf/machine/include/tune-cortexa72-cortexa53.inc b/poky/meta/conf/machine/include/tune-cortexa72-cortexa53.inc index e857f875f..feb1df5c1 100644 --- a/poky/meta/conf/machine/include/tune-cortexa72-cortexa53.inc +++ b/poky/meta/conf/machine/include/tune-cortexa72-cortexa53.inc @@ -12,7 +12,7 @@ AVAILTUNES += "cortexa72-cortexa53 cortexa72-cortexa53-crypto" ARMPKGARCH_tune-cortexa72-cortexa53 = "cortexa72-cortexa53" 
 ARMPKGARCH_tune-cortexa72-cortexa53-crypto = "cortexa72-cortexa53-crypto"
 TUNE_FEATURES_tune-cortexa72-cortexa53 = "aarch64 crc cortexa72-cortexa53"
-TUNE_FEATURES_tune-cortexa72-cortexa53-crypto = "aarch64 crc crypto cortexa72-cortexa53"
+TUNE_FEATURES_tune-cortexa72-cortexa53-crypto = "${TUNE_FEATURES_tune-cortexa72-cortexa53} crypto"
 PACKAGE_EXTRA_ARCHS_tune-cortexa72-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa72-cortexa53"
 PACKAGE_EXTRA_ARCHS_tune-cortexa72-cortexa53-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa72-cortexa53 cortexa72-cortexa53-crypto"
 BASE_LIB_tune-cortexa72-cortexa53 = "lib64"
diff --git a/poky/meta/conf/machine/include/tune-cortexa73-cortexa53.inc b/poky/meta/conf/machine/include/tune-cortexa73-cortexa53.inc
index ba2b9b195..1c221999f 100644
--- a/poky/meta/conf/machine/include/tune-cortexa73-cortexa53.inc
+++ b/poky/meta/conf/machine/include/tune-cortexa73-cortexa53.inc
@@ -12,7 +12,7 @@ AVAILTUNES += "cortexa73-cortexa53 cortexa73-cortexa53-crypto"
 ARMPKGARCH_tune-cortexa73-cortexa53 = "cortexa73-cortexa53"
 ARMPKGARCH_tune-cortexa73-cortexa53-crypto = "cortexa73-cortexa53-crypto"
 TUNE_FEATURES_tune-cortexa73-cortexa53 = "aarch64 crc cortexa73-cortexa53"
-TUNE_FEATURES_tune-cortexa73-cortexa53-crypto = "aarch64 crc crypto cortexa73-cortexa53"
+TUNE_FEATURES_tune-cortexa73-cortexa53-crypto = "${TUNE_FEATURES_tune-cortexa73-cortexa53} crypto"
 PACKAGE_EXTRA_ARCHS_tune-cortexa73-cortexa53 = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc} cortexa73-cortexa53"
 PACKAGE_EXTRA_ARCHS_tune-cortexa73-cortexa53-crypto = "${PACKAGE_EXTRA_ARCHS_tune-armv8a-crc-crypto} cortexa73-cortexa53 cortexa73-cortexa53-crypto"
 BASE_LIB_tune-cortexa73-cortexa53 = "lib64"
diff --git a/poky/meta/conf/machine/include/x86-base.inc b/poky/meta/conf/machine/include/x86-base.inc
index a72714711..a3169b740 100644
--- a/poky/meta/conf/machine/include/x86-base.inc
+++ b/poky/meta/conf/machine/include/x86-base.inc
@@ -18,7 +18,7 @@ SERIAL_CONSOLES ?= "115200;ttyS0"
 # kernel-related variables
 #
 PREFERRED_PROVIDER_virtual/kernel ??= "linux-yocto"
-PREFERRED_VERSION_linux-yocto ??= "5.4%"
+PREFERRED_VERSION_linux-yocto ??= "5.8%"
 
 #
 # XSERVER subcomponents, used to build the XSERVER variable
diff --git a/poky/meta/conf/machine/qemuarmv5.conf b/poky/meta/conf/machine/qemuarmv5.conf
index 6940efe46..48e83f13e 100644
--- a/poky/meta/conf/machine/qemuarmv5.conf
+++ b/poky/meta/conf/machine/qemuarmv5.conf
@@ -16,7 +16,7 @@ QB_KERNEL_CMDLINE_APPEND = "console=ttyAMA0,115200 console=tty"
 QB_OPT_APPEND = "-usb -device usb-tablet"
 # Add the 'virtio-rng-pci' device otherwise the guest may run out of entropy
 QB_OPT_APPEND += "-object rng-random,filename=/dev/urandom,id=rng0 -device virtio-rng-pci,rng=rng0"
-PREFERRED_VERSION_linux-yocto ??= "5.4%"
+PREFERRED_VERSION_linux-yocto ??= "5.8%"
 QB_DTB = "${@oe.utils.version_less_or_equal('PREFERRED_VERSION_linux-yocto', '4.7', '', 'zImage-versatile-pb.dtb', d)}"
 KMACHINE_qemuarmv5 = "qemuarm"
diff --git a/poky/meta/lib/oe/recipeutils.py b/poky/meta/lib/oe/recipeutils.py
index 36427eec9..ef69ef207 100644
--- a/poky/meta/lib/oe/recipeutils.py
+++ b/poky/meta/lib/oe/recipeutils.py
@@ -563,6 +563,23 @@ def get_bbfile_path(d, destdir, extrapathhint=None):
     confdata = bb.cookerdata.parse_config_file(destlayerconf, confdata)
     pn = d.getVar('PN')
 
+    # Parse BBFILES_DYNAMIC and append to BBFILES
+    bbfiles_dynamic = (confdata.getVar('BBFILES_DYNAMIC') or "").split()
+    collections = (confdata.getVar('BBFILE_COLLECTIONS') or "").split()
+    invalid = []
+    for entry in bbfiles_dynamic:
+        parts = entry.split(":", 1)
+        if len(parts) != 2:
+            invalid.append(entry)
+            continue
+        l, f = parts
+        invert = l[0] == "!"
+        if invert:
+            l = l[1:]
+        if (l in collections and not invert) or (l not in collections and invert):
+            confdata.appendVar("BBFILES", " " + f)
+    if invalid:
+        return None
     bbfilespecs = (confdata.getVar('BBFILES') or '').split()
     if destdir == destlayerdir:
         for bbfilespec in bbfilespecs:
diff --git a/poky/meta/lib/oeqa/runtime/cases/multilib.py b/poky/meta/lib/oeqa/runtime/cases/multilib.py
index 62e662b01..0d1b9ae2c 100644
--- a/poky/meta/lib/oeqa/runtime/cases/multilib.py
+++ b/poky/meta/lib/oeqa/runtime/cases/multilib.py
@@ -7,6 +7,8 @@ from oeqa.core.decorator.depends import OETestDepends
 from oeqa.core.decorator.data import skipIfNotInDataVar
 from oeqa.runtime.decorator.package import OEHasPackage
 
+import subprocess
+
 class MultilibTest(OERuntimeTestCase):
 
     def archtest(self, binary, arch):
@@ -14,8 +16,10 @@ class MultilibTest(OERuntimeTestCase):
         Check that ``binary`` has the ELF class ``arch`` (e.g. ELF32/ELF64).
         """
 
-        status, output = self.target.run('readelf -h %s' % binary)
-        self.assertEqual(status, 0, 'Failed to readelf %s' % binary)
+        dest = "{}/test_binary".format(self.td.get('T', ''))
+        self.target.copyFrom(binary, dest)
+        output = subprocess.check_output("readelf -h {}".format(dest), shell=True).decode()
+        os.remove(dest)
 
         l = [l.split()[1] for l in output.split('\n') if "Class:" in l]
         if l:
@@ -29,7 +33,6 @@ class MultilibTest(OERuntimeTestCase):
 
     @skipIfNotInDataVar('MULTILIBS', 'multilib:lib32', "This isn't a multilib:lib32 image")
     @OETestDepends(['ssh.SSHTest.test_ssh'])
-    @OEHasPackage(['binutils'])
     @OEHasPackage(['lib32-libc6'])
     def test_check_multilib_libc(self):
         """
@@ -39,6 +42,6 @@ class MultilibTest(OERuntimeTestCase):
         self.archtest("/lib64/libc.so.6", "ELF64")
 
     @OETestDepends(['multilib.MultilibTest.test_check_multilib_libc'])
-    @OEHasPackage(['lib32-connman', '!connman'])
+    @OEHasPackage(['lib32-connman'])
     def test_file_connman(self):
         self.archtest("/usr/sbin/connmand", "ELF32")
diff --git a/poky/meta/lib/oeqa/runtime/cases/terminal.py b/poky/meta/lib/oeqa/runtime/cases/terminal.py
new file mode 100644
index 000000000..a268f2688
--- /dev/null
+++ b/poky/meta/lib/oeqa/runtime/cases/terminal.py
@@ -0,0 +1,18 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.decorator.package import OEHasPackage
+
+import threading
+import time
+
+class TerminalTest(OERuntimeTestCase):
+
+    @OEHasPackage(['matchbox-terminal'])
+    @OETestDepends(['ssh.SSHTest.test_ssh'])
+    def test_terminal_running(self):
+        t_thread = threading.Thread(target=self.target.run, args=('export DISPLAY=:0 && matchbox-terminal',))
+        t_thread.start()
+        time.sleep(2)
+        status, output = self.target.run('pidof matchbox-terminal')
+        self.target.run('kill -9 %s' % output)
+        self.assertEqual(status, 0, msg='Not able to find process that runs terminal.')
diff --git a/poky/meta/lib/oeqa/runtime/cases/weston.py b/poky/meta/lib/oeqa/runtime/cases/weston.py
index ac29eca6e..36b4f9e37 100644
--- a/poky/meta/lib/oeqa/runtime/cases/weston.py
+++ b/poky/meta/lib/oeqa/runtime/cases/weston.py
@@ -34,7 +34,10 @@ class WestonTest(OERuntimeTestCase):
         return 'export XDG_RUNTIME_DIR=/run/user/0; export WAYLAND_DISPLAY=wayland-0; %s' % cmd
 
     def run_weston_init(self):
-        self.target.run(self.get_weston_command('weston --log=%s' % self.weston_log_file))
+        if 'systemd' in self.tc.td['DISTRO_FEATURES']:
+
self.target.run('systemd-run --collect --unit=weston-ptest.service --uid=0 -p PAMName=login -p TTYPath=/dev/tty6 -E XDG_RUNTIME_DIR=/tmp -E WAYLAND_DISPLAY=wayland-0 /usr/bin/weston --socket=wayland-1 --log=%s' % self.weston_log_file) + else: + self.target.run(self.get_weston_command('openvt -- weston --socket=wayland-1 --log=%s' % self.weston_log_file)) def get_new_wayland_processes(self, existing_wl_processes): try_cnt = 0 @@ -63,7 +66,10 @@ class WestonTest(OERuntimeTestCase): new_wl_processes, try_cnt = self.get_new_wayland_processes(existing_wl_processes) existing_and_new_weston_processes = self.get_processes_of('weston', 'existing and new') new_weston_processes = [x for x in existing_and_new_weston_processes if x not in existing_weston_processes] - for w in new_weston_processes: - self.target.run('kill -9 %s' % w) + if 'systemd' in self.tc.td['DISTRO_FEATURES']: + self.target.run('systemctl stop weston-ptest.service') + else: + for w in new_weston_processes: + self.target.run('kill -9 %s' % w) __, weston_log = self.target.run('cat %s' % self.weston_log_file) self.assertTrue(new_wl_processes, msg='Could not get new weston-desktop-shell processes (%s, try_cnt:%s) weston log: %s' % (new_wl_processes, try_cnt, weston_log)) diff --git a/poky/meta/lib/oeqa/runtime/cases/x32lib.py b/poky/meta/lib/oeqa/runtime/cases/x32lib.py index ddf220140..f419c8f18 100644 --- a/poky/meta/lib/oeqa/runtime/cases/x32lib.py +++ b/poky/meta/lib/oeqa/runtime/cases/x32lib.py @@ -6,16 +6,21 @@ from oeqa.runtime.case import OERuntimeTestCase from oeqa.core.decorator.depends import OETestDepends from oeqa.core.decorator.data import skipIfNotInDataVar +import subprocess + class X32libTest(OERuntimeTestCase): @skipIfNotInDataVar('DEFAULTTUNE', 'x86-64-x32', 'DEFAULTTUNE is not set to x86-64-x32') @OETestDepends(['ssh.SSHTest.test_ssh']) def test_x32_file(self): - cmd = 'readelf -h /bin/ls | grep Class | grep ELF32' - status1 = self.target.run(cmd)[0] - cmd = 'readelf -h /bin/ls | grep Machine | grep X86-64' - status2 = self.target.run(cmd)[0] - msg = ("/bin/ls isn't an X86-64 ELF32 binary. readelf says: %s" % - self.target.run("readelf -h /bin/ls")[1]) + dest = self.td.get('T', '') + "/ls.x32test" + self.target.copyFrom("/bin/ls", dest) + cmd = 'readelf -h {} | grep Class | grep ELF32'.format(dest) + status1 = subprocess.call(cmd, shell=True) + cmd = 'readelf -h {} | grep Machine | grep X86-64'.format(dest) + status2 = subprocess.call(cmd, shell=True) + msg = ("/bin/ls isn't an X86-64 ELF32 binary. 
readelf says:\n{}".format( + subprocess.check_output("readelf -h {}".format(dest), shell=True).decode())) + os.remove(dest) self.assertTrue(status1 == 0 and status2 == 0, msg=msg) diff --git a/poky/meta/lib/oeqa/sdk/case.py b/poky/meta/lib/oeqa/sdk/case.py index ebb03af9e..c45882689 100644 --- a/poky/meta/lib/oeqa/sdk/case.py +++ b/poky/meta/lib/oeqa/sdk/case.py @@ -26,7 +26,7 @@ class OESDKTestCase(OETestCase): return tarball tarball = os.path.join(workdir, archive) - subprocess.check_output(["wget", "-O", tarball, url]) + subprocess.check_output(["wget", "-O", tarball, url], stderr=subprocess.STDOUT) return tarball def check_elf(self, path, target_os=None, target_arch=None): diff --git a/poky/meta/lib/oeqa/sdk/cases/assimp.py b/poky/meta/lib/oeqa/sdk/cases/assimp.py index f26b17f2e..f166758e4 100644 --- a/poky/meta/lib/oeqa/sdk/cases/assimp.py +++ b/poky/meta/lib/oeqa/sdk/cases/assimp.py @@ -30,7 +30,7 @@ class BuildAssimp(OESDKTestCase): dirs["build"] = os.path.join(testdir, "build") dirs["install"] = os.path.join(testdir, "install") - subprocess.check_output(["tar", "xf", tarball, "-C", testdir]) + subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT) self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildcpio.py b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py index e56582654..e7fc211a4 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildcpio.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildcpio.py @@ -24,7 +24,7 @@ class BuildCpioTest(OESDKTestCase): dirs["build"] = os.path.join(testdir, "build") dirs["install"] = os.path.join(testdir, "install") - subprocess.check_output(["tar", "xf", tarball, "-C", testdir]) + subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT) self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildepoxy.py b/poky/meta/lib/oeqa/sdk/cases/buildepoxy.py index 4211955f8..385f8ccca 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildepoxy.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildepoxy.py @@ -28,7 +28,7 @@ class EpoxyTest(OESDKTestCase): dirs["build"] = os.path.join(testdir, "build") dirs["install"] = os.path.join(testdir, "install") - subprocess.check_output(["tar", "xf", tarball, "-C", testdir]) + subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT) self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py index 1121ed20e..eb3c8ddf3 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildgalculator.py @@ -31,7 +31,7 @@ class GalculatorTest(OESDKTestCase): dirs["build"] = os.path.join(testdir, "build") dirs["install"] = os.path.join(testdir, "install") - subprocess.check_output(["tar", "xf", tarball, "-C", testdir]) + subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT) self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) diff --git a/poky/meta/lib/oeqa/sdk/cases/buildlzip.py b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py index 515acd289..49ae756bf 100644 --- a/poky/meta/lib/oeqa/sdk/cases/buildlzip.py +++ b/poky/meta/lib/oeqa/sdk/cases/buildlzip.py @@ -20,7 +20,7 @@ class BuildLzipTest(OESDKTestCase): dirs["build"] = os.path.join(testdir, "build") dirs["install"] = os.path.join(testdir, "install") - 
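# Illustrative sketch, not part of the diff above: the multilib.py and
# x32lib.py hunks switch to this pattern -- copy the binary off the target
# with copyFrom() and inspect it with the build host's readelf, so the image
# no longer needs binutils installed.  The helper names below are
# hypothetical; only copyFrom() and the "Class:" readelf field come from the
# hunks themselves.
import os
import subprocess
import tempfile

def elf_class(readelf_output):
    # The "Class:" line of `readelf -h` reports ELF32 or ELF64.
    for line in readelf_output.splitlines():
        if "Class:" in line:
            return line.split()[1]
    return None

def remote_binary_has_class(target, remote_path, expected):
    with tempfile.TemporaryDirectory() as tmpdir:
        local = os.path.join(tmpdir, os.path.basename(remote_path))
        target.copyFrom(remote_path, local)
        output = subprocess.check_output(["readelf", "-h", local]).decode()
    return elf_class(output) == expected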
subprocess.check_output(["tar", "xf", tarball, "-C", testdir]) + subprocess.check_output(["tar", "xf", tarball, "-C", testdir], stderr=subprocess.STDOUT) self.assertTrue(os.path.isdir(dirs["source"])) os.makedirs(dirs["build"]) diff --git a/poky/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt b/poky/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt new file mode 100644 index 000000000..f70f10e4d --- /dev/null +++ b/poky/meta/lib/oeqa/selftest/cases/diffoscope/A/file.txt @@ -0,0 +1 @@ +A diff --git a/poky/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt b/poky/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt new file mode 100644 index 000000000..223b7836f --- /dev/null +++ b/poky/meta/lib/oeqa/selftest/cases/diffoscope/B/file.txt @@ -0,0 +1 @@ +B diff --git a/poky/meta/lib/oeqa/selftest/cases/meta_ide.py b/poky/meta/lib/oeqa/selftest/cases/meta_ide.py index 809142559..6f10d30dc 100644 --- a/poky/meta/lib/oeqa/selftest/cases/meta_ide.py +++ b/poky/meta/lib/oeqa/selftest/cases/meta_ide.py @@ -43,7 +43,7 @@ class MetaIDE(OESelftestTestCase): "https://ftp.gnu.org/gnu/cpio/cpio-2.13.tar.gz", self.tmpdir_metaideQA, self.td['DATETIME'], dl_dir=dl_dir) self.project.download_archive() - self.assertEqual(self.project.run_configure(), 0, + self.assertEqual(self.project.run_configure('$CONFIGURE_FLAGS --disable-maintainer-mode','sed -i -e "/char \*program_name/d" src/global.c;'), 0, msg="Running configure failed") self.assertEqual(self.project.run_make(), 0, msg="Running make failed") diff --git a/poky/meta/lib/oeqa/selftest/cases/prservice.py b/poky/meta/lib/oeqa/selftest/cases/prservice.py index fe1f24ea6..578b2b4dd 100644 --- a/poky/meta/lib/oeqa/selftest/cases/prservice.py +++ b/poky/meta/lib/oeqa/selftest/cases/prservice.py @@ -23,7 +23,7 @@ class BitbakePrTests(OESelftestTestCase): package_data_file = os.path.join(self.pkgdata_dir, 'runtime', package_name) package_data = ftools.read_file(package_data_file) find_pr = re.search(r"PKGR: r[0-9]+\.([0-9]+)", package_data) - self.assertTrue(find_pr, "No PKG revision found in %s" % package_data_file) + self.assertTrue(find_pr, "No PKG revision found via regex 'PKGR: r[0-9]+\.([0-9]+)' in %s" % package_data_file) return int(find_pr.group(1)) def get_task_stamp(self, package_name, recipe_task): @@ -40,7 +40,7 @@ class BitbakePrTests(OESelftestTestCase): return str(stamps[0]) def increment_package_pr(self, package_name): - inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\"\n}" % datetime.datetime.now() + inc_data = "do_package_append() {\n bb.build.exec_func('do_test_prserv', d)\n}\ndo_test_prserv() {\necho \"The current date is: %s\" > ${PKGDESTWORK}/${PN}.datestamp\n}" % datetime.datetime.now() self.write_recipeinc(package_name, inc_data) res = bitbake(package_name, ignore_status=True) self.delete_recipeinc(package_name) @@ -63,7 +63,7 @@ class BitbakePrTests(OESelftestTestCase): pr_2 = self.get_pr_version(package_name) stamp_2 = self.get_task_stamp(package_name, track_task) - self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1") + self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1)) self.assertTrue(stamp_1 != stamp_2, "Different pkg rev. 
but same stamp: %s" % stamp_1) def run_test_pr_export_import(self, package_name, replace_current_db=True): @@ -89,7 +89,7 @@ class BitbakePrTests(OESelftestTestCase): self.increment_package_pr(package_name) pr_2 = self.get_pr_version(package_name) - self.assertTrue(pr_2 - pr_1 == 1, "Step between same pkg. revision is greater than 1") + self.assertTrue(pr_2 - pr_1 == 1, "New PR %s did not increment as expected (from %s), difference should be 1" % (pr_2, pr_1)) def test_import_export_replace_db(self): self.run_test_pr_export_import('m4') diff --git a/poky/meta/lib/oeqa/selftest/cases/recipetool.py b/poky/meta/lib/oeqa/selftest/cases/recipetool.py index c2ade2543..6bac53cf3 100644 --- a/poky/meta/lib/oeqa/selftest/cases/recipetool.py +++ b/poky/meta/lib/oeqa/selftest/cases/recipetool.py @@ -226,19 +226,6 @@ class RecipetoolTests(RecipetoolBase): _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-subdir', self.testfile, '', expectedlines, ['testfile']) self.assertNotIn('WARNING: ', output) - def test_recipetool_appendfile_src_glob(self): - # A file that's in SRC_URI as a glob - expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n', - '\n', - 'SRC_URI += "file://testfile"\n', - '\n', - 'do_install_append() {\n', - ' install -d ${D}${datadir}\n', - ' install -m 0644 ${WORKDIR}/testfile ${D}${datadir}/selftest-replaceme-src-globfile\n', - '}\n'] - _, output = self._try_recipetool_appendfile('selftest-recipetool-appendfile', '/usr/share/selftest-replaceme-src-globfile', self.testfile, '', expectedlines, ['testfile']) - self.assertNotIn('WARNING: ', output) - def test_recipetool_appendfile_inst_glob(self): # A file that's in do_install as a glob expectedlines = ['FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"\n', diff --git a/poky/meta/lib/oeqa/selftest/cases/reproducible.py b/poky/meta/lib/oeqa/selftest/cases/reproducible.py index 5d3959be7..a7ef33614 100644 --- a/poky/meta/lib/oeqa/selftest/cases/reproducible.py +++ b/poky/meta/lib/oeqa/selftest/cases/reproducible.py @@ -77,6 +77,32 @@ def compare_file(reference, test, diffutils_sysroot): result.status = SAME return result +def run_diffoscope(a_dir, b_dir, html_dir, **kwargs): + return runCmd(['diffoscope', '--no-default-limits', '--exclude-directory-metadata', 'yes', '--html-dir', html_dir, a_dir, b_dir], + **kwargs) + +class DiffoscopeTests(OESelftestTestCase): + diffoscope_test_files = os.path.join(os.path.dirname(os.path.abspath(__file__)), "diffoscope") + + def test_diffoscope(self): + bitbake("diffoscope-native -c addto_recipe_sysroot") + diffoscope_sysroot = get_bb_var("RECIPE_SYSROOT_NATIVE", "diffoscope-native") + + # Check that diffoscope doesn't return an error when the files compare + # the same (a general check that diffoscope is working) + with tempfile.TemporaryDirectory() as tmpdir: + run_diffoscope('A', 'A', tmpdir, + native_sysroot=diffoscope_sysroot, cwd=self.diffoscope_test_files) + + # Check that diffoscope generates an index.html file when the files are + # different + with tempfile.TemporaryDirectory() as tmpdir: + r = run_diffoscope('A', 'B', tmpdir, + native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=self.diffoscope_test_files) + + self.assertNotEqual(r.status, 0, msg="diffoscope was successful when an error was expected") + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'index.html')), "HTML index not found!") + class ReproducibleTests(OESelftestTestCase): package_classes = ['deb', 'ipk'] images = ['core-image-minimal', 
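# Illustrative sketch, not part of the diff above: the behaviour the new
# DiffoscopeTests in reproducible.py rely on -- diffoscope exits 0 when the
# two inputs match, exits non-zero when they differ, and with --html-dir it
# writes an index.html report.  The helper name and paths are hypothetical;
# diffoscope is assumed to be on PATH (the selftest builds diffoscope-native).
import os
import subprocess
import tempfile

def diffoscope_html(a_dir, b_dir):
    html_dir = tempfile.mkdtemp()
    rc = subprocess.call(["diffoscope", "--no-default-limits",
                          "--exclude-directory-metadata", "yes",
                          "--html-dir", html_dir, a_dir, b_dir])
    # rc == 0 means "no differences"; otherwise index.html describes them.
    return rc, os.path.join(html_dir, "index.html")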
'core-image-sato', 'core-image-full-cmdline'] @@ -232,7 +258,7 @@ class ReproducibleTests(OESelftestTestCase): # Copy jquery to improve the diffoscope output usability self.copy_file(os.path.join(jquery_sysroot, 'usr/share/javascript/jquery/jquery.min.js'), os.path.join(package_html_dir, 'jquery.js')) - runCmd(['diffoscope', '--no-default-limits', '--exclude-directory-metadata', '--html-dir', package_html_dir, 'reproducibleA', 'reproducibleB'], + run_diffoscope('reproducibleA', 'reproducibleB', package_html_dir, native_sysroot=diffoscope_sysroot, ignore_status=True, cwd=package_dir) if fails: diff --git a/poky/meta/lib/oeqa/selftest/cases/runtime_test.py b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py index 793c98a33..d89731c69 100644 --- a/poky/meta/lib/oeqa/selftest/cases/runtime_test.py +++ b/poky/meta/lib/oeqa/selftest/cases/runtime_test.py @@ -156,7 +156,7 @@ class TestImage(OESelftestTestCase): self.gpg_home = tempfile.mkdtemp(prefix="oeqa-feed-sign-") self.track_for_cleanup(self.gpg_home) signing_key_dir = os.path.join(self.testlayer_path, 'files', 'signing') - runCmd('gpg --batch --homedir %s --import %s' % (self.gpg_home, os.path.join(signing_key_dir, 'key.secret')), native_sysroot=get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native")) + runCmd('gpgconf --list-dirs --homedir %s; gpg -v --batch --homedir %s --import %s' % (self.gpg_home, self.gpg_home, os.path.join(signing_key_dir, 'key.secret')), native_sysroot=get_bb_var("RECIPE_SYSROOT_NATIVE", "gnupg-native"), shell=True) features += 'INHERIT += "sign_package_feed"\n' features += 'PACKAGE_FEED_GPG_NAME = "testuser"\n' features += 'PACKAGE_FEED_GPG_PASSPHRASE_FILE = "%s"\n' % os.path.join(signing_key_dir, 'key.passphrase') diff --git a/poky/meta/lib/oeqa/selftest/cases/signing.py b/poky/meta/lib/oeqa/selftest/cases/signing.py index 202d54994..a28c7eb19 100644 --- a/poky/meta/lib/oeqa/selftest/cases/signing.py +++ b/poky/meta/lib/oeqa/selftest/cases/signing.py @@ -44,7 +44,9 @@ class Signing(OESelftestTestCase): origenv = os.environ.copy() for e in os.environ: - if builddir in os.environ[e]: + if builddir + "/" in os.environ[e]: + os.environ[e] = os.environ[e].replace(builddir + "/", newbuilddir + "/") + if os.environ[e].endswith(builddir): os.environ[e] = os.environ[e].replace(builddir, newbuilddir) os.chdir(newbuilddir) diff --git a/poky/meta/lib/oeqa/selftest/cases/wic.py b/poky/meta/lib/oeqa/selftest/cases/wic.py index 8b58285c3..714637ec1 100644 --- a/poky/meta/lib/oeqa/selftest/cases/wic.py +++ b/poky/meta/lib/oeqa/selftest/cases/wic.py @@ -235,6 +235,17 @@ class Wic(WicTestCase): runCmd(cmd) self.assertEqual(1, len(glob(self.resultdir + "systemd-bootdisk-*direct"))) + def test_efi_bootpart(self): + """Test creation of efi-bootpart image""" + cmd = "wic create mkefidisk -e core-image-minimal -o %s" % self.resultdir + kimgtype = get_bb_var('KERNEL_IMAGETYPE', 'core-image-minimal') + self.append_config('IMAGE_EFI_BOOT_FILES = "%s;kernel"\n' % kimgtype) + runCmd(cmd) + sysroot = get_bb_var('RECIPE_SYSROOT_NATIVE', 'wic-tools') + images = glob(self.resultdir + "mkefidisk-*.direct") + result = runCmd("wic ls %s:1/ -n %s" % (images[0], sysroot)) + self.assertIn("kernel",result.output) + def test_sdimage_bootpart(self): """Test creation of sdimage-bootpart image""" cmd = "wic create sdimage-bootpart -e core-image-minimal -o %s" % self.resultdir @@ -689,7 +700,7 @@ class Wic2(WicTestCase): wicvars = wicvars.difference(('DEPLOY_DIR_IMAGE', 'IMAGE_BOOT_FILES', 'INITRD', 'INITRD_LIVE', 'ISODIR','INITRAMFS_IMAGE', 
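# Illustrative sketch, not part of the diff above: the signing.py and
# context.py hunks split the old single condition into the two substitutions
# below, so "<builddir>/..." path components and values that merely end with
# the build directory get rewritten separately.  retarget_env() is a
# hypothetical stand-alone version of that loop.
import os

def retarget_env(builddir, newbuilddir, env=os.environ):
    for name in list(env):
        if builddir + "/" in env[name]:
            env[name] = env[name].replace(builddir + "/", newbuilddir + "/")
        if env[name].endswith(builddir):
            env[name] = env[name].replace(builddir, newbuilddir)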
'INITRAMFS_IMAGE_BUNDLE', 'INITRAMFS_LINK_NAME', - 'APPEND')) + 'APPEND', 'IMAGE_EFI_BOOT_FILES')) with open(path) as envfile: content = dict(line.split("=", 1) for line in envfile) # test if variables used by wic present in the .env file @@ -889,6 +900,30 @@ class Wic2(WicTestCase): "2:103424kiB:205824kiB:102400kiB:ext4:primary:;", ]) + with NamedTemporaryFile("w", suffix=".wks") as tempf: + # Test that partitions can be placed on a 512 byte sector boundary + tempf.write("bootloader --ptable gpt\n" \ + "part / --source rootfs --ondisk hda --offset 65s --fixed-size 99M --fstype=ext4\n" \ + "part /bar --ondisk hda --offset 102432 --fixed-size 100M --fstype=ext4\n") + tempf.flush() + + _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) + self.assertEqual(partlns, [ + "1:32.5kiB:101408kiB:101376kiB:ext4:primary:;", + "2:102432kiB:204832kiB:102400kiB:ext4:primary:;", + ]) + + with NamedTemporaryFile("w", suffix=".wks") as tempf: + # Test that a partition can be placed immediately after a MSDOS partition table + tempf.write("bootloader --ptable msdos\n" \ + "part / --source rootfs --ondisk hda --offset 1s --fixed-size 100M --fstype=ext4\n") + tempf.flush() + + _, partlns = self._get_wic_partitions(tempf.name, native_sysroot) + self.assertEqual(partlns, [ + "1:0.50kiB:102400kiB:102400kiB:ext4::;", + ]) + with NamedTemporaryFile("w", suffix=".wks") as tempf: # Test that image creation fails if the partitions would overlap tempf.write("bootloader --ptable gpt\n" \ diff --git a/poky/meta/lib/oeqa/selftest/context.py b/poky/meta/lib/oeqa/selftest/context.py index 23f7d71bd..dd3609c1d 100644 --- a/poky/meta/lib/oeqa/selftest/context.py +++ b/poky/meta/lib/oeqa/selftest/context.py @@ -82,7 +82,9 @@ class OESelftestTestContext(OETestContext): oe.path.copytree(selftestdir, newselftestdir) for e in os.environ: - if builddir + "/" in os.environ[e] or os.environ[e].endswith(builddir): + if builddir + "/" in os.environ[e]: + os.environ[e] = os.environ[e].replace(builddir + "/", newbuilddir + "/") + if os.environ[e].endswith(builddir): os.environ[e] = os.environ[e].replace(builddir, newbuilddir) subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True) diff --git a/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb b/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb new file mode 100644 index 000000000..613e3161f --- /dev/null +++ b/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.1.bb @@ -0,0 +1,30 @@ +SUMMARY = "U-Boot libraries and tools to access environment" + +DESCRIPTION = "This package contains tools and libraries to read \ +and modify U-Boot environment. 
\ +It provides a hardware-independent replacement for fw_printenv/setenv utilities \ +provided by U-Boot" + +HOMEPAGE = "https://github.com/sbabic/libubootenv" +LICENSE = "LGPL-2.1" +LIC_FILES_CHKSUM = "file://Licenses/lgpl-2.1.txt;md5=4fbd65380cdd255951079008b364516c" +SECTION = "libs" + +SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https" +SRCREV = "824551ac77bab1d0f7ae34d7a7c77b155240e754" + +S = "${WORKDIR}/git" + +inherit uboot-config cmake lib_package + +EXTRA_OECMAKE = "-DCMAKE_BUILD_TYPE=Release" + +DEPENDS = "zlib" +PROVIDES += "u-boot-fw-utils" +RPROVIDES_${PN}-bin += "u-boot-fw-utils" + +PACKAGE_ARCH = "${MACHINE_ARCH}" + +RRECOMMENDS_${PN}-bin_append_class-target = " u-boot-default-env" + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb b/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb deleted file mode 100644 index 47e64f911..000000000 --- a/poky/meta/recipes-bsp/u-boot/libubootenv_0.3.bb +++ /dev/null @@ -1,30 +0,0 @@ -SUMMARY = "U-Boot libraries and tools to access environment" - -DESCRIPTION = "This package contains tools and libraries to read \ -and modify U-Boot environment. \ -It provides a hardware-independent replacement for fw_printenv/setenv utilities \ -provided by U-Boot" - -HOMEPAGE = "https://github.com/sbabic/libubootenv" -LICENSE = "LGPL-2.1" -LIC_FILES_CHKSUM = "file://Licenses/lgpl-2.1.txt;md5=4fbd65380cdd255951079008b364516c" -SECTION = "libs" - -SRC_URI = "git://github.com/sbabic/libubootenv;protocol=https" -SRCREV = "ad253cfdb07c8492f2ee46a52fbc607ad0b96414" - -S = "${WORKDIR}/git" - -inherit cmake lib_package - -EXTRA_OECMAKE = "-DCMAKE_BUILD_TYPE=Release" - -DEPENDS = "zlib" -PROVIDES += "u-boot-fw-utils" -RPROVIDES_${PN}-bin += "u-boot-fw-utils" - -PACKAGE_ARCH = "${MACHINE_ARCH}" - -RRECOMMENDS_${PN}-bin_append_class-target = " u-boot-default-env" - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-avoid-start-failure-with-bind-user.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-avoid-start-failure-with-bind-user.patch new file mode 100644 index 000000000..8db96ec04 --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-avoid-start-failure-with-bind-user.patch @@ -0,0 +1,27 @@ +From 31dde3562f287429eea94b77250d184818b49063 Mon Sep 17 00:00:00 2001 +From: Chen Qi +Date: Mon, 15 Oct 2018 16:55:09 +0800 +Subject: [PATCH] avoid start failure with bind user + +Upstream-Status: Pending + +Signed-off-by: Chen Qi +--- + init.d | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/init.d b/init.d +index b2eec60..6e03936 100644 +--- a/init.d ++++ b/init.d +@@ -57,6 +57,7 @@ case "$1" in + modprobe capability >/dev/null 2>&1 || true + if [ ! 
-f /etc/bind/rndc.key ]; then + /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom ++ chown root:bind /etc/bind/rndc.key >/dev/null 2>&1 || true + chmod 0640 /etc/bind/rndc.key + fi + if [ -f /var/run/named/named.pid ]; then +-- +2.7.4 + diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-named-lwresd-V-and-start-log-hide-build-options.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-named-lwresd-V-and-start-log-hide-build-options.patch new file mode 100644 index 000000000..5bcc16c9b --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/0001-named-lwresd-V-and-start-log-hide-build-options.patch @@ -0,0 +1,35 @@ +From a3af4a405baf5ff582e82aaba392dd9667d94bdc Mon Sep 17 00:00:00 2001 +From: Hongxu Jia +Date: Mon, 27 Aug 2018 21:24:20 +0800 +Subject: [PATCH] `named/lwresd -V' and start log hide build options + +The build options expose build path directories, so hide them. +[snip] +$ named -V +|built by make with *** (options are hidden) +[snip] + +Upstream-Status: Inappropriate [oe-core specific] + +Signed-off-by: Hongxu Jia + +Refreshed for 9.16.0 +Signed-off-by: Armin Kuster + +--- + bin/named/include/named/globals.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: bind-9.16.0/bin/named/include/named/globals.h +=================================================================== +--- bind-9.16.0.orig/bin/named/include/named/globals.h ++++ bind-9.16.0/bin/named/include/named/globals.h +@@ -69,7 +69,7 @@ EXTERN const char *named_g_version I + EXTERN const char *named_g_product INIT(PRODUCT); + EXTERN const char *named_g_description INIT(DESCRIPTION); + EXTERN const char *named_g_srcid INIT(SRCID); +-EXTERN const char *named_g_configargs INIT(CONFIGARGS); ++EXTERN const char *named_g_configargs INIT("*** (options are hidden)"); + EXTERN const char *named_g_builder INIT(BUILDER); + EXTERN in_port_t named_g_port INIT(0); + EXTERN isc_dscp_t named_g_dscp INIT(-1); diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind-ensure-searching-for-json-headers-searches-sysr.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind-ensure-searching-for-json-headers-searches-sysr.patch new file mode 100644 index 000000000..f9cdc7ca4 --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind-ensure-searching-for-json-headers-searches-sysr.patch @@ -0,0 +1,47 @@ +From edda20fb5a6e88548f85e39d34d6c074306e15bc Mon Sep 17 00:00:00 2001 +From: Paul Gortmaker +Date: Tue, 9 Jun 2015 11:22:00 -0400 +Subject: [PATCH] bind: ensure searching for json headers searches sysroot + +Bind can fail configure by detecting headers w/o libs[1], or +it can fail the host contamination check as per below: + +ERROR: This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities. +Rerun configure task after fixing this. The path was 'build/tmp/work/core2-64-poky-linux/bind/9.10.2-r1/build' +ERROR: Function failed: do_qa_configure +ERROR: Logfile of failure stored in: build/tmp/work/core2-64-poky-linux/bind/9.10.2-r1/temp/log.do_configure.5242 +ERROR: Task 5 (meta/recipes-connectivity/bind/bind_9.10.2.bb, do_configure) failed with exit code '1' +NOTE: Tasks Summary: Attempted 773 tasks of which 768 didn't need to be rerun and 1 failed. 
+No currently running tasks (773 of 781) + +Summary: 1 task failed: + /meta/recipes-connectivity/bind/bind_9.10.2.bb, do_configure + +One way to fix it would be to unconditionally disable json in bind +configure[2] but here we fix it by using the path to where we would +put the header if we had json in the sysroot, in case someone wants +to make use of the combination some day. + +[1] https://trac.macports.org/ticket/45305 +[2] https://trac.macports.org/changeset/126406 + +Upstream-Status: Inappropriate [OE Specific] +Signed-off-by: Paul Gortmaker + +--- + configure.ac | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +Index: bind-9.16.4/configure.ac +=================================================================== +--- bind-9.16.4.orig/configure.ac ++++ bind-9.16.4/configure.ac +@@ -1232,7 +1232,7 @@ case "$use_lmdb" in + LMDB_LIBS="" + ;; + auto|yes) +- for d in /usr /usr/local /opt/local ++ for d in "${STAGING_INCDIR}" + do + if test -f "${d}/include/lmdb.h" + then diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind9 b/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind9 new file mode 100644 index 000000000..968679ff7 --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/bind9 @@ -0,0 +1,2 @@ +# startup options for the server +OPTIONS="-u bind" diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/conf.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/conf.patch new file mode 100644 index 000000000..aad345f9f --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/conf.patch @@ -0,0 +1,330 @@ +Upstream-Status: Inappropriate [configuration] + +the patch is imported from openembedded project + +11/30/2010 - Qing He + +diff -urN bind-9.3.1.orig/conf/db.0 bind-9.3.1/conf/db.0 +--- bind-9.3.1.orig/conf/db.0 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.0 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,12 @@ ++; ++; BIND reverse data file for broadcast zone ++; ++$TTL 604800 ++@ IN SOA localhost. root.localhost. ( ++ 1 ; Serial ++ 604800 ; Refresh ++ 86400 ; Retry ++ 2419200 ; Expire ++ 604800 ) ; Negative Cache TTL ++; ++@ IN NS localhost. +diff -urN bind-9.3.1.orig/conf/db.127 bind-9.3.1/conf/db.127 +--- bind-9.3.1.orig/conf/db.127 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.127 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,13 @@ ++; ++; BIND reverse data file for local loopback interface ++; ++$TTL 604800 ++@ IN SOA localhost. root.localhost. ( ++ 1 ; Serial ++ 604800 ; Refresh ++ 86400 ; Retry ++ 2419200 ; Expire ++ 604800 ) ; Negative Cache TTL ++; ++@ IN NS localhost. ++1.0.0 IN PTR localhost. +diff -urN bind-9.3.1.orig/conf/db.empty bind-9.3.1/conf/db.empty +--- bind-9.3.1.orig/conf/db.empty 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.empty 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,14 @@ ++; BIND reverse data file for empty rfc1918 zone ++; ++; DO NOT EDIT THIS FILE - it is used for multiple zones. ++; Instead, copy it, edit named.conf, and use that copy. ++; ++$TTL 86400 ++@ IN SOA localhost. root.localhost. ( ++ 1 ; Serial ++ 604800 ; Refresh ++ 86400 ; Retry ++ 2419200 ; Expire ++ 86400 ) ; Negative Cache TTL ++; ++@ IN NS localhost. +diff -urN bind-9.3.1.orig/conf/db.255 bind-9.3.1/conf/db.255 +--- bind-9.3.1.orig/conf/db.255 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.255 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,12 @@ ++; ++; BIND reserve data file for broadcast zone ++; ++$TTL 604800 ++@ IN SOA localhost. root.localhost. 
( ++ 1 ; Serial ++ 604800 ; Refresh ++ 86400 ; Retry ++ 2419200 ; Expire ++ 604800 ) ; Negative Cache TTL ++; ++@ IN NS localhost. +diff -urN bind-9.3.1.orig/conf/db.local bind-9.3.1/conf/db.local +--- bind-9.3.1.orig/conf/db.local 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.local 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,13 @@ ++; ++; BIND data file for local loopback interface ++; ++$TTL 604800 ++@ IN SOA localhost. root.localhost. ( ++ 1 ; Serial ++ 604800 ; Refresh ++ 86400 ; Retry ++ 2419200 ; Expire ++ 604800 ) ; Negative Cache TTL ++; ++@ IN NS localhost. ++@ IN A 127.0.0.1 +diff -urN bind-9.3.1.orig/conf/db.root bind-9.3.1/conf/db.root +--- bind-9.3.1.orig/conf/db.root 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/db.root 2005-07-10 22:14:00.000000000 +0200 +@@ -0,0 +1,45 @@ ++ ++; <<>> DiG 9.2.3 <<>> ns . @a.root-servers.net. ++;; global options: printcmd ++;; Got answer: ++;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 18944 ++;; flags: qr aa rd; QUERY: 1, ANSWER: 13, AUTHORITY: 0, ADDITIONAL: 13 ++ ++;; QUESTION SECTION: ++;. IN NS ++ ++;; ANSWER SECTION: ++. 518400 IN NS A.ROOT-SERVERS.NET. ++. 518400 IN NS B.ROOT-SERVERS.NET. ++. 518400 IN NS C.ROOT-SERVERS.NET. ++. 518400 IN NS D.ROOT-SERVERS.NET. ++. 518400 IN NS E.ROOT-SERVERS.NET. ++. 518400 IN NS F.ROOT-SERVERS.NET. ++. 518400 IN NS G.ROOT-SERVERS.NET. ++. 518400 IN NS H.ROOT-SERVERS.NET. ++. 518400 IN NS I.ROOT-SERVERS.NET. ++. 518400 IN NS J.ROOT-SERVERS.NET. ++. 518400 IN NS K.ROOT-SERVERS.NET. ++. 518400 IN NS L.ROOT-SERVERS.NET. ++. 518400 IN NS M.ROOT-SERVERS.NET. ++ ++;; ADDITIONAL SECTION: ++A.ROOT-SERVERS.NET. 3600000 IN A 198.41.0.4 ++B.ROOT-SERVERS.NET. 3600000 IN A 192.228.79.201 ++C.ROOT-SERVERS.NET. 3600000 IN A 192.33.4.12 ++D.ROOT-SERVERS.NET. 3600000 IN A 128.8.10.90 ++E.ROOT-SERVERS.NET. 3600000 IN A 192.203.230.10 ++F.ROOT-SERVERS.NET. 3600000 IN A 192.5.5.241 ++G.ROOT-SERVERS.NET. 3600000 IN A 192.112.36.4 ++H.ROOT-SERVERS.NET. 3600000 IN A 128.63.2.53 ++I.ROOT-SERVERS.NET. 3600000 IN A 192.36.148.17 ++J.ROOT-SERVERS.NET. 3600000 IN A 192.58.128.30 ++K.ROOT-SERVERS.NET. 3600000 IN A 193.0.14.129 ++L.ROOT-SERVERS.NET. 3600000 IN A 198.32.64.12 ++M.ROOT-SERVERS.NET. 3600000 IN A 202.12.27.33 ++ ++;; Query time: 81 msec ++;; SERVER: 198.41.0.4#53(a.root-servers.net.) ++;; WHEN: Sun Feb 1 11:27:14 2004 ++;; MSG SIZE rcvd: 436 ++ +diff -urN bind-9.3.1.orig/conf/named.conf bind-9.3.1/conf/named.conf +--- bind-9.3.1.orig/conf/named.conf 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/named.conf 2005-07-10 22:33:46.000000000 +0200 +@@ -0,0 +1,49 @@ ++// This is the primary configuration file for the BIND DNS server named. ++// ++// If you are just adding zones, please do that in /etc/bind/named.conf.local ++ ++include "/etc/bind/named.conf.options"; ++ ++// prime the server with knowledge of the root servers ++zone "." 
{ ++ type hint; ++ file "/etc/bind/db.root"; ++}; ++ ++// be authoritative for the localhost forward and reverse zones, and for ++// broadcast zones as per RFC 1912 ++ ++zone "localhost" { ++ type master; ++ file "/etc/bind/db.local"; ++}; ++ ++zone "127.in-addr.arpa" { ++ type master; ++ file "/etc/bind/db.127"; ++}; ++ ++zone "0.in-addr.arpa" { ++ type master; ++ file "/etc/bind/db.0"; ++}; ++ ++zone "255.in-addr.arpa" { ++ type master; ++ file "/etc/bind/db.255"; ++}; ++ ++// zone "com" { type delegation-only; }; ++// zone "net" { type delegation-only; }; ++ ++// From the release notes: ++// Because many of our users are uncomfortable receiving undelegated answers ++// from root or top level domains, other than a few for whom that behaviour ++// has been trusted and expected for quite some length of time, we have now ++// introduced the "root-delegations-only" feature which applies delegation-only ++// logic to all top level domains, and to the root domain. An exception list ++// should be specified, including "MUSEUM" and "DE", and any other top level ++// domains from whom undelegated responses are expected and trusted. ++// root-delegation-only exclude { "DE"; "MUSEUM"; }; ++ ++include "/etc/bind/named.conf.local"; +diff -urN bind-9.3.1.orig/conf/named.conf.local bind-9.3.1/conf/named.conf.local +--- bind-9.3.1.orig/conf/named.conf.local 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/named.conf.local 2005-07-10 22:14:06.000000000 +0200 +@@ -0,0 +1,8 @@ ++// ++// Do any local configuration here ++// ++ ++// Consider adding the 1918 zones here, if they are not used in your ++// organization ++//include "/etc/bind/zones.rfc1918"; ++ +diff -urN bind-9.3.1.orig/conf/named.conf.options bind-9.3.1/conf/named.conf.options +--- bind-9.3.1.orig/conf/named.conf.options 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/named.conf.options 2005-07-10 22:14:06.000000000 +0200 +@@ -0,0 +1,24 @@ ++options { ++ directory "/var/cache/bind"; ++ ++ // If there is a firewall between you and nameservers you want ++ // to talk to, you might need to uncomment the query-source ++ // directive below. Previous versions of BIND always asked ++ // questions using port 53, but BIND 8.1 and later use an unprivileged ++ // port by default. ++ ++ // query-source address * port 53; ++ ++ // If your ISP provided one or more IP addresses for stable ++ // nameservers, you probably want to use them as forwarders. ++ // Uncomment the following block, and insert the addresses replacing ++ // the all-0's placeholder. 
++ ++ // forwarders { ++ // 0.0.0.0; ++ // }; ++ ++ auth-nxdomain no; # conform to RFC1035 ++ ++}; ++ +diff -urN bind-9.3.1.orig/conf/zones.rfc1918 bind-9.3.1/conf/zones.rfc1918 +--- bind-9.3.1.orig/conf/zones.rfc1918 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/conf/zones.rfc1918 2005-07-10 22:14:10.000000000 +0200 +@@ -0,0 +1,20 @@ ++zone "10.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++ ++zone "16.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "17.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "18.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "19.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "20.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "21.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "22.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "23.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "24.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "25.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "26.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "27.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "28.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "29.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "30.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++zone "31.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; ++ ++zone "168.192.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; +diff -urN bind-9.3.1.orig/init.d bind-9.3.1/init.d +--- bind-9.3.1.orig/init.d 1970-01-01 01:00:00.000000000 +0100 ++++ bind-9.3.1/init.d 2005-07-10 23:09:58.000000000 +0200 +@@ -0,0 +1,70 @@ ++#!/bin/sh ++ ++PATH=/sbin:/bin:/usr/sbin:/usr/bin ++ ++# for a chrooted server: "-u bind -t /var/lib/named" ++# Don't modify this line, change or create /etc/default/bind9. ++OPTIONS="" ++ ++test -f /etc/default/bind9 && . /etc/default/bind9 ++ ++test -x /usr/sbin/rndc || exit 0 ++ ++case "$1" in ++ start) ++ echo -n "Starting domain name service: named" ++ ++ modprobe capability >/dev/null 2>&1 || true ++ if [ ! -f /etc/bind/rndc.key ]; then ++ /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom ++ chmod 0640 /etc/bind/rndc.key ++ fi ++ if [ -f /var/run/named/named.pid ]; then ++ ps `cat /var/run/named/named.pid` > /dev/null && exit 1 ++ fi ++ ++ # dirs under /var/run can go away on reboots. ++ mkdir -p /var/run/named ++ mkdir -p /var/cache/bind ++ chmod 775 /var/run/named ++ chown root:bind /var/run/named >/dev/null 2>&1 || true ++ ++ if [ ! -x /usr/sbin/named ]; then ++ echo "named binary missing - not starting" ++ exit 1 ++ fi ++ if start-stop-daemon --start --quiet --exec /usr/sbin/named \ ++ --pidfile /var/run/named/named.pid -- $OPTIONS; then ++ if [ -x /sbin/resolvconf ] ; then ++ echo "nameserver 127.0.0.1" | /sbin/resolvconf -a lo ++ fi ++ fi ++ echo "." ++ ;; ++ ++ stop) ++ echo -n "Stopping domain name service: named" ++ if [ -x /sbin/resolvconf ]; then ++ /sbin/resolvconf -d lo ++ fi ++ /usr/sbin/rndc stop >/dev/null 2>&1 ++ echo "." 
++ ;; ++ ++ reload) ++ /usr/sbin/rndc reload ++ ;; ++ ++ restart|force-reload) ++ $0 stop ++ sleep 2 ++ $0 start ++ ;; ++ ++ *) ++ echo "Usage: /etc/init.d/bind {start|stop|reload|restart|force-reload}" >&2 ++ exit 1 ++ ;; ++esac ++ ++exit 0 diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/generate-rndc-key.sh b/poky/meta/recipes-connectivity/bind/bind-9.16.5/generate-rndc-key.sh new file mode 100644 index 000000000..ef915c0ae --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/generate-rndc-key.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +if [ ! -s /etc/bind/rndc.key ]; then + echo -n "Generating /etc/bind/rndc.key:" + /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom + chown root:bind /etc/bind/rndc.key + chmod 0640 /etc/bind/rndc.key +fi diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/init.d-add-support-for-read-only-rootfs.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/init.d-add-support-for-read-only-rootfs.patch new file mode 100644 index 000000000..11db95ede --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/init.d-add-support-for-read-only-rootfs.patch @@ -0,0 +1,65 @@ +Subject: init.d: add support for read-only rootfs + +Upstream-Status: Inappropriate [oe specific] + +Signed-off-by: Chen Qi +--- + init.d | 40 ++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 40 insertions(+) + +diff --git a/init.d b/init.d +index 0111ed4..24677c8 100644 +--- a/init.d ++++ b/init.d +@@ -6,8 +6,48 @@ PATH=/sbin:/bin:/usr/sbin:/usr/bin + # Don't modify this line, change or create /etc/default/bind9. + OPTIONS="" + ++test -f /etc/default/rcS && . /etc/default/rcS + test -f /etc/default/bind9 && . /etc/default/bind9 + ++# This function is here because it's possible that /var and / are on different partitions. ++is_on_read_only_partition () { ++ DIRECTORY=$1 ++ dir=`readlink -f $DIRECTORY` ++ while true; do ++ if [ ! -d "$dir" ]; then ++ echo "ERROR: $dir is not a directory" ++ exit 1 ++ else ++ for flag in `awk -v dir=$dir '{ if ($2 == dir) { print "FOUND"; split($4,FLAGS,",") } }; \ ++ END { for (f in FLAGS) print FLAGS[f] }' < /proc/mounts`; do ++ [ "$flag" = "FOUND" ] && partition="read-write" ++ [ "$flag" = "ro" ] && { partition="read-only"; break; } ++ done ++ if [ "$dir" = "/" -o -n "$partition" ]; then ++ break ++ else ++ dir=`dirname $dir` ++ fi ++ fi ++ done ++ [ "$partition" = "read-only" ] && echo "yes" || echo "no" ++} ++ ++bind_mount () { ++ olddir=$1 ++ newdir=$2 ++ mkdir -p $olddir ++ cp -a $newdir/* $olddir ++ mount --bind $olddir $newdir ++} ++ ++# Deal with read-only rootfs ++if [ "$ROOTFS_READ_ONLY" = "yes" ]; then ++ [ "$VERBOSE" != "no" ] && echo "WARN: start bind service in read-only rootfs" ++ [ `is_on_read_only_partition /etc/bind` = "yes" ] && bind_mount /var/volatile/bind/etc /etc/bind ++ [ `is_on_read_only_partition /var/named` = "yes" ] && bind_mount /var/volatile/bind/named /var/named ++fi ++ + test -x /usr/sbin/rndc || exit 0 + + case "$1" in +-- +1.7.9.5 + diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/make-etc-initd-bind-stop-work.patch b/poky/meta/recipes-connectivity/bind/bind-9.16.5/make-etc-initd-bind-stop-work.patch new file mode 100644 index 000000000..146f3e35d --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/make-etc-initd-bind-stop-work.patch @@ -0,0 +1,42 @@ +bind: make "/etc/init.d/bind stop" work + +Upstream-Status: Inappropriate [configuration] + +Add some configurations, make rndc command be able to controls +the named daemon. 
+ +Signed-off-by: Roy Li +--- + conf/named.conf | 5 +++++ + conf/rndc.conf | 5 +++++ + 2 files changed, 10 insertions(+), 0 deletions(-) + create mode 100644 conf/rndc.conf + +diff --git a/conf/named.conf b/conf/named.conf +index 95829cf..c8899e7 100644 +--- a/conf/named.conf ++++ b/conf/named.conf +@@ -47,3 +47,8 @@ zone "255.in-addr.arpa" { + // root-delegation-only exclude { "DE"; "MUSEUM"; }; + + include "/etc/bind/named.conf.local"; ++include "/etc/bind/rndc.key" ; ++controls { ++ inet 127.0.0.1 allow { localhost; } ++ keys { rndc-key; }; ++}; +diff --git a/conf/rndc.conf b/conf/rndc.conf +new file mode 100644 +index 0000000..a0b481d +--- /dev/null ++++ b/conf/rndc.conf +@@ -0,0 +1,5 @@ ++include "/etc/bind/rndc.key"; ++options { ++ default-server localhost; ++ default-key rndc-key; ++}; + +-- +1.7.5.4 + diff --git a/poky/meta/recipes-connectivity/bind/bind-9.16.5/named.service b/poky/meta/recipes-connectivity/bind/bind-9.16.5/named.service new file mode 100644 index 000000000..cda56ef01 --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind-9.16.5/named.service @@ -0,0 +1,22 @@ +[Unit] +Description=Berkeley Internet Name Domain (DNS) +Wants=nss-lookup.target +Before=nss-lookup.target +After=network.target + +[Service] +Type=forking +EnvironmentFile=-/etc/default/bind9 +PIDFile=/run/named/named.pid + +ExecStartPre=@SBINDIR@/generate-rndc-key.sh +ExecStart=@SBINDIR@/named $OPTIONS + +ExecReload=@BASE_BINDIR@/sh -c '@SBINDIR@/rndc reload > /dev/null 2>&1 || @BASE_BINDIR@/kill -HUP $MAINPID' + +ExecStop=@BASE_BINDIR@/sh -c '@SBINDIR@/rndc stop > /dev/null 2>&1 || @BASE_BINDIR@/kill -TERM $MAINPID' + +PrivateTmp=true + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch b/poky/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch deleted file mode 100644 index 8db96ec04..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/0001-avoid-start-failure-with-bind-user.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 31dde3562f287429eea94b77250d184818b49063 Mon Sep 17 00:00:00 2001 -From: Chen Qi -Date: Mon, 15 Oct 2018 16:55:09 +0800 -Subject: [PATCH] avoid start failure with bind user - -Upstream-Status: Pending - -Signed-off-by: Chen Qi ---- - init.d | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/init.d b/init.d -index b2eec60..6e03936 100644 ---- a/init.d -+++ b/init.d -@@ -57,6 +57,7 @@ case "$1" in - modprobe capability >/dev/null 2>&1 || true - if [ ! 
-f /etc/bind/rndc.key ]; then - /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom -+ chown root:bind /etc/bind/rndc.key >/dev/null 2>&1 || true - chmod 0640 /etc/bind/rndc.key - fi - if [ -f /var/run/named/named.pid ]; then --- -2.7.4 - diff --git a/poky/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch b/poky/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch deleted file mode 100644 index 9d31b9808..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/0001-configure.in-remove-useless-L-use_openssl-lib.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 2325a92f1896a2a7f586611686801b41fbc91b50 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Mon, 27 Aug 2018 15:00:51 +0800 -Subject: [PATCH] configure.in: remove useless `-L$use_openssl/lib' - -Since `--with-openssl=${STAGING_DIR_HOST}${prefix}' is used in bind recipe, -the `-L$use_openssl/lib' has a hardcoded suffix, removing it is harmless -and helpful for clean up host build path in isc-config.sh - -Upstream-Status: Inappropriate [oe-core specific] - -Signed-off-by: Hongxu Jia - ---- - configure.ac | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/configure.ac b/configure.ac -index e85a5c6..2bbfc58 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -1631,7 +1631,7 @@ If you don't want OpenSSL, use --without-openssl]) - fi - ;; - *) -- DST_OPENSSL_LIBS="-L$use_openssl/lib -lcrypto" -+ DST_OPENSSL_LIBS="-lcrypto" - ;; - esac - fi diff --git a/poky/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch b/poky/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch deleted file mode 100644 index 75908aa63..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/0001-named-lwresd-V-and-start-log-hide-build-options.patch +++ /dev/null @@ -1,34 +0,0 @@ -From a3af4a405baf5ff582e82aaba392dd9667d94bdc Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Mon, 27 Aug 2018 21:24:20 +0800 -Subject: [PATCH] `named/lwresd -V' and start log hide build options - -The build options expose build path directories, so hide them. 
-[snip] -$ named -V -|built by make with *** (options are hidden) -[snip] - -Upstream-Status: Inappropriate [oe-core specific] - -Signed-off-by: Hongxu Jia ---- - bin/named/include/named/globals.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/bin/named/include/named/globals.h b/bin/named/include/named/globals.h -index ba3457e..7741da7 100644 ---- a/bin/named/include/named/globals.h -+++ b/bin/named/include/named/globals.h -@@ -68,7 +68,7 @@ EXTERN const char * ns_g_version INIT(VERSION); - EXTERN const char * ns_g_product INIT(PRODUCT); - EXTERN const char * ns_g_description INIT(DESCRIPTION); - EXTERN const char * ns_g_srcid INIT(SRCID); --EXTERN const char * ns_g_configargs INIT(CONFIGARGS); -+EXTERN const char * ns_g_configargs INIT("*** (options are hidden)"); - EXTERN const char * ns_g_builder INIT(BUILDER); - EXTERN in_port_t ns_g_port INIT(0); - EXTERN isc_dscp_t ns_g_dscp INIT(-1); --- -2.7.4 - diff --git a/poky/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch b/poky/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch deleted file mode 100644 index 84559e5f3..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/bind-ensure-searching-for-json-headers-searches-sysr.patch +++ /dev/null @@ -1,47 +0,0 @@ -From edda20fb5a6e88548f85e39d34d6c074306e15bc Mon Sep 17 00:00:00 2001 -From: Paul Gortmaker -Date: Tue, 9 Jun 2015 11:22:00 -0400 -Subject: [PATCH] bind: ensure searching for json headers searches sysroot - -Bind can fail configure by detecting headers w/o libs[1], or -it can fail the host contamination check as per below: - -ERROR: This autoconf log indicates errors, it looked at host include and/or library paths while determining system capabilities. -Rerun configure task after fixing this. The path was 'build/tmp/work/core2-64-poky-linux/bind/9.10.2-r1/build' -ERROR: Function failed: do_qa_configure -ERROR: Logfile of failure stored in: build/tmp/work/core2-64-poky-linux/bind/9.10.2-r1/temp/log.do_configure.5242 -ERROR: Task 5 (meta/recipes-connectivity/bind/bind_9.10.2.bb, do_configure) failed with exit code '1' -NOTE: Tasks Summary: Attempted 773 tasks of which 768 didn't need to be rerun and 1 failed. -No currently running tasks (773 of 781) - -Summary: 1 task failed: - /meta/recipes-connectivity/bind/bind_9.10.2.bb, do_configure - -One way to fix it would be to unconditionally disable json in bind -configure[2] but here we fix it by using the path to where we would -put the header if we had json in the sysroot, in case someone wants -to make use of the combination some day. 
- -[1] https://trac.macports.org/ticket/45305 -[2] https://trac.macports.org/changeset/126406 - -Upstream-Status: Inappropriate [OE Specific] -Signed-off-by: Paul Gortmaker - ---- - configure.ac | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/configure.ac b/configure.ac -index 17392fd..e85a5c6 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -2449,7 +2449,7 @@ case "$use_libjson" in - libjson_libs="" - ;; - auto|yes) -- for d in /usr /usr/local /opt/local -+ for d in "${STAGING_INCDIR}" - do - if test -f "${d}/include/json/json.h" - then diff --git a/poky/meta/recipes-connectivity/bind/bind/bind9 b/poky/meta/recipes-connectivity/bind/bind/bind9 deleted file mode 100644 index 968679ff7..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/bind9 +++ /dev/null @@ -1,2 +0,0 @@ -# startup options for the server -OPTIONS="-u bind" diff --git a/poky/meta/recipes-connectivity/bind/bind/conf.patch b/poky/meta/recipes-connectivity/bind/bind/conf.patch deleted file mode 100644 index aad345f9f..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/conf.patch +++ /dev/null @@ -1,330 +0,0 @@ -Upstream-Status: Inappropriate [configuration] - -the patch is imported from openembedded project - -11/30/2010 - Qing He - -diff -urN bind-9.3.1.orig/conf/db.0 bind-9.3.1/conf/db.0 ---- bind-9.3.1.orig/conf/db.0 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.0 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,12 @@ -+; -+; BIND reverse data file for broadcast zone -+; -+$TTL 604800 -+@ IN SOA localhost. root.localhost. ( -+ 1 ; Serial -+ 604800 ; Refresh -+ 86400 ; Retry -+ 2419200 ; Expire -+ 604800 ) ; Negative Cache TTL -+; -+@ IN NS localhost. -diff -urN bind-9.3.1.orig/conf/db.127 bind-9.3.1/conf/db.127 ---- bind-9.3.1.orig/conf/db.127 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.127 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,13 @@ -+; -+; BIND reverse data file for local loopback interface -+; -+$TTL 604800 -+@ IN SOA localhost. root.localhost. ( -+ 1 ; Serial -+ 604800 ; Refresh -+ 86400 ; Retry -+ 2419200 ; Expire -+ 604800 ) ; Negative Cache TTL -+; -+@ IN NS localhost. -+1.0.0 IN PTR localhost. -diff -urN bind-9.3.1.orig/conf/db.empty bind-9.3.1/conf/db.empty ---- bind-9.3.1.orig/conf/db.empty 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.empty 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,14 @@ -+; BIND reverse data file for empty rfc1918 zone -+; -+; DO NOT EDIT THIS FILE - it is used for multiple zones. -+; Instead, copy it, edit named.conf, and use that copy. -+; -+$TTL 86400 -+@ IN SOA localhost. root.localhost. ( -+ 1 ; Serial -+ 604800 ; Refresh -+ 86400 ; Retry -+ 2419200 ; Expire -+ 86400 ) ; Negative Cache TTL -+; -+@ IN NS localhost. -diff -urN bind-9.3.1.orig/conf/db.255 bind-9.3.1/conf/db.255 ---- bind-9.3.1.orig/conf/db.255 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.255 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,12 @@ -+; -+; BIND reserve data file for broadcast zone -+; -+$TTL 604800 -+@ IN SOA localhost. root.localhost. ( -+ 1 ; Serial -+ 604800 ; Refresh -+ 86400 ; Retry -+ 2419200 ; Expire -+ 604800 ) ; Negative Cache TTL -+; -+@ IN NS localhost. -diff -urN bind-9.3.1.orig/conf/db.local bind-9.3.1/conf/db.local ---- bind-9.3.1.orig/conf/db.local 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.local 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,13 @@ -+; -+; BIND data file for local loopback interface -+; -+$TTL 604800 -+@ IN SOA localhost. root.localhost. 
( -+ 1 ; Serial -+ 604800 ; Refresh -+ 86400 ; Retry -+ 2419200 ; Expire -+ 604800 ) ; Negative Cache TTL -+; -+@ IN NS localhost. -+@ IN A 127.0.0.1 -diff -urN bind-9.3.1.orig/conf/db.root bind-9.3.1/conf/db.root ---- bind-9.3.1.orig/conf/db.root 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/db.root 2005-07-10 22:14:00.000000000 +0200 -@@ -0,0 +1,45 @@ -+ -+; <<>> DiG 9.2.3 <<>> ns . @a.root-servers.net. -+;; global options: printcmd -+;; Got answer: -+;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 18944 -+;; flags: qr aa rd; QUERY: 1, ANSWER: 13, AUTHORITY: 0, ADDITIONAL: 13 -+ -+;; QUESTION SECTION: -+;. IN NS -+ -+;; ANSWER SECTION: -+. 518400 IN NS A.ROOT-SERVERS.NET. -+. 518400 IN NS B.ROOT-SERVERS.NET. -+. 518400 IN NS C.ROOT-SERVERS.NET. -+. 518400 IN NS D.ROOT-SERVERS.NET. -+. 518400 IN NS E.ROOT-SERVERS.NET. -+. 518400 IN NS F.ROOT-SERVERS.NET. -+. 518400 IN NS G.ROOT-SERVERS.NET. -+. 518400 IN NS H.ROOT-SERVERS.NET. -+. 518400 IN NS I.ROOT-SERVERS.NET. -+. 518400 IN NS J.ROOT-SERVERS.NET. -+. 518400 IN NS K.ROOT-SERVERS.NET. -+. 518400 IN NS L.ROOT-SERVERS.NET. -+. 518400 IN NS M.ROOT-SERVERS.NET. -+ -+;; ADDITIONAL SECTION: -+A.ROOT-SERVERS.NET. 3600000 IN A 198.41.0.4 -+B.ROOT-SERVERS.NET. 3600000 IN A 192.228.79.201 -+C.ROOT-SERVERS.NET. 3600000 IN A 192.33.4.12 -+D.ROOT-SERVERS.NET. 3600000 IN A 128.8.10.90 -+E.ROOT-SERVERS.NET. 3600000 IN A 192.203.230.10 -+F.ROOT-SERVERS.NET. 3600000 IN A 192.5.5.241 -+G.ROOT-SERVERS.NET. 3600000 IN A 192.112.36.4 -+H.ROOT-SERVERS.NET. 3600000 IN A 128.63.2.53 -+I.ROOT-SERVERS.NET. 3600000 IN A 192.36.148.17 -+J.ROOT-SERVERS.NET. 3600000 IN A 192.58.128.30 -+K.ROOT-SERVERS.NET. 3600000 IN A 193.0.14.129 -+L.ROOT-SERVERS.NET. 3600000 IN A 198.32.64.12 -+M.ROOT-SERVERS.NET. 3600000 IN A 202.12.27.33 -+ -+;; Query time: 81 msec -+;; SERVER: 198.41.0.4#53(a.root-servers.net.) -+;; WHEN: Sun Feb 1 11:27:14 2004 -+;; MSG SIZE rcvd: 436 -+ -diff -urN bind-9.3.1.orig/conf/named.conf bind-9.3.1/conf/named.conf ---- bind-9.3.1.orig/conf/named.conf 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/named.conf 2005-07-10 22:33:46.000000000 +0200 -@@ -0,0 +1,49 @@ -+// This is the primary configuration file for the BIND DNS server named. -+// -+// If you are just adding zones, please do that in /etc/bind/named.conf.local -+ -+include "/etc/bind/named.conf.options"; -+ -+// prime the server with knowledge of the root servers -+zone "." { -+ type hint; -+ file "/etc/bind/db.root"; -+}; -+ -+// be authoritative for the localhost forward and reverse zones, and for -+// broadcast zones as per RFC 1912 -+ -+zone "localhost" { -+ type master; -+ file "/etc/bind/db.local"; -+}; -+ -+zone "127.in-addr.arpa" { -+ type master; -+ file "/etc/bind/db.127"; -+}; -+ -+zone "0.in-addr.arpa" { -+ type master; -+ file "/etc/bind/db.0"; -+}; -+ -+zone "255.in-addr.arpa" { -+ type master; -+ file "/etc/bind/db.255"; -+}; -+ -+// zone "com" { type delegation-only; }; -+// zone "net" { type delegation-only; }; -+ -+// From the release notes: -+// Because many of our users are uncomfortable receiving undelegated answers -+// from root or top level domains, other than a few for whom that behaviour -+// has been trusted and expected for quite some length of time, we have now -+// introduced the "root-delegations-only" feature which applies delegation-only -+// logic to all top level domains, and to the root domain. 
An exception list -+// should be specified, including "MUSEUM" and "DE", and any other top level -+// domains from whom undelegated responses are expected and trusted. -+// root-delegation-only exclude { "DE"; "MUSEUM"; }; -+ -+include "/etc/bind/named.conf.local"; -diff -urN bind-9.3.1.orig/conf/named.conf.local bind-9.3.1/conf/named.conf.local ---- bind-9.3.1.orig/conf/named.conf.local 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/named.conf.local 2005-07-10 22:14:06.000000000 +0200 -@@ -0,0 +1,8 @@ -+// -+// Do any local configuration here -+// -+ -+// Consider adding the 1918 zones here, if they are not used in your -+// organization -+//include "/etc/bind/zones.rfc1918"; -+ -diff -urN bind-9.3.1.orig/conf/named.conf.options bind-9.3.1/conf/named.conf.options ---- bind-9.3.1.orig/conf/named.conf.options 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/named.conf.options 2005-07-10 22:14:06.000000000 +0200 -@@ -0,0 +1,24 @@ -+options { -+ directory "/var/cache/bind"; -+ -+ // If there is a firewall between you and nameservers you want -+ // to talk to, you might need to uncomment the query-source -+ // directive below. Previous versions of BIND always asked -+ // questions using port 53, but BIND 8.1 and later use an unprivileged -+ // port by default. -+ -+ // query-source address * port 53; -+ -+ // If your ISP provided one or more IP addresses for stable -+ // nameservers, you probably want to use them as forwarders. -+ // Uncomment the following block, and insert the addresses replacing -+ // the all-0's placeholder. -+ -+ // forwarders { -+ // 0.0.0.0; -+ // }; -+ -+ auth-nxdomain no; # conform to RFC1035 -+ -+}; -+ -diff -urN bind-9.3.1.orig/conf/zones.rfc1918 bind-9.3.1/conf/zones.rfc1918 ---- bind-9.3.1.orig/conf/zones.rfc1918 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/conf/zones.rfc1918 2005-07-10 22:14:10.000000000 +0200 -@@ -0,0 +1,20 @@ -+zone "10.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+ -+zone "16.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "17.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "18.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "19.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "20.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "21.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "22.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "23.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "24.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "25.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "26.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "27.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "28.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "29.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "30.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+zone "31.172.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -+ -+zone "168.192.in-addr.arpa" { type master; file "/etc/bind/db.empty"; }; -diff -urN bind-9.3.1.orig/init.d bind-9.3.1/init.d ---- bind-9.3.1.orig/init.d 1970-01-01 01:00:00.000000000 +0100 -+++ bind-9.3.1/init.d 2005-07-10 23:09:58.000000000 +0200 -@@ -0,0 +1,70 @@ -+#!/bin/sh -+ -+PATH=/sbin:/bin:/usr/sbin:/usr/bin -+ -+# for a chrooted server: "-u bind -t /var/lib/named" -+# 
Don't modify this line, change or create /etc/default/bind9. -+OPTIONS="" -+ -+test -f /etc/default/bind9 && . /etc/default/bind9 -+ -+test -x /usr/sbin/rndc || exit 0 -+ -+case "$1" in -+ start) -+ echo -n "Starting domain name service: named" -+ -+ modprobe capability >/dev/null 2>&1 || true -+ if [ ! -f /etc/bind/rndc.key ]; then -+ /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom -+ chmod 0640 /etc/bind/rndc.key -+ fi -+ if [ -f /var/run/named/named.pid ]; then -+ ps `cat /var/run/named/named.pid` > /dev/null && exit 1 -+ fi -+ -+ # dirs under /var/run can go away on reboots. -+ mkdir -p /var/run/named -+ mkdir -p /var/cache/bind -+ chmod 775 /var/run/named -+ chown root:bind /var/run/named >/dev/null 2>&1 || true -+ -+ if [ ! -x /usr/sbin/named ]; then -+ echo "named binary missing - not starting" -+ exit 1 -+ fi -+ if start-stop-daemon --start --quiet --exec /usr/sbin/named \ -+ --pidfile /var/run/named/named.pid -- $OPTIONS; then -+ if [ -x /sbin/resolvconf ] ; then -+ echo "nameserver 127.0.0.1" | /sbin/resolvconf -a lo -+ fi -+ fi -+ echo "." -+ ;; -+ -+ stop) -+ echo -n "Stopping domain name service: named" -+ if [ -x /sbin/resolvconf ]; then -+ /sbin/resolvconf -d lo -+ fi -+ /usr/sbin/rndc stop >/dev/null 2>&1 -+ echo "." -+ ;; -+ -+ reload) -+ /usr/sbin/rndc reload -+ ;; -+ -+ restart|force-reload) -+ $0 stop -+ sleep 2 -+ $0 start -+ ;; -+ -+ *) -+ echo "Usage: /etc/init.d/bind {start|stop|reload|restart|force-reload}" >&2 -+ exit 1 -+ ;; -+esac -+ -+exit 0 diff --git a/poky/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh b/poky/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh deleted file mode 100644 index ef915c0ae..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/generate-rndc-key.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh - -if [ ! -s /etc/bind/rndc.key ]; then - echo -n "Generating /etc/bind/rndc.key:" - /usr/sbin/rndc-confgen -a -b 512 -r /dev/urandom - chown root:bind /etc/bind/rndc.key - chmod 0640 /etc/bind/rndc.key -fi diff --git a/poky/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch b/poky/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch deleted file mode 100644 index 11db95ede..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/init.d-add-support-for-read-only-rootfs.patch +++ /dev/null @@ -1,65 +0,0 @@ -Subject: init.d: add support for read-only rootfs - -Upstream-Status: Inappropriate [oe specific] - -Signed-off-by: Chen Qi ---- - init.d | 40 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 40 insertions(+) - -diff --git a/init.d b/init.d -index 0111ed4..24677c8 100644 ---- a/init.d -+++ b/init.d -@@ -6,8 +6,48 @@ PATH=/sbin:/bin:/usr/sbin:/usr/bin - # Don't modify this line, change or create /etc/default/bind9. - OPTIONS="" - -+test -f /etc/default/rcS && . /etc/default/rcS - test -f /etc/default/bind9 && . /etc/default/bind9 - -+# This function is here because it's possible that /var and / are on different partitions. -+is_on_read_only_partition () { -+ DIRECTORY=$1 -+ dir=`readlink -f $DIRECTORY` -+ while true; do -+ if [ ! 
-d "$dir" ]; then -+ echo "ERROR: $dir is not a directory" -+ exit 1 -+ else -+ for flag in `awk -v dir=$dir '{ if ($2 == dir) { print "FOUND"; split($4,FLAGS,",") } }; \ -+ END { for (f in FLAGS) print FLAGS[f] }' < /proc/mounts`; do -+ [ "$flag" = "FOUND" ] && partition="read-write" -+ [ "$flag" = "ro" ] && { partition="read-only"; break; } -+ done -+ if [ "$dir" = "/" -o -n "$partition" ]; then -+ break -+ else -+ dir=`dirname $dir` -+ fi -+ fi -+ done -+ [ "$partition" = "read-only" ] && echo "yes" || echo "no" -+} -+ -+bind_mount () { -+ olddir=$1 -+ newdir=$2 -+ mkdir -p $olddir -+ cp -a $newdir/* $olddir -+ mount --bind $olddir $newdir -+} -+ -+# Deal with read-only rootfs -+if [ "$ROOTFS_READ_ONLY" = "yes" ]; then -+ [ "$VERBOSE" != "no" ] && echo "WARN: start bind service in read-only rootfs" -+ [ `is_on_read_only_partition /etc/bind` = "yes" ] && bind_mount /var/volatile/bind/etc /etc/bind -+ [ `is_on_read_only_partition /var/named` = "yes" ] && bind_mount /var/volatile/bind/named /var/named -+fi -+ - test -x /usr/sbin/rndc || exit 0 - - case "$1" in --- -1.7.9.5 - diff --git a/poky/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch b/poky/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch deleted file mode 100644 index 146f3e35d..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/make-etc-initd-bind-stop-work.patch +++ /dev/null @@ -1,42 +0,0 @@ -bind: make "/etc/init.d/bind stop" work - -Upstream-Status: Inappropriate [configuration] - -Add some configurations, make rndc command be able to controls -the named daemon. - -Signed-off-by: Roy Li ---- - conf/named.conf | 5 +++++ - conf/rndc.conf | 5 +++++ - 2 files changed, 10 insertions(+), 0 deletions(-) - create mode 100644 conf/rndc.conf - -diff --git a/conf/named.conf b/conf/named.conf -index 95829cf..c8899e7 100644 ---- a/conf/named.conf -+++ b/conf/named.conf -@@ -47,3 +47,8 @@ zone "255.in-addr.arpa" { - // root-delegation-only exclude { "DE"; "MUSEUM"; }; - - include "/etc/bind/named.conf.local"; -+include "/etc/bind/rndc.key" ; -+controls { -+ inet 127.0.0.1 allow { localhost; } -+ keys { rndc-key; }; -+}; -diff --git a/conf/rndc.conf b/conf/rndc.conf -new file mode 100644 -index 0000000..a0b481d ---- /dev/null -+++ b/conf/rndc.conf -@@ -0,0 +1,5 @@ -+include "/etc/bind/rndc.key"; -+options { -+ default-server localhost; -+ default-key rndc-key; -+}; - --- -1.7.5.4 - diff --git a/poky/meta/recipes-connectivity/bind/bind/named.service b/poky/meta/recipes-connectivity/bind/bind/named.service deleted file mode 100644 index cda56ef01..000000000 --- a/poky/meta/recipes-connectivity/bind/bind/named.service +++ /dev/null @@ -1,22 +0,0 @@ -[Unit] -Description=Berkeley Internet Name Domain (DNS) -Wants=nss-lookup.target -Before=nss-lookup.target -After=network.target - -[Service] -Type=forking -EnvironmentFile=-/etc/default/bind9 -PIDFile=/run/named/named.pid - -ExecStartPre=@SBINDIR@/generate-rndc-key.sh -ExecStart=@SBINDIR@/named $OPTIONS - -ExecReload=@BASE_BINDIR@/sh -c '@SBINDIR@/rndc reload > /dev/null 2>&1 || @BASE_BINDIR@/kill -HUP $MAINPID' - -ExecStop=@BASE_BINDIR@/sh -c '@SBINDIR@/rndc stop > /dev/null 2>&1 || @BASE_BINDIR@/kill -TERM $MAINPID' - -PrivateTmp=true - -[Install] -WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/bind/bind_9.11.21.bb b/poky/meta/recipes-connectivity/bind/bind_9.11.21.bb deleted file mode 100644 index ee546a0a2..000000000 --- a/poky/meta/recipes-connectivity/bind/bind_9.11.21.bb +++ /dev/null @@ -1,140 +0,0 @@ -SUMMARY = 
"ISC Internet Domain Name Server" -HOMEPAGE = "http://www.isc.org/sw/bind/" -SECTION = "console/network" - -LICENSE = "ISC & BSD" -LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=bf39058a7f64b2a934ce14dc9ec1dd45" - -DEPENDS = "openssl libcap zlib" - -SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \ - file://conf.patch \ - file://named.service \ - file://bind9 \ - file://generate-rndc-key.sh \ - file://make-etc-initd-bind-stop-work.patch \ - file://init.d-add-support-for-read-only-rootfs.patch \ - file://bind-ensure-searching-for-json-headers-searches-sysr.patch \ - file://0001-configure.in-remove-useless-L-use_openssl-lib.patch \ - file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \ - file://0001-avoid-start-failure-with-bind-user.patch \ - " - -SRC_URI[sha256sum] = "668158b005b3de4328fa0dbbbb3f524b66f28f024c67538aa9412a9e69c9dfbc" - -UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/" -# stay at 9.11 until 9.16, from 9.16 follow the ESV versions divisible by 4 -UPSTREAM_CHECK_REGEX = "(?P9.(11|16|20|24|28)(\.\d+)+(-P\d+)*)/" - -# BIND >= 9.11.2 need dhcpd >= 4.4.0, -# don't report it here since dhcpd is already recent enough. -CVE_CHECK_WHITELIST += "CVE-2019-6470" - -inherit autotools update-rc.d systemd useradd pkgconfig multilib_script multilib_header - -MULTILIB_SCRIPTS = "${PN}:${bindir}/bind9-config ${PN}:${bindir}/isc-config.sh" - -# PACKAGECONFIGs readline and libedit should NOT be set at same time -PACKAGECONFIG ?= "readline" -PACKAGECONFIG[httpstats] = "--with-libxml2=${STAGING_DIR_HOST}${prefix},--without-libxml2,libxml2" -PACKAGECONFIG[readline] = "--with-readline=-lreadline,,readline" -PACKAGECONFIG[libedit] = "--with-readline=-ledit,,libedit" -PACKAGECONFIG[urandom] = "--with-randomdev=/dev/urandom,--with-randomdev=/dev/random,," -PACKAGECONFIG[python3] = "--with-python=yes --with-python-install-dir=${PYTHON_SITEPACKAGES_DIR} , --without-python, python3-ply-native," - -ENABLE_IPV6 = "--enable-ipv6=${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', 'yes', 'no', d)}" -EXTRA_OECONF = " ${ENABLE_IPV6} --with-libtool --enable-threads \ - --disable-devpoll --enable-epoll --with-gost=no \ - --with-gssapi=no --with-ecdsa=yes --with-eddsa=no \ - --with-lmdb=no \ - --sysconfdir=${sysconfdir}/bind \ - --with-openssl=${STAGING_DIR_HOST}${prefix} \ - " - -inherit ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native distutils3-base', '', d)} - -# dhcp needs .la so keep them -REMOVE_LIBTOOL_LA = "0" - -USERADD_PACKAGES = "${PN}" -USERADD_PARAM_${PN} = "--system --home ${localstatedir}/cache/bind --no-create-home \ - --user-group bind" - -INITSCRIPT_NAME = "bind" -INITSCRIPT_PARAMS = "defaults" - -SYSTEMD_SERVICE_${PN} = "named.service" - -do_install_prepend() { - # clean host path in isc-config.sh before the hardlink created - # by "make install": - # bind9-config -> isc-config.sh - sed -i -e "s,${STAGING_LIBDIR},${libdir}," ${B}/isc-config.sh -} - -do_install_append() { - - rmdir "${D}${localstatedir}/run" - rmdir --ignore-fail-on-non-empty "${D}${localstatedir}" - install -d -o bind "${D}${localstatedir}/cache/bind" - install -d "${D}${sysconfdir}/bind" - install -d "${D}${sysconfdir}/init.d" - install -m 644 ${S}/conf/* "${D}${sysconfdir}/bind/" - install -m 755 "${S}/init.d" "${D}${sysconfdir}/init.d/bind" - if ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'true', 'false', d)}; then - sed -i -e '1s,#!.*python3,#! 
/usr/bin/python3,' \ - ${D}${sbindir}/dnssec-coverage \ - ${D}${sbindir}/dnssec-checkds \ - ${D}${sbindir}/dnssec-keymgr - fi - - # Install systemd related files - install -d ${D}${sbindir} - install -m 755 ${WORKDIR}/generate-rndc-key.sh ${D}${sbindir} - install -d ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/named.service ${D}${systemd_unitdir}/system - sed -i -e 's,@BASE_BINDIR@,${base_bindir},g' \ - -e 's,@SBINDIR@,${sbindir},g' \ - ${D}${systemd_unitdir}/system/named.service - - install -d ${D}${sysconfdir}/default - install -m 0644 ${WORKDIR}/bind9 ${D}${sysconfdir}/default - - if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then - install -d ${D}${sysconfdir}/tmpfiles.d - echo "d /run/named 0755 bind bind - -" > ${D}${sysconfdir}/tmpfiles.d/bind.conf - fi - - oe_multilib_header isc/platform.h -} - -CONFFILES_${PN} = " \ - ${sysconfdir}/bind/named.conf \ - ${sysconfdir}/bind/named.conf.local \ - ${sysconfdir}/bind/named.conf.options \ - ${sysconfdir}/bind/db.0 \ - ${sysconfdir}/bind/db.127 \ - ${sysconfdir}/bind/db.empty \ - ${sysconfdir}/bind/db.local \ - ${sysconfdir}/bind/db.root \ - " - -ALTERNATIVE_${PN}-utils = "nslookup" -ALTERNATIVE_LINK_NAME[nslookup] = "${bindir}/nslookup" -ALTERNATIVE_PRIORITY = "100" - -PACKAGE_BEFORE_PN += "${PN}-utils" -FILES_${PN}-utils = "${bindir}/host ${bindir}/dig ${bindir}/mdig ${bindir}/nslookup ${bindir}/nsupdate" -FILES_${PN}-dev += "${bindir}/isc-config.h" -FILES_${PN} += "${sbindir}/generate-rndc-key.sh" - -PACKAGE_BEFORE_PN += "${PN}-libs" -FILES_${PN}-libs = "${libdir}/*.so*" -FILES_${PN}-staticdev += "${libdir}/*.la" - -PACKAGE_BEFORE_PN += "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-bind', '', d)}" -FILES_python3-bind = "${sbindir}/dnssec-coverage ${sbindir}/dnssec-checkds \ - ${sbindir}/dnssec-keymgr ${PYTHON_SITEPACKAGES_DIR}" - -RDEPENDS_${PN}-dev = "" -RDEPENDS_python3-bind = "python3-core python3-ply" diff --git a/poky/meta/recipes-connectivity/bind/bind_9.16.5.bb b/poky/meta/recipes-connectivity/bind/bind_9.16.5.bb new file mode 100644 index 000000000..103192490 --- /dev/null +++ b/poky/meta/recipes-connectivity/bind/bind_9.16.5.bb @@ -0,0 +1,123 @@ +SUMMARY = "ISC Internet Domain Name Server" +HOMEPAGE = "http://www.isc.org/sw/bind/" +SECTION = "console/network" + +LICENSE = "MPL-2.0" +LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=188b8d0644bd6835df43b84e3f180be1" + +DEPENDS = "openssl libcap zlib libuv" + +SRC_URI = "https://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.xz \ + file://conf.patch \ + file://named.service \ + file://bind9 \ + file://generate-rndc-key.sh \ + file://make-etc-initd-bind-stop-work.patch \ + file://init.d-add-support-for-read-only-rootfs.patch \ + file://bind-ensure-searching-for-json-headers-searches-sysr.patch \ + file://0001-named-lwresd-V-and-start-log-hide-build-options.patch \ + file://0001-avoid-start-failure-with-bind-user.patch \ + " + +SRC_URI[sha256sum] = "6378b3e51fef11a8be4794dc48e8111ba92d211c0dfd129a0c296ed06a3dc075" + +UPSTREAM_CHECK_URI = "https://ftp.isc.org/isc/bind9/" +# stay at 9.16 follow the ESV versions divisible by 4 +UPSTREAM_CHECK_REGEX = "(?P9.(16|20|24|28)(\.\d+)+(-P\d+)*)/" + +inherit autotools update-rc.d systemd useradd pkgconfig multilib_header update-alternatives + +# PACKAGECONFIGs readline and libedit should NOT be set at same time +PACKAGECONFIG ?= "readline" +PACKAGECONFIG[httpstats] = "--with-libxml2=${STAGING_DIR_HOST}${prefix},--without-libxml2,libxml2" +PACKAGECONFIG[readline] = 
"--with-readline=-lreadline,,readline" +PACKAGECONFIG[libedit] = "--with-readline=-ledit,,libedit" +PACKAGECONFIG[python3] = "--with-python=yes --with-python-install-dir=${PYTHON_SITEPACKAGES_DIR} , --without-python, python3-ply-native," + +EXTRA_OECONF = " --with-libtool --disable-devpoll --enable-epoll \ + --with-gssapi=no --with-lmdb=no --with-zlib \ + --sysconfdir=${sysconfdir}/bind \ + --with-openssl=${STAGING_DIR_HOST}${prefix} \ + " +LDFLAGS_append = " -lz" + +inherit ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native distutils3-base', '', d)} + +# dhcp needs .la so keep them +REMOVE_LIBTOOL_LA = "0" + +USERADD_PACKAGES = "${PN}" +USERADD_PARAM_${PN} = "--system --home ${localstatedir}/cache/bind --no-create-home \ + --user-group bind" + +INITSCRIPT_NAME = "bind" +INITSCRIPT_PARAMS = "defaults" + +SYSTEMD_SERVICE_${PN} = "named.service" + +do_install_append() { + + rmdir "${D}${localstatedir}/run" + rmdir --ignore-fail-on-non-empty "${D}${localstatedir}" + install -d -o bind "${D}${localstatedir}/cache/bind" + install -d "${D}${sysconfdir}/bind" + install -d "${D}${sysconfdir}/init.d" + install -m 644 ${S}/conf/* "${D}${sysconfdir}/bind/" + install -m 755 "${S}/init.d" "${D}${sysconfdir}/init.d/bind" + if ${@bb.utils.contains('PACKAGECONFIG', 'python3', 'true', 'false', d)}; then + sed -i -e '1s,#!.*python3,#! /usr/bin/python3,' \ + ${D}${sbindir}/dnssec-coverage \ + ${D}${sbindir}/dnssec-checkds \ + ${D}${sbindir}/dnssec-keymgr + fi + + # Install systemd related files + install -d ${D}${sbindir} + install -m 755 ${WORKDIR}/generate-rndc-key.sh ${D}${sbindir} + install -d ${D}${systemd_unitdir}/system + install -m 0644 ${WORKDIR}/named.service ${D}${systemd_unitdir}/system + sed -i -e 's,@BASE_BINDIR@,${base_bindir},g' \ + -e 's,@SBINDIR@,${sbindir},g' \ + ${D}${systemd_unitdir}/system/named.service + + install -d ${D}${sysconfdir}/default + install -m 0644 ${WORKDIR}/bind9 ${D}${sysconfdir}/default + + if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then + install -d ${D}${sysconfdir}/tmpfiles.d + echo "d /run/named 0755 bind bind - -" > ${D}${sysconfdir}/tmpfiles.d/bind.conf + fi + + oe_multilib_header isc/platform.h +} + +CONFFILES_${PN} = " \ + ${sysconfdir}/bind/named.conf \ + ${sysconfdir}/bind/named.conf.local \ + ${sysconfdir}/bind/named.conf.options \ + ${sysconfdir}/bind/db.0 \ + ${sysconfdir}/bind/db.127 \ + ${sysconfdir}/bind/db.empty \ + ${sysconfdir}/bind/db.local \ + ${sysconfdir}/bind/db.root \ + " + +ALTERNATIVE_${PN}-utils = "nslookup" +ALTERNATIVE_LINK_NAME[nslookup] = "${bindir}/nslookup" +ALTERNATIVE_PRIORITY = "100" + +PACKAGE_BEFORE_PN += "${PN}-utils" +FILES_${PN}-utils = "${bindir}/host ${bindir}/dig ${bindir}/mdig ${bindir}/nslookup ${bindir}/nsupdate" +FILES_${PN}-dev += "${bindir}/isc-config.h" +FILES_${PN} += "${sbindir}/generate-rndc-key.sh" + +PACKAGE_BEFORE_PN += "${PN}-libs" +FILES_${PN}-libs = "${libdir}/*.so* ${libdir}/named/*.so*" +FILES_${PN}-staticdev += "${libdir}/*.la" + +PACKAGE_BEFORE_PN += "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-bind', '', d)}" +FILES_python3-bind = "${sbindir}/dnssec-coverage ${sbindir}/dnssec-checkds \ + ${sbindir}/dnssec-keymgr ${PYTHON_SITEPACKAGES_DIR}" + +RDEPENDS_${PN}-dev = "" +RDEPENDS_python3-bind = "python3-core python3-ply" diff --git a/poky/meta/recipes-connectivity/bluez5/bluez5.inc b/poky/meta/recipes-connectivity/bluez5/bluez5.inc index eee7a53cd..4c1156c67 100644 --- a/poky/meta/recipes-connectivity/bluez5/bluez5.inc +++ 
b/poky/meta/recipes-connectivity/bluez5/bluez5.inc @@ -42,8 +42,8 @@ PACKAGECONFIG[sixaxis] = "--enable-sixaxis,--disable-sixaxis" PACKAGECONFIG[tools] = "--enable-tools,--disable-tools" PACKAGECONFIG[threads] = "--enable-threads,--disable-threads" PACKAGECONFIG[deprecated] = "--enable-deprecated,--disable-deprecated" -PACKAGECONFIG[mesh] = "--enable-mesh,--disable-mesh, json-c ell" -PACKAGECONFIG[btpclient] = "--enable-btpclient,--disable-btpclient, ell" +PACKAGECONFIG[mesh] = "--enable-mesh --enable-external-ell,--disable-mesh, json-c ell" +PACKAGECONFIG[btpclient] = "--enable-btpclient --enable-external-ell,--disable-btpclient, ell" PACKAGECONFIG[udev] = "--enable-udev,--disable-udev,udev" SRC_URI = "${KERNELORG_MIRROR}/linux/bluetooth/bluez-${PV}.tar.xz \ diff --git a/poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb b/poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb index 778bf5019..af986c4ea 100644 --- a/poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb +++ b/poky/meta/recipes-connectivity/connman/connman-gnome_0.7.bb @@ -13,7 +13,7 @@ SRCREV = "cf3c325b23dae843c5499a113591cfbc98acb143" SRC_URI = "git://github.com/connectivity/connman-gnome.git \ file://0001-Removed-icon-from-connman-gnome-about-applet.patch \ file://null_check_for_ipv4_config.patch \ - file://images/* \ + file://images/ \ file://connman-gnome-fix-dbus-interface-name.patch \ file://0001-Port-to-Gtk3.patch \ " diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp.inc b/poky/meta/recipes-connectivity/dhcp/dhcp.inc deleted file mode 100644 index d46130d49..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp.inc +++ /dev/null @@ -1,149 +0,0 @@ -SECTION = "console/network" -SUMMARY = "Internet Software Consortium DHCP package" -DESCRIPTION = "DHCP (Dynamic Host Configuration Protocol) is a protocol \ -which allows individual devices on an IP network to get their own \ -network configuration information from a server. DHCP helps make it \ -easier to administer devices." 
- -HOMEPAGE = "http://www.isc.org/" - -LICENSE = "ISC" -LIC_FILES_CHKSUM = "file://LICENSE;beginline=4;md5=004a4db50a1e20972e924a8618747c01" - -DEPENDS = "openssl bind" - -SRC_URI = "http://ftp.isc.org/isc/dhcp/${PV}/dhcp-${PV}.tar.gz \ - file://init-relay file://default-relay \ - file://init-server file://default-server \ - file://dhclient.conf file://dhcpd.conf \ - file://dhclient-systemd-wrapper \ - file://dhclient.service \ - file://dhcpd.service file://dhcrelay.service \ - file://dhcpd6.service \ - " -UPSTREAM_CHECK_URI = "http://ftp.isc.org/isc/dhcp/" -UPSTREAM_CHECK_REGEX = "(?P\d+\.\d+\.(\d+?))/" - -inherit autotools-brokensep systemd useradd update-rc.d - -USERADD_PACKAGES = "${PN}-server" -USERADD_PARAM_${PN}-server = "--system --no-create-home --home-dir /var/run/${BPN} --shell /bin/false --user-group ${BPN}" - -SYSTEMD_PACKAGES = "${PN}-server ${PN}-relay ${PN}-client" -SYSTEMD_SERVICE_${PN}-server = "dhcpd.service dhcpd6.service" -SYSTEMD_AUTO_ENABLE_${PN}-server = "disable" - -SYSTEMD_SERVICE_${PN}-relay = "dhcrelay.service" -SYSTEMD_AUTO_ENABLE_${PN}-relay = "disable" - -SYSTEMD_SERVICE_${PN}-client = "dhclient.service" -SYSTEMD_AUTO_ENABLE_${PN}-client = "disable" - -INITSCRIPT_PACKAGES = "dhcp-server" -INITSCRIPT_NAME_dhcp-server = "dhcp-server" -INITSCRIPT_PARAMS_dhcp-server = "defaults" - -CFLAGS += "-D_GNU_SOURCE" -EXTRA_OECONF = "--with-srv-lease-file=${localstatedir}/lib/dhcp/dhcpd.leases \ - --with-srv6-lease-file=${localstatedir}/lib/dhcp/dhcpd6.leases \ - --with-cli-lease-file=${localstatedir}/lib/dhcp/dhclient.leases \ - --with-cli6-lease-file=${localstatedir}/lib/dhcp/dhclient6.leases \ - --enable-paranoia --disable-static \ - --with-randomdev=/dev/random \ - --with-libbind=${STAGING_DIR_HOST} \ - --enable-libtool \ - " - -#Enable shared libs per dhcp README -do_configure_prepend () { - cp configure.ac+lt configure.ac -} - -do_install_append () { - install -d ${D}${sysconfdir}/init.d - install -d ${D}${sysconfdir}/default - install -d ${D}${sysconfdir}/dhcp - install -m 0755 ${WORKDIR}/init-relay ${D}${sysconfdir}/init.d/dhcp-relay - install -m 0644 ${WORKDIR}/default-relay ${D}${sysconfdir}/default/dhcp-relay - install -m 0755 ${WORKDIR}/init-server ${D}${sysconfdir}/init.d/dhcp-server - install -m 0644 ${WORKDIR}/default-server ${D}${sysconfdir}/default/dhcp-server - - rm -f ${D}${sysconfdir}/dhclient.conf* - rm -f ${D}${sysconfdir}/dhcpd.conf* - install -m 0644 ${WORKDIR}/dhclient.conf ${D}${sysconfdir}/dhcp/dhclient.conf - install -m 0644 ${WORKDIR}/dhcpd.conf ${D}${sysconfdir}/dhcp/dhcpd.conf - - install -d ${D}${base_sbindir}/ - if [ "${sbindir}" != "${base_sbindir}" ]; then - mv ${D}${sbindir}/dhclient ${D}${base_sbindir}/ - fi - install -m 0755 ${S}/client/scripts/linux ${D}${base_sbindir}/dhclient-script - - # Install systemd unit files - install -d ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/dhcpd.service ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/dhcpd6.service ${D}${systemd_unitdir}/system - install -m 0644 ${WORKDIR}/dhcrelay.service ${D}${systemd_unitdir}/system - sed -i -e 's,@SBINDIR@,${sbindir},g' ${D}${systemd_unitdir}/system/dhcpd*.service ${D}${systemd_unitdir}/system/dhcrelay.service - sed -i -e 's,@SYSCONFDIR@,${sysconfdir},g' ${D}${systemd_unitdir}/system/dhcpd*.service - sed -i -e 's,@base_bindir@,${base_bindir},g' ${D}${systemd_unitdir}/system/dhcpd*.service - sed -i -e 's,@localstatedir@,${localstatedir},g' ${D}${systemd_unitdir}/system/dhcpd*.service - sed -i -e 's,@SYSCONFDIR@,${sysconfdir},g' 
${D}${systemd_unitdir}/system/dhcrelay.service - - install -d ${D}${base_sbindir} - install -m 0755 ${WORKDIR}/dhclient-systemd-wrapper ${D}${base_sbindir}/dhclient-systemd-wrapper - install -m 0644 ${WORKDIR}/dhclient.service ${D}${systemd_unitdir}/system - sed -i -e 's,@SYSCONFDIR@,${sysconfdir},g' ${D}${systemd_unitdir}/system/dhclient.service - sed -i -e 's,@BASE_SBINDIR@,${base_sbindir},g' ${D}${systemd_unitdir}/system/dhclient.service -} - -PACKAGES += "dhcp-libs dhcp-server dhcp-server-config dhcp-client dhcp-relay dhcp-omshell" - -PACKAGES_remove = "${PN}" -RDEPENDS_${PN}-client += "${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'iproute2', '', d)}" -RDEPENDS_${PN}-dev = "" -RDEPENDS_${PN}-staticdev = "" -FILES_${PN}-libs = "${libdir}/libdhcpctl.so.0* ${libdir}/libomapi.so.0* ${libdir}/libdhcp.so.0*" - -FILES_${PN}-server = "${sbindir}/dhcpd ${sysconfdir}/init.d/dhcp-server" -RRECOMMENDS_${PN}-server = "dhcp-server-config" - -FILES_${PN}-server-config = "${sysconfdir}/default/dhcp-server ${sysconfdir}/dhcp/dhcpd.conf" - -FILES_${PN}-relay = "${sbindir}/dhcrelay ${sysconfdir}/init.d/dhcp-relay ${sysconfdir}/default/dhcp-relay" - -FILES_${PN}-client = "${base_sbindir}/dhclient \ - ${base_sbindir}/dhclient-script \ - ${sysconfdir}/dhcp/dhclient.conf \ - ${base_sbindir}/dhclient-systemd-wrapper \ - " - -FILES_${PN}-omshell = "${bindir}/omshell" - -pkg_postinst_dhcp-server() { - mkdir -p $D/${localstatedir}/lib/dhcp - touch $D/${localstatedir}/lib/dhcp/dhcpd.leases - touch $D/${localstatedir}/lib/dhcp/dhcpd6.leases -} - -pkg_postinst_dhcp-client() { - mkdir -p $D/${localstatedir}/lib/dhcp -} - -pkg_postrm_dhcp-server() { - rm -f $D/${localstatedir}/lib/dhcp/dhcpd.leases - rm -f $D/${localstatedir}/lib/dhcp/dhcpd6.leases - - if ! rmdir $D/${localstatedir}/lib/dhcp 2>/dev/null; then - echo "Not removing ${localstatedir}/lib/dhcp as it is non-empty." - fi -} - -pkg_postrm_dhcp-client() { - rm -f $D/${localstatedir}/lib/dhcp/dhclient.leases - rm -f $D/${localstatedir}/lib/dhcp/dhclient6.leases - - if ! rmdir $D/${localstatedir}/lib/dhcp 2>/dev/null; then - echo "Not removing ${localstatedir}/lib/dhcp as it is non-empty." - fi -} diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch deleted file mode 100644 index d1b57f0bb..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 7cc29144535a622fc671dc86eb1da65b0473a7c4 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Tue, 15 Aug 2017 16:14:22 +0800 -Subject: [PATCH 01/11] define macro _PATH_DHCPD_CONF and _PATH_DHCLIENT_CONF - -Upstream-Status: Inappropriate [OE specific] - -Rebase to 4.3.6 -Signed-off-by: Hongxu Jia ---- - includes/site.h | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -Index: dhcp-4.4.1/includes/site.h -=================================================================== ---- dhcp-4.4.1.orig/includes/site.h -+++ dhcp-4.4.1/includes/site.h -@@ -148,7 +148,8 @@ - /* Define this if you want the dhcpd.conf file to go somewhere other than - the default location. By default, it goes in /etc/dhcpd.conf. */ - --/* #define _PATH_DHCPD_CONF "/etc/dhcpd.conf" */ -+#define _PATH_DHCPD_CONF "/etc/dhcp/dhcpd.conf" -+#define _PATH_DHCLIENT_CONF "/etc/dhcp/dhclient.conf" - - /* Network API definitions. 
You do not need to choose one of these - if - you don't choose, one will be chosen for you in your system's config diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0001-workaround-busybox-limitation-in-linux-dhclient-script.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0001-workaround-busybox-limitation-in-linux-dhclient-script.patch deleted file mode 100644 index 2359381b9..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0001-workaround-busybox-limitation-in-linux-dhclient-script.patch +++ /dev/null @@ -1,65 +0,0 @@ -From eec0503cfc36f63d777f5cb3f2719cecedcb8468 Mon Sep 17 00:00:00 2001 -From: Haris Okanovic -Date: Mon, 7 Jan 2019 13:22:09 -0600 -Subject: [PATCH] Workaround busybox limitation in Linux dhclient-script - -Busybox is a lightweight implementation of coreutils commonly used on -space-constrained embedded Linux distributions. It's implementation of -chown and chmod doesn't provide a "--reference" option added to -client/scripts/linux as of commit 9261cb14. This change works around -that limitation by using stat to read ownership and permissions flags -and simple chown/chmod calls supported in both coreutils and busybox. - - modified: client/scripts/linux - -Signed-off-by: Haris Okanovic -Upstream-Status: Pending [ISC-Bugs #48771] ---- - client/scripts/linux | 17 +++++++++++++---- - 1 file changed, 13 insertions(+), 4 deletions(-) - -diff --git a/client/scripts/linux b/client/scripts/linux -index 0c429697..2435a44b 100755 ---- a/client/scripts/linux -+++ b/client/scripts/linux -@@ -32,6 +32,17 @@ - # if your system holds ip tool in a non-standard location. - ip=/sbin/ip - -+chown_chmod_by_reference() { -+ local reference_file="$1" -+ local target_file="$2" -+ -+ local owner=$(stat -c "%u:%g" "$reference_file") -+ local perm=$(stat -c "%a" "$reference_file") -+ -+ chown "$owner" "$target_file" -+ chmod "$perm" "$target_file" -+} -+ - # update /etc/resolv.conf based on received values - # This updated version mostly follows Debian script by Andrew Pollock et al. 
- make_resolv_conf() { -@@ -74,8 +85,7 @@ make_resolv_conf() { - fi - - if [ -f /etc/resolv.conf ]; then -- chown --reference=/etc/resolv.conf $new_resolv_conf -- chmod --reference=/etc/resolv.conf $new_resolv_conf -+ chown_chmod_by_reference /etc/resolv.conf $new_resolv_conf - fi - mv -f $new_resolv_conf /etc/resolv.conf - # DHCPv6 -@@ -101,8 +111,7 @@ make_resolv_conf() { - fi - - if [ -f /etc/resolv.conf ]; then -- chown --reference=/etc/resolv.conf $new_resolv_conf -- chmod --reference=/etc/resolv.conf $new_resolv_conf -+ chown_chmod_by_reference /etc/resolv.conf $new_resolv_conf - fi - mv -f $new_resolv_conf /etc/resolv.conf - fi --- -2.20.0 - diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0002-dhclient-dbus.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0002-dhclient-dbus.patch deleted file mode 100644 index 101c33f67..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0002-dhclient-dbus.patch +++ /dev/null @@ -1,117 +0,0 @@ -From be7540d31c356e80ee02e90e8bf162b7ac6e5ba5 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Tue, 15 Aug 2017 14:56:56 +0800 -Subject: [PATCH 02/11] dhclient dbus - -Upstream-Status: Inappropriate [distribution] - -Rebase to 4.3.6 -Signed-off-by: Hongxu Jia ---- - client/scripts/bsdos | 5 +++++ - client/scripts/freebsd | 5 +++++ - client/scripts/linux | 5 +++++ - client/scripts/netbsd | 5 +++++ - client/scripts/openbsd | 5 +++++ - client/scripts/solaris | 5 +++++ - 6 files changed, 30 insertions(+) - -diff --git a/client/scripts/bsdos b/client/scripts/bsdos -index d69d0d8..095b143 100755 ---- a/client/scripts/bsdos -+++ b/client/scripts/bsdos -@@ -45,6 +45,11 @@ exit_with_hooks() { - . /etc/dhclient-exit-hooks - fi - # probably should do something with exit status of the local script -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - -diff --git a/client/scripts/freebsd b/client/scripts/freebsd -index 8f3e2a2..ad7fb44 100755 ---- a/client/scripts/freebsd -+++ b/client/scripts/freebsd -@@ -89,6 +89,11 @@ exit_with_hooks() { - . /etc/dhclient-exit-hooks - fi - # probably should do something with exit status of the local script -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - -diff --git a/client/scripts/linux b/client/scripts/linux -index 5fb1612..3d447b6 100755 ---- a/client/scripts/linux -+++ b/client/scripts/linux -@@ -174,6 +174,11 @@ exit_with_hooks() { - exit_status=$? - fi - -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - -diff --git a/client/scripts/netbsd b/client/scripts/netbsd -index 07383b7..aaba8e8 100755 ---- a/client/scripts/netbsd -+++ b/client/scripts/netbsd -@@ -45,6 +45,11 @@ exit_with_hooks() { - . 
/etc/dhclient-exit-hooks - fi - # probably should do something with exit status of the local script -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - -diff --git a/client/scripts/openbsd b/client/scripts/openbsd -index e7f4746..56b980c 100644 ---- a/client/scripts/openbsd -+++ b/client/scripts/openbsd -@@ -45,6 +45,11 @@ exit_with_hooks() { - . /etc/dhclient-exit-hooks - fi - # probably should do something with exit status of the local script -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - -diff --git a/client/scripts/solaris b/client/scripts/solaris -index af553b9..4a2aa69 100755 ---- a/client/scripts/solaris -+++ b/client/scripts/solaris -@@ -26,6 +26,11 @@ exit_with_hooks() { - . /etc/dhclient-exit-hooks - fi - # probably should do something with exit status of the local script -+ if [ x$dhc_dbus != x -a $exit_status -eq 0 ]; then -+ dbus-send --system --dest=com.redhat.dhcp \ -+ --type=method_call /com/redhat/dhcp/$interface com.redhat.dhcp.set \ -+ 'string:'"`env | grep -Ev '^(PATH|SHLVL|_|PWD|dhc_dbus)\='`" -+ fi - exit $exit_status - } - --- -1.8.3.1 - diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0003-link-with-lcrypto.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0003-link-with-lcrypto.patch deleted file mode 100644 index 5b35933a5..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0003-link-with-lcrypto.patch +++ /dev/null @@ -1,35 +0,0 @@ -From d80bd792323dbd56269309f85b4506eb6b1b60e9 Mon Sep 17 00:00:00 2001 -From: Andrei Gherzan -Date: Tue, 15 Aug 2017 15:05:47 +0800 -Subject: [PATCH 03/11] link with lcrypto - -From 4.2.0 final release, -lcrypto check was removed and we compile -static libraries -from bind that are linked to libcrypto. This is why i added a patch in -order to add --lcrypto to LIBS. - -Upstream-Status: Pending -Signed-off-by: Andrei Gherzan - -Rebase to 4.3.6 -Signed-off-by: Hongxu Jia ---- - configure.ac | 4 ++++ - 1 file changed, 4 insertions(+) - -Index: dhcp-4.4.1/configure.ac -=================================================================== ---- dhcp-4.4.1.orig/configure.ac -+++ dhcp-4.4.1/configure.ac -@@ -612,6 +612,10 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[]], - # Look for optional headers. 
- AC_CHECK_HEADERS(sys/socket.h net/if_dl.h net/if6.h regex.h) - -+# find an MD5 library -+AC_SEARCH_LIBS(MD5_Init, [crypto]) -+AC_SEARCH_LIBS(MD5Init, [crypto]) -+ - # Solaris needs some libraries for functions - AC_SEARCH_LIBS(socket, [socket]) - AC_SEARCH_LIBS(inet_ntoa, [nsl]) diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0004-Fix-out-of-tree-builds.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0004-Fix-out-of-tree-builds.patch deleted file mode 100644 index 7b57730ff..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0004-Fix-out-of-tree-builds.patch +++ /dev/null @@ -1,95 +0,0 @@ -From cccec0344d68dac4100b6f260ee24e7c2da9dfda Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Tue, 15 Aug 2017 15:08:22 +0800 -Subject: [PATCH 04/11] Fix out of tree builds - -Upstream-Status: Pending - -RP 2013/03/21 - -Rebase to 4.3.6 - -Signed-off-by: Hongxu Jia ---- - client/Makefile.am | 4 ++-- - common/Makefile.am | 3 ++- - dhcpctl/Makefile.am | 2 ++ - omapip/Makefile.am | 1 + - relay/Makefile.am | 2 +- - server/Makefile.am | 2 +- - 6 files changed, 9 insertions(+), 5 deletions(-) - -Index: dhcp-4.4.1/common/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/common/Makefile.am -+++ dhcp-4.4.1/common/Makefile.am -@@ -1,4 +1,5 @@ --AM_CPPFLAGS = -I$(top_srcdir) -DLOCALSTATEDIR='"@localstatedir@"' -+AM_CPPFLAGS = -I$(top_srcdir)/includes -I$(top_srcdir) -DLOCALSTATEDIR='"@localstatedir@"' -+ - AM_CFLAGS = $(LDAP_CFLAGS) - - lib_LIBRARIES = libdhcp.a -Index: dhcp-4.4.1/dhcpctl/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/dhcpctl/Makefile.am -+++ dhcp-4.4.1/dhcpctl/Makefile.am -@@ -3,6 +3,8 @@ BINDLIBDNSDIR=@BINDLIBDNSDIR@ - BINDLIBISCCFGDIR=@BINDLIBISCCFGDIR@ - BINDLIBISCDIR=@BINDLIBISCDIR@ - -+AM_CPPFLAGS = -I$(top_srcdir)/includes -I$(top_srcdir) -+ - bin_PROGRAMS = omshell - lib_LIBRARIES = libdhcpctl.a - noinst_PROGRAMS = cltest -Index: dhcp-4.4.1/server/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/server/Makefile.am -+++ dhcp-4.4.1/server/Makefile.am -@@ -4,7 +4,7 @@ - # production code. Sadly, we are not there yet. - SUBDIRS = . tests - --AM_CPPFLAGS = -I.. -DLOCALSTATEDIR='"@localstatedir@"' -+AM_CPPFLAGS = -I$(top_srcdir) -DLOCALSTATEDIR='"@localstatedir@"' -I$(top_srcdir)/includes - - dist_sysconf_DATA = dhcpd.conf.example - sbin_PROGRAMS = dhcpd -Index: dhcp-4.4.1/client/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/client/Makefile.am -+++ dhcp-4.4.1/client/Makefile.am -@@ -5,7 +5,7 @@ - SUBDIRS = . 
tests - - AM_CPPFLAGS = -DCLIENT_PATH='"PATH=$(sbindir):/sbin:/bin:/usr/sbin:/usr/bin"' --AM_CPPFLAGS += -DLOCALSTATEDIR='"$(localstatedir)"' -+AM_CPPFLAGS += -DLOCALSTATEDIR='"$(localstatedir)"' -I$(top_srcdir)/includes - - dist_sysconf_DATA = dhclient.conf.example - sbin_PROGRAMS = dhclient -Index: dhcp-4.4.1/omapip/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/omapip/Makefile.am -+++ dhcp-4.4.1/omapip/Makefile.am -@@ -2,6 +2,7 @@ BINDLIBIRSDIR=@BINDLIBIRSDIR@ - BINDLIBDNSDIR=@BINDLIBDNSDIR@ - BINDLIBISCCFGDIR=@BINDLIBISCCFGDIR@ - BINDLIBISCDIR=@BINDLIBISCDIR@ -+AM_CPPFLAGS = -I$(top_srcdir)/includes - - lib_LIBRARIES = libomapi.a - noinst_PROGRAMS = svtest -Index: dhcp-4.4.1/relay/Makefile.am -=================================================================== ---- dhcp-4.4.1.orig/relay/Makefile.am -+++ dhcp-4.4.1/relay/Makefile.am -@@ -1,6 +1,6 @@ - SUBDIRS = . tests - --AM_CPPFLAGS = -DLOCALSTATEDIR='"@localstatedir@"' -+AM_CPPFLAGS = -DLOCALSTATEDIR='"@localstatedir@"' -I$(top_srcdir)/includes - - sbin_PROGRAMS = dhcrelay - dhcrelay_SOURCES = dhcrelay.c diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0005-dhcp-client-fix-invoke-dhclient-script-failed-on-Rea.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0005-dhcp-client-fix-invoke-dhclient-script-failed-on-Rea.patch deleted file mode 100644 index dd56381b1..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0005-dhcp-client-fix-invoke-dhclient-script-failed-on-Rea.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 2e8ff0e4f6d39e346ea86b8c514ab4ccc78fa359 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Tue, 15 Aug 2017 15:24:14 +0800 -Subject: [PATCH 05/11] dhcp-client: fix invoke dhclient-script failed on - Read-only file system - -In read-only file system, '/etc' is on the readonly partition, -and '/etc/resolv.conf' is symlinked to a separate writable -partition. - -In this situation, we create temp files 'resolv.conf.dhclient-new' -in /tmp dir. - -Upstream-Status: Pending - -Signed-off-by: Hongxu Jia ---- - client/scripts/linux | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/client/scripts/linux b/client/scripts/linux -index 3d447b6..3122a75 100755 ---- a/client/scripts/linux -+++ b/client/scripts/linux -@@ -40,7 +40,7 @@ make_resolv_conf() { - # DHCPv4 - if [ -n "$new_domain_search" ] || [ -n "$new_domain_name" ] || - [ -n "$new_domain_name_servers" ]; then -- new_resolv_conf=/etc/resolv.conf.dhclient-new -+ new_resolv_conf=/tmp/resolv.conf.dhclient-new - rm -f $new_resolv_conf - - if [ -n "$new_domain_name" ]; then --- -1.8.3.1 - diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0007-Add-configure-argument-to-make-the-libxml2-dependenc.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0007-Add-configure-argument-to-make-the-libxml2-dependenc.patch deleted file mode 100644 index feb0754ff..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0007-Add-configure-argument-to-make-the-libxml2-dependenc.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 7107511fd209f08f9a96f8938041ae48f3295895 Mon Sep 17 00:00:00 2001 -From: Christopher Larson -Date: Tue, 15 Aug 2017 16:17:49 +0800 -Subject: [PATCH 07/11] Add configure argument to make the libxml2 dependency - explicit and determinisitic. 
- -Upstream-Status: Pending - -Signed-off-by: Christopher Larson - -Rebase to 4.3.6 - -Signed-off-by: Hongxu Jia ---- - configure.ac | 11 +++++++++++ - 1 file changed, 11 insertions(+) - -Index: dhcp-4.4.1/configure.ac -=================================================================== ---- dhcp-4.4.1.orig/configure.ac -+++ dhcp-4.4.1/configure.ac -@@ -642,6 +642,17 @@ if test "$have_nanosleep" = "rt"; then - LIBS="-lrt $LIBS" - fi - -+AC_ARG_WITH(libxml2, -+ AS_HELP_STRING([--with-libxml2], [link against libxml2. this is needed if bind was built with xml2 support enabled]), -+ with_libxml2="$withval", with_libxml2="no") -+ -+if test x$with_libxml2 != xno; then -+ AC_SEARCH_LIBS(xmlTextWriterStartElement, [xml2], -+ [if test x$with_libxml2 != xauto; then -+ AC_MSG_FAILURE([*** Cannot find xmlTextWriterStartElement with -lxml2 and libxml2 was requested]) -+ fi]) -+fi -+ - # check for /dev/random (declares HAVE_DEV_RANDOM) - AC_MSG_CHECKING(for random device) - AC_ARG_WITH(randomdev, -Index: dhcp-4.4.1/configure.ac+lt -=================================================================== ---- dhcp-4.4.1.orig/configure.ac+lt -+++ dhcp-4.4.1/configure.ac+lt -@@ -909,6 +909,18 @@ elif test "$want_libtool" = "yes" -a "$u - fi - AM_CONDITIONAL(INSTALL_BIND, test "$want_install_bind" = "yes") - -+AC_ARG_WITH(libxml2, -+ AS_HELP_STRING([--with-libxml2], [link against libxml2. this is needed if bind was built with xml2 support enabled]), -+ with_libxml2="$withval", with_libxml2="no") -+ -+if test x$with_libxml2 != xno; then -+ AC_SEARCH_LIBS(xmlTextWriterStartElement, [xml2],, -+ [if test x$with_libxml2 != xauto; then -+ AC_MSG_FAILURE([*** Cannot find xmlTextWriterStartElement with -lxml2 and libxml2 was requested]) -+ fi]) -+fi -+ -+ - # OpenLDAP support. - AC_ARG_WITH(ldap, - AS_HELP_STRING([--with-ldap],[enable OpenLDAP support in dhcpd (default is no)]), diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0009-remove-dhclient-script-bash-dependency.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0009-remove-dhclient-script-bash-dependency.patch deleted file mode 100644 index 912b6d631..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0009-remove-dhclient-script-bash-dependency.patch +++ /dev/null @@ -1,28 +0,0 @@ -From f3f8b7726e50e24ef3edf5fa5a17e31d39118d7e Mon Sep 17 00:00:00 2001 -From: Andre McCurdy -Date: Tue, 15 Aug 2017 15:49:31 +0800 -Subject: [PATCH 09/11] remove dhclient-script bash dependency - -Upstream-Status: Inappropriate [OE specific] - -Signed-off-by: Andre McCurdy - -Rebase to 4.3.6 -Signed-off-by: Hongxu Jia ---- - client/scripts/linux | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/client/scripts/linux b/client/scripts/linux -index 3122a75..1712d7d 100755 ---- a/client/scripts/linux -+++ b/client/scripts/linux -@@ -1,4 +1,4 @@ --#!/bin/bash -+#!/bin/sh - # dhclient-script for Linux. Dan Halbert, March, 1997. - # Updated for Linux 2.[12] by Brian J. Murrell, January 1999. - # No guarantees about this. 
I'm a novice at the details of Linux --- -1.8.3.1 - diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0012-dhcp-correct-the-intention-for-xml2-lib-search.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0012-dhcp-correct-the-intention-for-xml2-lib-search.patch deleted file mode 100644 index 39ba65fbc..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0012-dhcp-correct-the-intention-for-xml2-lib-search.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 501543b3ef715488a142e3d301ff2733aa33eec7 Mon Sep 17 00:00:00 2001 -From: Awais Belal -Date: Wed, 25 Oct 2017 21:00:05 +0500 -Subject: [PATCH] dhcp: correct the intention for xml2 lib search - -A missing case breaks the build when libxml2 is -required and found appropriately. The third argument -to the function AC_SEARCH_LIB is action-if-found which -was mistakenly been used for the case where the library -is not found and hence breaks the configure phase -where it shoud actually pass. -We now pass on silently when action-if-found is -executed. - -Upstream-Status: Pending - -Signed-off-by: Awais Belal ---- - configure.ac | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -Index: dhcp-4.4.1/configure.ac -=================================================================== ---- dhcp-4.4.1.orig/configure.ac -+++ dhcp-4.4.1/configure.ac -@@ -647,7 +647,7 @@ AC_ARG_WITH(libxml2, - with_libxml2="$withval", with_libxml2="no") - - if test x$with_libxml2 != xno; then -- AC_SEARCH_LIBS(xmlTextWriterStartElement, [xml2], -+ AC_SEARCH_LIBS(xmlTextWriterStartElement, [xml2],, - [if test x$with_libxml2 != xauto; then - AC_MSG_FAILURE([*** Cannot find xmlTextWriterStartElement with -lxml2 and libxml2 was requested]) - fi]) diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp/0013-fixup_use_libbind.patch b/poky/meta/recipes-connectivity/dhcp/dhcp/0013-fixup_use_libbind.patch deleted file mode 100644 index fcec010bd..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp/0013-fixup_use_libbind.patch +++ /dev/null @@ -1,64 +0,0 @@ -lib and include path is hardcoded for use_libbind - -use libdir and includedir vars - -Upstream-Status: Pending -Signed-off-by: Armin Kuster - -Index: dhcp-4.4.1/configure.ac+lt -=================================================================== ---- dhcp-4.4.1.orig/configure.ac+lt -+++ dhcp-4.4.1/configure.ac+lt -@@ -801,22 +801,22 @@ no) - if test ! -d "$use_libbind"; then - AC_MSG_ERROR([Cannot find bind directory at $use_libbind]) - fi -- if test ! -d "$use_libbind/include" -o \ -- ! -f "$use_libbind/include/isc/buffer.h" -+ if test ! -d "$use_libbind/$includedir" -o \ -+ ! -f "$use_libbind/$includedir/isc/buffer.h" - then -- AC_MSG_ERROR([Cannot find bind includes at $use_libbind/include]) -+ AC_MSG_ERROR([Cannot find bind includes at $use_libbind/$includedir]) - fi -- if test ! -d "$use_libbind/lib" -o \ -- \( ! -f "$use_libbind/lib/libisc.a" -a \ -- ! -f "$use_libbind/lib/libisc.la" \) -+ if test ! -d "$use_libbind/$libdir" -o \ -+ \( ! -f "$use_libbind/$libdir/libisc.a" -a \ -+ ! 
-f "$use_libbind/$libdir/libisc.la" \) - then -- AC_MSG_ERROR([Cannot find bind libraries at $use_libbind/lib]) -+ AC_MSG_ERROR([Cannot find bind libraries at $use_libbind/$libdir]) - fi - BINDDIR="$use_libbind" -- BINDLIBIRSDIR="$BINDDIR/lib" -- BINDLIBDNSDIR="$BINDDIR/lib" -- BINDLIBISCCFGDIR="$BINDDIR/lib" -- BINDLIBISCDIR="$BINDDIR/lib" -+ BINDLIBIRSDIR="$BINDDIR/$libdir" -+ BINDLIBDNSDIR="$BINDDIR/$libdir" -+ BINDLIBISCCFGDIR="$BINDDIR/$libdir" -+ BINDLIBISCDIR="$BINDDIR/$libdir" - DISTCHECK_LIBBIND_CONFIGURE_FLAG="--with-libbind=$use_libbind" - ;; - esac -@@ -856,14 +856,14 @@ AC_ARG_ENABLE(libtool, - - if test "$use_libbind" != "no"; then - if test "$want_libtool" = "yes" -a \ -- ! -f "$use_libbind/lib/libisc.la" -+ ! -f "$use_libbind/$libdir/libisc.la" - then -- AC_MSG_ERROR([Cannot find dynamic libraries at $use_libbind/lib]) -+ AC_MSG_ERROR([Cannot find dynamic libraries at $use_libbind/$libdir]) - fi - if test "$want_libtool" = "no" -a \ -- ! -f "$use_libbind/lib/libisc.a" -+ ! -f "$use_libbind/$libdir/libisc.a" - then -- AC_MSG_ERROR([Cannot find static libraries at $use_libbind/lib]) -+ AC_MSG_ERROR([Cannot find static libraries at $use_libbind/$libdir]) - fi - fi - diff --git a/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb b/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb deleted file mode 100644 index cf4af8260..000000000 --- a/poky/meta/recipes-connectivity/dhcp/dhcp_4.4.2.bb +++ /dev/null @@ -1,23 +0,0 @@ -require dhcp.inc - -SRC_URI += "file://0001-define-macro-_PATH_DHCPD_CONF-and-_PATH_DHCLIENT_CON.patch \ - file://0002-dhclient-dbus.patch \ - file://0003-link-with-lcrypto.patch \ - file://0004-Fix-out-of-tree-builds.patch \ - file://0005-dhcp-client-fix-invoke-dhclient-script-failed-on-Rea.patch \ - file://0007-Add-configure-argument-to-make-the-libxml2-dependenc.patch \ - file://0009-remove-dhclient-script-bash-dependency.patch \ - file://0012-dhcp-correct-the-intention-for-xml2-lib-search.patch \ - file://0013-fixup_use_libbind.patch \ - file://0001-workaround-busybox-limitation-in-linux-dhclient-script.patch \ -" - -SRC_URI[md5sum] = "2afdaf8498dc1edaf3012efdd589b3e1" -SRC_URI[sha256sum] = "1a7ccd64a16e5e68f7b5e0f527fd07240a2892ea53fe245620f4f5f607004521" - -LDFLAGS_append = " -pthread" - -PACKAGECONFIG ?= "" -PACKAGECONFIG[bind-httpstats] = "--with-libxml2,--without-libxml2,libxml2" - -CFLAGS += "-fcommon" diff --git a/poky/meta/recipes-connectivity/dhcp/files/default-relay b/poky/meta/recipes-connectivity/dhcp/files/default-relay deleted file mode 100644 index 7961f014b..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/default-relay +++ /dev/null @@ -1,12 +0,0 @@ -# Defaults for dhcp-relay initscript -# sourced by /etc/init.d/dhcp-relay - -# What servers should the DHCP relay forward requests to? -# e.g: SERVERS="192.168.0.1" -SERVERS="" - -# On what interfaces should the DHCP relay (dhrelay) serve DHCP requests? -INTERFACES="" - -# Additional options that are passed to the DHCP relay daemon? -OPTIONS="" diff --git a/poky/meta/recipes-connectivity/dhcp/files/default-server b/poky/meta/recipes-connectivity/dhcp/files/default-server deleted file mode 100644 index 0385d1699..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/default-server +++ /dev/null @@ -1,7 +0,0 @@ -# Defaults for dhcp initscript -# sourced by /etc/init.d/dhcp-server -# installed at /etc/default/dhcp-server by the maintainer scripts - -# On what interfaces should the DHCP server (dhcpd) serve DHCP requests? -# Separate multiple interfaces with spaces, e.g. "eth0 eth1". 
-INTERFACES="" diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhclient-systemd-wrapper b/poky/meta/recipes-connectivity/dhcp/files/dhclient-systemd-wrapper deleted file mode 100644 index 7d0e224a1..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhclient-systemd-wrapper +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/sh - -# In case the interface is used for nfs, skip it. -nfsroot=0 -interfaces="" -exec 9<&0 < /proc/mounts -while read dev mtpt fstype rest; do - if test $mtpt = "/" ; then - case $fstype in - nfs | nfs4) - nfsroot=1 - nfs_addr=`echo $rest | sed -e 's/^.*addr=\([0-9.]*\).*$/\1/'` - break - ;; - *) - ;; - esac - fi -done -exec 0<&9 9<&- - -if [ $nfsroot -eq 0 ]; then - interfaces="$INTERFACES" -else - if [ -x /bin/ip -o -x /sbin/ip ] ; then - nfs_iface=`ip route get $nfs_addr | grep dev | sed -e 's/^.*dev \([-a-z0-9.]*\).*$/\1/'` - fi - for i in $INTERFACES; do - if test "x$i" = "x$nfs_iface"; then - echo "dhclient skipping nfsroot interface $i" - else - interfaces="$interfaces $i" - fi - done -fi - -if test "x$interfaces" != "x"; then - /sbin/dhclient -d -cf /etc/dhcp/dhclient.conf -q -lf /var/lib/dhcp/dhclient.leases $interfaces -fi diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhclient.conf b/poky/meta/recipes-connectivity/dhcp/files/dhclient.conf deleted file mode 100644 index 0e6dcf96c..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhclient.conf +++ /dev/null @@ -1,50 +0,0 @@ -# Configuration file for /sbin/dhclient, which is included in Debian's -# dhcp3-client package. -# -# This is a sample configuration file for dhclient. See dhclient.conf's -# man page for more information about the syntax of this file -# and a more comprehensive list of the parameters understood by -# dhclient. -# -# Normally, if the DHCP server provides reasonable information and does -# not leave anything out (like the domain name, for example), then -# few changes must be made to this file, if any. 
-# - -#send host-name "andare.fugue.com"; -#send dhcp-client-identifier 1:0:a0:24:ab:fb:9c; -#send dhcp-lease-time 3600; -#supersede domain-name "fugue.com home.vix.com"; -#prepend domain-name-servers 127.0.0.1; -request subnet-mask, broadcast-address, time-offset, routers, - domain-name, domain-name-servers, host-name, - netbios-name-servers, netbios-scope; -#require subnet-mask, domain-name-servers; -#timeout 60; -#retry 60; -#reboot 10; -#select-timeout 5; -#initial-interval 2; -#script "/etc/dhcp3/dhclient-script"; -#media "-link0 -link1 -link2", "link0 link1"; -#reject 192.33.137.209; - -#alias { -# interface "eth0"; -# fixed-address 192.5.5.213; -# option subnet-mask 255.255.255.255; -#} - -#lease { -# interface "eth0"; -# fixed-address 192.33.137.200; -# medium "link0 link1"; -# option host-name "andare.swiftmedia.com"; -# option subnet-mask 255.255.255.0; -# option broadcast-address 192.33.137.255; -# option routers 192.33.137.250; -# option domain-name-servers 127.0.0.1; -# renew 2 2000/1/12 00:00:01; -# rebind 2 2000/1/12 00:00:01; -# expire 2 2000/1/12 00:00:01; -#} diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhclient.service b/poky/meta/recipes-connectivity/dhcp/files/dhclient.service deleted file mode 100644 index 9ddb4d1df..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhclient.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Dynamic Host Configuration Protocol (DHCP) -Wants=network.target -Before=network.target -After=systemd-udevd.service - -[Service] -EnvironmentFile=-@SYSCONFDIR@/default/dhcp-client -ExecStart=@BASE_SBINDIR@/dhclient-systemd-wrapper -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhcpd.conf b/poky/meta/recipes-connectivity/dhcp/files/dhcpd.conf deleted file mode 100644 index 0001c0f00..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhcpd.conf +++ /dev/null @@ -1,108 +0,0 @@ -# -# Sample configuration file for ISC dhcpd for Debian -# -# $Id: dhcpd.conf,v 1.1.1.1 2002/05/21 00:07:44 peloy Exp $ -# - -# The ddns-updates-style parameter controls whether or not the server will -# attempt to do a DNS update when a lease is confirmed. We default to the -# behavior of the version 2 packages ('none', since DHCP v2 didn't -# have support for DDNS.) -ddns-update-style none; - -# option definitions common to all supported networks... -option domain-name "example.org"; -option domain-name-servers ns1.example.org, ns2.example.org; - -default-lease-time 600; -max-lease-time 7200; - -# If this DHCP server is the official DHCP server for the local -# network, the authoritative directive should be uncommented. -#authoritative; - -# Use this to send dhcp log messages to a different log file (you also -# have to hack syslog.conf to complete the redirection). -log-facility local7; - -# No service will be given on this subnet, but declaring it helps the -# DHCP server to understand the network topology. - -#subnet 10.152.187.0 netmask 255.255.255.0 { -#} - -# This is a very basic subnet declaration. - -#subnet 10.254.239.0 netmask 255.255.255.224 { -# range 10.254.239.10 10.254.239.20; -# option routers rtr-239-0-1.example.org, rtr-239-0-2.example.org; -#} - -# This declaration allows BOOTP clients to get dynamic addresses, -# which we don't really recommend. 
- -#subnet 10.254.239.32 netmask 255.255.255.224 { -# range dynamic-bootp 10.254.239.40 10.254.239.60; -# option broadcast-address 10.254.239.31; -# option routers rtr-239-32-1.example.org; -#} - -# A slightly different configuration for an internal subnet. -#subnet 10.5.5.0 netmask 255.255.255.224 { -# range 10.5.5.26 10.5.5.30; -# option domain-name-servers ns1.internal.example.org; -# option domain-name "internal.example.org"; -# option routers 10.5.5.1; -# option broadcast-address 10.5.5.31; -# default-lease-time 600; -# max-lease-time 7200; -#} - -# Hosts which require special configuration options can be listed in -# host statements. If no address is specified, the address will be -# allocated dynamically (if possible), but the host-specific information -# will still come from the host declaration. - -#host passacaglia { -# hardware ethernet 0:0:c0:5d:bd:95; -# filename "vmunix.passacaglia"; -# server-name "toccata.fugue.com"; -#} - -# Fixed IP addresses can also be specified for hosts. These addresses -# should not also be listed as being available for dynamic assignment. -# Hosts for which fixed IP addresses have been specified can boot using -# BOOTP or DHCP. Hosts for which no fixed address is specified can only -# be booted with DHCP, unless there is an address range on the subnet -# to which a BOOTP client is connected which has the dynamic-bootp flag -# set. -#host fantasia { -# hardware ethernet 08:00:07:26:c0:a5; -# fixed-address fantasia.fugue.com; -#} - -# You can declare a class of clients and then do address allocation -# based on that. The example below shows a case where all clients -# in a certain class get addresses on the 10.17.224/24 subnet, and all -# other clients get addresses on the 10.0.29/24 subnet. - -#class "foo" { -# match if substring (option vendor-class-identifier, 0, 4) = "SUNW"; -#} - -#shared-network 224-29 { -# subnet 10.17.224.0 netmask 255.255.255.0 { -# option routers rtr-224.example.org; -# } -# subnet 10.0.29.0 netmask 255.255.255.0 { -# option routers rtr-29.example.org; -# } -# pool { -# allow members of "foo"; -# range 10.17.224.10 10.17.224.250; -# } -# pool { -# deny members of "foo"; -# range 10.0.29.10 10.0.29.230; -# } -#} diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhcpd.service b/poky/meta/recipes-connectivity/dhcp/files/dhcpd.service deleted file mode 100644 index ae4f93eca..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhcpd.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=DHCPv4 Server Daemon -Documentation=man:dhcpd(8) man:dhcpd.conf(5) -After=network.target -After=time-sync.target - -[Service] -PIDFile=@localstatedir@/run/dhcpd.pid -EnvironmentFile=@SYSCONFDIR@/default/dhcp-server -EnvironmentFile=-@SYSCONFDIR@/sysconfig/dhcp-server -ExecStartPre=@base_bindir@/touch @localstatedir@/lib/dhcp/dhcpd.leases -ExecStart=@SBINDIR@/dhcpd -f -cf @SYSCONFDIR@/dhcp/dhcpd.conf -pf @localstatedir@/run/dhcpd.pid $DHCPDARGS -q $INTERFACES - -[Install] -WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhcpd6.service b/poky/meta/recipes-connectivity/dhcp/files/dhcpd6.service deleted file mode 100644 index 52a6224dc..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhcpd6.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=DHCPv6 Server Daemon -Documentation=man:dhcpd(8) man:dhcpd.conf(5) -After=network.target -After=time-sync.target - -[Service] -PIDFile=@localstatedir@/run/dhcpd6.pid -EnvironmentFile=@SYSCONFDIR@/default/dhcp-server 
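The @SYSCONFDIR@, @SBINDIR@, @base_bindir@ and @localstatedir@ tokens in these deleted unit files are not understood by systemd; the recipe expands them with sed while installing the units (the kea recipe added later in this diff uses the same pattern). A self-contained sketch, with illustrative values standing in for the usual ${D}${systemd_system_unitdir} paths:

    # Demo of the placeholder expansion normally done in do_install.
    printf 'ExecStart=@SBINDIR@/dhcpd -f -cf @SYSCONFDIR@/dhcp/dhcpd.conf -pf @localstatedir@/run/dhcpd.pid\n' > /tmp/dhcpd.service.in
    sed -e 's,@SBINDIR@,/usr/sbin,g' \
        -e 's,@SYSCONFDIR@,/etc,g' \
        -e 's,@localstatedir@,/var,g' \
        /tmp/dhcpd.service.in
    # prints: ExecStart=/usr/sbin/dhcpd -f -cf /etc/dhcp/dhcpd.conf -pf /var/run/dhcpd.pid
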
-EnvironmentFile=-@SYSCONFDIR@/sysconfig/dhcpd6 -ExecStartPre=@base_bindir@/touch @localstatedir@/lib/dhcp/dhcpd6.leases -ExecStart=@SBINDIR@/dhcpd -f -6 -cf @SYSCONFDIR@/dhcp/dhcpd6.conf -pf @localstatedir@/run/dhcpd6.pid $DHCPDARGS -q $INTERFACES - -[Install] -WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/dhcp/files/dhcrelay.service b/poky/meta/recipes-connectivity/dhcp/files/dhcrelay.service deleted file mode 100644 index 15ff927d3..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/dhcrelay.service +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=DHCP Relay Agent Daemon -After=network.target - -[Service] -EnvironmentFile=@SYSCONFDIR@/default/dhcp-relay -ExecStart=@SBINDIR@/dhcrelay -d --no-pid -q $SERVERS - -[Install] -WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/dhcp/files/init-relay b/poky/meta/recipes-connectivity/dhcp/files/init-relay deleted file mode 100644 index 019a7e84c..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/init-relay +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh -# -# $Id: dhcp3-relay,v 1.1 2004/04/16 15:41:08 ml Exp $ -# - -# It is not safe to start if we don't have a default configuration... -if [ ! -f /etc/default/dhcp-relay ]; then - echo "/etc/default/dhcp-relay does not exist! - Aborting..." - echo "create this file to fix the problem." - exit 1 -fi - -# Read init script configuration (interfaces the daemon should listen on -# and the DHCP server we should forward requests to.) -. /etc/default/dhcp-relay - -# Build command line for interfaces (will be passed to dhrelay below.) -IFCMD="" -if test "$INTERFACES" != ""; then - for I in $INTERFACES; do - IFCMD=${IFCMD}"-i "${I}" " - done -fi - -DHCRELAYPID=/var/run/dhcrelay.pid - -case "$1" in - start) - start-stop-daemon -S -x /usr/sbin/dhcrelay -- -q $OPTIONS $IFCMD $SERVERS - ;; - stop) - start-stop-daemon -K -x /usr/sbin/dhcrelay - ;; - restart | force-reload) - $0 stop - sleep 2 - $0 start - ;; - *) - echo "Usage: /etc/init.d/dhcp-relay {start|stop|restart|force-reload}" - exit 1 -esac - -exit 0 diff --git a/poky/meta/recipes-connectivity/dhcp/files/init-server b/poky/meta/recipes-connectivity/dhcp/files/init-server deleted file mode 100644 index 5e693adf7..000000000 --- a/poky/meta/recipes-connectivity/dhcp/files/init-server +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/sh -# -# $Id: dhcp3-server.init.d,v 1.4 2003/07/13 19:12:41 mdz Exp $ -# - -test -f /usr/sbin/dhcpd || exit 0 - -# It is not safe to start if we don't have a default configuration... -if [ ! -f /etc/default/dhcp-server ]; then - echo "/etc/default/dhcp-server does not exist! - Aborting..." - exit 0 -fi - -# Read init script configuration (so far only interfaces the daemon -# should listen on.) -. /etc/default/dhcp-server - -case "$1" in - start) - echo -n "Starting DHCP server: " - test -d /var/lib/dhcp/ || mkdir -p /var/lib/dhcp/ - test -f /var/lib/dhcp/dhcpd.leases || touch /var/lib/dhcp/dhcpd.leases - start-stop-daemon -S -x /usr/sbin/dhcpd -- -q $INTERFACES -user dhcp -group dhcp - echo "." - ;; - stop) - echo -n "Stopping DHCP server: dhcpd3" - start-stop-daemon -K -x /usr/sbin/dhcpd - echo "." - ;; - restart | force-reload) - $0 stop - sleep 2 - $0 start - if [ "$?" 
!= "0" ]; then - exit 1 - fi - ;; - *) - echo "Usage: /etc/init.d/dhcp-server {start|stop|restart|force-reload}" - exit 1 -esac - -exit 0 diff --git a/poky/meta/recipes-connectivity/dhcpcd/dhcpcd_9.2.0.bb b/poky/meta/recipes-connectivity/dhcpcd/dhcpcd_9.2.0.bb new file mode 100644 index 000000000..292cb5b00 --- /dev/null +++ b/poky/meta/recipes-connectivity/dhcpcd/dhcpcd_9.2.0.bb @@ -0,0 +1,39 @@ +SECTION = "console/network" +SUMMARY = "dhcpcd - a DHCP client" +DESCRIPTION = "dhcpcd runs on your machine and silently configures your \ + computer to work on the attached networks without trouble \ + and mostly without configuration." + +HOMEPAGE = "http://roy.marples.name/projects/dhcpcd/" + +LICENSE = "BSD-2-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=9674cc803c5d71306941e6e8b5c002f2" + +UPSTREAM_CHECK_URI = "https://roy.marples.name/downloads/dhcpcd/" + +SRC_URI = "https://roy.marples.name/downloads/${BPN}/${BPN}-${PV}.tar.xz \ + file://0001-remove-INCLUDEDIR-to-prevent-build-issues.patch \ + file://dhcpcd.service \ + file://dhcpcd@.service \ + " + +SRC_URI[sha256sum] = "fcb2d19672d445bbfd38678fdee4f556ef967a3ea6bd81092d10545df2cb9666" + +inherit pkgconfig autotools-brokensep systemd + +SYSTEMD_SERVICE_${PN} = "dhcpcd.service" + +PACKAGECONFIG ?= "udev ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)}" + +PACKAGECONFIG[udev] = "--with-udev,--without-udev,udev,udev" +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6" + +EXTRA_OECONF = "--enable-ipv4" + +do_install_append () { + # install systemd unit files + install -d ${D}${systemd_unitdir}/system + install -m 0644 ${WORKDIR}/dhcpcd*.service ${D}${systemd_unitdir}/system +} + +FILES_${PN}-dbg += "${libdir}/dhcpcd/dev/.debug" diff --git a/poky/meta/recipes-connectivity/dhcpcd/files/0001-remove-INCLUDEDIR-to-prevent-build-issues.patch b/poky/meta/recipes-connectivity/dhcpcd/files/0001-remove-INCLUDEDIR-to-prevent-build-issues.patch new file mode 100644 index 000000000..37d234443 --- /dev/null +++ b/poky/meta/recipes-connectivity/dhcpcd/files/0001-remove-INCLUDEDIR-to-prevent-build-issues.patch @@ -0,0 +1,45 @@ +From aa9e3982c1e75ad49945a62f5e262279c7a905a4 Mon Sep 17 00:00:00 2001 +From: Stefano Cappa +Date: Sun, 13 Jan 2019 01:50:52 +0100 +Subject: [PATCH] remove INCLUDEDIR to prevent build issues + +Upstream-Status: Pending + +Signed-off-by: Stefano Cappa +--- + configure | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/configure b/configure +index 6c81e0db..32dea2b4 100755 +--- a/configure ++++ b/configure +@@ -20,7 +20,6 @@ BUILD= + HOST= + HOSTCC= + TARGET= +-INCLUDEDIR= + DEBUG= + FORK= + STATIC= +@@ -72,7 +71,6 @@ for x do + --mandir) MANDIR=$var;; + --datadir) DATADIR=$var;; + --with-ccopts|CFLAGS) CFLAGS=$var;; +- -I|--includedir) INCLUDEDIR="$INCLUDEDIR${INCLUDEDIR:+ }-I$var";; + CC) CC=$var;; + CPPFLAGS) CPPFLAGS=$var;; + PKG_CONFIG) PKG_CONFIG=$var;; +@@ -309,9 +307,6 @@ if [ -n "$CPPFLAGS" ]; then + echo "CPPFLAGS=" >>$CONFIG_MK + echo "CPPFLAGS+= $CPPFLAGS" >>$CONFIG_MK + fi +-if [ -n "$INCLUDEDIR" ]; then +- echo "CPPFLAGS+= $INCLUDEDIR" >>$CONFIG_MK +-fi + if [ -n "$LDFLAGS" ]; then + echo "LDFLAGS=" >>$CONFIG_MK + echo "LDFLAGS+= $LDFLAGS" >>$CONFIG_MK +-- +2.17.2 (Apple Git-113) + diff --git a/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd.service b/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd.service new file mode 100644 index 000000000..86b5a43c3 --- /dev/null +++ b/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd.service @@ -0,0 +1,10 @@ +[Unit] +Description=A minimalistic network configuration 
daemon with DHCPv4, rdisc and DHCPv6 support +Wants=network.target +Before=network.target + +[Service] +ExecStart=/usr/sbin/dhcpcd -q --nobackground + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd@.service b/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd@.service new file mode 100644 index 000000000..c81bb05ea --- /dev/null +++ b/poky/meta/recipes-connectivity/dhcpcd/files/dhcpcd@.service @@ -0,0 +1,15 @@ +[Unit] +Description=dhcpcd on %I +Wants=network.target +Before=network.target +BindsTo=sys-subsystem-net-devices-%i.device +After=sys-subsystem-net-devices-%i.device + +[Service] +Type=forking +PIDFile=/run/dhcpcd-%I.pid +ExecStart=/usr/sbin/dhcpcd -q %I +ExecStop=/usr/sbin/dhcpcd -x %I + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/iw/iw_5.4.bb b/poky/meta/recipes-connectivity/iw/iw_5.4.bb deleted file mode 100644 index 9f58e4970..000000000 --- a/poky/meta/recipes-connectivity/iw/iw_5.4.bb +++ /dev/null @@ -1,32 +0,0 @@ -SUMMARY = "nl80211 based CLI configuration utility for wireless devices" -DESCRIPTION = "iw is a new nl80211 based CLI configuration utility for \ -wireless devices. It supports almost all new drivers that have been added \ -to the kernel recently. " -HOMEPAGE = "http://wireless.kernel.org/en/users/Documentation/iw" -SECTION = "base" -LICENSE = "BSD-2-Clause" -LIC_FILES_CHKSUM = "file://COPYING;md5=878618a5c4af25e9b93ef0be1a93f774" - -DEPENDS = "libnl" - -SRC_URI = "http://www.kernel.org/pub/software/network/iw/${BP}.tar.gz \ - file://0001-iw-version.sh-don-t-use-git-describe-for-versioning.patch \ - file://separate-objdir.patch \ -" - -SRC_URI[md5sum] = "08a4f581a39dc62fa85d3af796d844b6" -SRC_URI[sha256sum] = "943cd2446a6c7242fded3766d054ab2a214a3514b9a8b7e942fed8fb13c1370c" - -inherit pkgconfig - -EXTRA_OEMAKE = "\ - -f '${S}/Makefile' \ - \ - 'PREFIX=${prefix}' \ - 'SBINDIR=${sbindir}' \ - 'MANDIR=${mandir}' \ -" - -do_install() { - oe_runmake 'DESTDIR=${D}' install -} diff --git a/poky/meta/recipes-connectivity/iw/iw_5.8.bb b/poky/meta/recipes-connectivity/iw/iw_5.8.bb new file mode 100644 index 000000000..97ca66d66 --- /dev/null +++ b/poky/meta/recipes-connectivity/iw/iw_5.8.bb @@ -0,0 +1,32 @@ +SUMMARY = "nl80211 based CLI configuration utility for wireless devices" +DESCRIPTION = "iw is a new nl80211 based CLI configuration utility for \ +wireless devices. It supports almost all new drivers that have been added \ +to the kernel recently. 
" +HOMEPAGE = "http://wireless.kernel.org/en/users/Documentation/iw" +SECTION = "base" +LICENSE = "BSD-2-Clause" +LIC_FILES_CHKSUM = "file://COPYING;md5=878618a5c4af25e9b93ef0be1a93f774" + +DEPENDS = "libnl" + +SRC_URI = "http://www.kernel.org/pub/software/network/iw/${BP}.tar.gz \ + file://0001-iw-version.sh-don-t-use-git-describe-for-versioning.patch \ + file://separate-objdir.patch \ +" + +SRC_URI[md5sum] = "98129d64212bdbb408f009c56ed5c62a" +SRC_URI[sha256sum] = "cd9125c7e560926d66b09977fe0f75e5365ffd05a15df67d86a421dc76f96a96" + +inherit pkgconfig + +EXTRA_OEMAKE = "\ + -f '${S}/Makefile' \ + \ + 'PREFIX=${prefix}' \ + 'SBINDIR=${sbindir}' \ + 'MANDIR=${mandir}' \ +" + +do_install() { + oe_runmake 'DESTDIR=${D}' install +} diff --git a/poky/meta/recipes-connectivity/kea/files/kea-dhcp-ddns.service b/poky/meta/recipes-connectivity/kea/files/kea-dhcp-ddns.service new file mode 100644 index 000000000..91aa2eb14 --- /dev/null +++ b/poky/meta/recipes-connectivity/kea/files/kea-dhcp-ddns.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kea DHCP-DDNS Server +Wants=network-online.target +After=network-online.target +After=time-sync.target + +[Service] +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/run/kea/ +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/kea +ExecStart=@SBINDIR@/kea-dhcp-ddns -c @SYSCONFDIR@/kea/kea-dhcp-ddns.conf + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/kea/files/kea-dhcp4.service b/poky/meta/recipes-connectivity/kea/files/kea-dhcp4.service new file mode 100644 index 000000000..b851ea71c --- /dev/null +++ b/poky/meta/recipes-connectivity/kea/files/kea-dhcp4.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kea DHCPv4 Server +Wants=network-online.target +After=network-online.target +After=time-sync.target + +[Service] +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/run/kea/ +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/lib/kea +ExecStart=@SBINDIR@/kea-dhcp4 -c @SYSCONFDIR@/kea/kea-dhcp4.conf + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/kea/files/kea-dhcp6.service b/poky/meta/recipes-connectivity/kea/files/kea-dhcp6.service new file mode 100644 index 000000000..0f9f0ef8d --- /dev/null +++ b/poky/meta/recipes-connectivity/kea/files/kea-dhcp6.service @@ -0,0 +1,13 @@ +[Unit] +Description=Kea DHCPv6 Server +Wants=network-online.target +After=network-online.target +After=time-sync.target + +[Service] +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/run/kea/ +ExecStartPre=@BASE_BINDIR@/mkdir -p @LOCALSTATEDIR@/lib/kea +ExecStart=@SBINDIR@/kea-dhcp6 -c @SYSCONFDIR@/kea/kea-dhcp6.conf + +[Install] +WantedBy=multi-user.target diff --git a/poky/meta/recipes-connectivity/kea/kea_1.7.10.bb b/poky/meta/recipes-connectivity/kea/kea_1.7.10.bb new file mode 100644 index 000000000..e2560b239 --- /dev/null +++ b/poky/meta/recipes-connectivity/kea/kea_1.7.10.bb @@ -0,0 +1,59 @@ +SUMMARY = "ISC Kea DHCP Server" +DESCRIPTION = "Kea is the next generation of DHCP software developed by ISC. It supports both DHCPv4 and DHCPv6 protocols along with their extensions, e.g. prefix delegation and dynamic updates to DNS." 
+HOMEPAGE = "http://kea.isc.org" +SECTION = "connectivity" +LICENSE = "MPL-2.0 & Apache-2.0" +LIC_FILES_CHKSUM = "file://COPYING;md5=68d95543d2096459290a4e6b9ceccffa" + +DEPENDS = "boost log4cplus openssl" + +SRC_URI = "\ + http://ftp.isc.org/isc/kea/${PV}/${BP}.tar.gz \ + file://kea-dhcp4.service \ + file://kea-dhcp6.service \ + file://kea-dhcp-ddns.service \ +" +SRC_URI[sha256sum] = "4e121f0e58b175a827581c69cb1d60778647049fa47f142940dddc9ce58f3c82" + +inherit autotools systemd + +SYSTEMD_SERVICE_${PN} = "kea-dhcp4.service kea-dhcp6.service kea-dhcp-ddns.service" +SYSTEMD_AUTO_ENABLE = "disable" + +DEBUG_OPTIMIZATION_remove_mips = " -Og" +DEBUG_OPTIMIZATION_append_mips = " -O" +BUILD_OPTIMIZATION_remove_mips = " -Og" +BUILD_OPTIMIZATION_append_mips = " -O" + +DEBUG_OPTIMIZATION_remove_mipsel = " -Og" +DEBUG_OPTIMIZATION_append_mipsel = " -O" +BUILD_OPTIMIZATION_remove_mipsel = " -Og" +BUILD_OPTIMIZATION_append_mipsel = " -O" + +EXTRA_OECONF = "--with-boost-libs=-lboost_system \ + --with-log4cplus=${STAGING_DIR_TARGET}${prefix} \ + --with-openssl=${STAGING_DIR_TARGET}${prefix}" + +do_configure_prepend() { + # replace abs_top_builddir to avoid introducing the build path + # don't expand the abs_top_builddir on the target as the abs_top_builddir is meanlingless on the target + find ${S} -type f -name *.sh.in | xargs sed -i "s:@abs_top_builddir@:@abs_top_builddir_placeholder@:g" + sed -i "s:@abs_top_srcdir@:@abs_top_srcdir_placeholder@:g" ${S}/src/bin/admin/kea-admin.in +} + +do_install_append() { + install -d ${D}${systemd_system_unitdir} + install -m 0644 ${WORKDIR}/kea-dhcp*service ${D}${systemd_system_unitdir} + sed -i -e 's,@SBINDIR@,${sbindir},g' -e 's,@BASE_BINDIR@,${base_bindir},g' \ + -e 's,@LOCALSTATEDIR@,${localstatedir},g' -e 's,@SYSCONFDIR@,${sysconfdir},g' \ + ${D}${systemd_system_unitdir}/kea-dhcp*service +} + +do_install_append() { + rm -rf "${D}${localstatedir}" +} + +FILES_${PN}-staticdev += "${libdir}/kea/hooks/*.a ${libdir}/hooks/*.a" +FILES_${PN} += "${libdir}/hooks/*.so" + +PARALLEL_MAKEINST = "" diff --git a/poky/meta/recipes-connectivity/libuv/libuv_1.38.1.bb b/poky/meta/recipes-connectivity/libuv/libuv_1.38.1.bb deleted file mode 100644 index 874546373..000000000 --- a/poky/meta/recipes-connectivity/libuv/libuv_1.38.1.bb +++ /dev/null @@ -1,19 +0,0 @@ -SUMMARY = "A multi-platform support library with a focus on asynchronous I/O" -HOMEPAGE = "https://github.com/libuv/libuv" -BUGTRACKER = "https://github.com/libuv/libuv/issues" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://LICENSE;md5=a68902a430e32200263d182d44924d47" - -SRCREV = "e8b989ea1f7f9d4083511a2caec7791e9abd1871" -SRC_URI = "git://github.com/libuv/libuv;branch=v1.x" - -S = "${WORKDIR}/git" - -inherit autotools - -do_configure() { - ${S}/autogen.sh || bbnote "${PN} failed to autogen.sh" - oe_runconf -} - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/libuv/libuv_1.39.0.bb b/poky/meta/recipes-connectivity/libuv/libuv_1.39.0.bb new file mode 100644 index 000000000..b6ce4757f --- /dev/null +++ b/poky/meta/recipes-connectivity/libuv/libuv_1.39.0.bb @@ -0,0 +1,19 @@ +SUMMARY = "A multi-platform support library with a focus on asynchronous I/O" +HOMEPAGE = "https://github.com/libuv/libuv" +BUGTRACKER = "https://github.com/libuv/libuv/issues" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;md5=a68902a430e32200263d182d44924d47" + +SRCREV = "25f4b8b8a3c0f934158cd37a37b0525d75ca488e" +SRC_URI = "git://github.com/libuv/libuv;branch=v1.x" + +S = "${WORKDIR}/git" + +inherit autotools + 
+do_configure() { + ${S}/autogen.sh || bbnote "${PN} failed to autogen.sh" + oe_runconf +} + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb b/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb deleted file mode 100644 index 67959576e..000000000 --- a/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.82.bb +++ /dev/null @@ -1,68 +0,0 @@ -SUMMARY = "name server information handler" -DESCRIPTION = "Resolvconf is a framework for keeping track of the system's \ -information about currently available nameservers. It sets \ -itself up as the intermediary between programs that supply \ -nameserver information and programs that need nameserver \ -information." -SECTION = "console/network" -LICENSE = "GPLv2+" -LIC_FILES_CHKSUM = "file://COPYING;md5=c93c0550bd3173f4504b2cbd8991e50b" -AUTHOR = "Thomas Hood" -HOMEPAGE = "http://packages.debian.org/resolvconf" -RDEPENDS_${PN} = "bash" - -SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https \ - file://fix-path-for-busybox.patch \ - file://99_resolvconf \ - " - -SRCREV = "cb19bbfbe7e52174332f68bf2f295b39d119fad3" - -S = "${WORKDIR}/git" - -# the package is taken from snapshots.debian.org; that source is static and goes stale -# so we check the latest upstream from a directory that does get updated -UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/r/resolvconf/" - -inherit allarch - -do_compile () { - : -} - -do_install () { - install -d ${D}${sysconfdir}/default/volatiles - install -m 0644 ${WORKDIR}/99_resolvconf ${D}${sysconfdir}/default/volatiles - if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then - install -d ${D}${sysconfdir}/tmpfiles.d - echo "d /run/${BPN}/interface - - - -" \ - > ${D}${sysconfdir}/tmpfiles.d/resolvconf.conf - fi - install -d ${D}${base_libdir}/${BPN} - install -d ${D}${sysconfdir}/${BPN} - ln -snf ${localstatedir}/run/${BPN} ${D}${sysconfdir}/${BPN}/run - install -d ${D}${sysconfdir} ${D}${base_sbindir} - install -d ${D}${mandir}/man8 ${D}${docdir}/${P} - cp -pPR etc/* ${D}${sysconfdir}/ - chown -R root:root ${D}${sysconfdir}/ - install -m 0755 bin/resolvconf ${D}${base_sbindir}/ - install -m 0755 bin/list-records ${D}${base_libdir}/${BPN} - install -d ${D}/${sysconfdir}/network/if-up.d - install -m 0755 debian/resolvconf.000resolvconf.if-up ${D}/${sysconfdir}/network/if-up.d/000resolvconf - install -d ${D}/${sysconfdir}/network/if-down.d - install -m 0755 debian/resolvconf.resolvconf.if-down ${D}/${sysconfdir}/network/if-down.d/resolvconf - install -m 0644 README ${D}${docdir}/${P}/ - install -m 0644 man/resolvconf.8 ${D}${mandir}/man8/ -} - -pkg_postinst_${PN} () { - if [ -z "$D" ]; then - if command -v systemd-tmpfiles >/dev/null; then - systemd-tmpfiles --create ${sysconfdir}/tmpfiles.d/resolvconf.conf - elif [ -e ${sysconfdir}/init.d/populate-volatile.sh ]; then - ${sysconfdir}/init.d/populate-volatile.sh update - fi - fi -} - -FILES_${PN} += "${base_libdir}/${BPN}" diff --git a/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.83.bb b/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.83.bb new file mode 100644 index 000000000..f0ffc8224 --- /dev/null +++ b/poky/meta/recipes-connectivity/resolvconf/resolvconf_1.83.bb @@ -0,0 +1,68 @@ +SUMMARY = "name server information handler" +DESCRIPTION = "Resolvconf is a framework for keeping track of the system's \ +information about currently available nameservers. 
It sets \ +itself up as the intermediary between programs that supply \ +nameserver information and programs that need nameserver \ +information." +SECTION = "console/network" +LICENSE = "GPLv2+" +LIC_FILES_CHKSUM = "file://COPYING;md5=c93c0550bd3173f4504b2cbd8991e50b" +AUTHOR = "Thomas Hood" +HOMEPAGE = "http://packages.debian.org/resolvconf" +RDEPENDS_${PN} = "bash" + +SRC_URI = "git://salsa.debian.org/debian/resolvconf.git;protocol=https \ + file://fix-path-for-busybox.patch \ + file://99_resolvconf \ + " + +SRCREV = "d001dd2b7ce4c854eaa29e46b9640ab66c6e70bb" + +S = "${WORKDIR}/git" + +# the package is taken from snapshots.debian.org; that source is static and goes stale +# so we check the latest upstream from a directory that does get updated +UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/r/resolvconf/" + +inherit allarch + +do_compile () { + : +} + +do_install () { + install -d ${D}${sysconfdir}/default/volatiles + install -m 0644 ${WORKDIR}/99_resolvconf ${D}${sysconfdir}/default/volatiles + if ${@bb.utils.contains('DISTRO_FEATURES', 'systemd', 'true', 'false', d)}; then + install -d ${D}${sysconfdir}/tmpfiles.d + echo "d /run/${BPN}/interface - - - -" \ + > ${D}${sysconfdir}/tmpfiles.d/resolvconf.conf + fi + install -d ${D}${base_libdir}/${BPN} + install -d ${D}${sysconfdir}/${BPN} + ln -snf ${localstatedir}/run/${BPN} ${D}${sysconfdir}/${BPN}/run + install -d ${D}${sysconfdir} ${D}${base_sbindir} + install -d ${D}${mandir}/man8 ${D}${docdir}/${P} + cp -pPR etc/* ${D}${sysconfdir}/ + chown -R root:root ${D}${sysconfdir}/ + install -m 0755 bin/resolvconf ${D}${base_sbindir}/ + install -m 0755 bin/list-records ${D}${base_libdir}/${BPN} + install -d ${D}/${sysconfdir}/network/if-up.d + install -m 0755 debian/resolvconf.000resolvconf.if-up ${D}/${sysconfdir}/network/if-up.d/000resolvconf + install -d ${D}/${sysconfdir}/network/if-down.d + install -m 0755 debian/resolvconf.resolvconf.if-down ${D}/${sysconfdir}/network/if-down.d/resolvconf + install -m 0644 README ${D}${docdir}/${P}/ + install -m 0644 man/resolvconf.8 ${D}${mandir}/man8/ +} + +pkg_postinst_${PN} () { + if [ -z "$D" ]; then + if command -v systemd-tmpfiles >/dev/null; then + systemd-tmpfiles --create ${sysconfdir}/tmpfiles.d/resolvconf.conf + elif [ -e ${sysconfdir}/init.d/populate-volatile.sh ]; then + ${sysconfdir}/init.d/populate-volatile.sh update + fi + fi +} + +FILES_${PN} += "${base_libdir}/${BPN}" diff --git a/poky/meta/recipes-core/ell/ell_0.32.bb b/poky/meta/recipes-core/ell/ell_0.32.bb deleted file mode 100644 index 07dc4d4cb..000000000 --- a/poky/meta/recipes-core/ell/ell_0.32.bb +++ /dev/null @@ -1,21 +0,0 @@ -SUMMARY = "Embedded Linux Library" -DESCRIPTION = "The Embedded Linux Library (ELL) provides core, \ -low-level functionality for system daemons. It typically has no \ -dependencies other than the Linux kernel, C standard library, and \ -libdl (for dynamic linking). While ELL is designed to be efficient \ -and compact enough for use on embedded Linux platforms, it is not \ -limited to resource-constrained systems." 
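For reference, the tmpfiles.d snippet written by do_install in the resolvconf recipe above expands (with BPN=resolvconf and sysconfdir=/etc) to a single line, and pkg_postinst applies it immediately on a running system rather than waiting for the next boot:

    # Contents of /etc/tmpfiles.d/resolvconf.conf as written by do_install:
    #   d /run/resolvconf/interface - - - -
    # Applied at boot by systemd-tmpfiles, or right away by the postinst:
    systemd-tmpfiles --create /etc/tmpfiles.d/resolvconf.conf
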
-SECTION = "libs" -LICENSE = "LGPLv2.1" -LIC_FILES_CHKSUM = "file://COPYING;md5=fb504b67c50331fc78734fed90fb0e09" - -DEPENDS = "dbus" - -inherit autotools pkgconfig - -SRC_URI = "https://mirrors.edge.kernel.org/pub/linux/libs/${BPN}/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "42fdb9e24ff561a101389d51445cab1ff7d55f5385dc22a05b0493088cf99e30" - -do_configure_prepend () { - mkdir -p ${S}/build-aux -} diff --git a/poky/meta/recipes-core/ell/ell_0.33.bb b/poky/meta/recipes-core/ell/ell_0.33.bb new file mode 100644 index 000000000..2fa05104f --- /dev/null +++ b/poky/meta/recipes-core/ell/ell_0.33.bb @@ -0,0 +1,21 @@ +SUMMARY = "Embedded Linux Library" +DESCRIPTION = "The Embedded Linux Library (ELL) provides core, \ +low-level functionality for system daemons. It typically has no \ +dependencies other than the Linux kernel, C standard library, and \ +libdl (for dynamic linking). While ELL is designed to be efficient \ +and compact enough for use on embedded Linux platforms, it is not \ +limited to resource-constrained systems." +SECTION = "libs" +LICENSE = "LGPLv2.1" +LIC_FILES_CHKSUM = "file://COPYING;md5=fb504b67c50331fc78734fed90fb0e09" + +DEPENDS = "dbus" + +inherit autotools pkgconfig + +SRC_URI = "https://mirrors.edge.kernel.org/pub/linux/libs/${BPN}/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "d9e40e641164150394b74b719b9726fc734f24b2cde679cf5f3be6915c34eded" + +do_configure_prepend () { + mkdir -p ${S}/build-aux +} diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-init-env.in-do-not-add-C-CXX-parameters.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-init-env.in-do-not-add-C-CXX-parameters.patch deleted file mode 100644 index d45b75869..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-init-env.in-do-not-add-C-CXX-parameters.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 9b912a47f790a7b282ec0c2295a188c5d8fb6a7c Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Fri, 6 Mar 2020 21:04:05 +0000 -Subject: [PATCH] init-env.in: do not add C/CXX parameters - -These are taken from the cross environment and include -sysroot paths, so are not reproducible. - -Upstream-Status: Inappropriate [oe-core specific] -Signed-off-by: Alexander Kanavin ---- - gettext-tools/tests/init-env.in | 4 ---- - 1 file changed, 4 deletions(-) - -diff --git a/gettext-tools/tests/init-env.in b/gettext-tools/tests/init-env.in -index cc84ffd..b69c990 100644 ---- a/gettext-tools/tests/init-env.in -+++ b/gettext-tools/tests/init-env.in -@@ -3,10 +3,6 @@ top_builddir=../.. - - OBJEXT="@OBJEXT@" - EXEEXT="@EXEEXT@" --CC="@CC@" --CFLAGS="@CFLAGS@" --CXX="@CXX@" --CXXFLAGS="@CXXFLAGS@" - CPPFLAGS="@CPPFLAGS@" - LDFLAGS="@LDFLAGS@" - LTLIBINTL="@LTLIBINTL@" diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-tests-autopoint-3-unset-MAKEFLAGS.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-tests-autopoint-3-unset-MAKEFLAGS.patch deleted file mode 100644 index b0bc6b97a..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/0001-tests-autopoint-3-unset-MAKEFLAGS.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 38b256e5aa7dfeb42acffd89565e53a2c0bab3e3 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Tue, 7 Jan 2020 16:44:38 +0100 -Subject: [PATCH] tests/autopoint-3: unset MAKEFLAGS - -This is needed when running ptests, as the MAKEFLAGS value (set up by run-ptest) -is messing up the test. 
- -Upstream-Status: Inappropriate [oe-core specific] -Signed-off-by: Alexander Kanavin ---- - gettext-tools/tests/autopoint-3 | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/gettext-tools/tests/autopoint-3 b/gettext-tools/tests/autopoint-3 -index e13552b..55188df 100755 ---- a/gettext-tools/tests/autopoint-3 -+++ b/gettext-tools/tests/autopoint-3 -@@ -126,6 +126,7 @@ test $? = 0 || { cat autopoint.err; Exit 1; } - ${CONFIG_SHELL} ./configure >/dev/null 2>autpoint.err - test $? = 0 || { cat autopoint.err; Exit 1; } - -+unset MAKEFLAGS - ${MAKE} >/dev/null 2>autopoint.err - test $? = 0 || { cat autopoint.err; Exit 1; } - diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/add-with-bisonlocaledir.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/add-with-bisonlocaledir.patch deleted file mode 100644 index 35a131067..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/add-with-bisonlocaledir.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 04bd40fe2c48c6e01ab418a04d27c4aff644ad96 Mon Sep 17 00:00:00 2001 -From: Hongxu Jia -Date: Wed, 17 Feb 2016 23:54:02 -0500 -Subject: [PATCH] m4/bison-i18n.m4: add --with-bisonlocaledir to assign - BISON_LOCALEDIR - -The variable BISON_LOCALEDIR is assigned only by the output of -'bison --print-localedir', we add option --with-bisonlocaledir -to assign it explicitly. It is helpful for user to split the -native compile and cross compile. - -For backward compatibility, if option not used, it still -make use of the output of 'bison --print-localedir'. - -Upstream-Status: Submitted [bison-patches@gnu.org] - -Signed-off-by: Hongxu Jia ---- - gettext-tools/gnulib-m4/bison-i18n.m4 | 10 ++++++++-- - 1 file changed, 8 insertions(+), 2 deletions(-) - -diff --git a/gettext-tools/gnulib-m4/bison-i18n.m4 b/gettext-tools/gnulib-m4/bison-i18n.m4 -index f5cfd3a..fb6ac4d 100644 ---- a/gettext-tools/gnulib-m4/bison-i18n.m4 -+++ b/gettext-tools/gnulib-m4/bison-i18n.m4 -@@ -14,11 +14,16 @@ dnl sets BISON_LOCALEDIR to indicate where to find the bison-runtime.mo files - dnl and defines YYENABLE_NLS if there are bison-runtime.mo files at all. - AC_DEFUN([BISON_I18N], - [ -+ dnl Default is not to set bisonlocaledir -+ AC_ARG_WITH([bisonlocaledir], -+ [ --with-bisonlocaledir sets BISON_LOCALEDIR to indicate where to find the bison-runtime.mo files], -+ BISON_LOCALEDIR=$withval, -+ BISON_LOCALEDIR=) -+ - if test -z "$USE_NLS"; then - echo "The BISON-I18N macro is used without being preceded by AM-GNU-GETTEXT." 1>&2 - exit 1 - fi -- BISON_LOCALEDIR= - BISON_USE_NLS=no - if test "$USE_NLS" = yes; then - dnl Determine bison's localedir. -@@ -26,9 +31,10 @@ AC_DEFUN([BISON_I18N], - dnl But even is YACC is called "yacc", it may be a script that invokes bison - dnl and accepts the --print-localedir option. - dnl YACC's default value is empty; BISON's default value is :. 
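With the option introduced above, a cross build can hand the locale directory to configure directly instead of trusting the output of whatever bison happens to be on PATH. In practice this is just an extra configure argument; a sketch only (the host triplet and path are illustrative, not taken from this patch):

    # Run from a gettext-tools source tree that carries this patch.
    ./configure --host=arm-poky-linux-gnueabi \
                --with-bisonlocaledir=/usr/share/locale
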
-- if (${YACC-${BISON-:}} --print-localedir) >/dev/null 2>&1; then -+ if test -z "$BISON_LOCALEDIR" -a ${YACC-${BISON-:}} --print-localedir >/dev/null 2>&1; then - BISON_LOCALEDIR=`${YACC-${BISON-:}} --print-localedir` - fi -+ AC_MSG_RESULT([$BISON_LOCALEDIR]) - AC_SUBST([BISON_LOCALEDIR]) - if test -n "$BISON_LOCALEDIR"; then - dnl There is no need to enable internationalization if the user doesn't --- -1.9.1 - diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/parallel.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/parallel.patch deleted file mode 100644 index d96a376b7..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/parallel.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 4a2a0a93b469093b60ffd0bec55d33d1e03d4713 Mon Sep 17 00:00:00 2001 -From: Joe Slater -Date: Thu, 7 Jun 2012 16:37:01 -0700 -Subject: [PATCH] instal libgettextlib.a before removing it - -In a multiple job build, Makefile can simultaneously -be installing and removing libgettextlib.a. We serialize -the operations. - -Upstream-Status: Pending - -Signed-off-by: Joe Slater - ---- - gettext-tools/gnulib-lib/Makefile.am | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/gettext-tools/gnulib-lib/Makefile.am b/gettext-tools/gnulib-lib/Makefile.am -index 2126699..d2dd7e4 100644 ---- a/gettext-tools/gnulib-lib/Makefile.am -+++ b/gettext-tools/gnulib-lib/Makefile.am -@@ -58,6 +58,10 @@ endif - # Rules generated and collected by gnulib-tool. - include Makefile.gnulib - -+# defined in Makefile.gnulib but missing this dependency -+# -+install-exec-clean: install-libLTLIBRARIES -+ - # OS/2 does not support a DLL name longer than 8 characters. - if OS2 - libgettextlib_la_LDFLAGS += -os2dllname gtlib diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/run-ptest b/poky/meta/recipes-core/gettext/gettext-0.20.2/run-ptest deleted file mode 100644 index f17f3c87a..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/run-ptest +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -# -#This script is used to run gettext test suites -cd tests - -make -k runtest-TESTS top_srcdir=.. srcdir=. abs_srcdir=$PWD top_builddir=$PWD/../ abs_top_srcdir=$PWD/../ | cat diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/serial-tests-config.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/serial-tests-config.patch deleted file mode 100644 index 93f7c0333..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/serial-tests-config.patch +++ /dev/null @@ -1,56 +0,0 @@ -From ed64a5724ef7d6eb4e9a876f817ea266a536e195 Mon Sep 17 00:00:00 2001 -From: "Hongjun.Yang" -Date: Thu, 28 Jul 2016 12:36:15 +0800 -Subject: [PATCH] fix for ptest - -Add serial-tests support, ptest need it - -Upstream-Status: Inappropriate [oe specific] - -Signed-off-by: Changqing Li - ---- - configure.ac | 2 +- - gettext-runtime/configure.ac | 2 +- - gettext-tools/configure.ac | 2 +- - 3 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/configure.ac b/configure.ac -index 38db6fd..f019ae0 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -22,7 +22,7 @@ AC_INIT([gettext], - [bug-gettext@gnu.org]) - AC_CONFIG_SRCDIR([gettext-tools/src/msgfmt.c]) - AC_CONFIG_AUX_DIR([build-aux]) --AM_INIT_AUTOMAKE([1.13 silent-rules parallel-tests tar-ustar]) -+AM_INIT_AUTOMAKE([1.13 silent-rules serial-tests tar-ustar]) - - dnl Override automake's tar command used for creating distributions. 
- am__tar='${AMTAR} chf - --format=ustar --owner=root --group=root "$$tardir"' -diff --git a/gettext-runtime/configure.ac b/gettext-runtime/configure.ac -index de203e7..138a07f 100644 ---- a/gettext-runtime/configure.ac -+++ b/gettext-runtime/configure.ac -@@ -22,7 +22,7 @@ AC_INIT([gettext-runtime], - [bug-gettext@gnu.org]) - AC_CONFIG_SRCDIR([intl/dcigettext.c]) - AC_CONFIG_AUX_DIR([../build-aux]) --AM_INIT_AUTOMAKE([1.11.1 silent-rules parallel-tests]) -+AM_INIT_AUTOMAKE([1.11.1 silent-rules serial-tests]) - AC_CONFIG_HEADERS([config.h]) - - dnl Installation directories. -diff --git a/gettext-tools/configure.ac b/gettext-tools/configure.ac -index cf1dd73..b544d6d 100644 ---- a/gettext-tools/configure.ac -+++ b/gettext-tools/configure.ac -@@ -22,7 +22,7 @@ AC_INIT([gettext-tools], - [bug-gettext@gnu.org]) - AC_CONFIG_SRCDIR([src/msgfmt.c]) - AC_CONFIG_AUX_DIR([../build-aux]) --AM_INIT_AUTOMAKE([1.11.1 silent-rules parallel-tests]) -+AM_INIT_AUTOMAKE([1.11.1 silent-rules serial-tests]) - AC_CONFIG_HEADERS([config.h]) - - dnl Installation directories. diff --git a/poky/meta/recipes-core/gettext/gettext-0.20.2/use-pkgconfig.patch b/poky/meta/recipes-core/gettext/gettext-0.20.2/use-pkgconfig.patch deleted file mode 100644 index feb56719d..000000000 --- a/poky/meta/recipes-core/gettext/gettext-0.20.2/use-pkgconfig.patch +++ /dev/null @@ -1,699 +0,0 @@ -From c832880c5242b454a2c9e61b6a2dc26aecdc51bd Mon Sep 17 00:00:00 2001 -From: Ross Burton -Date: Tue, 23 Jan 2018 00:54:13 +0000 -Subject: [PATCH] gettext: beat library detection into shape - -For reasons which I just can't fathom gnulib doesn't use the expected tools to -find libraries but badly reinvents the wheel. This will trivially lead to host -contamination (explicit searches of /usr/lib) or incorrect RPATHs (bad -canonicalisation resulting in relative paths). - -Simply delete all the crazy, and replace with a single call to pkg-config. - -Upstream-Status: Inappropriate [upstream still refuse to consider pkg-config] -Signed-off-by: Ross Burton - ---- - gettext-tools/gnulib-m4/libxml.m4 | 99 +---------------- - .../gnulib-local/lib/term-styled-ostream.oo.c | 12 +-- - libtextstyle/gnulib-local/m4/libcroco.m4 | 99 +++-------------- - libtextstyle/gnulib-local/m4/libglib.m4 | 100 +++--------------- - libtextstyle/gnulib-m4/libcroco.m4 | 99 +++-------------- - libtextstyle/gnulib-m4/libglib.m4 | 100 +++--------------- - libtextstyle/lib/term-styled-ostream.c | 12 +-- - libtextstyle/lib/term-styled-ostream.oo.c | 12 +-- - 8 files changed, 83 insertions(+), 450 deletions(-) - -diff --git a/gettext-tools/gnulib-m4/libxml.m4 b/gettext-tools/gnulib-m4/libxml.m4 -index 05b9550..031ee65 100644 ---- a/gettext-tools/gnulib-m4/libxml.m4 -+++ b/gettext-tools/gnulib-m4/libxml.m4 -@@ -13,6 +13,7 @@ dnl gl_LIBXML(FORCE-INCLUDED) - dnl forces the use of the included or an external libxml. - AC_DEFUN([gl_LIBXML], - [ -+ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) - AC_REQUIRE([AM_ICONV_LINK]) - - ifelse([$1], , [ -@@ -30,100 +31,10 @@ AC_DEFUN([gl_LIBXML], - INCXML= - ifelse([$1], [yes], , [ - if test "$gl_cv_libxml_use_included" != yes; then -- dnl Figure out whether we can use a preinstalled libxml2, or have to use -- dnl the included one. -- AC_CACHE_VAL([gl_cv_libxml], [ -- gl_cv_libxml=no -- gl_cv_LIBXML= -- gl_cv_LTLIBXML= -- gl_cv_INCXML= -- gl_save_LIBS="$LIBS" -- LIBS="$LIBS $LIBICONV" -- dnl Search for libxml2 and define LIBXML2, LTLIBXML2 and INCXML2 -- dnl accordingly. 
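The PKG_CHECK_MODULES calls that replace the hand-rolled library search amount to two pkg-config queries per library, which is what makes them behave correctly under cross-compilation (pkg-config honours PKG_CONFIG_SYSROOT_DIR and the related environment variables). Approximately, for the libxml case:

    # Roughly what PKG_CHECK_MODULES([XML], [libxml-2.0]) ends up doing:
    XML_CFLAGS=$(pkg-config --cflags libxml-2.0)
    XML_LIBS=$(pkg-config --libs libxml-2.0)
    echo "INCXML=$XML_CFLAGS"
    echo "LIBXML=$XML_LIBS"
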
-- dnl Don't use xml2-config nor pkg-config, since it doesn't work when -- dnl cross-compiling or when the C compiler in use is different from the -- dnl one that built the library. -- dnl Use a test program that tries to invoke xmlFree. On Cygwin 1.7.x, -- dnl libxml2 is built in such a way that uses of xmlFree work fine with -- dnl -Wl,--enable-auto-import but lead to a link error with -- dnl -Wl,--disable-auto-import. -- AC_LIB_LINKFLAGS_BODY([xml2]) -- LIBS="$gl_save_LIBS $LIBXML2 $LIBICONV" -- AC_TRY_LINK([#include -- #include -- #include -- ], -- [xmlCheckVersion (0); -- xmlFree ((void *) 0); -- xmlXPathSetContextNode ((void *)0, (void *)0); -- ], -- [gl_cv_libxml=yes -- gl_cv_LIBXML="$LIBXML2 $LIBICONV" -- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" -- ]) -- if test "$gl_cv_libxml" != yes; then -- gl_save_CPPFLAGS="$CPPFLAGS" -- CPPFLAGS="$CPPFLAGS $INCXML2" -- AC_TRY_LINK([#include -- #include -- #include -- ], -- [xmlCheckVersion (0); -- xmlFree ((void *) 0); -- xmlXPathSetContextNode ((void *)0, (void *)0); -- ], -- [gl_cv_libxml=yes -- gl_cv_LIBXML="$LIBXML2 $LIBICONV" -- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" -- gl_cv_INCXML="$INCXML2" -- ]) -- if test "$gl_cv_libxml" != yes; then -- dnl Often the include files are installed in /usr/include/libxml2. -- dnl In libxml2-2.5, is self-contained. -- dnl In libxml2-2.6, it includes which is -- dnl self-contained. -- libxml2_include_dir= -- AC_TRY_CPP([#include ], -- [gl_ABSOLUTE_HEADER([libxml2/libxml/xmlexports.h]) -- libxml2_include_dir=`echo "$gl_cv_absolute_libxml2_libxml_xmlexports_h" | sed -e 's,.libxml.xmlexports\.h$,,'` -- ]) -- if test -z "$libxml2_include_dir"; then -- AC_TRY_CPP([#include ], -- [gl_ABSOLUTE_HEADER([libxml2/libxml/xmlversion.h]) -- libxml2_include_dir=`echo "$gl_cv_absolute_libxml2_libxml_xmlversion_h" | sed -e 's,.libxml.xmlversion\.h$,,'` -- ]) -- fi -- if test -n "$libxml2_include_dir" && test -d "$libxml2_include_dir"; then -- CPPFLAGS="$gl_save_CPPFLAGS -I$libxml2_include_dir" -- AC_TRY_LINK([#include -- #include -- #include -- ], -- [xmlCheckVersion (0); -- xmlFree ((void *) 0); -- xmlXPathSetContextNode ((void *)0, (void *)0); -- ], -- [gl_cv_libxml=yes -- gl_cv_LIBXML="$LIBXML2 $LIBICONV" -- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" -- gl_cv_INCXML="-I$libxml2_include_dir" -- ]) -- fi -- fi -- CPPFLAGS="$gl_save_CPPFLAGS" -- fi -- LIBS="$gl_save_LIBS" -- ]) -- AC_MSG_CHECKING([for libxml]) -- AC_MSG_RESULT([$gl_cv_libxml]) -- if test $gl_cv_libxml = yes; then -- LIBXML="$gl_cv_LIBXML" -- LTLIBXML="$gl_cv_LTLIBXML" -- INCXML="$gl_cv_INCXML" -- else -- gl_cv_libxml_use_included=yes -- fi -+ PKG_CHECK_MODULES([XML], [libxml-2.0]) -+ LIBXML=$XML_LIBS -+ LTLIBXML=$XML_LIBS -+ INCXML=$XML_CFLAGS - fi - ]) - AC_SUBST([LIBXML]) -diff --git a/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c b/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c -index 2cfd4a8..d42c8b4 100644 ---- a/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c -+++ b/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c -@@ -22,15 +22,15 @@ - - #include - --#include --#include --#include --#include -+#include -+#include -+#include -+#include - /* has a broken double-inclusion guard in libcroco-0.6.1. 
*/ - #ifndef __CR_FONTS_H__ --# include -+# include - #endif --#include -+#include - - #include "term-ostream.h" - #include "hash.h" -diff --git a/libtextstyle/gnulib-local/m4/libcroco.m4 b/libtextstyle/gnulib-local/m4/libcroco.m4 -index bc53cc6..10b2455 100644 ---- a/libtextstyle/gnulib-local/m4/libcroco.m4 -+++ b/libtextstyle/gnulib-local/m4/libcroco.m4 -@@ -1,99 +1,34 @@ --# libcroco.m4 serial 3 --dnl Copyright (C) 2006-2007, 2019 Free Software Foundation, Inc. -+# libcroco.m4 serial 2 (gettext-0.17) -+dnl Copyright (C) 2006, 2015-2016 Free Software Foundation, Inc. - dnl This file is free software; the Free Software Foundation - dnl gives unlimited permission to copy and/or distribute it, - dnl with or without modifications, as long as this notice is preserved. - - dnl From Bruno Haible. - --dnl gl_LIBCROCO --dnl gives the user the option to decide whether to use the included or --dnl an external libcroco. --dnl gl_LIBCROCO(FORCE-INCLUDED) --dnl forces the use of the included or an external libcroco. - AC_DEFUN([gl_LIBCROCO], - [ -- ifelse([$1], [yes], , [ -- dnl libcroco depends on libglib. -- AC_REQUIRE([gl_LIBGLIB]) -- ]) -+ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -+ dnl libcroco depends on libglib. -+ AC_REQUIRE([gl_LIBGLIB]) - -- ifelse([$1], , [ -- AC_MSG_CHECKING([whether included libcroco is requested]) -- AC_ARG_WITH([included-libcroco], -- [ --with-included-libcroco use the libcroco included here], -- [gl_cv_libcroco_force_included=$withval], -- [gl_cv_libcroco_force_included=no]) -- AC_MSG_RESULT([$gl_cv_libcroco_force_included]) -- ], [gl_cv_libcroco_force_included=$1]) -+ AC_MSG_CHECKING([whether included libcroco is requested]) -+ AC_ARG_WITH([included-libcroco], -+ [ --with-included-libcroco use the libcroco included here], -+ [gl_cv_libcroco_force_included=$withval], -+ [gl_cv_libcroco_force_included=no]) -+ AC_MSG_RESULT([$gl_cv_libcroco_force_included]) - - gl_cv_libcroco_use_included="$gl_cv_libcroco_force_included" - LIBCROCO= - LTLIBCROCO= - INCCROCO= -- ifelse([$1], [yes], , [ -- if test "$gl_cv_libcroco_use_included" != yes; then -- dnl Figure out whether we can use a preinstalled libcroco-0.6, or have to -- dnl use the included one. -- AC_CACHE_VAL([gl_cv_libcroco], [ -- gl_cv_libcroco=no -- gl_cv_LIBCROCO= -- gl_cv_LTLIBCROCO= -- gl_cv_INCCROCO= -- gl_save_LIBS="$LIBS" -- dnl Search for libcroco and define LIBCROCO_0_6, LTLIBCROCO_0_6 and -- dnl INCCROCO_0_6 accordingly. -- dnl Don't use croco-0.6-config nor pkg-config, since it doesn't work when -- dnl cross-compiling or when the C compiler in use is different from the -- dnl one that built the library. -- AC_LIB_LINKFLAGS_BODY([croco-0.6], [glib-2.0]) -- LIBS="$gl_save_LIBS $LIBCROCO_0_6" -- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- ]) -- if test "$gl_cv_libcroco" != yes; then -- gl_save_CPPFLAGS="$CPPFLAGS" -- CPPFLAGS="$CPPFLAGS $INCCROCO_0_6" -- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- gl_cv_INCCROCO="$INCCROCO_0_6" -- ]) -- if test "$gl_cv_libcroco" != yes; then -- dnl Often the include files are installed in -- dnl /usr/include/libcroco-0.6/libcroco. 
-- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_ABSOLUTE_HEADER([libcroco-0.6/libcroco/libcroco-config.h]) -- libcroco_include_dir=`echo "$gl_cv_absolute_libcroco_0_6_libcroco_libcroco_config_h" | sed -e 's,.libcroco-config\.h$,,'` -- if test -d "$libcroco_include_dir"; then -- gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- gl_cv_INCCROCO="-I$libcroco_include_dir" -- fi -- ]) -- fi -- CPPFLAGS="$gl_save_CPPFLAGS" -- fi -- LIBS="$gl_save_LIBS" -- ]) -- AC_MSG_CHECKING([for libcroco]) -- AC_MSG_RESULT([$gl_cv_libcroco]) -- if test $gl_cv_libcroco = yes; then -- LIBCROCO="$gl_cv_LIBCROCO" -- LTLIBCROCO="$gl_cv_LTLIBCROCO" -- INCCROCO="$gl_cv_INCCROCO" -- else -- gl_cv_libcroco_use_included=yes -- fi -- fi -- ]) -+ if test "$gl_cv_libcroco_use_included" != yes; then -+ PKG_CHECK_MODULES([CROCO], [libcroco-0.6]) -+ LIBCROCO=$CROCO_LIBS -+ LTLIBCROCO=$CROCO_LIBS -+ INCCROCO=$CROCO_CFLAGS -+ fi - AC_SUBST([LIBCROCO]) - AC_SUBST([LTLIBCROCO]) - AC_SUBST([INCCROCO]) -diff --git a/libtextstyle/gnulib-local/m4/libglib.m4 b/libtextstyle/gnulib-local/m4/libglib.m4 -index 5853772..767fba2 100644 ---- a/libtextstyle/gnulib-local/m4/libglib.m4 -+++ b/libtextstyle/gnulib-local/m4/libglib.m4 -@@ -6,100 +6,26 @@ dnl with or without modifications, as long as this notice is preserved. - - dnl From Bruno Haible. - --dnl gl_LIBGLIB --dnl gives the user the option to decide whether to use the included or --dnl an external libglib. --dnl gl_LIBGLIB(FORCE-INCLUDED) --dnl forces the use of the included or an external libglib. - AC_DEFUN([gl_LIBGLIB], - [ -- ifelse([$1], , [ -- AC_MSG_CHECKING([whether included glib is requested]) -- AC_ARG_WITH([included-glib], -- [ --with-included-glib use the glib2 included here], -- [gl_cv_libglib_force_included=$withval], -- [gl_cv_libglib_force_included=no]) -- AC_MSG_RESULT([$gl_cv_libglib_force_included]) -- ], [gl_cv_libglib_force_included=$1]) -+ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -+ AC_MSG_CHECKING([whether included glib is requested]) -+ AC_ARG_WITH([included-glib], -+ [ --with-included-glib use the glib2 included here], -+ [gl_cv_libglib_force_included=$withval], -+ [gl_cv_libglib_force_included=no]) -+ AC_MSG_RESULT([$gl_cv_libglib_force_included]) - - gl_cv_libglib_use_included="$gl_cv_libglib_force_included" - LIBGLIB= - LTLIBGLIB= - INCGLIB= -- ifelse([$1], [yes], , [ -- if test "$gl_cv_libglib_use_included" != yes; then -- dnl Figure out whether we can use a preinstalled libglib-2.0, or have to use -- dnl the included one. -- AC_CACHE_VAL([gl_cv_libglib], [ -- gl_cv_libglib=no -- gl_cv_LIBGLIB= -- gl_cv_LTLIBGLIB= -- gl_cv_INCGLIB= -- gl_save_LIBS="$LIBS" -- dnl Search for libglib2 and define LIBGLIB_2_0, LTLIBGLIB_2_0 and -- dnl INCGLIB_2_0 accordingly. -- dnl Don't use glib-config nor pkg-config, since it doesn't work when -- dnl cross-compiling or when the C compiler in use is different from the -- dnl one that built the library. 
-- AC_LIB_LINKFLAGS_BODY([glib-2.0]) -- LIBS="$gl_save_LIBS $LIBGLIB_2_0" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- ]) -- if test "$gl_cv_libglib" != yes; then -- gl_save_CPPFLAGS="$CPPFLAGS" -- CPPFLAGS="$CPPFLAGS $INCGLIB_2_0" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- gl_cv_INCGLIB="$INCGLIB_2_0" -- ]) -- if test "$gl_cv_libglib" != yes; then -- dnl Often the include files are installed in /usr/include/glib-2.0 -- dnl and /usr/lib/glib-2.0/include. -- if test -n "$LIBGLIB_2_0_PREFIX"; then -- CPPFLAGS="$gl_save_CPPFLAGS -I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- gl_cv_INCGLIB="-I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" -- ]) -- fi -- fi -- CPPFLAGS="$gl_save_CPPFLAGS" -- fi -- LIBS="$gl_save_LIBS" -- ]) -- AC_MSG_CHECKING([for glib]) -- AC_MSG_RESULT([$gl_cv_libglib]) -- if test $gl_cv_libglib = yes; then -- LIBGLIB="$gl_cv_LIBGLIB" -- LTLIBGLIB="$gl_cv_LTLIBGLIB" -- INCGLIB="$gl_cv_INCGLIB" -- else -- gl_cv_libglib_use_included=yes -- fi -- fi -- ]) -+ if test "$gl_cv_libglib_use_included" != yes; then -+ PKG_CHECK_MODULES([GLIB], [glib-2.0]) -+ LIBGLIB="$GLIB_LIBS" -+ LTLIBGLIB="$GLIB_LIBS" -+ INCGLIB="$GLIB_CFLAGS" -+ fi - AC_SUBST([LIBGLIB]) - AC_SUBST([LTLIBGLIB]) - AC_SUBST([INCGLIB]) -diff --git a/libtextstyle/gnulib-m4/libcroco.m4 b/libtextstyle/gnulib-m4/libcroco.m4 -index bc53cc6..10b2455 100644 ---- a/libtextstyle/gnulib-m4/libcroco.m4 -+++ b/libtextstyle/gnulib-m4/libcroco.m4 -@@ -1,99 +1,34 @@ --# libcroco.m4 serial 3 --dnl Copyright (C) 2006-2007, 2019 Free Software Foundation, Inc. -+# libcroco.m4 serial 2 (gettext-0.17) -+dnl Copyright (C) 2006, 2015-2016 Free Software Foundation, Inc. - dnl This file is free software; the Free Software Foundation - dnl gives unlimited permission to copy and/or distribute it, - dnl with or without modifications, as long as this notice is preserved. - - dnl From Bruno Haible. - --dnl gl_LIBCROCO --dnl gives the user the option to decide whether to use the included or --dnl an external libcroco. --dnl gl_LIBCROCO(FORCE-INCLUDED) --dnl forces the use of the included or an external libcroco. - AC_DEFUN([gl_LIBCROCO], - [ -- ifelse([$1], [yes], , [ -- dnl libcroco depends on libglib. -- AC_REQUIRE([gl_LIBGLIB]) -- ]) -+ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -+ dnl libcroco depends on libglib. 
-+ AC_REQUIRE([gl_LIBGLIB]) - -- ifelse([$1], , [ -- AC_MSG_CHECKING([whether included libcroco is requested]) -- AC_ARG_WITH([included-libcroco], -- [ --with-included-libcroco use the libcroco included here], -- [gl_cv_libcroco_force_included=$withval], -- [gl_cv_libcroco_force_included=no]) -- AC_MSG_RESULT([$gl_cv_libcroco_force_included]) -- ], [gl_cv_libcroco_force_included=$1]) -+ AC_MSG_CHECKING([whether included libcroco is requested]) -+ AC_ARG_WITH([included-libcroco], -+ [ --with-included-libcroco use the libcroco included here], -+ [gl_cv_libcroco_force_included=$withval], -+ [gl_cv_libcroco_force_included=no]) -+ AC_MSG_RESULT([$gl_cv_libcroco_force_included]) - - gl_cv_libcroco_use_included="$gl_cv_libcroco_force_included" - LIBCROCO= - LTLIBCROCO= - INCCROCO= -- ifelse([$1], [yes], , [ -- if test "$gl_cv_libcroco_use_included" != yes; then -- dnl Figure out whether we can use a preinstalled libcroco-0.6, or have to -- dnl use the included one. -- AC_CACHE_VAL([gl_cv_libcroco], [ -- gl_cv_libcroco=no -- gl_cv_LIBCROCO= -- gl_cv_LTLIBCROCO= -- gl_cv_INCCROCO= -- gl_save_LIBS="$LIBS" -- dnl Search for libcroco and define LIBCROCO_0_6, LTLIBCROCO_0_6 and -- dnl INCCROCO_0_6 accordingly. -- dnl Don't use croco-0.6-config nor pkg-config, since it doesn't work when -- dnl cross-compiling or when the C compiler in use is different from the -- dnl one that built the library. -- AC_LIB_LINKFLAGS_BODY([croco-0.6], [glib-2.0]) -- LIBS="$gl_save_LIBS $LIBCROCO_0_6" -- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- ]) -- if test "$gl_cv_libcroco" != yes; then -- gl_save_CPPFLAGS="$CPPFLAGS" -- CPPFLAGS="$CPPFLAGS $INCCROCO_0_6" -- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- gl_cv_INCCROCO="$INCCROCO_0_6" -- ]) -- if test "$gl_cv_libcroco" != yes; then -- dnl Often the include files are installed in -- dnl /usr/include/libcroco-0.6/libcroco. -- AC_TRY_LINK([#include ], -- [const char *version = LIBCROCO_VERSION; return !version;], -- [gl_ABSOLUTE_HEADER([libcroco-0.6/libcroco/libcroco-config.h]) -- libcroco_include_dir=`echo "$gl_cv_absolute_libcroco_0_6_libcroco_libcroco_config_h" | sed -e 's,.libcroco-config\.h$,,'` -- if test -d "$libcroco_include_dir"; then -- gl_cv_libcroco=yes -- gl_cv_LIBCROCO="$LIBCROCO_0_6" -- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" -- gl_cv_INCCROCO="-I$libcroco_include_dir" -- fi -- ]) -- fi -- CPPFLAGS="$gl_save_CPPFLAGS" -- fi -- LIBS="$gl_save_LIBS" -- ]) -- AC_MSG_CHECKING([for libcroco]) -- AC_MSG_RESULT([$gl_cv_libcroco]) -- if test $gl_cv_libcroco = yes; then -- LIBCROCO="$gl_cv_LIBCROCO" -- LTLIBCROCO="$gl_cv_LTLIBCROCO" -- INCCROCO="$gl_cv_INCCROCO" -- else -- gl_cv_libcroco_use_included=yes -- fi -- fi -- ]) -+ if test "$gl_cv_libcroco_use_included" != yes; then -+ PKG_CHECK_MODULES([CROCO], [libcroco-0.6]) -+ LIBCROCO=$CROCO_LIBS -+ LTLIBCROCO=$CROCO_LIBS -+ INCCROCO=$CROCO_CFLAGS -+ fi - AC_SUBST([LIBCROCO]) - AC_SUBST([LTLIBCROCO]) - AC_SUBST([INCCROCO]) -diff --git a/libtextstyle/gnulib-m4/libglib.m4 b/libtextstyle/gnulib-m4/libglib.m4 -index 5853772..767fba2 100644 ---- a/libtextstyle/gnulib-m4/libglib.m4 -+++ b/libtextstyle/gnulib-m4/libglib.m4 -@@ -6,100 +6,26 @@ dnl with or without modifications, as long as this notice is preserved. - - dnl From Bruno Haible. 
- --dnl gl_LIBGLIB --dnl gives the user the option to decide whether to use the included or --dnl an external libglib. --dnl gl_LIBGLIB(FORCE-INCLUDED) --dnl forces the use of the included or an external libglib. - AC_DEFUN([gl_LIBGLIB], - [ -- ifelse([$1], , [ -- AC_MSG_CHECKING([whether included glib is requested]) -- AC_ARG_WITH([included-glib], -- [ --with-included-glib use the glib2 included here], -- [gl_cv_libglib_force_included=$withval], -- [gl_cv_libglib_force_included=no]) -- AC_MSG_RESULT([$gl_cv_libglib_force_included]) -- ], [gl_cv_libglib_force_included=$1]) -+ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) -+ AC_MSG_CHECKING([whether included glib is requested]) -+ AC_ARG_WITH([included-glib], -+ [ --with-included-glib use the glib2 included here], -+ [gl_cv_libglib_force_included=$withval], -+ [gl_cv_libglib_force_included=no]) -+ AC_MSG_RESULT([$gl_cv_libglib_force_included]) - - gl_cv_libglib_use_included="$gl_cv_libglib_force_included" - LIBGLIB= - LTLIBGLIB= - INCGLIB= -- ifelse([$1], [yes], , [ -- if test "$gl_cv_libglib_use_included" != yes; then -- dnl Figure out whether we can use a preinstalled libglib-2.0, or have to use -- dnl the included one. -- AC_CACHE_VAL([gl_cv_libglib], [ -- gl_cv_libglib=no -- gl_cv_LIBGLIB= -- gl_cv_LTLIBGLIB= -- gl_cv_INCGLIB= -- gl_save_LIBS="$LIBS" -- dnl Search for libglib2 and define LIBGLIB_2_0, LTLIBGLIB_2_0 and -- dnl INCGLIB_2_0 accordingly. -- dnl Don't use glib-config nor pkg-config, since it doesn't work when -- dnl cross-compiling or when the C compiler in use is different from the -- dnl one that built the library. -- AC_LIB_LINKFLAGS_BODY([glib-2.0]) -- LIBS="$gl_save_LIBS $LIBGLIB_2_0" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- ]) -- if test "$gl_cv_libglib" != yes; then -- gl_save_CPPFLAGS="$CPPFLAGS" -- CPPFLAGS="$CPPFLAGS $INCGLIB_2_0" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- gl_cv_INCGLIB="$INCGLIB_2_0" -- ]) -- if test "$gl_cv_libglib" != yes; then -- dnl Often the include files are installed in /usr/include/glib-2.0 -- dnl and /usr/lib/glib-2.0/include. 
-- if test -n "$LIBGLIB_2_0_PREFIX"; then -- CPPFLAGS="$gl_save_CPPFLAGS -I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" -- AC_TRY_LINK([#include --#ifndef G_BEGIN_DECLS --error this glib.h includes a glibconfig.h from a glib version 1.x --#endif --], -- [g_string_new ("foo");], -- [gl_cv_libglib=yes -- gl_cv_LIBGLIB="$LIBGLIB_2_0" -- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" -- gl_cv_INCGLIB="-I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" -- ]) -- fi -- fi -- CPPFLAGS="$gl_save_CPPFLAGS" -- fi -- LIBS="$gl_save_LIBS" -- ]) -- AC_MSG_CHECKING([for glib]) -- AC_MSG_RESULT([$gl_cv_libglib]) -- if test $gl_cv_libglib = yes; then -- LIBGLIB="$gl_cv_LIBGLIB" -- LTLIBGLIB="$gl_cv_LTLIBGLIB" -- INCGLIB="$gl_cv_INCGLIB" -- else -- gl_cv_libglib_use_included=yes -- fi -- fi -- ]) -+ if test "$gl_cv_libglib_use_included" != yes; then -+ PKG_CHECK_MODULES([GLIB], [glib-2.0]) -+ LIBGLIB="$GLIB_LIBS" -+ LTLIBGLIB="$GLIB_LIBS" -+ INCGLIB="$GLIB_CFLAGS" -+ fi - AC_SUBST([LIBGLIB]) - AC_SUBST([LTLIBGLIB]) - AC_SUBST([INCGLIB]) -diff --git a/libtextstyle/lib/term-styled-ostream.c b/libtextstyle/lib/term-styled-ostream.c -index 3675b5f..811e546 100644 ---- a/libtextstyle/lib/term-styled-ostream.c -+++ b/libtextstyle/lib/term-styled-ostream.c -@@ -28,15 +28,15 @@ - - #include - --#include --#include --#include --#include -+#include -+#include -+#include -+#include - /* has a broken double-inclusion guard in libcroco-0.6.1. */ - #ifndef __CR_FONTS_H__ --# include -+# include - #endif --#include -+#include - - #include "term-ostream.h" - #include "hash.h" -diff --git a/libtextstyle/lib/term-styled-ostream.oo.c b/libtextstyle/lib/term-styled-ostream.oo.c -index 2cfd4a8..d42c8b4 100644 ---- a/libtextstyle/lib/term-styled-ostream.oo.c -+++ b/libtextstyle/lib/term-styled-ostream.oo.c -@@ -22,15 +22,15 @@ - - #include - --#include --#include --#include --#include -+#include -+#include -+#include -+#include - /* has a broken double-inclusion guard in libcroco-0.6.1. */ - #ifndef __CR_FONTS_H__ --# include -+# include - #endif --#include -+#include - - #include "term-ostream.h" - #include "hash.h" diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/0001-init-env.in-do-not-add-C-CXX-parameters.patch b/poky/meta/recipes-core/gettext/gettext-0.21/0001-init-env.in-do-not-add-C-CXX-parameters.patch new file mode 100644 index 000000000..d45b75869 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/0001-init-env.in-do-not-add-C-CXX-parameters.patch @@ -0,0 +1,29 @@ +From 9b912a47f790a7b282ec0c2295a188c5d8fb6a7c Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Fri, 6 Mar 2020 21:04:05 +0000 +Subject: [PATCH] init-env.in: do not add C/CXX parameters + +These are taken from the cross environment and include +sysroot paths, so are not reproducible. + +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Alexander Kanavin +--- + gettext-tools/tests/init-env.in | 4 ---- + 1 file changed, 4 deletions(-) + +diff --git a/gettext-tools/tests/init-env.in b/gettext-tools/tests/init-env.in +index cc84ffd..b69c990 100644 +--- a/gettext-tools/tests/init-env.in ++++ b/gettext-tools/tests/init-env.in +@@ -3,10 +3,6 @@ top_builddir=../.. 
+ + OBJEXT="@OBJEXT@" + EXEEXT="@EXEEXT@" +-CC="@CC@" +-CFLAGS="@CFLAGS@" +-CXX="@CXX@" +-CXXFLAGS="@CXXFLAGS@" + CPPFLAGS="@CPPFLAGS@" + LDFLAGS="@LDFLAGS@" + LTLIBINTL="@LTLIBINTL@" diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/0001-tests-autopoint-3-unset-MAKEFLAGS.patch b/poky/meta/recipes-core/gettext/gettext-0.21/0001-tests-autopoint-3-unset-MAKEFLAGS.patch new file mode 100644 index 000000000..b0bc6b97a --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/0001-tests-autopoint-3-unset-MAKEFLAGS.patch @@ -0,0 +1,26 @@ +From 38b256e5aa7dfeb42acffd89565e53a2c0bab3e3 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Tue, 7 Jan 2020 16:44:38 +0100 +Subject: [PATCH] tests/autopoint-3: unset MAKEFLAGS + +This is needed when running ptests, as the MAKEFLAGS value (set up by run-ptest) +is messing up the test. + +Upstream-Status: Inappropriate [oe-core specific] +Signed-off-by: Alexander Kanavin +--- + gettext-tools/tests/autopoint-3 | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/gettext-tools/tests/autopoint-3 b/gettext-tools/tests/autopoint-3 +index e13552b..55188df 100755 +--- a/gettext-tools/tests/autopoint-3 ++++ b/gettext-tools/tests/autopoint-3 +@@ -126,6 +126,7 @@ test $? = 0 || { cat autopoint.err; Exit 1; } + ${CONFIG_SHELL} ./configure >/dev/null 2>autpoint.err + test $? = 0 || { cat autopoint.err; Exit 1; } + ++unset MAKEFLAGS + ${MAKE} >/dev/null 2>autopoint.err + test $? = 0 || { cat autopoint.err; Exit 1; } + diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/mingw.patch b/poky/meta/recipes-core/gettext/gettext-0.21/mingw.patch new file mode 100644 index 000000000..b062c784f --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/mingw.patch @@ -0,0 +1,28 @@ +From 7cf68dffb2adb76375bfb0781e277510523a1f3e Mon Sep 17 00:00:00 2001 +From: Michele Locati +Date: Thu, 30 Jul 2020 18:58:02 +0200 +Subject: [PATCH] build: Fix build failure on Cygwin and mingw. + +* gettext-tools/woe32dll/gettextsrc-exports.c: Export formatstring_ruby. + +Upstream-Status: Backport +Signed-off-by: Alexander Kanavin +--- + gettext-tools/woe32dll/gettextsrc-exports.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/gettext-tools/woe32dll/gettextsrc-exports.c b/gettext-tools/woe32dll/gettextsrc-exports.c +index 4477ae8..6d76089 100644 +--- a/gettext-tools/woe32dll/gettextsrc-exports.c ++++ b/gettext-tools/woe32dll/gettextsrc-exports.c +@@ -50,6 +50,7 @@ VARIABLE(formatstring_python) + VARIABLE(formatstring_python_brace) + VARIABLE(formatstring_qt) + VARIABLE(formatstring_qt_plural) ++VARIABLE(formatstring_ruby) + VARIABLE(formatstring_scheme) + VARIABLE(formatstring_sh) + VARIABLE(formatstring_smalltalk) +-- +1.9.1 + diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/parallel.patch b/poky/meta/recipes-core/gettext/gettext-0.21/parallel.patch new file mode 100644 index 000000000..d96a376b7 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/parallel.patch @@ -0,0 +1,32 @@ +From 4a2a0a93b469093b60ffd0bec55d33d1e03d4713 Mon Sep 17 00:00:00 2001 +From: Joe Slater +Date: Thu, 7 Jun 2012 16:37:01 -0700 +Subject: [PATCH] instal libgettextlib.a before removing it + +In a multiple job build, Makefile can simultaneously +be installing and removing libgettextlib.a. We serialize +the operations. 
+ +Upstream-Status: Pending + +Signed-off-by: Joe Slater + +--- + gettext-tools/gnulib-lib/Makefile.am | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/gettext-tools/gnulib-lib/Makefile.am b/gettext-tools/gnulib-lib/Makefile.am +index 2126699..d2dd7e4 100644 +--- a/gettext-tools/gnulib-lib/Makefile.am ++++ b/gettext-tools/gnulib-lib/Makefile.am +@@ -58,6 +58,10 @@ endif + # Rules generated and collected by gnulib-tool. + include Makefile.gnulib + ++# defined in Makefile.gnulib but missing this dependency ++# ++install-exec-clean: install-libLTLIBRARIES ++ + # OS/2 does not support a DLL name longer than 8 characters. + if OS2 + libgettextlib_la_LDFLAGS += -os2dllname gtlib diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/run-ptest b/poky/meta/recipes-core/gettext/gettext-0.21/run-ptest new file mode 100644 index 000000000..f17f3c87a --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/run-ptest @@ -0,0 +1,6 @@ +#!/bin/sh +# +#This script is used to run gettext test suites +cd tests + +make -k runtest-TESTS top_srcdir=.. srcdir=. abs_srcdir=$PWD top_builddir=$PWD/../ abs_top_srcdir=$PWD/../ | cat diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/serial-tests-config.patch b/poky/meta/recipes-core/gettext/gettext-0.21/serial-tests-config.patch new file mode 100644 index 000000000..93f7c0333 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/serial-tests-config.patch @@ -0,0 +1,56 @@ +From ed64a5724ef7d6eb4e9a876f817ea266a536e195 Mon Sep 17 00:00:00 2001 +From: "Hongjun.Yang" +Date: Thu, 28 Jul 2016 12:36:15 +0800 +Subject: [PATCH] fix for ptest + +Add serial-tests support, ptest need it + +Upstream-Status: Inappropriate [oe specific] + +Signed-off-by: Changqing Li + +--- + configure.ac | 2 +- + gettext-runtime/configure.ac | 2 +- + gettext-tools/configure.ac | 2 +- + 3 files changed, 3 insertions(+), 3 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 38db6fd..f019ae0 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -22,7 +22,7 @@ AC_INIT([gettext], + [bug-gettext@gnu.org]) + AC_CONFIG_SRCDIR([gettext-tools/src/msgfmt.c]) + AC_CONFIG_AUX_DIR([build-aux]) +-AM_INIT_AUTOMAKE([1.13 silent-rules parallel-tests tar-ustar]) ++AM_INIT_AUTOMAKE([1.13 silent-rules serial-tests tar-ustar]) + + dnl Override automake's tar command used for creating distributions. + am__tar='${AMTAR} chf - --format=ustar --owner=root --group=root "$$tardir"' +diff --git a/gettext-runtime/configure.ac b/gettext-runtime/configure.ac +index de203e7..138a07f 100644 +--- a/gettext-runtime/configure.ac ++++ b/gettext-runtime/configure.ac +@@ -22,7 +22,7 @@ AC_INIT([gettext-runtime], + [bug-gettext@gnu.org]) + AC_CONFIG_SRCDIR([intl/dcigettext.c]) + AC_CONFIG_AUX_DIR([../build-aux]) +-AM_INIT_AUTOMAKE([1.11.1 silent-rules parallel-tests]) ++AM_INIT_AUTOMAKE([1.11.1 silent-rules serial-tests]) + AC_CONFIG_HEADERS([config.h]) + + dnl Installation directories. +diff --git a/gettext-tools/configure.ac b/gettext-tools/configure.ac +index cf1dd73..b544d6d 100644 +--- a/gettext-tools/configure.ac ++++ b/gettext-tools/configure.ac +@@ -22,7 +22,7 @@ AC_INIT([gettext-tools], + [bug-gettext@gnu.org]) + AC_CONFIG_SRCDIR([src/msgfmt.c]) + AC_CONFIG_AUX_DIR([../build-aux]) +-AM_INIT_AUTOMAKE([1.11.1 silent-rules parallel-tests]) ++AM_INIT_AUTOMAKE([1.11.1 silent-rules serial-tests]) + AC_CONFIG_HEADERS([config.h]) + + dnl Installation directories. 
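Note on the run-ptest script and the serial-tests switch above: they only come into play once the package is installed on a target image with ptest enabled. The sketch below is illustrative only and is not part of the patch series; the install path and the ptest-runner invocation follow the usual OpenEmbedded ptest conventions and are assumptions, not taken from these patches.

    # Illustrative sketch only -- not part of the patches in this commit.
    # With ptest enabled in the image, the suite is normally driven by
    # ptest-runner, which changes into the package's ptest directory and
    # executes its run-ptest script:
    ptest-runner gettext            # assumed to resolve to the gettext ptest directory
    # Running it by hand is roughly equivalent to:
    cd /usr/lib/gettext/ptest       # assumed install location (${libdir}/${PN}/ptest)
    ./run-ptest                     # drives the "make -k runtest-TESTS ..." call shown above

Because run-ptest parses the aggregate make output, the serial-tests automake option above keeps the per-test results on stdout instead of scattering them into parallel-tests log files.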
diff --git a/poky/meta/recipes-core/gettext/gettext-0.21/use-pkgconfig.patch b/poky/meta/recipes-core/gettext/gettext-0.21/use-pkgconfig.patch new file mode 100644 index 000000000..fc77feb27 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-0.21/use-pkgconfig.patch @@ -0,0 +1,699 @@ +From 15647f679834be633fb4a9aeff4671b9cb95ccb8 Mon Sep 17 00:00:00 2001 +From: Ross Burton +Date: Tue, 23 Jan 2018 00:54:13 +0000 +Subject: [PATCH] gettext: beat library detection into shape + +For reasons which I just can't fathom gnulib doesn't use the expected tools to +find libraries but badly reinvents the wheel. This will trivially lead to host +contamination (explicit searches of /usr/lib) or incorrect RPATHs (bad +canonicalisation resulting in relative paths). + +Simply delete all the crazy, and replace with a single call to pkg-config. + +Upstream-Status: Inappropriate [upstream still refuse to consider pkg-config] +Signed-off-by: Ross Burton + +--- + gettext-tools/gnulib-m4/libxml.m4 | 99 +---------------- + .../gnulib-local/lib/term-styled-ostream.oo.c | 12 +-- + libtextstyle/gnulib-local/m4/libcroco.m4 | 99 +++-------------- + libtextstyle/gnulib-local/m4/libglib.m4 | 100 +++--------------- + libtextstyle/gnulib-m4/libcroco.m4 | 99 +++-------------- + libtextstyle/gnulib-m4/libglib.m4 | 100 +++--------------- + libtextstyle/lib/term-styled-ostream.c | 12 +-- + libtextstyle/lib/term-styled-ostream.oo.c | 12 +-- + 8 files changed, 83 insertions(+), 450 deletions(-) + +diff --git a/gettext-tools/gnulib-m4/libxml.m4 b/gettext-tools/gnulib-m4/libxml.m4 +index 2f80c37..30ce58e 100644 +--- a/gettext-tools/gnulib-m4/libxml.m4 ++++ b/gettext-tools/gnulib-m4/libxml.m4 +@@ -13,6 +13,7 @@ dnl gl_LIBXML(FORCE-INCLUDED) + dnl forces the use of the included or an external libxml. + AC_DEFUN([gl_LIBXML], + [ ++ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) + AC_REQUIRE([AM_ICONV_LINK]) + + ifelse([$1], , [ +@@ -30,100 +31,10 @@ AC_DEFUN([gl_LIBXML], + INCXML= + ifelse([$1], [yes], , [ + if test "$gl_cv_libxml_use_included" != yes; then +- dnl Figure out whether we can use a preinstalled libxml2, or have to use +- dnl the included one. +- AC_CACHE_VAL([gl_cv_libxml], [ +- gl_cv_libxml=no +- gl_cv_LIBXML= +- gl_cv_LTLIBXML= +- gl_cv_INCXML= +- gl_save_LIBS="$LIBS" +- LIBS="$LIBS $LIBICONV" +- dnl Search for libxml2 and define LIBXML2, LTLIBXML2 and INCXML2 +- dnl accordingly. +- dnl Don't use xml2-config nor pkg-config, since it doesn't work when +- dnl cross-compiling or when the C compiler in use is different from the +- dnl one that built the library. +- dnl Use a test program that tries to invoke xmlFree. On Cygwin 1.7.x, +- dnl libxml2 is built in such a way that uses of xmlFree work fine with +- dnl -Wl,--enable-auto-import but lead to a link error with +- dnl -Wl,--disable-auto-import. 
+- AC_LIB_LINKFLAGS_BODY([xml2]) +- LIBS="$gl_save_LIBS $LIBXML2 $LIBICONV" +- AC_TRY_LINK([#include +- #include +- #include +- ], +- [xmlCheckVersion (0); +- xmlFree ((void *) 0); +- xmlXPathSetContextNode ((void *)0, (void *)0); +- ], +- [gl_cv_libxml=yes +- gl_cv_LIBXML="$LIBXML2 $LIBICONV" +- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" +- ]) +- if test "$gl_cv_libxml" != yes; then +- gl_save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $INCXML2" +- AC_TRY_LINK([#include +- #include +- #include +- ], +- [xmlCheckVersion (0); +- xmlFree ((void *) 0); +- xmlXPathSetContextNode ((void *)0, (void *)0); +- ], +- [gl_cv_libxml=yes +- gl_cv_LIBXML="$LIBXML2 $LIBICONV" +- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" +- gl_cv_INCXML="$INCXML2" +- ]) +- if test "$gl_cv_libxml" != yes; then +- dnl Often the include files are installed in /usr/include/libxml2. +- dnl In libxml2-2.5, is self-contained. +- dnl In libxml2-2.6, it includes which is +- dnl self-contained. +- libxml2_include_dir= +- AC_TRY_CPP([#include ], +- [gl_ABSOLUTE_HEADER([libxml2/libxml/xmlexports.h]) +- libxml2_include_dir=`echo "$gl_cv_absolute_libxml2_libxml_xmlexports_h" | sed -e 's,.libxml.xmlexports\.h$,,'` +- ]) +- if test -z "$libxml2_include_dir"; then +- AC_TRY_CPP([#include ], +- [gl_ABSOLUTE_HEADER([libxml2/libxml/xmlversion.h]) +- libxml2_include_dir=`echo "$gl_cv_absolute_libxml2_libxml_xmlversion_h" | sed -e 's,.libxml.xmlversion\.h$,,'` +- ]) +- fi +- if test -n "$libxml2_include_dir" && test -d "$libxml2_include_dir"; then +- CPPFLAGS="$gl_save_CPPFLAGS -I$libxml2_include_dir" +- AC_TRY_LINK([#include +- #include +- #include +- ], +- [xmlCheckVersion (0); +- xmlFree ((void *) 0); +- xmlXPathSetContextNode ((void *)0, (void *)0); +- ], +- [gl_cv_libxml=yes +- gl_cv_LIBXML="$LIBXML2 $LIBICONV" +- gl_cv_LTLIBXML="$LTLIBXML2 $LTLIBICONV" +- gl_cv_INCXML="-I$libxml2_include_dir" +- ]) +- fi +- fi +- CPPFLAGS="$gl_save_CPPFLAGS" +- fi +- LIBS="$gl_save_LIBS" +- ]) +- AC_MSG_CHECKING([for libxml]) +- AC_MSG_RESULT([$gl_cv_libxml]) +- if test $gl_cv_libxml = yes; then +- LIBXML="$gl_cv_LIBXML" +- LTLIBXML="$gl_cv_LTLIBXML" +- INCXML="$gl_cv_INCXML" +- else +- gl_cv_libxml_use_included=yes +- fi ++ PKG_CHECK_MODULES([XML], [libxml-2.0]) ++ LIBXML=$XML_LIBS ++ LTLIBXML=$XML_LIBS ++ INCXML=$XML_CFLAGS + fi + ]) + AC_SUBST([LIBXML]) +diff --git a/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c b/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c +index 2ff978f..5ffb17a 100644 +--- a/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c ++++ b/libtextstyle/gnulib-local/lib/term-styled-ostream.oo.c +@@ -22,15 +22,15 @@ + + #include + +-#include +-#include +-#include +-#include ++#include ++#include ++#include ++#include + /* has a broken double-inclusion guard in libcroco-0.6.1. */ + #ifndef __CR_FONTS_H__ +-# include ++# include + #endif +-#include ++#include + + #include "term-ostream.h" + #include "mem-hash-map.h" +diff --git a/libtextstyle/gnulib-local/m4/libcroco.m4 b/libtextstyle/gnulib-local/m4/libcroco.m4 +index bc53cc6..10b2455 100644 +--- a/libtextstyle/gnulib-local/m4/libcroco.m4 ++++ b/libtextstyle/gnulib-local/m4/libcroco.m4 +@@ -1,99 +1,34 @@ +-# libcroco.m4 serial 3 +-dnl Copyright (C) 2006-2007, 2019 Free Software Foundation, Inc. ++# libcroco.m4 serial 2 (gettext-0.17) ++dnl Copyright (C) 2006, 2015-2016 Free Software Foundation, Inc. 
+ dnl This file is free software; the Free Software Foundation + dnl gives unlimited permission to copy and/or distribute it, + dnl with or without modifications, as long as this notice is preserved. + + dnl From Bruno Haible. + +-dnl gl_LIBCROCO +-dnl gives the user the option to decide whether to use the included or +-dnl an external libcroco. +-dnl gl_LIBCROCO(FORCE-INCLUDED) +-dnl forces the use of the included or an external libcroco. + AC_DEFUN([gl_LIBCROCO], + [ +- ifelse([$1], [yes], , [ +- dnl libcroco depends on libglib. +- AC_REQUIRE([gl_LIBGLIB]) +- ]) ++ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) ++ dnl libcroco depends on libglib. ++ AC_REQUIRE([gl_LIBGLIB]) + +- ifelse([$1], , [ +- AC_MSG_CHECKING([whether included libcroco is requested]) +- AC_ARG_WITH([included-libcroco], +- [ --with-included-libcroco use the libcroco included here], +- [gl_cv_libcroco_force_included=$withval], +- [gl_cv_libcroco_force_included=no]) +- AC_MSG_RESULT([$gl_cv_libcroco_force_included]) +- ], [gl_cv_libcroco_force_included=$1]) ++ AC_MSG_CHECKING([whether included libcroco is requested]) ++ AC_ARG_WITH([included-libcroco], ++ [ --with-included-libcroco use the libcroco included here], ++ [gl_cv_libcroco_force_included=$withval], ++ [gl_cv_libcroco_force_included=no]) ++ AC_MSG_RESULT([$gl_cv_libcroco_force_included]) + + gl_cv_libcroco_use_included="$gl_cv_libcroco_force_included" + LIBCROCO= + LTLIBCROCO= + INCCROCO= +- ifelse([$1], [yes], , [ +- if test "$gl_cv_libcroco_use_included" != yes; then +- dnl Figure out whether we can use a preinstalled libcroco-0.6, or have to +- dnl use the included one. +- AC_CACHE_VAL([gl_cv_libcroco], [ +- gl_cv_libcroco=no +- gl_cv_LIBCROCO= +- gl_cv_LTLIBCROCO= +- gl_cv_INCCROCO= +- gl_save_LIBS="$LIBS" +- dnl Search for libcroco and define LIBCROCO_0_6, LTLIBCROCO_0_6 and +- dnl INCCROCO_0_6 accordingly. +- dnl Don't use croco-0.6-config nor pkg-config, since it doesn't work when +- dnl cross-compiling or when the C compiler in use is different from the +- dnl one that built the library. +- AC_LIB_LINKFLAGS_BODY([croco-0.6], [glib-2.0]) +- LIBS="$gl_save_LIBS $LIBCROCO_0_6" +- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- ]) +- if test "$gl_cv_libcroco" != yes; then +- gl_save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $INCCROCO_0_6" +- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- gl_cv_INCCROCO="$INCCROCO_0_6" +- ]) +- if test "$gl_cv_libcroco" != yes; then +- dnl Often the include files are installed in +- dnl /usr/include/libcroco-0.6/libcroco. 
+- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_ABSOLUTE_HEADER([libcroco-0.6/libcroco/libcroco-config.h]) +- libcroco_include_dir=`echo "$gl_cv_absolute_libcroco_0_6_libcroco_libcroco_config_h" | sed -e 's,.libcroco-config\.h$,,'` +- if test -d "$libcroco_include_dir"; then +- gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- gl_cv_INCCROCO="-I$libcroco_include_dir" +- fi +- ]) +- fi +- CPPFLAGS="$gl_save_CPPFLAGS" +- fi +- LIBS="$gl_save_LIBS" +- ]) +- AC_MSG_CHECKING([for libcroco]) +- AC_MSG_RESULT([$gl_cv_libcroco]) +- if test $gl_cv_libcroco = yes; then +- LIBCROCO="$gl_cv_LIBCROCO" +- LTLIBCROCO="$gl_cv_LTLIBCROCO" +- INCCROCO="$gl_cv_INCCROCO" +- else +- gl_cv_libcroco_use_included=yes +- fi +- fi +- ]) ++ if test "$gl_cv_libcroco_use_included" != yes; then ++ PKG_CHECK_MODULES([CROCO], [libcroco-0.6]) ++ LIBCROCO=$CROCO_LIBS ++ LTLIBCROCO=$CROCO_LIBS ++ INCCROCO=$CROCO_CFLAGS ++ fi + AC_SUBST([LIBCROCO]) + AC_SUBST([LTLIBCROCO]) + AC_SUBST([INCCROCO]) +diff --git a/libtextstyle/gnulib-local/m4/libglib.m4 b/libtextstyle/gnulib-local/m4/libglib.m4 +index 5853772..767fba2 100644 +--- a/libtextstyle/gnulib-local/m4/libglib.m4 ++++ b/libtextstyle/gnulib-local/m4/libglib.m4 +@@ -6,100 +6,26 @@ dnl with or without modifications, as long as this notice is preserved. + + dnl From Bruno Haible. + +-dnl gl_LIBGLIB +-dnl gives the user the option to decide whether to use the included or +-dnl an external libglib. +-dnl gl_LIBGLIB(FORCE-INCLUDED) +-dnl forces the use of the included or an external libglib. + AC_DEFUN([gl_LIBGLIB], + [ +- ifelse([$1], , [ +- AC_MSG_CHECKING([whether included glib is requested]) +- AC_ARG_WITH([included-glib], +- [ --with-included-glib use the glib2 included here], +- [gl_cv_libglib_force_included=$withval], +- [gl_cv_libglib_force_included=no]) +- AC_MSG_RESULT([$gl_cv_libglib_force_included]) +- ], [gl_cv_libglib_force_included=$1]) ++ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) ++ AC_MSG_CHECKING([whether included glib is requested]) ++ AC_ARG_WITH([included-glib], ++ [ --with-included-glib use the glib2 included here], ++ [gl_cv_libglib_force_included=$withval], ++ [gl_cv_libglib_force_included=no]) ++ AC_MSG_RESULT([$gl_cv_libglib_force_included]) + + gl_cv_libglib_use_included="$gl_cv_libglib_force_included" + LIBGLIB= + LTLIBGLIB= + INCGLIB= +- ifelse([$1], [yes], , [ +- if test "$gl_cv_libglib_use_included" != yes; then +- dnl Figure out whether we can use a preinstalled libglib-2.0, or have to use +- dnl the included one. +- AC_CACHE_VAL([gl_cv_libglib], [ +- gl_cv_libglib=no +- gl_cv_LIBGLIB= +- gl_cv_LTLIBGLIB= +- gl_cv_INCGLIB= +- gl_save_LIBS="$LIBS" +- dnl Search for libglib2 and define LIBGLIB_2_0, LTLIBGLIB_2_0 and +- dnl INCGLIB_2_0 accordingly. +- dnl Don't use glib-config nor pkg-config, since it doesn't work when +- dnl cross-compiling or when the C compiler in use is different from the +- dnl one that built the library. 
+- AC_LIB_LINKFLAGS_BODY([glib-2.0]) +- LIBS="$gl_save_LIBS $LIBGLIB_2_0" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- ]) +- if test "$gl_cv_libglib" != yes; then +- gl_save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $INCGLIB_2_0" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- gl_cv_INCGLIB="$INCGLIB_2_0" +- ]) +- if test "$gl_cv_libglib" != yes; then +- dnl Often the include files are installed in /usr/include/glib-2.0 +- dnl and /usr/lib/glib-2.0/include. +- if test -n "$LIBGLIB_2_0_PREFIX"; then +- CPPFLAGS="$gl_save_CPPFLAGS -I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- gl_cv_INCGLIB="-I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" +- ]) +- fi +- fi +- CPPFLAGS="$gl_save_CPPFLAGS" +- fi +- LIBS="$gl_save_LIBS" +- ]) +- AC_MSG_CHECKING([for glib]) +- AC_MSG_RESULT([$gl_cv_libglib]) +- if test $gl_cv_libglib = yes; then +- LIBGLIB="$gl_cv_LIBGLIB" +- LTLIBGLIB="$gl_cv_LTLIBGLIB" +- INCGLIB="$gl_cv_INCGLIB" +- else +- gl_cv_libglib_use_included=yes +- fi +- fi +- ]) ++ if test "$gl_cv_libglib_use_included" != yes; then ++ PKG_CHECK_MODULES([GLIB], [glib-2.0]) ++ LIBGLIB="$GLIB_LIBS" ++ LTLIBGLIB="$GLIB_LIBS" ++ INCGLIB="$GLIB_CFLAGS" ++ fi + AC_SUBST([LIBGLIB]) + AC_SUBST([LTLIBGLIB]) + AC_SUBST([INCGLIB]) +diff --git a/libtextstyle/gnulib-m4/libcroco.m4 b/libtextstyle/gnulib-m4/libcroco.m4 +index bc53cc6..10b2455 100644 +--- a/libtextstyle/gnulib-m4/libcroco.m4 ++++ b/libtextstyle/gnulib-m4/libcroco.m4 +@@ -1,99 +1,34 @@ +-# libcroco.m4 serial 3 +-dnl Copyright (C) 2006-2007, 2019 Free Software Foundation, Inc. ++# libcroco.m4 serial 2 (gettext-0.17) ++dnl Copyright (C) 2006, 2015-2016 Free Software Foundation, Inc. + dnl This file is free software; the Free Software Foundation + dnl gives unlimited permission to copy and/or distribute it, + dnl with or without modifications, as long as this notice is preserved. + + dnl From Bruno Haible. + +-dnl gl_LIBCROCO +-dnl gives the user the option to decide whether to use the included or +-dnl an external libcroco. +-dnl gl_LIBCROCO(FORCE-INCLUDED) +-dnl forces the use of the included or an external libcroco. + AC_DEFUN([gl_LIBCROCO], + [ +- ifelse([$1], [yes], , [ +- dnl libcroco depends on libglib. +- AC_REQUIRE([gl_LIBGLIB]) +- ]) ++ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) ++ dnl libcroco depends on libglib. 
++ AC_REQUIRE([gl_LIBGLIB]) + +- ifelse([$1], , [ +- AC_MSG_CHECKING([whether included libcroco is requested]) +- AC_ARG_WITH([included-libcroco], +- [ --with-included-libcroco use the libcroco included here], +- [gl_cv_libcroco_force_included=$withval], +- [gl_cv_libcroco_force_included=no]) +- AC_MSG_RESULT([$gl_cv_libcroco_force_included]) +- ], [gl_cv_libcroco_force_included=$1]) ++ AC_MSG_CHECKING([whether included libcroco is requested]) ++ AC_ARG_WITH([included-libcroco], ++ [ --with-included-libcroco use the libcroco included here], ++ [gl_cv_libcroco_force_included=$withval], ++ [gl_cv_libcroco_force_included=no]) ++ AC_MSG_RESULT([$gl_cv_libcroco_force_included]) + + gl_cv_libcroco_use_included="$gl_cv_libcroco_force_included" + LIBCROCO= + LTLIBCROCO= + INCCROCO= +- ifelse([$1], [yes], , [ +- if test "$gl_cv_libcroco_use_included" != yes; then +- dnl Figure out whether we can use a preinstalled libcroco-0.6, or have to +- dnl use the included one. +- AC_CACHE_VAL([gl_cv_libcroco], [ +- gl_cv_libcroco=no +- gl_cv_LIBCROCO= +- gl_cv_LTLIBCROCO= +- gl_cv_INCCROCO= +- gl_save_LIBS="$LIBS" +- dnl Search for libcroco and define LIBCROCO_0_6, LTLIBCROCO_0_6 and +- dnl INCCROCO_0_6 accordingly. +- dnl Don't use croco-0.6-config nor pkg-config, since it doesn't work when +- dnl cross-compiling or when the C compiler in use is different from the +- dnl one that built the library. +- AC_LIB_LINKFLAGS_BODY([croco-0.6], [glib-2.0]) +- LIBS="$gl_save_LIBS $LIBCROCO_0_6" +- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- ]) +- if test "$gl_cv_libcroco" != yes; then +- gl_save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $INCCROCO_0_6" +- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- gl_cv_INCCROCO="$INCCROCO_0_6" +- ]) +- if test "$gl_cv_libcroco" != yes; then +- dnl Often the include files are installed in +- dnl /usr/include/libcroco-0.6/libcroco. +- AC_TRY_LINK([#include ], +- [const char *version = LIBCROCO_VERSION; return !version;], +- [gl_ABSOLUTE_HEADER([libcroco-0.6/libcroco/libcroco-config.h]) +- libcroco_include_dir=`echo "$gl_cv_absolute_libcroco_0_6_libcroco_libcroco_config_h" | sed -e 's,.libcroco-config\.h$,,'` +- if test -d "$libcroco_include_dir"; then +- gl_cv_libcroco=yes +- gl_cv_LIBCROCO="$LIBCROCO_0_6" +- gl_cv_LTLIBCROCO="$LTLIBCROCO_0_6" +- gl_cv_INCCROCO="-I$libcroco_include_dir" +- fi +- ]) +- fi +- CPPFLAGS="$gl_save_CPPFLAGS" +- fi +- LIBS="$gl_save_LIBS" +- ]) +- AC_MSG_CHECKING([for libcroco]) +- AC_MSG_RESULT([$gl_cv_libcroco]) +- if test $gl_cv_libcroco = yes; then +- LIBCROCO="$gl_cv_LIBCROCO" +- LTLIBCROCO="$gl_cv_LTLIBCROCO" +- INCCROCO="$gl_cv_INCCROCO" +- else +- gl_cv_libcroco_use_included=yes +- fi +- fi +- ]) ++ if test "$gl_cv_libcroco_use_included" != yes; then ++ PKG_CHECK_MODULES([CROCO], [libcroco-0.6]) ++ LIBCROCO=$CROCO_LIBS ++ LTLIBCROCO=$CROCO_LIBS ++ INCCROCO=$CROCO_CFLAGS ++ fi + AC_SUBST([LIBCROCO]) + AC_SUBST([LTLIBCROCO]) + AC_SUBST([INCCROCO]) +diff --git a/libtextstyle/gnulib-m4/libglib.m4 b/libtextstyle/gnulib-m4/libglib.m4 +index 5853772..767fba2 100644 +--- a/libtextstyle/gnulib-m4/libglib.m4 ++++ b/libtextstyle/gnulib-m4/libglib.m4 +@@ -6,100 +6,26 @@ dnl with or without modifications, as long as this notice is preserved. + + dnl From Bruno Haible. 
+ +-dnl gl_LIBGLIB +-dnl gives the user the option to decide whether to use the included or +-dnl an external libglib. +-dnl gl_LIBGLIB(FORCE-INCLUDED) +-dnl forces the use of the included or an external libglib. + AC_DEFUN([gl_LIBGLIB], + [ +- ifelse([$1], , [ +- AC_MSG_CHECKING([whether included glib is requested]) +- AC_ARG_WITH([included-glib], +- [ --with-included-glib use the glib2 included here], +- [gl_cv_libglib_force_included=$withval], +- [gl_cv_libglib_force_included=no]) +- AC_MSG_RESULT([$gl_cv_libglib_force_included]) +- ], [gl_cv_libglib_force_included=$1]) ++ AC_REQUIRE([PKG_PROG_PKG_CONFIG]) ++ AC_MSG_CHECKING([whether included glib is requested]) ++ AC_ARG_WITH([included-glib], ++ [ --with-included-glib use the glib2 included here], ++ [gl_cv_libglib_force_included=$withval], ++ [gl_cv_libglib_force_included=no]) ++ AC_MSG_RESULT([$gl_cv_libglib_force_included]) + + gl_cv_libglib_use_included="$gl_cv_libglib_force_included" + LIBGLIB= + LTLIBGLIB= + INCGLIB= +- ifelse([$1], [yes], , [ +- if test "$gl_cv_libglib_use_included" != yes; then +- dnl Figure out whether we can use a preinstalled libglib-2.0, or have to use +- dnl the included one. +- AC_CACHE_VAL([gl_cv_libglib], [ +- gl_cv_libglib=no +- gl_cv_LIBGLIB= +- gl_cv_LTLIBGLIB= +- gl_cv_INCGLIB= +- gl_save_LIBS="$LIBS" +- dnl Search for libglib2 and define LIBGLIB_2_0, LTLIBGLIB_2_0 and +- dnl INCGLIB_2_0 accordingly. +- dnl Don't use glib-config nor pkg-config, since it doesn't work when +- dnl cross-compiling or when the C compiler in use is different from the +- dnl one that built the library. +- AC_LIB_LINKFLAGS_BODY([glib-2.0]) +- LIBS="$gl_save_LIBS $LIBGLIB_2_0" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- ]) +- if test "$gl_cv_libglib" != yes; then +- gl_save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $INCGLIB_2_0" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- gl_cv_INCGLIB="$INCGLIB_2_0" +- ]) +- if test "$gl_cv_libglib" != yes; then +- dnl Often the include files are installed in /usr/include/glib-2.0 +- dnl and /usr/lib/glib-2.0/include. 
+- if test -n "$LIBGLIB_2_0_PREFIX"; then +- CPPFLAGS="$gl_save_CPPFLAGS -I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" +- AC_TRY_LINK([#include +-#ifndef G_BEGIN_DECLS +-error this glib.h includes a glibconfig.h from a glib version 1.x +-#endif +-], +- [g_string_new ("foo");], +- [gl_cv_libglib=yes +- gl_cv_LIBGLIB="$LIBGLIB_2_0" +- gl_cv_LTLIBGLIB="$LTLIBGLIB_2_0" +- gl_cv_INCGLIB="-I$LIBGLIB_2_0_PREFIX/include/glib-2.0 -I$LIBGLIB_2_0_PREFIX/$acl_libdirstem/glib-2.0/include" +- ]) +- fi +- fi +- CPPFLAGS="$gl_save_CPPFLAGS" +- fi +- LIBS="$gl_save_LIBS" +- ]) +- AC_MSG_CHECKING([for glib]) +- AC_MSG_RESULT([$gl_cv_libglib]) +- if test $gl_cv_libglib = yes; then +- LIBGLIB="$gl_cv_LIBGLIB" +- LTLIBGLIB="$gl_cv_LTLIBGLIB" +- INCGLIB="$gl_cv_INCGLIB" +- else +- gl_cv_libglib_use_included=yes +- fi +- fi +- ]) ++ if test "$gl_cv_libglib_use_included" != yes; then ++ PKG_CHECK_MODULES([GLIB], [glib-2.0]) ++ LIBGLIB="$GLIB_LIBS" ++ LTLIBGLIB="$GLIB_LIBS" ++ INCGLIB="$GLIB_CFLAGS" ++ fi + AC_SUBST([LIBGLIB]) + AC_SUBST([LTLIBGLIB]) + AC_SUBST([INCGLIB]) +diff --git a/libtextstyle/lib/term-styled-ostream.c b/libtextstyle/lib/term-styled-ostream.c +index 5484800..16793fa 100644 +--- a/libtextstyle/lib/term-styled-ostream.c ++++ b/libtextstyle/lib/term-styled-ostream.c +@@ -28,15 +28,15 @@ + + #include + +-#include +-#include +-#include +-#include ++#include ++#include ++#include ++#include + /* has a broken double-inclusion guard in libcroco-0.6.1. */ + #ifndef __CR_FONTS_H__ +-# include ++# include + #endif +-#include ++#include + + #include "term-ostream.h" + #include "mem-hash-map.h" +diff --git a/libtextstyle/lib/term-styled-ostream.oo.c b/libtextstyle/lib/term-styled-ostream.oo.c +index 2ff978f..5ffb17a 100644 +--- a/libtextstyle/lib/term-styled-ostream.oo.c ++++ b/libtextstyle/lib/term-styled-ostream.oo.c +@@ -22,15 +22,15 @@ + + #include + +-#include +-#include +-#include +-#include ++#include ++#include ++#include ++#include + /* has a broken double-inclusion guard in libcroco-0.6.1. */ + #ifndef __CR_FONTS_H__ +-# include ++# include + #endif +-#include ++#include + + #include "term-ostream.h" + #include "mem-hash-map.h" diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/COPYING b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/COPYING deleted file mode 100644 index 3671ab698..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/COPYING +++ /dev/null @@ -1,4 +0,0 @@ -dnl Copyright (C) 1995-2016 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/Makefile.in.in b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/Makefile.in.in deleted file mode 100644 index de980e040..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/Makefile.in.in +++ /dev/null @@ -1,505 +0,0 @@ -# Makefile for PO directory in any package using GNU gettext. -# Copyright (C) 1995-2000 Ulrich Drepper -# Copyright (C) 2000-2020 Free Software Foundation, Inc. -# -# Copying and distribution of this file, with or without modification, -# are permitted in any medium without royalty provided the copyright -# notice and this notice are preserved. This file is offered as-is, -# without any warranty. 
-# -# Origin: gettext-0.20.2 -GETTEXT_MACRO_VERSION = 0.20 - -PACKAGE = @PACKAGE@ -VERSION = @VERSION@ -PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ - -SED = @SED@ -SHELL = /bin/sh -@SET_MAKE@ - -srcdir = @srcdir@ -top_srcdir = @top_srcdir@ -VPATH = @srcdir@ - -prefix = @prefix@ -exec_prefix = @exec_prefix@ -datarootdir = @datarootdir@ -datadir = @datadir@ -localedir = @localedir@ -gettextsrcdir = $(datadir)/gettext/po - -INSTALL = @INSTALL@ -INSTALL_DATA = @INSTALL_DATA@ - -# We use $(mkdir_p). -# In automake <= 1.9.x, $(mkdir_p) is defined either as "mkdir -p --" or as -# "$(mkinstalldirs)" or as "$(install_sh) -d". For these automake versions, -# @install_sh@ does not start with $(SHELL), so we add it. -# In automake >= 1.10, @mkdir_p@ is derived from ${MKDIR_P}, which is defined -# either as "/path/to/mkdir -p" or ".../install-sh -c -d". For these automake -# versions, $(mkinstalldirs) and $(install_sh) are unused. -mkinstalldirs = $(SHELL) @install_sh@ -d -install_sh = $(SHELL) @install_sh@ -MKDIR_P = @MKDIR_P@ -mkdir_p = @mkdir_p@ - -# When building gettext-tools, we prefer to use the built programs -# rather than installed programs. However, we can't do that when we -# are cross compiling. -CROSS_COMPILING = @CROSS_COMPILING@ - -GMSGFMT_ = @GMSGFMT@ -GMSGFMT_no = @GMSGFMT@ -GMSGFMT_yes = @GMSGFMT_015@ -GMSGFMT = $(GMSGFMT_$(USE_MSGCTXT)) -XGETTEXT_ = @XGETTEXT@ -XGETTEXT_no = @XGETTEXT@ -XGETTEXT_yes = @XGETTEXT_015@ -XGETTEXT = $(XGETTEXT_$(USE_MSGCTXT)) -MSGMERGE = @MSGMERGE@ -MSGMERGE_UPDATE = @MSGMERGE@ --update -MSGMERGE_FOR_MSGFMT_OPTION = @MSGMERGE_FOR_MSGFMT_OPTION@ -MSGINIT = msginit -MSGCONV = msgconv -MSGFILTER = msgfilter - -POFILES = @POFILES@ -GMOFILES = @GMOFILES@ -UPDATEPOFILES = @UPDATEPOFILES@ -DUMMYPOFILES = @DUMMYPOFILES@ -DISTFILES.common = Makefile.in.in remove-potcdate.sin \ -$(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3) -DISTFILES = $(DISTFILES.common) Makevars POTFILES.in \ -$(POFILES) $(GMOFILES) \ -$(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3) - -POTFILES = \ - -CATALOGS = @CATALOGS@ - -POFILESDEPS_ = $(srcdir)/$(DOMAIN).pot -POFILESDEPS_yes = $(POFILESDEPS_) -POFILESDEPS_no = -POFILESDEPS = $(POFILESDEPS_$(PO_DEPENDS_ON_POT)) - -DISTFILESDEPS_ = update-po -DISTFILESDEPS_yes = $(DISTFILESDEPS_) -DISTFILESDEPS_no = -DISTFILESDEPS = $(DISTFILESDEPS_$(DIST_DEPENDS_ON_UPDATE_PO)) - -# Makevars gets inserted here. (Don't remove this line!) - -all: all-@USE_NLS@ - - -.SUFFIXES: -.SUFFIXES: .po .gmo .sed .sin .nop .po-create .po-update - -# The .pot file, stamp-po, .po files, and .gmo files appear in release tarballs. -# The GNU Coding Standards say in -# : -# "GNU distributions usually contain some files which are not source files -# ... . Since these files normally appear in the source directory, they -# should always appear in the source directory, not in the build directory. -# So Makefile rules to update them should put the updated files in the -# source directory." -# Therefore we put these files in the source directory, not the build directory. - -# During .po -> .gmo conversion, take into account the most recent changes to -# the .pot file. This eliminates the need to update the .po files when the -# .pot file has changed, which would be troublesome if the .po files are put -# under version control. -$(GMOFILES): $(srcdir)/$(DOMAIN).pot -.po.gmo: - @lang=`echo $* | sed -e 's,.*/,,'`; \ - test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ - echo "$${cdcmd}rm -f $${lang}.gmo && $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) -o $${lang}.1po $${lang}.po $(DOMAIN).pot && $(GMSGFMT) -c --statistics --verbose -o $${lang}.gmo $${lang}.1po && rm -f $${lang}.1po"; \ - cd $(srcdir) && \ - rm -f $${lang}.gmo && \ - $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) -o $${lang}.1po $${lang}.po $(DOMAIN).pot && \ - $(GMSGFMT) -c --statistics --verbose -o t-$${lang}.gmo $${lang}.1po && \ - mv t-$${lang}.gmo $${lang}.gmo && \ - rm -f $${lang}.1po - -.sin.sed: - sed -e '/^#/d' $< > t-$@ - mv t-$@ $@ - - -all-yes: $(srcdir)/stamp-po -all-no: - -# Ensure that the gettext macros and this Makefile.in.in are in sync. -CHECK_MACRO_VERSION = \ - test "$(GETTEXT_MACRO_VERSION)" = "@GETTEXT_MACRO_VERSION@" \ - || { echo "*** error: gettext infrastructure mismatch: using a Makefile.in.in from gettext version $(GETTEXT_MACRO_VERSION) but the autoconf macros are from gettext version @GETTEXT_MACRO_VERSION@" 1>&2; \ - exit 1; \ - } - -# $(srcdir)/$(DOMAIN).pot is only created when needed. When xgettext finds no -# internationalized messages, no $(srcdir)/$(DOMAIN).pot is created (because -# we don't want to bother translators with empty POT files). We assume that -# LINGUAS is empty in this case, i.e. $(POFILES) and $(GMOFILES) are empty. -# In this case, $(srcdir)/stamp-po is a nop (i.e. a phony target). - -# $(srcdir)/stamp-po is a timestamp denoting the last time at which the CATALOGS -# have been loosely updated. Its purpose is that when a developer or translator -# checks out the package from a version control system, and the $(DOMAIN).pot -# file is not under version control, "make" will update the $(DOMAIN).pot and -# the $(CATALOGS), but subsequent invocations of "make" will do nothing. This -# timestamp would not be necessary if updating the $(CATALOGS) would always -# touch them; however, the rule for $(POFILES) has been designed to not touch -# files that don't need to be changed. -$(srcdir)/stamp-po: $(srcdir)/$(DOMAIN).pot - @$(CHECK_MACRO_VERSION) - test ! -f $(srcdir)/$(DOMAIN).pot || \ - test -z "$(GMOFILES)" || $(MAKE) $(GMOFILES) - @test ! -f $(srcdir)/$(DOMAIN).pot || { \ - echo "touch $(srcdir)/stamp-po" && \ - echo timestamp > $(srcdir)/stamp-poT && \ - mv $(srcdir)/stamp-poT $(srcdir)/stamp-po; \ - } - -# Note: Target 'all' must not depend on target '$(DOMAIN).pot-update', -# otherwise packages like GCC can not be built if only parts of the source -# have been downloaded. - -# This target rebuilds $(DOMAIN).pot; it is an expensive operation. -# Note that $(DOMAIN).pot is not touched if it doesn't need to be changed. -# The determination of whether the package xyz is a GNU one is based on the -# heuristic whether some file in the top level directory mentions "GNU xyz". -# If GNU 'find' is available, we avoid grepping through monster files. 
-$(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed - package_gnu="$(PACKAGE_GNU)"; \ - test -n "$$package_gnu" || { \ - if { if (LC_ALL=C find --version) 2>/dev/null | grep GNU >/dev/null; then \ - LC_ALL=C find -L $(top_srcdir) -maxdepth 1 -type f -size -10000000c -exec grep -i 'GNU @PACKAGE@' /dev/null '{}' ';' 2>/dev/null; \ - else \ - LC_ALL=C grep -i 'GNU @PACKAGE@' $(top_srcdir)/* 2>/dev/null; \ - fi; \ - } | grep -v 'libtool:' >/dev/null; then \ - package_gnu=yes; \ - else \ - package_gnu=no; \ - fi; \ - }; \ - if test "$$package_gnu" = "yes"; then \ - package_prefix='GNU '; \ - else \ - package_prefix=''; \ - fi; \ - if test -n '$(MSGID_BUGS_ADDRESS)' || test '$(PACKAGE_BUGREPORT)' = '@'PACKAGE_BUGREPORT'@'; then \ - msgid_bugs_address='$(MSGID_BUGS_ADDRESS)'; \ - else \ - msgid_bugs_address='$(PACKAGE_BUGREPORT)'; \ - fi; \ - case `$(XGETTEXT) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ - '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].* | 0.16 | 0.16.[0-1]*) \ - $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ - --add-comments=TRANSLATORS: \ - --files-from=$(srcdir)/POTFILES.in \ - --copyright-holder='$(COPYRIGHT_HOLDER)' \ - --msgid-bugs-address="$$msgid_bugs_address" \ - $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ - ;; \ - *) \ - $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ - --add-comments=TRANSLATORS: \ - --files-from=$(srcdir)/POTFILES.in \ - --copyright-holder='$(COPYRIGHT_HOLDER)' \ - --package-name="$${package_prefix}@PACKAGE@" \ - --package-version='@VERSION@' \ - --msgid-bugs-address="$$msgid_bugs_address" \ - $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ - ;; \ - esac - test ! -f $(DOMAIN).po || { \ - if test -f $(srcdir)/$(DOMAIN).pot-header; then \ - sed -e '1,/^#$$/d' < $(DOMAIN).po > $(DOMAIN).1po && \ - cat $(srcdir)/$(DOMAIN).pot-header $(DOMAIN).1po > $(DOMAIN).po && \ - rm -f $(DOMAIN).1po \ - || exit 1; \ - fi; \ - if test -f $(srcdir)/$(DOMAIN).pot; then \ - sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \ - sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \ - if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \ - rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \ - else \ - rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \ - mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ - fi; \ - else \ - mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ - fi; \ - } - -# This rule has no dependencies: we don't need to update $(DOMAIN).pot at -# every "make" invocation, only create it when it is missing. -# Only "make $(DOMAIN).pot-update" or "make dist" will force an update. -$(srcdir)/$(DOMAIN).pot: - $(MAKE) $(DOMAIN).pot-update - -# This target rebuilds a PO file if $(DOMAIN).pot has changed. -# Note that a PO file is not touched if it doesn't need to be changed. -$(POFILES): $(POFILESDEPS) - @test -f $(srcdir)/$(DOMAIN).pot || $(MAKE) $(srcdir)/$(DOMAIN).pot - @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \ - if test -f "$(srcdir)/$${lang}.po"; then \ - test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ - echo "$${cdcmd}$(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --lang=$${lang} --previous $${lang}.po $(DOMAIN).pot"; \ - cd $(srcdir) \ - && { case `$(MSGMERGE_UPDATE) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ - '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].*) \ - $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) $${lang}.po $(DOMAIN).pot;; \ - 0.1[6-7] | 0.1[6-7].*) \ - $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --previous $${lang}.po $(DOMAIN).pot;; \ - *) \ - $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --lang=$${lang} --previous $${lang}.po $(DOMAIN).pot;; \ - esac; \ - }; \ - else \ - $(MAKE) $${lang}.po-create; \ - fi - - -install: install-exec install-data -install-exec: -install-data: install-data-@USE_NLS@ - if test "$(PACKAGE)" = "gettext-tools"; then \ - $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ - for file in $(DISTFILES.common) Makevars.template; do \ - $(INSTALL_DATA) $(srcdir)/$$file \ - $(DESTDIR)$(gettextsrcdir)/$$file; \ - done; \ - for file in Makevars; do \ - rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ - done; \ - else \ - : ; \ - fi -install-data-no: all -install-data-yes: all - @catalogs='$(CATALOGS)'; \ - for cat in $$catalogs; do \ - cat=`basename $$cat`; \ - lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ - dir=$(localedir)/$$lang/LC_MESSAGES; \ - $(mkdir_p) $(DESTDIR)$$dir; \ - if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \ - $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \ - echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \ - for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ - if test -n "$$lc"; then \ - if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ - link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ - mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ - mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ - (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ - for file in *; do \ - if test -f $$file; then \ - ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ - fi; \ - done); \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ - else \ - if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ - :; \ - else \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ - mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ - fi; \ - fi; \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ - ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ - ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ - cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ - echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \ - fi; \ - done; \ - done - -install-strip: install - -installdirs: installdirs-exec installdirs-data -installdirs-exec: -installdirs-data: installdirs-data-@USE_NLS@ - if test "$(PACKAGE)" = "gettext-tools"; then \ - $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ - else \ - : ; \ - fi -installdirs-data-no: -installdirs-data-yes: - @catalogs='$(CATALOGS)'; \ - for cat in $$catalogs; do \ - cat=`basename $$cat`; \ - lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ - dir=$(localedir)/$$lang/LC_MESSAGES; \ - $(mkdir_p) $(DESTDIR)$$dir; \ - for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ - if test -n "$$lc"; then \ - if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | 
grep ' -> ' >/dev/null; then \ - link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ - mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ - mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ - (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ - for file in *; do \ - if test -f $$file; then \ - ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ - fi; \ - done); \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ - else \ - if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ - :; \ - else \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ - mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ - fi; \ - fi; \ - fi; \ - done; \ - done - -# Define this as empty until I found a useful application. -installcheck: - -uninstall: uninstall-exec uninstall-data -uninstall-exec: -uninstall-data: uninstall-data-@USE_NLS@ - if test "$(PACKAGE)" = "gettext-tools"; then \ - for file in $(DISTFILES.common) Makevars.template; do \ - rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ - done; \ - else \ - : ; \ - fi -uninstall-data-no: -uninstall-data-yes: - catalogs='$(CATALOGS)'; \ - for cat in $$catalogs; do \ - cat=`basename $$cat`; \ - lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ - for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \ - rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ - done; \ - done - -check: all - -info dvi ps pdf html tags TAGS ctags CTAGS ID: - -install-dvi install-ps install-pdf install-html: - -mostlyclean: - rm -f remove-potcdate.sed - rm -f $(srcdir)/stamp-poT - rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po - rm -fr *.o - -clean: mostlyclean - -distclean: clean - rm -f Makefile Makefile.in POTFILES - -maintainer-clean: distclean - @echo "This command is intended for maintainers to use;" - @echo "it deletes files that may require special tools to rebuild." - rm -f $(srcdir)/$(DOMAIN).pot $(srcdir)/stamp-po $(GMOFILES) - -distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir) -dist distdir: - test -z "$(DISTFILESDEPS)" || $(MAKE) $(DISTFILESDEPS) - @$(MAKE) dist2 -# This is a separate target because 'update-po' must be executed before. -dist2: $(srcdir)/stamp-po $(DISTFILES) - dists="$(DISTFILES)"; \ - if test "$(PACKAGE)" = "gettext-tools"; then \ - dists="$$dists Makevars.template"; \ - fi; \ - if test -f $(srcdir)/$(DOMAIN).pot; then \ - dists="$$dists $(DOMAIN).pot stamp-po"; \ - fi; \ - if test -f $(srcdir)/ChangeLog; then \ - dists="$$dists ChangeLog"; \ - fi; \ - for i in 0 1 2 3 4 5 6 7 8 9; do \ - if test -f $(srcdir)/ChangeLog.$$i; then \ - dists="$$dists ChangeLog.$$i"; \ - fi; \ - done; \ - if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \ - for file in $$dists; do \ - if test -f $$file; then \ - cp -p $$file $(distdir) || exit 1; \ - else \ - cp -p $(srcdir)/$$file $(distdir) || exit 1; \ - fi; \ - done - -update-po: Makefile - $(MAKE) $(DOMAIN).pot-update - test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES) - $(MAKE) update-gmo - -# General rule for creating PO files. - -.nop.po-create: - @lang=`echo $@ | sed -e 's/\.po-create$$//'`; \ - echo "File $$lang.po does not exist. If you are a translator, you can create it through 'msginit'." 1>&2; \ - exit 1 - -# General rule for updating PO files. - -.nop.po-update: - @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \ - if test "$(PACKAGE)" = "gettext-tools" && test "$(CROSS_COMPILING)" != "yes"; then PATH=`pwd`/../src:$$PATH; fi; \ - tmpdir=`pwd`; \ - echo "$$lang:"; \ - test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ - echo "$${cdcmd}$(MSGMERGE) $(MSGMERGE_OPTIONS) --lang=$$lang --previous $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \ - cd $(srcdir); \ - if { case `$(MSGMERGE) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ - '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].*) \ - $(MSGMERGE) $(MSGMERGE_OPTIONS) -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ - 0.1[6-7] | 0.1[6-7].*) \ - $(MSGMERGE) $(MSGMERGE_OPTIONS) --previous -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ - *) \ - $(MSGMERGE) $(MSGMERGE_OPTIONS) --lang=$$lang --previous -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ - esac; \ - }; then \ - if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ - rm -f $$tmpdir/$$lang.new.po; \ - else \ - if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ - :; \ - else \ - echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ - exit 1; \ - fi; \ - fi; \ - else \ - echo "msgmerge for $$lang.po failed!" 1>&2; \ - rm -f $$tmpdir/$$lang.new.po; \ - fi - -$(DUMMYPOFILES): - -update-gmo: Makefile $(GMOFILES) - @: - -# Recreate Makefile by invoking config.status. Explicitly invoke the shell, -# because execution permission bits may not work on the current file system. -# Use @SHELL@, which is the shell determined by autoconf for the use by its -# scripts, not $(SHELL) which is hardwired to /bin/sh and may be deficient. -Makefile: Makefile.in.in Makevars $(top_builddir)/config.status @POMAKEFILEDEPS@ - cd $(top_builddir) \ - && @SHELL@ ./config.status $(subdir)/$@.in po-directories - -force: - -# Tell versions [3.59,3.63) of GNU make not to export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/gettext.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/gettext.m4 deleted file mode 100644 index 4f25a27d9..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/gettext.m4 +++ /dev/null @@ -1,386 +0,0 @@ -# gettext.m4 serial 71 (gettext-0.20.2) -dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. -dnl -dnl This file can be used in projects which are not available under -dnl the GNU General Public License or the GNU Lesser General Public -dnl License but which still want to provide support for the GNU gettext -dnl functionality. -dnl Please note that the actual code of the GNU gettext library is covered -dnl by the GNU Lesser General Public License, and the rest of the GNU -dnl gettext package is covered by the GNU General Public License. -dnl They are *not* in the public domain. - -dnl Authors: -dnl Ulrich Drepper , 1995-2000. -dnl Bruno Haible , 2000-2006, 2008-2010. - -dnl Macro to add for using GNU gettext. - -dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). -dnl INTLSYMBOL must be one of 'external', 'use-libtool'. -dnl INTLSYMBOL should be 'external' for packages other than GNU gettext, and -dnl 'use-libtool' for the packages 'gettext-runtime' and 'gettext-tools'. -dnl If INTLSYMBOL is 'use-libtool', then a libtool library -dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, -dnl depending on --{enable,disable}-{shared,static} and on the presence of -dnl AM-DISABLE-SHARED). 
-dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext -dnl implementations (in libc or libintl) without the ngettext() function -dnl will be ignored. If NEEDSYMBOL is specified and is -dnl 'need-formatstring-macros', then GNU gettext implementations that don't -dnl support the ISO C 99 formatstring macros will be ignored. -dnl INTLDIR is used to find the intl libraries. If empty, -dnl the value '$(top_builddir)/intl/' is used. -dnl -dnl The result of the configuration is one of three cases: -dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled -dnl and used. -dnl Catalog format: GNU --> install in $(datadir) -dnl Catalog extension: .mo after installation, .gmo in source tree -dnl 2) GNU gettext has been found in the system's C library. -dnl Catalog format: GNU --> install in $(datadir) -dnl Catalog extension: .mo after installation, .gmo in source tree -dnl 3) No internationalization, always use English msgid. -dnl Catalog format: none -dnl Catalog extension: none -dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. -dnl The use of .gmo is historical (it was needed to avoid overwriting the -dnl GNU format catalogs when building on a platform with an X/Open gettext), -dnl but we keep it in order not to force irrelevant filename changes on the -dnl maintainers. -dnl -AC_DEFUN([AM_GNU_GETTEXT], -[ - dnl Argument checking. - ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [use-libtool], , - [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT -])])])]) - ifelse(ifelse([$1], [], [old])[]ifelse([$1], [no-libtool], [old]), [old], - [errprint([ERROR: Use of AM_GNU_GETTEXT without [external] argument is no longer supported. -])]) - ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , - [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT -])])])]) - define([gt_included_intl], - ifelse([$1], [external], [no], [yes])) - gt_NEEDS_INIT - AM_GNU_GETTEXT_NEED([$2]) - - AC_REQUIRE([AM_PO_SUBDIRS])dnl - ifelse(gt_included_intl, yes, [ - AC_REQUIRE([AM_INTL_SUBDIR])dnl - ]) - - dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. - AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) - AC_REQUIRE([AC_LIB_RPATH]) - - dnl Sometimes libintl requires libiconv, so first search for libiconv. - dnl Ideally we would do this search only after the - dnl if test "$USE_NLS" = "yes"; then - dnl if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then - dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT - dnl the configure script would need to contain the same shell code - dnl again, outside any 'if'. There are two solutions: - dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. - dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. - dnl Since AC_PROVIDE_IFELSE is not documented, we avoid it. - ifelse(gt_included_intl, yes, , [ - AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) - ]) - - dnl Sometimes, on Mac OS X, libintl requires linking with CoreFoundation. - gt_INTL_MACOSX - - dnl Set USE_NLS. - AC_REQUIRE([AM_NLS]) - - ifelse(gt_included_intl, yes, [ - BUILD_INCLUDED_LIBINTL=no - USE_INCLUDED_LIBINTL=no - ]) - LIBINTL= - LTLIBINTL= - POSUB= - - dnl Add a version number to the cache macros. 
- case " $gt_needs " in - *" need-formatstring-macros "*) gt_api_version=3 ;; - *" need-ngettext "*) gt_api_version=2 ;; - *) gt_api_version=1 ;; - esac - gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc" - gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl" - - dnl If we use NLS figure out what method - if test "$USE_NLS" = "yes"; then - gt_use_preinstalled_gnugettext=no - ifelse(gt_included_intl, yes, [ - AC_MSG_CHECKING([whether included gettext is requested]) - AC_ARG_WITH([included-gettext], - [ --with-included-gettext use the GNU gettext library included here], - nls_cv_force_use_gnu_gettext=$withval, - nls_cv_force_use_gnu_gettext=no) - AC_MSG_RESULT([$nls_cv_force_use_gnu_gettext]) - - nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" - if test "$nls_cv_force_use_gnu_gettext" != "yes"; then - ]) - dnl User does not insist on using GNU NLS library. Figure out what - dnl to use. If GNU gettext is available we use this. Else we have - dnl to fall back to GNU NLS library. - - if test $gt_api_version -ge 3; then - gt_revision_test_code=' -#ifndef __GNU_GETTEXT_SUPPORTED_REVISION -#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) -#endif -changequote(,)dnl -typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; -changequote([,])dnl -' - else - gt_revision_test_code= - fi - if test $gt_api_version -ge 2; then - gt_expression_test_code=' + * ngettext ("", "", 0)' - else - gt_expression_test_code= - fi - - AC_CACHE_CHECK([for GNU gettext in libc], [$gt_func_gnugettext_libc], - [AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#ifndef __GNU_GETTEXT_SUPPORTED_REVISION -extern int _nl_msg_cat_cntr; -extern int *_nl_domain_bindings; -#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_domain_bindings) -#else -#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 -#endif -$gt_revision_test_code - ]], - [[ -bindtextdomain ("", ""); -return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION - ]])], - [eval "$gt_func_gnugettext_libc=yes"], - [eval "$gt_func_gnugettext_libc=no"])]) - - if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then - dnl Sometimes libintl requires libiconv, so first search for libiconv. - ifelse(gt_included_intl, yes, , [ - AM_ICONV_LINK - ]) - dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL - dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) - dnl because that would add "-liconv" to LIBINTL and LTLIBINTL - dnl even if libiconv doesn't exist. - AC_LIB_LINKFLAGS_BODY([intl]) - AC_CACHE_CHECK([for GNU gettext in libintl], - [$gt_func_gnugettext_libintl], - [gt_save_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $INCINTL" - gt_save_LIBS="$LIBS" - LIBS="$LIBS $LIBINTL" - dnl Now see whether libintl exists and does not depend on libiconv. - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#ifndef __GNU_GETTEXT_SUPPORTED_REVISION -extern int _nl_msg_cat_cntr; -extern -#ifdef __cplusplus -"C" -#endif -const char *_nl_expand_alias (const char *); -#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) -#else -#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 -#endif -$gt_revision_test_code - ]], - [[ -bindtextdomain ("", ""); -return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION - ]])], - [eval "$gt_func_gnugettext_libintl=yes"], - [eval "$gt_func_gnugettext_libintl=no"]) - dnl Now see whether libintl exists and depends on libiconv. 
- if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then - LIBS="$LIBS $LIBICONV" - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#ifndef __GNU_GETTEXT_SUPPORTED_REVISION -extern int _nl_msg_cat_cntr; -extern -#ifdef __cplusplus -"C" -#endif -const char *_nl_expand_alias (const char *); -#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) -#else -#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 -#endif -$gt_revision_test_code - ]], - [[ -bindtextdomain ("", ""); -return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION - ]])], - [LIBINTL="$LIBINTL $LIBICONV" - LTLIBINTL="$LTLIBINTL $LTLIBICONV" - eval "$gt_func_gnugettext_libintl=yes" - ]) - fi - CPPFLAGS="$gt_save_CPPFLAGS" - LIBS="$gt_save_LIBS"]) - fi - - dnl If an already present or preinstalled GNU gettext() is found, - dnl use it. But if this macro is used in GNU gettext, and GNU - dnl gettext is already preinstalled in libintl, we update this - dnl libintl. (Cf. the install rule in intl/Makefile.in.) - if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ - || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ - && test "$PACKAGE" != gettext-runtime \ - && test "$PACKAGE" != gettext-tools; }; then - gt_use_preinstalled_gnugettext=yes - else - dnl Reset the values set by searching for libintl. - LIBINTL= - LTLIBINTL= - INCINTL= - fi - - ifelse(gt_included_intl, yes, [ - if test "$gt_use_preinstalled_gnugettext" != "yes"; then - dnl GNU gettext is not found in the C library. - dnl Fall back on included GNU gettext library. - nls_cv_use_gnu_gettext=yes - fi - fi - - if test "$nls_cv_use_gnu_gettext" = "yes"; then - dnl Mark actions used to generate GNU NLS library. - BUILD_INCLUDED_LIBINTL=yes - USE_INCLUDED_LIBINTL=yes - LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LIBICONV $LIBTHREAD" - LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LTLIBICONV $LTLIBTHREAD" - LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` - fi - - CATOBJEXT= - if test "$gt_use_preinstalled_gnugettext" = "yes" \ - || test "$nls_cv_use_gnu_gettext" = "yes"; then - dnl Mark actions to use GNU gettext tools. - CATOBJEXT=.gmo - fi - ]) - - if test -n "$INTL_MACOSX_LIBS"; then - if test "$gt_use_preinstalled_gnugettext" = "yes" \ - || test "$nls_cv_use_gnu_gettext" = "yes"; then - dnl Some extra flags are needed during linking. 
- LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" - LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" - fi - fi - - if test "$gt_use_preinstalled_gnugettext" = "yes" \ - || test "$nls_cv_use_gnu_gettext" = "yes"; then - AC_DEFINE([ENABLE_NLS], [1], - [Define to 1 if translation of program messages to the user's native language - is requested.]) - else - USE_NLS=no - fi - fi - - AC_MSG_CHECKING([whether to use NLS]) - AC_MSG_RESULT([$USE_NLS]) - if test "$USE_NLS" = "yes"; then - AC_MSG_CHECKING([where the gettext function comes from]) - if test "$gt_use_preinstalled_gnugettext" = "yes"; then - if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then - gt_source="external libintl" - else - gt_source="libc" - fi - else - gt_source="included intl directory" - fi - AC_MSG_RESULT([$gt_source]) - fi - - if test "$USE_NLS" = "yes"; then - - if test "$gt_use_preinstalled_gnugettext" = "yes"; then - if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then - AC_MSG_CHECKING([how to link with libintl]) - AC_MSG_RESULT([$LIBINTL]) - AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) - fi - - dnl For backward compatibility. Some packages may be using this. - AC_DEFINE([HAVE_GETTEXT], [1], - [Define if the GNU gettext() function is already present or preinstalled.]) - AC_DEFINE([HAVE_DCGETTEXT], [1], - [Define if the GNU dcgettext() function is already present or preinstalled.]) - fi - - dnl We need to process the po/ directory. - POSUB=po - fi - - ifelse(gt_included_intl, yes, [ - dnl In GNU gettext we have to set BUILD_INCLUDED_LIBINTL to 'yes' - dnl because some of the testsuite requires it. - BUILD_INCLUDED_LIBINTL=yes - - dnl Make all variables we use known to autoconf. - AC_SUBST([BUILD_INCLUDED_LIBINTL]) - AC_SUBST([USE_INCLUDED_LIBINTL]) - AC_SUBST([CATOBJEXT]) - ]) - - dnl For backward compatibility. Some Makefiles may be using this. - INTLLIBS="$LIBINTL" - AC_SUBST([INTLLIBS]) - - dnl Make all documented variables known to autoconf. - AC_SUBST([LIBINTL]) - AC_SUBST([LTLIBINTL]) - AC_SUBST([POSUB]) -]) - - -dnl gt_NEEDS_INIT ensures that the gt_needs variable is initialized. -m4_define([gt_NEEDS_INIT], -[ - m4_divert_text([DEFAULTS], [gt_needs=]) - m4_define([gt_NEEDS_INIT], []) -]) - - -dnl Usage: AM_GNU_GETTEXT_NEED([NEEDSYMBOL]) -AC_DEFUN([AM_GNU_GETTEXT_NEED], -[ - m4_divert_text([INIT_PREPARE], [gt_needs="$gt_needs $1"]) -]) - - -dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) -AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) - - -dnl Usage: AM_GNU_GETTEXT_REQUIRE_VERSION([gettext-version]) -AC_DEFUN([AM_GNU_GETTEXT_REQUIRE_VERSION], []) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/host-cpu-c-abi.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/host-cpu-c-abi.m4 deleted file mode 100644 index 6db2aa25a..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/host-cpu-c-abi.m4 +++ /dev/null @@ -1,675 +0,0 @@ -# host-cpu-c-abi.m4 serial 13 -dnl Copyright (C) 2002-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. - -dnl From Bruno Haible and Sam Steingold. - -dnl Sets the HOST_CPU variable to the canonical name of the CPU. -dnl Sets the HOST_CPU_C_ABI variable to the canonical name of the CPU with its -dnl C language ABI (application binary interface). 
-dnl Also defines __${HOST_CPU}__ and __${HOST_CPU_C_ABI}__ as C macros in -dnl config.h. -dnl -dnl This canonical name can be used to select a particular assembly language -dnl source file that will interoperate with C code on the given host. -dnl -dnl For example: -dnl * 'i386' and 'sparc' are different canonical names, because code for i386 -dnl will not run on SPARC CPUs and vice versa. They have different -dnl instruction sets. -dnl * 'sparc' and 'sparc64' are different canonical names, because code for -dnl 'sparc' and code for 'sparc64' cannot be linked together: 'sparc' code -dnl contains 32-bit instructions, whereas 'sparc64' code contains 64-bit -dnl instructions. A process on a SPARC CPU can be in 32-bit mode or in 64-bit -dnl mode, but not both. -dnl * 'mips' and 'mipsn32' are different canonical names, because they use -dnl different argument passing and return conventions for C functions, and -dnl although the instruction set of 'mips' is a large subset of the -dnl instruction set of 'mipsn32'. -dnl * 'mipsn32' and 'mips64' are different canonical names, because they use -dnl different sizes for the C types like 'int' and 'void *', and although -dnl the instruction sets of 'mipsn32' and 'mips64' are the same. -dnl * The same canonical name is used for different endiannesses. You can -dnl determine the endianness through preprocessor symbols: -dnl - 'arm': test __ARMEL__. -dnl - 'mips', 'mipsn32', 'mips64': test _MIPSEB vs. _MIPSEL. -dnl - 'powerpc64': test _BIG_ENDIAN vs. _LITTLE_ENDIAN. -dnl * The same name 'i386' is used for CPUs of type i386, i486, i586 -dnl (Pentium), AMD K7, Pentium II, Pentium IV, etc., because -dnl - Instructions that do not exist on all of these CPUs (cmpxchg, -dnl MMX, SSE, SSE2, 3DNow! etc.) are not frequently used. If your -dnl assembly language source files use such instructions, you will -dnl need to make the distinction. -dnl - Speed of execution of the common instruction set is reasonable across -dnl the entire family of CPUs. If you have assembly language source files -dnl that are optimized for particular CPU types (like GNU gmp has), you -dnl will need to make the distinction. -dnl See . -AC_DEFUN([gl_HOST_CPU_C_ABI], -[ - AC_REQUIRE([AC_CANONICAL_HOST]) - AC_REQUIRE([gl_C_ASM]) - AC_CACHE_CHECK([host CPU and C ABI], [gl_cv_host_cpu_c_abi], - [case "$host_cpu" in - -changequote(,)dnl - i[34567]86 ) -changequote([,])dnl - gl_cv_host_cpu_c_abi=i386 - ;; - - x86_64 ) - # On x86_64 systems, the C compiler may be generating code in one of - # these ABIs: - # - 64-bit instruction set, 64-bit pointers, 64-bit 'long': x86_64. - # - 64-bit instruction set, 64-bit pointers, 32-bit 'long': x86_64 - # with native Windows (mingw, MSVC). - # - 64-bit instruction set, 32-bit pointers, 32-bit 'long': x86_64-x32. - # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': i386. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if (defined __x86_64__ || defined __amd64__ \ - || defined _M_X64 || defined _M_AMD64) - int ok; - #else - error fail - #endif - ]])], - [AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __ILP32__ || defined _ILP32 - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=x86_64-x32], - [gl_cv_host_cpu_c_abi=x86_64])], - [gl_cv_host_cpu_c_abi=i386]) - ;; - -changequote(,)dnl - alphaev[4-8] | alphaev56 | alphapca5[67] | alphaev6[78] ) -changequote([,])dnl - gl_cv_host_cpu_c_abi=alpha - ;; - - arm* | aarch64 ) - # Assume arm with EABI. 
- # On arm64 systems, the C compiler may be generating code in one of - # these ABIs: - # - aarch64 instruction set, 64-bit pointers, 64-bit 'long': arm64. - # - aarch64 instruction set, 32-bit pointers, 32-bit 'long': arm64-ilp32. - # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': arm or armhf. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#ifdef __aarch64__ - int ok; - #else - error fail - #endif - ]])], - [AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __ILP32__ || defined _ILP32 - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=arm64-ilp32], - [gl_cv_host_cpu_c_abi=arm64])], - [# Don't distinguish little-endian and big-endian arm, since they - # don't require different machine code for simple operations and - # since the user can distinguish them through the preprocessor - # defines __ARMEL__ vs. __ARMEB__. - # But distinguish arm which passes floating-point arguments and - # return values in integer registers (r0, r1, ...) - this is - # gcc -mfloat-abi=soft or gcc -mfloat-abi=softfp - from arm which - # passes them in float registers (s0, s1, ...) and double registers - # (d0, d1, ...) - this is gcc -mfloat-abi=hard. GCC 4.6 or newer - # sets the preprocessor defines __ARM_PCS (for the first case) and - # __ARM_PCS_VFP (for the second case), but older GCC does not. - echo 'double ddd; void func (double dd) { ddd = dd; }' > conftest.c - # Look for a reference to the register d0 in the .s file. - AC_TRY_COMMAND(${CC-cc} $CFLAGS $CPPFLAGS $gl_c_asm_opt conftest.c) >/dev/null 2>&1 - if LC_ALL=C grep 'd0,' conftest.$gl_asmext >/dev/null; then - gl_cv_host_cpu_c_abi=armhf - else - gl_cv_host_cpu_c_abi=arm - fi - rm -f conftest* - ]) - ;; - - hppa1.0 | hppa1.1 | hppa2.0* | hppa64 ) - # On hppa, the C compiler may be generating 32-bit code or 64-bit - # code. In the latter case, it defines _LP64 and __LP64__. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#ifdef __LP64__ - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=hppa64], - [gl_cv_host_cpu_c_abi=hppa]) - ;; - - ia64* ) - # On ia64 on HP-UX, the C compiler may be generating 64-bit code or - # 32-bit code. In the latter case, it defines _ILP32. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#ifdef _ILP32 - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=ia64-ilp32], - [gl_cv_host_cpu_c_abi=ia64]) - ;; - - mips* ) - # We should also check for (_MIPS_SZPTR == 64), but gcc keeps this - # at 32. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined _MIPS_SZLONG && (_MIPS_SZLONG == 64) - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=mips64], - [# In the n32 ABI, _ABIN32 is defined, _ABIO32 is not defined (but - # may later get defined by ), and _MIPS_SIM == _ABIN32. - # In the 32 ABI, _ABIO32 is defined, _ABIN32 is not defined (but - # may later get defined by ), and _MIPS_SIM == _ABIO32. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if (_MIPS_SIM == _ABIN32) - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=mipsn32], - [gl_cv_host_cpu_c_abi=mips])]) - ;; - - powerpc* ) - # Different ABIs are in use on AIX vs. Mac OS X vs. Linux,*BSD. - # No need to distinguish them here; the caller may distinguish - # them based on the OS. - # On powerpc64 systems, the C compiler may still be generating - # 32-bit code. And on powerpc-ibm-aix systems, the C compiler may - # be generating 64-bit code. 
- AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __powerpc64__ || defined _ARCH_PPC64 - int ok; - #else - error fail - #endif - ]])], - [# On powerpc64, there are two ABIs on Linux: The AIX compatible - # one and the ELFv2 one. The latter defines _CALL_ELF=2. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined _CALL_ELF && _CALL_ELF == 2 - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=powerpc64-elfv2], - [gl_cv_host_cpu_c_abi=powerpc64]) - ], - [gl_cv_host_cpu_c_abi=powerpc]) - ;; - - rs6000 ) - gl_cv_host_cpu_c_abi=powerpc - ;; - - riscv32 | riscv64 ) - # There are 2 architectures (with variants): rv32* and rv64*. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if __riscv_xlen == 64 - int ok; - #else - error fail - #endif - ]])], - [cpu=riscv64], - [cpu=riscv32]) - # There are 6 ABIs: ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d. - # Size of 'long' and 'void *': - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __LP64__ - int ok; - #else - error fail - #endif - ]])], - [main_abi=lp64], - [main_abi=ilp32]) - # Float ABIs: - # __riscv_float_abi_double: - # 'float' and 'double' are passed in floating-point registers. - # __riscv_float_abi_single: - # 'float' are passed in floating-point registers. - # __riscv_float_abi_soft: - # No values are passed in floating-point registers. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __riscv_float_abi_double - int ok; - #else - error fail - #endif - ]])], - [float_abi=d], - [AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __riscv_float_abi_single - int ok; - #else - error fail - #endif - ]])], - [float_abi=f], - [float_abi='']) - ]) - gl_cv_host_cpu_c_abi="${cpu}-${main_abi}${float_abi}" - ;; - - s390* ) - # On s390x, the C compiler may be generating 64-bit (= s390x) code - # or 31-bit (= s390) code. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __LP64__ || defined __s390x__ - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=s390x], - [gl_cv_host_cpu_c_abi=s390]) - ;; - - sparc | sparc64 ) - # UltraSPARCs running Linux have `uname -m` = "sparc64", but the - # C compiler still generates 32-bit code. - AC_COMPILE_IFELSE( - [AC_LANG_SOURCE( - [[#if defined __sparcv9 || defined __arch64__ - int ok; - #else - error fail - #endif - ]])], - [gl_cv_host_cpu_c_abi=sparc64], - [gl_cv_host_cpu_c_abi=sparc]) - ;; - - *) - gl_cv_host_cpu_c_abi="$host_cpu" - ;; - esac - ]) - - dnl In most cases, $HOST_CPU and $HOST_CPU_C_ABI are the same. - HOST_CPU=`echo "$gl_cv_host_cpu_c_abi" | sed -e 's/-.*//'` - HOST_CPU_C_ABI="$gl_cv_host_cpu_c_abi" - AC_SUBST([HOST_CPU]) - AC_SUBST([HOST_CPU_C_ABI]) - - # This was - # AC_DEFINE_UNQUOTED([__${HOST_CPU}__]) - # AC_DEFINE_UNQUOTED([__${HOST_CPU_C_ABI}__]) - # earlier, but KAI C++ 3.2d doesn't like this. - sed -e 's/-/_/g' >> confdefs.h < -#include - ]], - [[iconv_t cd = iconv_open("",""); - iconv(cd,NULL,NULL,NULL,NULL); - iconv_close(cd);]])], - [am_cv_func_iconv=yes]) - if test "$am_cv_func_iconv" != yes; then - am_save_LIBS="$LIBS" - LIBS="$LIBS $LIBICONV" - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#include - ]], - [[iconv_t cd = iconv_open("",""); - iconv(cd,NULL,NULL,NULL,NULL); - iconv_close(cd);]])], - [am_cv_lib_iconv=yes] - [am_cv_func_iconv=yes]) - LIBS="$am_save_LIBS" - fi - ]) - if test "$am_cv_func_iconv" = yes; then - AC_CACHE_CHECK([for working iconv], [am_cv_func_iconv_works], [ - dnl This tests against bugs in AIX 5.1, AIX 6.1..7.1, HP-UX 11.11, - dnl Solaris 10. 
- am_save_LIBS="$LIBS" - if test $am_cv_lib_iconv = yes; then - LIBS="$LIBS $LIBICONV" - fi - am_cv_func_iconv_works=no - for ac_iconv_const in '' 'const'; do - AC_RUN_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#include - -#ifndef ICONV_CONST -# define ICONV_CONST $ac_iconv_const -#endif - ]], - [[int result = 0; - /* Test against AIX 5.1 bug: Failures are not distinguishable from successful - returns. */ - { - iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); - if (cd_utf8_to_88591 != (iconv_t)(-1)) - { - static ICONV_CONST char input[] = "\342\202\254"; /* EURO SIGN */ - char buf[10]; - ICONV_CONST char *inptr = input; - size_t inbytesleft = strlen (input); - char *outptr = buf; - size_t outbytesleft = sizeof (buf); - size_t res = iconv (cd_utf8_to_88591, - &inptr, &inbytesleft, - &outptr, &outbytesleft); - if (res == 0) - result |= 1; - iconv_close (cd_utf8_to_88591); - } - } - /* Test against Solaris 10 bug: Failures are not distinguishable from - successful returns. */ - { - iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); - if (cd_ascii_to_88591 != (iconv_t)(-1)) - { - static ICONV_CONST char input[] = "\263"; - char buf[10]; - ICONV_CONST char *inptr = input; - size_t inbytesleft = strlen (input); - char *outptr = buf; - size_t outbytesleft = sizeof (buf); - size_t res = iconv (cd_ascii_to_88591, - &inptr, &inbytesleft, - &outptr, &outbytesleft); - if (res == 0) - result |= 2; - iconv_close (cd_ascii_to_88591); - } - } - /* Test against AIX 6.1..7.1 bug: Buffer overrun. */ - { - iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); - if (cd_88591_to_utf8 != (iconv_t)(-1)) - { - static ICONV_CONST char input[] = "\304"; - static char buf[2] = { (char)0xDE, (char)0xAD }; - ICONV_CONST char *inptr = input; - size_t inbytesleft = 1; - char *outptr = buf; - size_t outbytesleft = 1; - size_t res = iconv (cd_88591_to_utf8, - &inptr, &inbytesleft, - &outptr, &outbytesleft); - if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) - result |= 4; - iconv_close (cd_88591_to_utf8); - } - } -#if 0 /* This bug could be worked around by the caller. */ - /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ - { - iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); - if (cd_88591_to_utf8 != (iconv_t)(-1)) - { - static ICONV_CONST char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; - char buf[50]; - ICONV_CONST char *inptr = input; - size_t inbytesleft = strlen (input); - char *outptr = buf; - size_t outbytesleft = sizeof (buf); - size_t res = iconv (cd_88591_to_utf8, - &inptr, &inbytesleft, - &outptr, &outbytesleft); - if ((int)res > 0) - result |= 8; - iconv_close (cd_88591_to_utf8); - } - } -#endif - /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is - provided. */ - { - /* Try standardized names. */ - iconv_t cd1 = iconv_open ("UTF-8", "EUC-JP"); - /* Try IRIX, OSF/1 names. */ - iconv_t cd2 = iconv_open ("UTF-8", "eucJP"); - /* Try AIX names. */ - iconv_t cd3 = iconv_open ("UTF-8", "IBM-eucJP"); - /* Try HP-UX names. 
*/ - iconv_t cd4 = iconv_open ("utf8", "eucJP"); - if (cd1 == (iconv_t)(-1) && cd2 == (iconv_t)(-1) - && cd3 == (iconv_t)(-1) && cd4 == (iconv_t)(-1)) - result |= 16; - if (cd1 != (iconv_t)(-1)) - iconv_close (cd1); - if (cd2 != (iconv_t)(-1)) - iconv_close (cd2); - if (cd3 != (iconv_t)(-1)) - iconv_close (cd3); - if (cd4 != (iconv_t)(-1)) - iconv_close (cd4); - } - return result; -]])], - [am_cv_func_iconv_works=yes], , - [case "$host_os" in - aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; - *) am_cv_func_iconv_works="guessing yes" ;; - esac]) - test "$am_cv_func_iconv_works" = no || break - done - LIBS="$am_save_LIBS" - ]) - case "$am_cv_func_iconv_works" in - *no) am_func_iconv=no am_cv_lib_iconv=no ;; - *) am_func_iconv=yes ;; - esac - else - am_func_iconv=no am_cv_lib_iconv=no - fi - if test "$am_func_iconv" = yes; then - AC_DEFINE([HAVE_ICONV], [1], - [Define if you have the iconv() function and it works.]) - fi - if test "$am_cv_lib_iconv" = yes; then - AC_MSG_CHECKING([how to link with libiconv]) - AC_MSG_RESULT([$LIBICONV]) - else - dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV - dnl either. - CPPFLAGS="$am_save_CPPFLAGS" - LIBICONV= - LTLIBICONV= - fi - AC_SUBST([LIBICONV]) - AC_SUBST([LTLIBICONV]) -]) - -dnl Define AM_ICONV using AC_DEFUN_ONCE for Autoconf >= 2.64, in order to -dnl avoid warnings like -dnl "warning: AC_REQUIRE: `AM_ICONV' was expanded before it was required". -dnl This is tricky because of the way 'aclocal' is implemented: -dnl - It requires defining an auxiliary macro whose name ends in AC_DEFUN. -dnl Otherwise aclocal's initial scan pass would miss the macro definition. -dnl - It requires a line break inside the AC_DEFUN_ONCE and AC_DEFUN expansions. -dnl Otherwise aclocal would emit many "Use of uninitialized value $1" -dnl warnings. -m4_define([gl_iconv_AC_DEFUN], - m4_version_prereq([2.64], - [[AC_DEFUN_ONCE( - [$1], [$2])]], - [m4_ifdef([gl_00GNULIB], - [[AC_DEFUN_ONCE( - [$1], [$2])]], - [[AC_DEFUN( - [$1], [$2])]])])) -gl_iconv_AC_DEFUN([AM_ICONV], -[ - AM_ICONV_LINK - if test "$am_cv_func_iconv" = yes; then - AC_MSG_CHECKING([for iconv declaration]) - AC_CACHE_VAL([am_cv_proto_iconv], [ - AC_COMPILE_IFELSE( - [AC_LANG_PROGRAM( - [[ -#include -#include -extern -#ifdef __cplusplus -"C" -#endif -#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) -size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); -#else -size_t iconv(); -#endif - ]], - [[]])], - [am_cv_proto_iconv_arg1=""], - [am_cv_proto_iconv_arg1="const"]) - am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) - am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` - AC_MSG_RESULT([ - $am_cv_proto_iconv]) - else - dnl When compiling GNU libiconv on a system that does not have iconv yet, - dnl pick the POSIX compliant declaration without 'const'. - am_cv_proto_iconv_arg1="" - fi - AC_DEFINE_UNQUOTED([ICONV_CONST], [$am_cv_proto_iconv_arg1], - [Define as const if the declaration of iconv() needs const.]) - dnl Also substitute ICONV_CONST in the gnulib generated . 
- m4_ifdef([gl_ICONV_H_DEFAULTS], - [AC_REQUIRE([gl_ICONV_H_DEFAULTS]) - if test -n "$am_cv_proto_iconv_arg1"; then - ICONV_CONST="const" - fi - ]) -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/intlmacosx.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/intlmacosx.m4 deleted file mode 100644 index ebd9937c1..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/intlmacosx.m4 +++ /dev/null @@ -1,65 +0,0 @@ -# intlmacosx.m4 serial 8 (gettext-0.20.2) -dnl Copyright (C) 2004-2014, 2016, 2019-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. -dnl -dnl This file can be used in projects which are not available under -dnl the GNU General Public License or the GNU Lesser General Public -dnl License but which still want to provide support for the GNU gettext -dnl functionality. -dnl Please note that the actual code of the GNU gettext library is covered -dnl by the GNU Lesser General Public License, and the rest of the GNU -dnl gettext package is covered by the GNU General Public License. -dnl They are *not* in the public domain. - -dnl Checks for special options needed on Mac OS X. -dnl Defines INTL_MACOSX_LIBS. -AC_DEFUN([gt_INTL_MACOSX], -[ - dnl Check for API introduced in Mac OS X 10.4. - AC_CACHE_CHECK([for CFPreferencesCopyAppValue], - [gt_cv_func_CFPreferencesCopyAppValue], - [gt_save_LIBS="$LIBS" - LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[#include ]], - [[CFPreferencesCopyAppValue(NULL, NULL)]])], - [gt_cv_func_CFPreferencesCopyAppValue=yes], - [gt_cv_func_CFPreferencesCopyAppValue=no]) - LIBS="$gt_save_LIBS"]) - if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then - AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], [1], - [Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in the CoreFoundation framework.]) - fi - dnl Don't check for the API introduced in Mac OS X 10.5, CFLocaleCopyCurrent, - dnl because in macOS 10.13.4 it has the following behaviour: - dnl When two or more languages are specified in the - dnl "System Preferences > Language & Region > Preferred Languages" panel, - dnl it returns en_CC where CC is the territory (even when English is not among - dnl the preferred languages!). What we want instead is what - dnl CFLocaleCopyCurrent returned in earlier macOS releases and what - dnl CFPreferencesCopyAppValue still returns, namely ll_CC where ll is the - dnl first among the preferred languages and CC is the territory. 
- AC_CACHE_CHECK([for CFLocaleCopyPreferredLanguages], [gt_cv_func_CFLocaleCopyPreferredLanguages], - [gt_save_LIBS="$LIBS" - LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" - AC_LINK_IFELSE( - [AC_LANG_PROGRAM( - [[#include ]], - [[CFLocaleCopyPreferredLanguages();]])], - [gt_cv_func_CFLocaleCopyPreferredLanguages=yes], - [gt_cv_func_CFLocaleCopyPreferredLanguages=no]) - LIBS="$gt_save_LIBS"]) - if test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then - AC_DEFINE([HAVE_CFLOCALECOPYPREFERREDLANGUAGES], [1], - [Define to 1 if you have the Mac OS X function CFLocaleCopyPreferredLanguages in the CoreFoundation framework.]) - fi - INTL_MACOSX_LIBS= - if test $gt_cv_func_CFPreferencesCopyAppValue = yes \ - || test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then - INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" - fi - AC_SUBST([INTL_MACOSX_LIBS]) -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-ld.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-ld.m4 deleted file mode 100644 index 98c348faf..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-ld.m4 +++ /dev/null @@ -1,168 +0,0 @@ -# lib-ld.m4 serial 9 -dnl Copyright (C) 1996-2003, 2009-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. - -dnl Subroutines of libtool.m4, -dnl with replacements s/_*LT_PATH/AC_LIB_PROG/ and s/lt_/acl_/ to avoid -dnl collision with libtool.m4. - -dnl From libtool-2.4. Sets the variable with_gnu_ld to yes or no. -AC_DEFUN([AC_LIB_PROG_LD_GNU], -[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], [acl_cv_prog_gnu_ld], -[# I'd rather use --version here, but apparently some GNU lds only accept -v. -case `$LD -v 2>&1 /dev/null 2>&1 \ - && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ - || PATH_SEPARATOR=';' - } -fi - -if test -n "$LD"; then - AC_MSG_CHECKING([for ld]) -elif test "$GCC" = yes; then - AC_MSG_CHECKING([for ld used by $CC]) -elif test "$with_gnu_ld" = yes; then - AC_MSG_CHECKING([for GNU ld]) -else - AC_MSG_CHECKING([for non-GNU ld]) -fi -if test -n "$LD"; then - # Let the user override the test with a path. - : -else - AC_CACHE_VAL([acl_cv_path_LD], - [ - acl_cv_path_LD= # Final result of this test - ac_prog=ld # Program to search in $PATH - if test "$GCC" = yes; then - # Check if gcc -print-prog-name=ld gives a path. - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return which upsets mingw - acl_output=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - acl_output=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $acl_output in - # Accept absolute paths. - [[\\/]]* | ?:[[\\/]]*) - re_direlt='/[[^/]][[^/]]*/\.\./' - # Canonicalize the pathname of ld - acl_output=`echo "$acl_output" | sed 's%\\\\%/%g'` - while echo "$acl_output" | grep "$re_direlt" > /dev/null 2>&1; do - acl_output=`echo $acl_output | sed "s%$re_direlt%/%"` - done - # Got the pathname. No search in PATH is needed. - acl_cv_path_LD="$acl_output" - ac_prog= - ;; - "") - # If it fails, then pretend we aren't using GCC. - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac - fi - if test -n "$ac_prog"; then - # Search for $ac_prog in $PATH. - acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS="$acl_save_ifs" - test -z "$ac_dir" && ac_dir=. 
- if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - acl_cv_path_LD="$ac_dir/$ac_prog" - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. - case `"$acl_cv_path_LD" -v 2>&1 conftest.sh - . ./conftest.sh - rm -f ./conftest.sh - acl_cv_rpath=done - ]) - wl="$acl_cv_wl" - acl_libext="$acl_cv_libext" - acl_shlibext="$acl_cv_shlibext" - acl_libname_spec="$acl_cv_libname_spec" - acl_library_names_spec="$acl_cv_library_names_spec" - acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" - acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" - acl_hardcode_direct="$acl_cv_hardcode_direct" - acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" - dnl Determine whether the user wants rpath handling at all. - AC_ARG_ENABLE([rpath], - [ --disable-rpath do not hardcode runtime library paths], - :, enable_rpath=yes) -]) - -dnl AC_LIB_FROMPACKAGE(name, package) -dnl declares that libname comes from the given package. The configure file -dnl will then not have a --with-libname-prefix option but a -dnl --with-package-prefix option. Several libraries can come from the same -dnl package. This declaration must occur before an AC_LIB_LINKFLAGS or similar -dnl macro call that searches for libname. -AC_DEFUN([AC_LIB_FROMPACKAGE], -[ - pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], - [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) - define([acl_frompackage_]NAME, [$2]) - popdef([NAME]) - pushdef([PACK],[$2]) - pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], - [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) - define([acl_libsinpackage_]PACKUP, - m4_ifdef([acl_libsinpackage_]PACKUP, [m4_defn([acl_libsinpackage_]PACKUP)[, ]],)[lib$1]) - popdef([PACKUP]) - popdef([PACK]) -]) - -dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and -dnl the libraries corresponding to explicit and implicit dependencies. -dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. -dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found -dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. -AC_DEFUN([AC_LIB_LINKFLAGS_BODY], -[ - AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) - pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], - [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) - pushdef([PACK],[m4_ifdef([acl_frompackage_]NAME, [acl_frompackage_]NAME, lib[$1])]) - pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], - [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) - pushdef([PACKLIBS],[m4_ifdef([acl_frompackage_]NAME, [acl_libsinpackage_]PACKUP, lib[$1])]) - dnl By default, look in $includedir and $libdir. 
- use_additional=yes - AC_LIB_WITH_FINAL_PREFIX([ - eval additional_includedir=\"$includedir\" - eval additional_libdir=\"$libdir\" - eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" - eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" - ]) - AC_ARG_WITH(PACK[-prefix], -[[ --with-]]PACK[[-prefix[=DIR] search for ]PACKLIBS[ in DIR/include and DIR/lib - --without-]]PACK[[-prefix don't search for ]PACKLIBS[ in includedir and libdir]], -[ - if test "X$withval" = "Xno"; then - use_additional=no - else - if test "X$withval" = "X"; then - AC_LIB_WITH_FINAL_PREFIX([ - eval additional_includedir=\"$includedir\" - eval additional_libdir=\"$libdir\" - eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" - eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" - ]) - else - additional_includedir="$withval/include" - additional_libdir="$withval/$acl_libdirstem" - additional_libdir2="$withval/$acl_libdirstem2" - additional_libdir3="$withval/$acl_libdirstem3" - fi - fi -]) - if test "X$additional_libdir2" = "X$additional_libdir"; then - additional_libdir2= - fi - if test "X$additional_libdir3" = "X$additional_libdir"; then - additional_libdir3= - fi - dnl Search the library and its dependencies in $additional_libdir and - dnl $LDFLAGS. Using breadth-first-seach. - LIB[]NAME= - LTLIB[]NAME= - INC[]NAME= - LIB[]NAME[]_PREFIX= - dnl HAVE_LIB${NAME} is an indicator that LIB${NAME}, LTLIB${NAME} have been - dnl computed. So it has to be reset here. - HAVE_LIB[]NAME= - rpathdirs= - ltrpathdirs= - names_already_handled= - names_next_round='$1 $2' - while test -n "$names_next_round"; do - names_this_round="$names_next_round" - names_next_round= - for name in $names_this_round; do - already_handled= - for n in $names_already_handled; do - if test "$n" = "$name"; then - already_handled=yes - break - fi - done - if test -z "$already_handled"; then - names_already_handled="$names_already_handled $name" - dnl See if it was already located by an earlier AC_LIB_LINKFLAGS - dnl or AC_LIB_HAVE_LINKFLAGS call. - uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` - eval value=\"\$HAVE_LIB$uppername\" - if test -n "$value"; then - if test "$value" = yes; then - eval value=\"\$LIB$uppername\" - test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" - eval value=\"\$LTLIB$uppername\" - test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" - else - dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined - dnl that this library doesn't exist. So just drop it. - : - fi - else - dnl Search the library lib$name in $additional_libdir and $LDFLAGS - dnl and the already constructed $LIBNAME/$LTLIBNAME. - found_dir= - found_la= - found_so= - found_a= - eval libname=\"$acl_libname_spec\" # typically: libname=lib$name - if test -n "$acl_shlibext"; then - shrext=".$acl_shlibext" # typically: shrext=.so - else - shrext= - fi - if test $use_additional = yes; then - for additional_libdir_variable in additional_libdir additional_libdir2 additional_libdir3; do - if test "X$found_dir" = "X"; then - eval dir=\$$additional_libdir_variable - if test -n "$dir"; then - dnl The same code as in the loop below: - dnl First look for a shared library. 
- if test -n "$acl_shlibext"; then - if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then - found_dir="$dir" - found_so="$dir/$libname$shrext" - else - if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then - ver=`(cd "$dir" && \ - for f in "$libname$shrext".*; do echo "$f"; done \ - | sed -e "s,^$libname$shrext\\\\.,," \ - | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ - | sed 1q ) 2>/dev/null` - if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then - found_dir="$dir" - found_so="$dir/$libname$shrext.$ver" - fi - else - eval library_names=\"$acl_library_names_spec\" - for f in $library_names; do - if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then - found_dir="$dir" - found_so="$dir/$f" - break - fi - done - fi - fi - fi - dnl Then look for a static library. - if test "X$found_dir" = "X"; then - if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then - found_dir="$dir" - found_a="$dir/$libname.$acl_libext" - fi - fi - if test "X$found_dir" != "X"; then - if test -f "$dir/$libname.la"; then - found_la="$dir/$libname.la" - fi - fi - fi - fi - done - fi - if test "X$found_dir" = "X"; then - for x in $LDFLAGS $LTLIB[]NAME; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - case "$x" in - -L*) - dir=`echo "X$x" | sed -e 's/^X-L//'` - dnl First look for a shared library. - if test -n "$acl_shlibext"; then - if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then - found_dir="$dir" - found_so="$dir/$libname$shrext" - else - if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then - ver=`(cd "$dir" && \ - for f in "$libname$shrext".*; do echo "$f"; done \ - | sed -e "s,^$libname$shrext\\\\.,," \ - | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ - | sed 1q ) 2>/dev/null` - if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then - found_dir="$dir" - found_so="$dir/$libname$shrext.$ver" - fi - else - eval library_names=\"$acl_library_names_spec\" - for f in $library_names; do - if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then - found_dir="$dir" - found_so="$dir/$f" - break - fi - done - fi - fi - fi - dnl Then look for a static library. - if test "X$found_dir" = "X"; then - if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then - found_dir="$dir" - found_a="$dir/$libname.$acl_libext" - fi - fi - if test "X$found_dir" != "X"; then - if test -f "$dir/$libname.la"; then - found_la="$dir/$libname.la" - fi - fi - ;; - esac - if test "X$found_dir" != "X"; then - break - fi - done - fi - if test "X$found_dir" != "X"; then - dnl Found the library. - LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" - if test "X$found_so" != "X"; then - dnl Linking with a shared library. We attempt to hardcode its - dnl directory into the executable's runpath, unless it's the - dnl standard /usr/lib. - if test "$enable_rpath" = no \ - || test "X$found_dir" = "X/usr/$acl_libdirstem" \ - || test "X$found_dir" = "X/usr/$acl_libdirstem2" \ - || test "X$found_dir" = "X/usr/$acl_libdirstem3"; then - dnl No hardcoding is needed. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" - else - dnl Use an explicit option to hardcode DIR into the resulting - dnl binary. - dnl Potentially add DIR to ltrpathdirs. 
- dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. - haveit= - for x in $ltrpathdirs; do - if test "X$x" = "X$found_dir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - ltrpathdirs="$ltrpathdirs $found_dir" - fi - dnl The hardcoding into $LIBNAME is system dependent. - if test "$acl_hardcode_direct" = yes; then - dnl Using DIR/libNAME.so during linking hardcodes DIR into the - dnl resulting binary. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" - else - if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - dnl Use an explicit option to hardcode DIR into the resulting - dnl binary. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" - dnl Potentially add DIR to rpathdirs. - dnl The rpathdirs will be appended to $LIBNAME at the end. - haveit= - for x in $rpathdirs; do - if test "X$x" = "X$found_dir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - rpathdirs="$rpathdirs $found_dir" - fi - else - dnl Rely on "-L$found_dir". - dnl But don't add it if it's already contained in the LDFLAGS - dnl or the already constructed $LIBNAME - haveit= - for x in $LDFLAGS $LIB[]NAME; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-L$found_dir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" - fi - if test "$acl_hardcode_minus_L" != no; then - dnl FIXME: Not sure whether we should use - dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" - dnl here. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" - else - dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH - dnl here, because this doesn't fit in flags passed to the - dnl compiler. So give up. No hardcoding. This affects only - dnl very old systems. - dnl FIXME: Not sure whether we should use - dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" - dnl here. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" - fi - fi - fi - fi - else - if test "X$found_a" != "X"; then - dnl Linking with a static library. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" - else - dnl We shouldn't come here, but anyway it's good to have a - dnl fallback. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" - fi - fi - dnl Assume the include files are nearby. - additional_includedir= - case "$found_dir" in - */$acl_libdirstem | */$acl_libdirstem/) - basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` - if test "$name" = '$1'; then - LIB[]NAME[]_PREFIX="$basedir" - fi - additional_includedir="$basedir/include" - ;; - */$acl_libdirstem2 | */$acl_libdirstem2/) - basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` - if test "$name" = '$1'; then - LIB[]NAME[]_PREFIX="$basedir" - fi - additional_includedir="$basedir/include" - ;; - */$acl_libdirstem3 | */$acl_libdirstem3/) - basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem3/"'*$,,'` - if test "$name" = '$1'; then - LIB[]NAME[]_PREFIX="$basedir" - fi - additional_includedir="$basedir/include" - ;; - esac - if test "X$additional_includedir" != "X"; then - dnl Potentially add $additional_includedir to $INCNAME. - dnl But don't add it - dnl 1. if it's the standard /usr/include, - dnl 2. if it's /usr/local/include and we are using GCC on Linux, - dnl 3. if it's already present in $CPPFLAGS or the already - dnl constructed $INCNAME, - dnl 4. if it doesn't exist as a directory. 
- if test "X$additional_includedir" != "X/usr/include"; then - haveit= - if test "X$additional_includedir" = "X/usr/local/include"; then - if test -n "$GCC"; then - case $host_os in - linux* | gnu* | k*bsd*-gnu) haveit=yes;; - esac - fi - fi - if test -z "$haveit"; then - for x in $CPPFLAGS $INC[]NAME; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-I$additional_includedir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - if test -d "$additional_includedir"; then - dnl Really add $additional_includedir to $INCNAME. - INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" - fi - fi - fi - fi - fi - dnl Look for dependencies. - if test -n "$found_la"; then - dnl Read the .la file. It defines the variables - dnl dlname, library_names, old_library, dependency_libs, current, - dnl age, revision, installed, dlopen, dlpreopen, libdir. - save_libdir="$libdir" - case "$found_la" in - */* | *\\*) . "$found_la" ;; - *) . "./$found_la" ;; - esac - libdir="$save_libdir" - dnl We use only dependency_libs. - for dep in $dependency_libs; do - case "$dep" in - -L*) - dependency_libdir=`echo "X$dep" | sed -e 's/^X-L//'` - dnl Potentially add $dependency_libdir to $LIBNAME and $LTLIBNAME. - dnl But don't add it - dnl 1. if it's the standard /usr/lib, - dnl 2. if it's /usr/local/lib and we are using GCC on Linux, - dnl 3. if it's already present in $LDFLAGS or the already - dnl constructed $LIBNAME, - dnl 4. if it doesn't exist as a directory. - if test "X$dependency_libdir" != "X/usr/$acl_libdirstem" \ - && test "X$dependency_libdir" != "X/usr/$acl_libdirstem2" \ - && test "X$dependency_libdir" != "X/usr/$acl_libdirstem3"; then - haveit= - if test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem" \ - || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem2" \ - || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem3"; then - if test -n "$GCC"; then - case $host_os in - linux* | gnu* | k*bsd*-gnu) haveit=yes;; - esac - fi - fi - if test -z "$haveit"; then - haveit= - for x in $LDFLAGS $LIB[]NAME; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-L$dependency_libdir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - if test -d "$dependency_libdir"; then - dnl Really add $dependency_libdir to $LIBNAME. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$dependency_libdir" - fi - fi - haveit= - for x in $LDFLAGS $LTLIB[]NAME; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-L$dependency_libdir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - if test -d "$dependency_libdir"; then - dnl Really add $dependency_libdir to $LTLIBNAME. - LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$dependency_libdir" - fi - fi - fi - fi - ;; - -R*) - dir=`echo "X$dep" | sed -e 's/^X-R//'` - if test "$enable_rpath" != no; then - dnl Potentially add DIR to rpathdirs. - dnl The rpathdirs will be appended to $LIBNAME at the end. - haveit= - for x in $rpathdirs; do - if test "X$x" = "X$dir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - rpathdirs="$rpathdirs $dir" - fi - dnl Potentially add DIR to ltrpathdirs. - dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. - haveit= - for x in $ltrpathdirs; do - if test "X$x" = "X$dir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - ltrpathdirs="$ltrpathdirs $dir" - fi - fi - ;; - -l*) - dnl Handle this in the next round. 
- names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` - ;; - *.la) - dnl Handle this in the next round. Throw away the .la's - dnl directory; it is already contained in a preceding -L - dnl option. - names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` - ;; - *) - dnl Most likely an immediate library name. - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" - LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" - ;; - esac - done - fi - else - dnl Didn't find the library; assume it is in the system directories - dnl known to the linker and runtime loader. (All the system - dnl directories known to the linker should also be known to the - dnl runtime loader, otherwise the system is severely misconfigured.) - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" - LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" - fi - fi - fi - done - done - if test "X$rpathdirs" != "X"; then - if test -n "$acl_hardcode_libdir_separator"; then - dnl Weird platform: only the last -rpath option counts, the user must - dnl pass all path elements in one option. We can arrange that for a - dnl single library, but not when more than one $LIBNAMEs are used. - alldirs= - for found_dir in $rpathdirs; do - alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" - done - dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. - acl_save_libdir="$libdir" - libdir="$alldirs" - eval flag=\"$acl_hardcode_libdir_flag_spec\" - libdir="$acl_save_libdir" - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" - else - dnl The -rpath options are cumulative. - for found_dir in $rpathdirs; do - acl_save_libdir="$libdir" - libdir="$found_dir" - eval flag=\"$acl_hardcode_libdir_flag_spec\" - libdir="$acl_save_libdir" - LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" - done - fi - fi - if test "X$ltrpathdirs" != "X"; then - dnl When using libtool, the option that works for both libraries and - dnl executables is -R. The -R options are cumulative. - for found_dir in $ltrpathdirs; do - LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" - done - fi - popdef([PACKLIBS]) - popdef([PACKUP]) - popdef([PACK]) - popdef([NAME]) -]) - -dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, -dnl unless already present in VAR. -dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes -dnl contains two or three consecutive elements that belong together. -AC_DEFUN([AC_LIB_APPENDTOVAR], -[ - for element in [$2]; do - haveit= - for x in $[$1]; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X$element"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - [$1]="${[$1]}${[$1]:+ }$element" - fi - done -]) - -dnl For those cases where a variable contains several -L and -l options -dnl referring to unknown libraries and directories, this macro determines the -dnl necessary additional linker options for the runtime path. -dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) -dnl sets LDADDVAR to linker options needed together with LIBSVALUE. -dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, -dnl otherwise linking without libtool is assumed. 
-AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], -[ - AC_REQUIRE([AC_LIB_RPATH]) - AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) - $1= - if test "$enable_rpath" != no; then - if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then - dnl Use an explicit option to hardcode directories into the resulting - dnl binary. - rpathdirs= - next= - for opt in $2; do - if test -n "$next"; then - dir="$next" - dnl No need to hardcode the standard /usr/lib. - if test "X$dir" != "X/usr/$acl_libdirstem" \ - && test "X$dir" != "X/usr/$acl_libdirstem2" \ - && test "X$dir" != "X/usr/$acl_libdirstem3"; then - rpathdirs="$rpathdirs $dir" - fi - next= - else - case $opt in - -L) next=yes ;; - -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` - dnl No need to hardcode the standard /usr/lib. - if test "X$dir" != "X/usr/$acl_libdirstem" \ - && test "X$dir" != "X/usr/$acl_libdirstem2" \ - && test "X$dir" != "X/usr/$acl_libdirstem3"; then - rpathdirs="$rpathdirs $dir" - fi - next= ;; - *) next= ;; - esac - fi - done - if test "X$rpathdirs" != "X"; then - if test -n ""$3""; then - dnl libtool is used for linking. Use -R options. - for dir in $rpathdirs; do - $1="${$1}${$1:+ }-R$dir" - done - else - dnl The linker is used for linking directly. - if test -n "$acl_hardcode_libdir_separator"; then - dnl Weird platform: only the last -rpath option counts, the user - dnl must pass all path elements in one option. - alldirs= - for dir in $rpathdirs; do - alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" - done - acl_save_libdir="$libdir" - libdir="$alldirs" - eval flag=\"$acl_hardcode_libdir_flag_spec\" - libdir="$acl_save_libdir" - $1="$flag" - else - dnl The -rpath options are cumulative. - for dir in $rpathdirs; do - acl_save_libdir="$libdir" - libdir="$dir" - eval flag=\"$acl_hardcode_libdir_flag_spec\" - libdir="$acl_save_libdir" - $1="${$1}${$1:+ }$flag" - done - fi - fi - fi - fi - fi - AC_SUBST([$1]) -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-prefix.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-prefix.m4 deleted file mode 100644 index c8a0b464c..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/lib-prefix.m4 +++ /dev/null @@ -1,320 +0,0 @@ -# lib-prefix.m4 serial 17 -dnl Copyright (C) 2001-2005, 2008-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. - -dnl From Bruno Haible. - -dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed -dnl to access previously installed libraries. The basic assumption is that -dnl a user will want packages to use other packages he previously installed -dnl with the same --prefix option. -dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate -dnl libraries, but is otherwise very convenient. -AC_DEFUN([AC_LIB_PREFIX], -[ - AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_HOST]) - AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) - AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) - dnl By default, look in $includedir and $libdir. 
- use_additional=yes - AC_LIB_WITH_FINAL_PREFIX([ - eval additional_includedir=\"$includedir\" - eval additional_libdir=\"$libdir\" - ]) - AC_ARG_WITH([lib-prefix], -[[ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib - --without-lib-prefix don't search for libraries in includedir and libdir]], -[ - if test "X$withval" = "Xno"; then - use_additional=no - else - if test "X$withval" = "X"; then - AC_LIB_WITH_FINAL_PREFIX([ - eval additional_includedir=\"$includedir\" - eval additional_libdir=\"$libdir\" - ]) - else - additional_includedir="$withval/include" - additional_libdir="$withval/$acl_libdirstem" - fi - fi -]) - if test $use_additional = yes; then - dnl Potentially add $additional_includedir to $CPPFLAGS. - dnl But don't add it - dnl 1. if it's the standard /usr/include, - dnl 2. if it's already present in $CPPFLAGS, - dnl 3. if it's /usr/local/include and we are using GCC on Linux, - dnl 4. if it doesn't exist as a directory. - if test "X$additional_includedir" != "X/usr/include"; then - haveit= - for x in $CPPFLAGS; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-I$additional_includedir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - if test "X$additional_includedir" = "X/usr/local/include"; then - if test -n "$GCC"; then - case $host_os in - linux* | gnu* | k*bsd*-gnu) haveit=yes;; - esac - fi - fi - if test -z "$haveit"; then - if test -d "$additional_includedir"; then - dnl Really add $additional_includedir to $CPPFLAGS. - CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" - fi - fi - fi - fi - dnl Potentially add $additional_libdir to $LDFLAGS. - dnl But don't add it - dnl 1. if it's the standard /usr/lib, - dnl 2. if it's already present in $LDFLAGS, - dnl 3. if it's /usr/local/lib and we are using GCC on Linux, - dnl 4. if it doesn't exist as a directory. - if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then - haveit= - for x in $LDFLAGS; do - AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) - if test "X$x" = "X-L$additional_libdir"; then - haveit=yes - break - fi - done - if test -z "$haveit"; then - if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then - if test -n "$GCC"; then - case $host_os in - linux*) haveit=yes;; - esac - fi - fi - if test -z "$haveit"; then - if test -d "$additional_libdir"; then - dnl Really add $additional_libdir to $LDFLAGS. - LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" - fi - fi - fi - fi - fi -]) - -dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, -dnl acl_final_exec_prefix, containing the values to which $prefix and -dnl $exec_prefix will expand at the end of the configure script. -AC_DEFUN([AC_LIB_PREPARE_PREFIX], -[ - dnl Unfortunately, prefix and exec_prefix get only finally determined - dnl at the end of configure. - if test "X$prefix" = "XNONE"; then - acl_final_prefix="$ac_default_prefix" - else - acl_final_prefix="$prefix" - fi - if test "X$exec_prefix" = "XNONE"; then - acl_final_exec_prefix='${prefix}' - else - acl_final_exec_prefix="$exec_prefix" - fi - acl_save_prefix="$prefix" - prefix="$acl_final_prefix" - eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" - prefix="$acl_save_prefix" -]) - -dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the -dnl variables prefix and exec_prefix bound to the values they will have -dnl at the end of the configure script. 
-AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], -[ - acl_save_prefix="$prefix" - prefix="$acl_final_prefix" - acl_save_exec_prefix="$exec_prefix" - exec_prefix="$acl_final_exec_prefix" - $1 - exec_prefix="$acl_save_exec_prefix" - prefix="$acl_save_prefix" -]) - -dnl AC_LIB_PREPARE_MULTILIB creates -dnl - a function acl_is_expected_elfclass, that tests whether standard input -dn; has a 32-bit or 64-bit ELF header, depending on the host CPU ABI, -dnl - 3 variables acl_libdirstem, acl_libdirstem2, acl_libdirstem3, containing -dnl the basename of the libdir to try in turn, either "lib" or "lib64" or -dnl "lib/64" or "lib32" or "lib/sparcv9" or "lib/amd64" or similar. -AC_DEFUN([AC_LIB_PREPARE_MULTILIB], -[ - dnl There is no formal standard regarding lib, lib32, and lib64. - dnl On most glibc systems, the current practice is that on a system supporting - dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under - dnl $prefix/lib64 and 32-bit libraries go under $prefix/lib. However, on - dnl Arch Linux based distributions, it's the opposite: 32-bit libraries go - dnl under $prefix/lib32 and 64-bit libraries go under $prefix/lib. - dnl We determine the compiler's default mode by looking at the compiler's - dnl library search path. If at least one of its elements ends in /lib64 or - dnl points to a directory whose absolute pathname ends in /lib64, we use that - dnl for 64-bit ABIs. Similarly for 32-bit ABIs. Otherwise we use the default, - dnl namely "lib". - dnl On Solaris systems, the current practice is that on a system supporting - dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under - dnl $prefix/lib/64 (which is a symlink to either $prefix/lib/sparcv9 or - dnl $prefix/lib/amd64) and 32-bit libraries go under $prefix/lib. - AC_REQUIRE([AC_CANONICAL_HOST]) - AC_REQUIRE([gl_HOST_CPU_C_ABI_32BIT]) - - AC_CACHE_CHECK([for ELF binary format], [gl_cv_elf], - [AC_EGREP_CPP([Extensible Linking Format], - [#ifdef __ELF__ - Extensible Linking Format - #endif - ], - [gl_cv_elf=yes], - [gl_cv_elf=no]) - ]) - if test $gl_cv_elf; then - # Extract the ELF class of a file (5th byte) in decimal. - # Cf. https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header - if od -A x < /dev/null >/dev/null 2>/dev/null; then - # Use POSIX od. - func_elfclass () - { - od -A n -t d1 -j 4 -N 1 - } - else - # Use BSD hexdump. - func_elfclass () - { - dd bs=1 count=1 skip=4 2>/dev/null | hexdump -e '1/1 "%3d "' - echo - } - fi -changequote(,)dnl - case $HOST_CPU_C_ABI_32BIT in - yes) - # 32-bit ABI. - acl_is_expected_elfclass () - { - test "`func_elfclass | sed -e 's/[ ]//g'`" = 1 - } - ;; - no) - # 64-bit ABI. - acl_is_expected_elfclass () - { - test "`func_elfclass | sed -e 's/[ ]//g'`" = 2 - } - ;; - *) - # Unknown. - acl_is_expected_elfclass () - { - : - } - ;; - esac -changequote([,])dnl - else - acl_is_expected_elfclass () - { - : - } - fi - - dnl Allow the user to override the result by setting acl_cv_libdirstems. - AC_CACHE_CHECK([for the common suffixes of directories in the library search path], - [acl_cv_libdirstems], - [dnl Try 'lib' first, because that's the default for libdir in GNU, see - dnl . - acl_libdirstem=lib - acl_libdirstem2= - acl_libdirstem3= - case "$host_os" in - solaris*) - dnl See Solaris 10 Software Developer Collection > Solaris 64-bit Developer's Guide > The Development Environment - dnl . - dnl "Portable Makefiles should refer to any library directories using the 64 symbolic link." 
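For reference, the ELF-class probe in the deleted AC_LIB_PREPARE_MULTILIB macro boils down to reading byte 5 of the ELF header (1 = 32-bit, 2 = 64-bit). A minimal standalone sketch with POSIX od, not part of the patch; the path at the end is only an example:

#!/bin/sh
# Sketch only: classify a file by its ELF EI_CLASS byte, as the deleted
# acl_is_expected_elfclass helper does.
elf_class () {
  # -A n: no offset column; -t d1: decimal bytes; -j 4 -N 1: skip the
  # 4-byte magic and read exactly one byte (EI_CLASS).
  case `od -A n -t d1 -j 4 -N 1 "$1" | tr -d ' '` in
    1) echo "32-bit ELF" ;;
    2) echo "64-bit ELF" ;;
    *) echo "unrecognized ELF class" ;;
  esac
}
elf_class /bin/sh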
- dnl But we want to recognize the sparcv9 or amd64 subdirectory also if the - dnl symlink is missing, so we set acl_libdirstem2 too. - if test $HOST_CPU_C_ABI_32BIT = no; then - acl_libdirstem2=lib/64 - case "$host_cpu" in - sparc*) acl_libdirstem3=lib/sparcv9 ;; - i*86 | x86_64) acl_libdirstem3=lib/amd64 ;; - esac - fi - ;; - *) - dnl If $CC generates code for a 32-bit ABI, the libraries are - dnl surely under $prefix/lib or $prefix/lib32, not $prefix/lib64. - dnl Similarly, if $CC generates code for a 64-bit ABI, the libraries - dnl are surely under $prefix/lib or $prefix/lib64, not $prefix/lib32. - dnl Find the compiler's search path. However, non-system compilers - dnl sometimes have odd library search paths. But we can't simply invoke - dnl '/usr/bin/gcc -print-search-dirs' because that would not take into - dnl account the -m32/-m31 or -m64 options from the $CC or $CFLAGS. - searchpath=`(LC_ALL=C $CC $CPPFLAGS $CFLAGS -print-search-dirs) 2>/dev/null \ - | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` - if test $HOST_CPU_C_ABI_32BIT != no; then - # 32-bit or unknown ABI. - if test -d /usr/lib32; then - acl_libdirstem2=lib32 - fi - fi - if test $HOST_CPU_C_ABI_32BIT != yes; then - # 64-bit or unknown ABI. - if test -d /usr/lib64; then - acl_libdirstem3=lib64 - fi - fi - if test -n "$searchpath"; then - acl_save_IFS="${IFS= }"; IFS=":" - for searchdir in $searchpath; do - if test -d "$searchdir"; then - case "$searchdir" in - */lib32/ | */lib32 ) acl_libdirstem2=lib32 ;; - */lib64/ | */lib64 ) acl_libdirstem3=lib64 ;; - */../ | */.. ) - # Better ignore directories of this form. They are misleading. - ;; - *) searchdir=`cd "$searchdir" && pwd` - case "$searchdir" in - */lib32 ) acl_libdirstem2=lib32 ;; - */lib64 ) acl_libdirstem3=lib64 ;; - esac ;; - esac - fi - done - IFS="$acl_save_IFS" - if test $HOST_CPU_C_ABI_32BIT = yes; then - # 32-bit ABI. - acl_libdirstem3= - fi - if test $HOST_CPU_C_ABI_32BIT = no; then - # 64-bit ABI. - acl_libdirstem2= - fi - fi - ;; - esac - test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" - test -n "$acl_libdirstem3" || acl_libdirstem3="$acl_libdirstem" - acl_cv_libdirstems="$acl_libdirstem,$acl_libdirstem2,$acl_libdirstem3" - ]) - dnl Decompose acl_cv_libdirstems into acl_libdirstem, acl_libdirstem2, and - dnl acl_libdirstem3. -changequote(,)dnl - acl_libdirstem=`echo "$acl_cv_libdirstems" | sed -e 's/,.*//'` - acl_libdirstem2=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,//' -e 's/,.*//'` - acl_libdirstem3=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,[^,]*,//' -e 's/,.*//'` -changequote([,])dnl -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/nls.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/nls.m4 deleted file mode 100644 index 5a506fc4b..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/nls.m4 +++ /dev/null @@ -1,32 +0,0 @@ -# nls.m4 serial 6 (gettext-0.20.2) -dnl Copyright (C) 1995-2003, 2005-2006, 2008-2014, 2016, 2019-2020 Free -dnl Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. -dnl -dnl This file can be used in projects which are not available under -dnl the GNU General Public License or the GNU Lesser General Public -dnl License but which still want to provide support for the GNU gettext -dnl functionality. 
-dnl Please note that the actual code of the GNU gettext library is covered -dnl by the GNU Lesser General Public License, and the rest of the GNU -dnl gettext package is covered by the GNU General Public License. -dnl They are *not* in the public domain. - -dnl Authors: -dnl Ulrich Drepper , 1995-2000. -dnl Bruno Haible , 2000-2003. - -AC_PREREQ([2.50]) - -AC_DEFUN([AM_NLS], -[ - AC_MSG_CHECKING([whether NLS is requested]) - dnl Default is enabled NLS - AC_ARG_ENABLE([nls], - [ --disable-nls do not use Native Language Support], - USE_NLS=$enableval, USE_NLS=yes) - AC_MSG_RESULT([$USE_NLS]) - AC_SUBST([USE_NLS]) -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/po.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/po.m4 deleted file mode 100644 index 3778fd7aa..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/aclocal/po.m4 +++ /dev/null @@ -1,450 +0,0 @@ -# po.m4 serial 31 (gettext-0.20.2) -dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. -dnl This file is free software; the Free Software Foundation -dnl gives unlimited permission to copy and/or distribute it, -dnl with or without modifications, as long as this notice is preserved. -dnl -dnl This file can be used in projects which are not available under -dnl the GNU General Public License or the GNU Lesser General Public -dnl License but which still want to provide support for the GNU gettext -dnl functionality. -dnl Please note that the actual code of the GNU gettext library is covered -dnl by the GNU Lesser General Public License, and the rest of the GNU -dnl gettext package is covered by the GNU General Public License. -dnl They are *not* in the public domain. - -dnl Authors: -dnl Ulrich Drepper , 1995-2000. -dnl Bruno Haible , 2000-2003. - -AC_PREREQ([2.60]) - -dnl Checks for all prerequisites of the po subdirectory. -AC_DEFUN([AM_PO_SUBDIRS], -[ - AC_REQUIRE([AC_PROG_MAKE_SET])dnl - AC_REQUIRE([AC_PROG_INSTALL])dnl - AC_REQUIRE([AC_PROG_MKDIR_P])dnl - AC_REQUIRE([AC_PROG_SED])dnl - AC_REQUIRE([AM_NLS])dnl - - dnl Release version of the gettext macros. This is used to ensure that - dnl the gettext macros and po/Makefile.in.in are in sync. - AC_SUBST([GETTEXT_MACRO_VERSION], [0.20]) - - dnl Perform the following tests also if --disable-nls has been given, - dnl because they are needed for "make dist" to work. - - dnl Search for GNU msgfmt in the PATH. - dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions. - dnl The second test excludes FreeBSD msgfmt. - AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, - [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && - (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], - :) - AC_PATH_PROG([GMSGFMT], [gmsgfmt], [$MSGFMT]) - - dnl Test whether it is GNU msgfmt >= 0.15. -changequote(,)dnl - case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in - '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; - *) GMSGFMT_015=$GMSGFMT ;; - esac -changequote([,])dnl - AC_SUBST([GMSGFMT_015]) - - dnl Search for GNU xgettext 0.12 or newer in the PATH. - dnl The first test excludes Solaris xgettext and early GNU xgettext versions. - dnl The second test excludes FreeBSD xgettext. 
- AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, - [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && - (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], - :) - dnl Remove leftover from FreeBSD xgettext call. - rm -f messages.po - - dnl Test whether it is GNU xgettext >= 0.15. -changequote(,)dnl - case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in - '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; - *) XGETTEXT_015=$XGETTEXT ;; - esac -changequote([,])dnl - AC_SUBST([XGETTEXT_015]) - - dnl Search for GNU msgmerge 0.11 or newer in the PATH. - AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, - [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :) - - dnl Test whether it is GNU msgmerge >= 0.20. - if LC_ALL=C $MSGMERGE --help | grep ' --for-msgfmt ' >/dev/null; then - MSGMERGE_FOR_MSGFMT_OPTION='--for-msgfmt' - else - dnl Test whether it is GNU msgmerge >= 0.12. - if LC_ALL=C $MSGMERGE --help | grep ' --no-fuzzy-matching ' >/dev/null; then - MSGMERGE_FOR_MSGFMT_OPTION='--no-fuzzy-matching --no-location --quiet' - else - dnl With these old versions, $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) is - dnl slow. But this is not a big problem, as such old gettext versions are - dnl hardly in use any more. - MSGMERGE_FOR_MSGFMT_OPTION='--no-location --quiet' - fi - fi - AC_SUBST([MSGMERGE_FOR_MSGFMT_OPTION]) - - dnl Support for AM_XGETTEXT_OPTION. - test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= - AC_SUBST([XGETTEXT_EXTRA_OPTIONS]) - - AC_CONFIG_COMMANDS([po-directories], [[ - for ac_file in $CONFIG_FILES; do - # Support "outfile[:infile[:infile...]]" - case "$ac_file" in - *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; - esac - # PO directories have a Makefile.in generated from Makefile.in.in. - case "$ac_file" in */Makefile.in) - # Adjust a relative srcdir. - ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` - ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` - ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` - # In autoconf-2.13 it is called $ac_given_srcdir. - # In autoconf-2.50 it is called $srcdir. - test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" - case "$ac_given_srcdir" in - .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; - /*) top_srcdir="$ac_given_srcdir" ;; - *) top_srcdir="$ac_dots$ac_given_srcdir" ;; - esac - # Treat a directory as a PO directory if and only if it has a - # POTFILES.in file. This allows packages to have multiple PO - # directories under different names or in different locations. - if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then - rm -f "$ac_dir/POTFILES" - test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" - gt_tab=`printf '\t'` - cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ${gt_tab}]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" - POMAKEFILEDEPS="POTFILES.in" - # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend - # on $ac_dir but don't depend on user-specified configuration - # parameters. - if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then - # The LINGUAS file contains the set of available languages. 
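For reference, the po-directories config command deleted above turns POTFILES.in into POTFILES by dropping comments and blank lines, prefixing each entry with the source directory, and joining the entries with backslash continuations. An equivalent standalone sketch, not part of the patch; $top_srcdir is an example value:

#!/bin/sh
# Sketch only: POTFILES.in -> POTFILES, equivalent to the deleted
# po.m4 sed pipeline.
top_srcdir=..   # example value
sed -e '/^#/d' -e '/^[[:space:]]*$/d' POTFILES.in \
  | sed -e "s,^,     $top_srcdir/," -e 's,$, \\,' -e '$s/ \\$//' > POTFILES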
- if test -n "$OBSOLETE_ALL_LINGUAS"; then - test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" - fi - ALL_LINGUAS=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` - POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" - else - # The set of available languages was given in configure.in. - ALL_LINGUAS=$OBSOLETE_ALL_LINGUAS - fi - # Compute POFILES - # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) - # Compute UPDATEPOFILES - # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) - # Compute DUMMYPOFILES - # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) - # Compute GMOFILES - # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) - case "$ac_given_srcdir" in - .) srcdirpre= ;; - *) srcdirpre='$(srcdir)/' ;; - esac - POFILES= - UPDATEPOFILES= - DUMMYPOFILES= - GMOFILES= - for lang in $ALL_LINGUAS; do - POFILES="$POFILES $srcdirpre$lang.po" - UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" - DUMMYPOFILES="$DUMMYPOFILES $lang.nop" - GMOFILES="$GMOFILES $srcdirpre$lang.gmo" - done - # CATALOGS depends on both $ac_dir and the user's LINGUAS - # environment variable. - INST_LINGUAS= - if test -n "$ALL_LINGUAS"; then - for presentlang in $ALL_LINGUAS; do - useit=no - if test "%UNSET%" != "$LINGUAS"; then - desiredlanguages="$LINGUAS" - else - desiredlanguages="$ALL_LINGUAS" - fi - for desiredlang in $desiredlanguages; do - # Use the presentlang catalog if desiredlang is - # a. equal to presentlang, or - # b. a variant of presentlang (because in this case, - # presentlang can be used as a fallback for messages - # which are not translated in the desiredlang catalog). - case "$desiredlang" in - "$presentlang"*) useit=yes;; - esac - done - if test $useit = yes; then - INST_LINGUAS="$INST_LINGUAS $presentlang" - fi - done - fi - CATALOGS= - if test -n "$INST_LINGUAS"; then - for lang in $INST_LINGUAS; do - CATALOGS="$CATALOGS $lang.gmo" - done - fi - test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" - sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" - for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do - if test -f "$f"; then - case "$f" in - *.orig | *.bak | *~) ;; - *) cat "$f" >> "$ac_dir/Makefile" ;; - esac - fi - done - fi - ;; - esac - done]], - [# Capture the value of obsolete ALL_LINGUAS because we need it to compute - # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. - OBSOLETE_ALL_LINGUAS="$ALL_LINGUAS" - # Capture the value of LINGUAS because we need it to compute CATALOGS. - LINGUAS="${LINGUAS-%UNSET%}" - ]) -]) - -dnl Postprocesses a Makefile in a directory containing PO files. -AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE], -[ - # When this code is run, in config.status, two variables have already been - # set: - # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in, - # - LINGUAS is the value of the environment variable LINGUAS at configure - # time. - -changequote(,)dnl - # Adjust a relative srcdir. - ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` - ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` - ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` - # In autoconf-2.13 it is called $ac_given_srcdir. 
- # In autoconf-2.50 it is called $srcdir. - test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" - case "$ac_given_srcdir" in - .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; - /*) top_srcdir="$ac_given_srcdir" ;; - *) top_srcdir="$ac_dots$ac_given_srcdir" ;; - esac - - # Find a way to echo strings without interpreting backslash. - if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then - gt_echo='echo' - else - if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then - gt_echo='printf %s\n' - else - echo_func () { - cat < "$ac_file.tmp" - tab=`printf '\t'` - if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then - # Add dependencies that cannot be formulated as a simple suffix rule. - for lang in $ALL_LINGUAS; do - frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'` - cat >> "$ac_file.tmp" < /dev/null; then - # Add dependencies that cannot be formulated as a simple suffix rule. - for lang in $ALL_LINGUAS; do - frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'` - cat >> "$ac_file.tmp" <> "$ac_file.tmp" <, 1996. - -AC_PREREQ([2.50]) - -# Search path for a program which passes the given test. - -dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, -dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) -AC_DEFUN([AM_PATH_PROG_WITH_TEST], -[ -# Prepare PATH_SEPARATOR. -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which - # contains only /bin. Note that ksh looks also at the FPATH variable, - # so we have to set that as well for the test. - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ - && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ - || PATH_SEPARATOR=';' - } -fi - -# Find out how to test for executable files. Don't use a zero-byte file, -# as systems may use methods other than mode bits to determine executability. -cat >conf$$.file <<_ASEOF -#! /bin/sh -exit 0 -_ASEOF -chmod +x conf$$.file -if test -x conf$$.file >/dev/null 2>&1; then - ac_executable_p="test -x" -else - ac_executable_p="test -f" -fi -rm -f conf$$.file - -# Extract the first word of "$2", so it can be a program name with args. -set dummy $2; ac_word=[$]2 -AC_MSG_CHECKING([for $ac_word]) -AC_CACHE_VAL([ac_cv_path_$1], -[case "[$]$1" in - [[\\/]]* | ?:[[\\/]]*) - ac_cv_path_$1="[$]$1" # Let the user override the test with a path. - ;; - *) - ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR - for ac_dir in ifelse([$5], , $PATH, [$5]); do - IFS="$ac_save_IFS" - test -z "$ac_dir" && ac_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then - echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD - if [$3]; then - ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" - break 2 - fi - fi - done - done - IFS="$ac_save_IFS" -dnl If no 4th arg is given, leave the cache variable unset, -dnl so AC_PATH_PROGS will keep looking. 
-ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" -])dnl - ;; -esac])dnl -$1="$ac_cv_path_$1" -if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then - AC_MSG_RESULT([$][$1]) -else - AC_MSG_RESULT([no]) -fi -AC_SUBST([$1])dnl -]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/config.rpath b/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/config.rpath deleted file mode 100755 index 24be79cfb..000000000 --- a/poky/meta/recipes-core/gettext/gettext-minimal-0.20.2/config.rpath +++ /dev/null @@ -1,684 +0,0 @@ -#! /bin/sh -# Output a system dependent set of variables, describing how to set the -# run time search path of shared libraries in an executable. -# -# Copyright 1996-2020 Free Software Foundation, Inc. -# Taken from GNU libtool, 2001 -# Originally by Gordon Matzigkeit , 1996 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. -# -# The first argument passed to this file is the canonical host specification, -# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM -# or -# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM -# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld -# should be set by the caller. -# -# The set of defined variables is at the end of this script. - -# Known limitations: -# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer -# than 256 bytes, otherwise the compiler driver will dump core. The only -# known workaround is to choose shorter directory names for the build -# directory and/or the installation directory. - -# All known linkers require a '.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a -shrext=.so - -host="$1" -host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` -host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` -host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` - -# Code taken from libtool.m4's _LT_CC_BASENAME. - -for cc_temp in $CC""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac -done -cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'` - -# Code taken from libtool.m4's _LT_COMPILER_PIC. 
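For reference, the deleted config.rpath script receives the canonical host triplet as its first argument and splits it with sed into cpu, vendor, and os. A minimal sketch of that split, not part of the patch; the triplet is only an example:

#!/bin/sh
# Sketch only: split a canonical host triplet the way config.rpath does.
host=x86_64-pc-linux-gnu
host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'`
host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'`
host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'`
echo "cpu=$host_cpu vendor=$host_vendor os=$host_os"
# prints: cpu=x86_64 vendor=pc os=linux-gnu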
- -wl= -if test "$GCC" = yes; then - wl='-Wl,' -else - case "$host_os" in - aix*) - wl='-Wl,' - ;; - mingw* | cygwin* | pw32* | os2* | cegcc*) - ;; - hpux9* | hpux10* | hpux11*) - wl='-Wl,' - ;; - irix5* | irix6* | nonstopux*) - wl='-Wl,' - ;; - linux* | k*bsd*-gnu | kopensolaris*-gnu) - case $cc_basename in - ecc*) - wl='-Wl,' - ;; - icc* | ifort*) - wl='-Wl,' - ;; - lf95*) - wl='-Wl,' - ;; - nagfor*) - wl='-Wl,-Wl,,' - ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - wl='-Wl,' - ;; - ccc*) - wl='-Wl,' - ;; - xl* | bgxl* | bgf* | mpixl*) - wl='-Wl,' - ;; - como) - wl='-lopt=' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ F* | *Sun*Fortran*) - wl= - ;; - *Sun\ C*) - wl='-Wl,' - ;; - esac - ;; - esac - ;; - newsos6) - ;; - *nto* | *qnx*) - ;; - osf3* | osf4* | osf5*) - wl='-Wl,' - ;; - rdos*) - ;; - solaris*) - case $cc_basename in - f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - wl='-Qoption ld ' - ;; - *) - wl='-Wl,' - ;; - esac - ;; - sunos4*) - wl='-Qoption ld ' - ;; - sysv4 | sysv4.2uw2* | sysv4.3*) - wl='-Wl,' - ;; - sysv4*MP*) - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - wl='-Wl,' - ;; - unicos*) - wl='-Wl,' - ;; - uts4*) - ;; - esac -fi - -# Code taken from libtool.m4's _LT_LINKER_SHLIBS. - -hardcode_libdir_flag_spec= -hardcode_libdir_separator= -hardcode_direct=no -hardcode_minus_L=no - -case "$host_os" in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test "$GCC" != yes; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd*) - with_gnu_ld=no - ;; -esac - -ld_shlibs=yes -if test "$with_gnu_ld" = yes; then - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - # Unlike libtool, we use -rpath here, not --rpath, since the documented - # option of GNU ld is called -rpath, not --rpath. - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - case "$host_os" in - aix[3-9]*) - # On AIX/PPC, the GNU linker is very broken - if test "$host_cpu" != ia64; then - ld_shlibs=no - fi - ;; - amigaos*) - case "$host_cpu" in - powerpc) - ;; - m68k) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - beos*) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - : - else - ld_shlibs=no - fi - ;; - cygwin* | mingw* | pw32* | cegcc*) - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. 
- hardcode_libdir_flag_spec='-L$libdir' - if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then - : - else - ld_shlibs=no - fi - ;; - haiku*) - ;; - interix[3-9]*) - hardcode_direct=no - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - ;; - gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - : - else - ld_shlibs=no - fi - ;; - netbsd*) - ;; - solaris*) - if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then - ld_shlibs=no - elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - : - else - ld_shlibs=no - fi - ;; - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs=no - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' - else - ld_shlibs=no - fi - ;; - esac - ;; - sunos4*) - hardcode_direct=yes - ;; - *) - if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then - : - else - ld_shlibs=no - fi - ;; - esac - if test "$ld_shlibs" = no; then - hardcode_libdir_flag_spec= - fi -else - case "$host_os" in - aix3*) - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L=yes - if test "$GCC" = yes; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - hardcode_direct=unsupported - fi - ;; - aix[4-9]*) - if test "$host_cpu" = ia64; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - else - aix_use_runtimelinking=no - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # need to do runtime linking. - case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) - for ld_flag in $LDFLAGS; do - if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then - aix_use_runtimelinking=yes - break - fi - done - ;; - esac - fi - hardcode_direct=yes - hardcode_libdir_separator=':' - if test "$GCC" = yes; then - case $host_os in aix4.[012]|aix4.[012].*) - collect2name=`${CC} -print-prog-name=collect2` - if test -f "$collect2name" && \ - strings "$collect2name" | grep resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct=unsupported - hardcode_minus_L=yes - hardcode_libdir_flag_spec='-L$libdir' - hardcode_libdir_separator= - fi - ;; - esac - fi - # Begin _LT_AC_SYS_LIBPATH_AIX. - echo 'int main () { return 0; }' > conftest.c - ${CC} ${LDFLAGS} conftest.c -o conftest - aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } -}'` - if test -z "$aix_libpath"; then - aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } -}'` - fi - if test -z "$aix_libpath"; then - aix_libpath="/usr/lib:/lib" - fi - rm -f conftest.c conftest - # End _LT_AC_SYS_LIBPATH_AIX. 
- if test "$aix_use_runtimelinking" = yes; then - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - else - if test "$host_cpu" = ia64; then - hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' - else - hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" - fi - fi - ;; - amigaos*) - case "$host_cpu" in - powerpc) - ;; - m68k) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - bsdi[45]*) - ;; - cygwin* | mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - hardcode_libdir_flag_spec=' ' - libext=lib - ;; - darwin* | rhapsody*) - hardcode_direct=no - if { case $cc_basename in ifort*) true;; *) test "$GCC" = yes;; esac; }; then - : - else - ld_shlibs=no - fi - ;; - dgux*) - hardcode_libdir_flag_spec='-L$libdir' - ;; - freebsd2.[01]*) - hardcode_direct=yes - hardcode_minus_L=yes - ;; - freebsd* | dragonfly*) - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - ;; - hpux9*) - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - ;; - hpux10*) - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - fi - ;; - hpux11*) - if test "$with_gnu_ld" = no; then - hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' - hardcode_libdir_separator=: - case $host_cpu in - hppa*64*|ia64*) - hardcode_direct=no - ;; - *) - hardcode_direct=yes - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - ;; - esac - fi - ;; - irix5* | irix6* | nonstopux*) - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - ;; - netbsd*) - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - ;; - newsos6) - hardcode_direct=yes - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - ;; - *nto* | *qnx*) - ;; - openbsd*) - if test -f /usr/libexec/ld.so; then - hardcode_direct=yes - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - else - case "$host_os" in - openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) - hardcode_libdir_flag_spec='-R$libdir' - ;; - *) - hardcode_libdir_flag_spec='${wl}-rpath,$libdir' - ;; - esac - fi - else - ld_shlibs=no - fi - ;; - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - osf3*) - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - hardcode_libdir_separator=: - ;; - osf4* | osf5*) - if test "$GCC" = yes; then - hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' - else - # Both cc and cxx compiler support -rpath directly - hardcode_libdir_flag_spec='-rpath $libdir' - fi - hardcode_libdir_separator=: - ;; - solaris*) - hardcode_libdir_flag_spec='-R$libdir' - ;; - sunos4*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_direct=yes - hardcode_minus_L=yes - ;; - sysv4) - case $host_vendor in - sni) - hardcode_direct=yes # is this really true??? 
- ;; - siemens) - hardcode_direct=no - ;; - motorola) - hardcode_direct=no #Motorola manual says yes, but my tests say they lie - ;; - esac - ;; - sysv4.3*) - ;; - sysv4*MP*) - if test -d /usr/nec; then - ld_shlibs=yes - fi - ;; - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - ;; - sysv5* | sco3.2v5* | sco5v6*) - hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' - hardcode_libdir_separator=':' - ;; - uts4*) - hardcode_libdir_flag_spec='-L$libdir' - ;; - *) - ld_shlibs=no - ;; - esac -fi - -# Check dynamic linker characteristics -# Code taken from libtool.m4's _LT_SYS_DYNAMIC_LINKER. -# Unlike libtool.m4, here we don't care about _all_ names of the library, but -# only about the one the linker finds when passed -lNAME. This is the last -# element of library_names_spec in libtool.m4, or possibly two of them if the -# linker has special search rules. -library_names_spec= # the last element of library_names_spec in libtool.m4 -libname_spec='lib$name' -case "$host_os" in - aix3*) - library_names_spec='$libname.a' - ;; - aix[4-9]*) - library_names_spec='$libname$shrext' - ;; - amigaos*) - case "$host_cpu" in - powerpc*) - library_names_spec='$libname$shrext' ;; - m68k) - library_names_spec='$libname.a' ;; - esac - ;; - beos*) - library_names_spec='$libname$shrext' - ;; - bsdi[45]*) - library_names_spec='$libname$shrext' - ;; - cygwin* | mingw* | pw32* | cegcc*) - shrext=.dll - library_names_spec='$libname.dll.a $libname.lib' - ;; - darwin* | rhapsody*) - shrext=.dylib - library_names_spec='$libname$shrext' - ;; - dgux*) - library_names_spec='$libname$shrext' - ;; - freebsd[23].*) - library_names_spec='$libname$shrext$versuffix' - ;; - freebsd* | dragonfly*) - library_names_spec='$libname$shrext' - ;; - gnu*) - library_names_spec='$libname$shrext' - ;; - haiku*) - library_names_spec='$libname$shrext' - ;; - hpux9* | hpux10* | hpux11*) - case $host_cpu in - ia64*) - shrext=.so - ;; - hppa*64*) - shrext=.sl - ;; - *) - shrext=.sl - ;; - esac - library_names_spec='$libname$shrext' - ;; - interix[3-9]*) - library_names_spec='$libname$shrext' - ;; - irix5* | irix6* | nonstopux*) - library_names_spec='$libname$shrext' - case "$host_os" in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; - *) libsuff= shlibsuff= ;; - esac - ;; - esac - ;; - linux*oldld* | linux*aout* | linux*coff*) - ;; - linux* | k*bsd*-gnu | kopensolaris*-gnu) - library_names_spec='$libname$shrext' - ;; - knetbsd*-gnu) - library_names_spec='$libname$shrext' - ;; - netbsd*) - library_names_spec='$libname$shrext' - ;; - newsos6) - library_names_spec='$libname$shrext' - ;; - *nto* | *qnx*) - library_names_spec='$libname$shrext' - ;; - openbsd*) - library_names_spec='$libname$shrext$versuffix' - ;; - os2*) - libname_spec='$name' - shrext=.dll - library_names_spec='$libname.a' - ;; - osf3* | osf4* | osf5*) - library_names_spec='$libname$shrext' - ;; - rdos*) - ;; - solaris*) - library_names_spec='$libname$shrext' - ;; - sunos4*) - library_names_spec='$libname$shrext$versuffix' - ;; - sysv4 | sysv4.3*) - library_names_spec='$libname$shrext' - ;; - sysv4*MP*) - library_names_spec='$libname$shrext' - ;; - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - library_names_spec='$libname$shrext' - ;; - tpf*) - 
library_names_spec='$libname$shrext' - ;; - uts4*) - library_names_spec='$libname$shrext' - ;; -esac - -sed_quote_subst='s/\(["`$\\]\)/\\\1/g' -escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` -shlibext=`echo "$shrext" | sed -e 's,^\.,,'` -escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` -escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` -escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` - -LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' < +# Copyright (C) 2000-2020 Free Software Foundation, Inc. +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. This file is offered as-is, +# without any warranty. +# +# Origin: gettext-0.21 +GETTEXT_MACRO_VERSION = 0.20 + +PACKAGE = @PACKAGE@ +VERSION = @VERSION@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ + +SED = @SED@ +SHELL = /bin/sh +@SET_MAKE@ + +srcdir = @srcdir@ +top_srcdir = @top_srcdir@ +VPATH = @srcdir@ + +prefix = @prefix@ +exec_prefix = @exec_prefix@ +datarootdir = @datarootdir@ +datadir = @datadir@ +localedir = @localedir@ +gettextsrcdir = $(datadir)/gettext/po + +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ + +# We use $(mkdir_p). +# In automake <= 1.9.x, $(mkdir_p) is defined either as "mkdir -p --" or as +# "$(mkinstalldirs)" or as "$(install_sh) -d". For these automake versions, +# @install_sh@ does not start with $(SHELL), so we add it. +# In automake >= 1.10, @mkdir_p@ is derived from ${MKDIR_P}, which is defined +# either as "/path/to/mkdir -p" or ".../install-sh -c -d". For these automake +# versions, $(mkinstalldirs) and $(install_sh) are unused. +mkinstalldirs = $(SHELL) @install_sh@ -d +install_sh = $(SHELL) @install_sh@ +MKDIR_P = @MKDIR_P@ +mkdir_p = @mkdir_p@ + +# When building gettext-tools, we prefer to use the built programs +# rather than installed programs. However, we can't do that when we +# are cross compiling. +CROSS_COMPILING = @CROSS_COMPILING@ + +GMSGFMT_ = @GMSGFMT@ +GMSGFMT_no = @GMSGFMT@ +GMSGFMT_yes = @GMSGFMT_015@ +GMSGFMT = $(GMSGFMT_$(USE_MSGCTXT)) +XGETTEXT_ = @XGETTEXT@ +XGETTEXT_no = @XGETTEXT@ +XGETTEXT_yes = @XGETTEXT_015@ +XGETTEXT = $(XGETTEXT_$(USE_MSGCTXT)) +MSGMERGE = @MSGMERGE@ +MSGMERGE_UPDATE = @MSGMERGE@ --update +MSGMERGE_FOR_MSGFMT_OPTION = @MSGMERGE_FOR_MSGFMT_OPTION@ +MSGINIT = msginit +MSGCONV = msgconv +MSGFILTER = msgfilter + +POFILES = @POFILES@ +GMOFILES = @GMOFILES@ +UPDATEPOFILES = @UPDATEPOFILES@ +DUMMYPOFILES = @DUMMYPOFILES@ +DISTFILES.common = Makefile.in.in remove-potcdate.sin \ +$(DISTFILES.common.extra1) $(DISTFILES.common.extra2) $(DISTFILES.common.extra3) +DISTFILES = $(DISTFILES.common) Makevars POTFILES.in \ +$(POFILES) $(GMOFILES) \ +$(DISTFILES.extra1) $(DISTFILES.extra2) $(DISTFILES.extra3) + +POTFILES = \ + +CATALOGS = @CATALOGS@ + +POFILESDEPS_ = $(srcdir)/$(DOMAIN).pot +POFILESDEPS_yes = $(POFILESDEPS_) +POFILESDEPS_no = +POFILESDEPS = $(POFILESDEPS_$(PO_DEPENDS_ON_POT)) + +DISTFILESDEPS_ = update-po +DISTFILESDEPS_yes = $(DISTFILESDEPS_) +DISTFILESDEPS_no = +DISTFILESDEPS = $(DISTFILESDEPS_$(DIST_DEPENDS_ON_UPDATE_PO)) + +# Makevars gets inserted here. (Don't remove this line!) + +all: all-@USE_NLS@ + + +.SUFFIXES: +.SUFFIXES: .po .gmo .sed .sin .nop .po-create .po-update + +# The .pot file, stamp-po, .po files, and .gmo files appear in release tarballs. 
+# The GNU Coding Standards say in +# : +# "GNU distributions usually contain some files which are not source files +# ... . Since these files normally appear in the source directory, they +# should always appear in the source directory, not in the build directory. +# So Makefile rules to update them should put the updated files in the +# source directory." +# Therefore we put these files in the source directory, not the build directory. + +# During .po -> .gmo conversion, take into account the most recent changes to +# the .pot file. This eliminates the need to update the .po files when the +# .pot file has changed, which would be troublesome if the .po files are put +# under version control. +$(GMOFILES): $(srcdir)/$(DOMAIN).pot +.po.gmo: + @lang=`echo $* | sed -e 's,.*/,,'`; \ + test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}rm -f $${lang}.gmo && $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) -o $${lang}.1po $${lang}.po $(DOMAIN).pot && $(GMSGFMT) -c --statistics --verbose -o $${lang}.gmo $${lang}.1po && rm -f $${lang}.1po"; \ + cd $(srcdir) && \ + rm -f $${lang}.gmo && \ + $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) -o $${lang}.1po $${lang}.po $(DOMAIN).pot && \ + $(GMSGFMT) -c --statistics --verbose -o t-$${lang}.gmo $${lang}.1po && \ + mv t-$${lang}.gmo $${lang}.gmo && \ + rm -f $${lang}.1po + +.sin.sed: + sed -e '/^#/d' $< > t-$@ + mv t-$@ $@ + + +all-yes: $(srcdir)/stamp-po +all-no: + +# Ensure that the gettext macros and this Makefile.in.in are in sync. +CHECK_MACRO_VERSION = \ + test "$(GETTEXT_MACRO_VERSION)" = "@GETTEXT_MACRO_VERSION@" \ + || { echo "*** error: gettext infrastructure mismatch: using a Makefile.in.in from gettext version $(GETTEXT_MACRO_VERSION) but the autoconf macros are from gettext version @GETTEXT_MACRO_VERSION@" 1>&2; \ + exit 1; \ + } + +# $(srcdir)/$(DOMAIN).pot is only created when needed. When xgettext finds no +# internationalized messages, no $(srcdir)/$(DOMAIN).pot is created (because +# we don't want to bother translators with empty POT files). We assume that +# LINGUAS is empty in this case, i.e. $(POFILES) and $(GMOFILES) are empty. +# In this case, $(srcdir)/stamp-po is a nop (i.e. a phony target). + +# $(srcdir)/stamp-po is a timestamp denoting the last time at which the CATALOGS +# have been loosely updated. Its purpose is that when a developer or translator +# checks out the package from a version control system, and the $(DOMAIN).pot +# file is not under version control, "make" will update the $(DOMAIN).pot and +# the $(CATALOGS), but subsequent invocations of "make" will do nothing. This +# timestamp would not be necessary if updating the $(CATALOGS) would always +# touch them; however, the rule for $(POFILES) has been designed to not touch +# files that don't need to be changed. +$(srcdir)/stamp-po: $(srcdir)/$(DOMAIN).pot + @$(CHECK_MACRO_VERSION) + test ! -f $(srcdir)/$(DOMAIN).pot || \ + test -z "$(GMOFILES)" || $(MAKE) $(GMOFILES) + @test ! -f $(srcdir)/$(DOMAIN).pot || { \ + echo "touch $(srcdir)/stamp-po" && \ + echo timestamp > $(srcdir)/stamp-poT && \ + mv $(srcdir)/stamp-poT $(srcdir)/stamp-po; \ + } + +# Note: Target 'all' must not depend on target '$(DOMAIN).pot-update', +# otherwise packages like GCC can not be built if only parts of the source +# have been downloaded. + +# This target rebuilds $(DOMAIN).pot; it is an expensive operation. +# Note that $(DOMAIN).pot is not touched if it doesn't need to be changed. 
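For reference, the .po.gmo rule added above refreshes a catalog against the current .pot and then compiles it with strict checking. Run by hand for a single catalog it amounts to the following sketch, not part of the patch; DOMAIN and lang are example values:

#!/bin/sh
# Sketch only: one .po -> .gmo conversion as performed by the new rule.
DOMAIN=hello
lang=de
msgmerge --for-msgfmt -o $lang.1po $lang.po $DOMAIN.pot \
  && msgfmt -c --statistics --verbose -o t-$lang.gmo $lang.1po \
  && mv t-$lang.gmo $lang.gmo
rm -f $lang.1po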
+# The determination of whether the package xyz is a GNU one is based on the +# heuristic whether some file in the top level directory mentions "GNU xyz". +# If GNU 'find' is available, we avoid grepping through monster files. +$(DOMAIN).pot-update: $(POTFILES) $(srcdir)/POTFILES.in remove-potcdate.sed + package_gnu="$(PACKAGE_GNU)"; \ + test -n "$$package_gnu" || { \ + if { if (LC_ALL=C find --version) 2>/dev/null | grep GNU >/dev/null; then \ + LC_ALL=C find -L $(top_srcdir) -maxdepth 1 -type f -size -10000000c -exec grep -i 'GNU @PACKAGE@' /dev/null '{}' ';' 2>/dev/null; \ + else \ + LC_ALL=C grep -i 'GNU @PACKAGE@' $(top_srcdir)/* 2>/dev/null; \ + fi; \ + } | grep -v 'libtool:' >/dev/null; then \ + package_gnu=yes; \ + else \ + package_gnu=no; \ + fi; \ + }; \ + if test "$$package_gnu" = "yes"; then \ + package_prefix='GNU '; \ + else \ + package_prefix=''; \ + fi; \ + if test -n '$(MSGID_BUGS_ADDRESS)' || test '$(PACKAGE_BUGREPORT)' = '@'PACKAGE_BUGREPORT'@'; then \ + msgid_bugs_address='$(MSGID_BUGS_ADDRESS)'; \ + else \ + msgid_bugs_address='$(PACKAGE_BUGREPORT)'; \ + fi; \ + case `$(XGETTEXT) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ + '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].* | 0.16 | 0.16.[0-1]*) \ + $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ + --add-comments=TRANSLATORS: \ + --files-from=$(srcdir)/POTFILES.in \ + --copyright-holder='$(COPYRIGHT_HOLDER)' \ + --msgid-bugs-address="$$msgid_bugs_address" \ + $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ + ;; \ + *) \ + $(XGETTEXT) --default-domain=$(DOMAIN) --directory=$(top_srcdir) \ + --add-comments=TRANSLATORS: \ + --files-from=$(srcdir)/POTFILES.in \ + --copyright-holder='$(COPYRIGHT_HOLDER)' \ + --package-name="$${package_prefix}@PACKAGE@" \ + --package-version='@VERSION@' \ + --msgid-bugs-address="$$msgid_bugs_address" \ + $(XGETTEXT_OPTIONS) @XGETTEXT_EXTRA_OPTIONS@ \ + ;; \ + esac + test ! -f $(DOMAIN).po || { \ + if test -f $(srcdir)/$(DOMAIN).pot-header; then \ + sed -e '1,/^#$$/d' < $(DOMAIN).po > $(DOMAIN).1po && \ + cat $(srcdir)/$(DOMAIN).pot-header $(DOMAIN).1po > $(DOMAIN).po && \ + rm -f $(DOMAIN).1po \ + || exit 1; \ + fi; \ + if test -f $(srcdir)/$(DOMAIN).pot; then \ + sed -f remove-potcdate.sed < $(srcdir)/$(DOMAIN).pot > $(DOMAIN).1po && \ + sed -f remove-potcdate.sed < $(DOMAIN).po > $(DOMAIN).2po && \ + if cmp $(DOMAIN).1po $(DOMAIN).2po >/dev/null 2>&1; then \ + rm -f $(DOMAIN).1po $(DOMAIN).2po $(DOMAIN).po; \ + else \ + rm -f $(DOMAIN).1po $(DOMAIN).2po $(srcdir)/$(DOMAIN).pot && \ + mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ + fi; \ + else \ + mv $(DOMAIN).po $(srcdir)/$(DOMAIN).pot; \ + fi; \ + } + +# This rule has no dependencies: we don't need to update $(DOMAIN).pot at +# every "make" invocation, only create it when it is missing. +# Only "make $(DOMAIN).pot-update" or "make dist" will force an update. +$(srcdir)/$(DOMAIN).pot: + $(MAKE) $(DOMAIN).pot-update + +# This target rebuilds a PO file if $(DOMAIN).pot has changed. +# Note that a PO file is not touched if it doesn't need to be changed. +$(POFILES): $(POFILESDEPS) + @test -f $(srcdir)/$(DOMAIN).pot || $(MAKE) $(srcdir)/$(DOMAIN).pot + @lang=`echo $@ | sed -e 's,.*/,,' -e 's/\.po$$//'`; \ + if test -f "$(srcdir)/$${lang}.po"; then \ + test "$(srcdir)" = . 
&& cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}$(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --lang=$${lang} --previous $${lang}.po $(DOMAIN).pot"; \ + cd $(srcdir) \ + && { case `$(MSGMERGE_UPDATE) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ + '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].*) \ + $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) $${lang}.po $(DOMAIN).pot;; \ + 0.1[6-7] | 0.1[6-7].*) \ + $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --previous $${lang}.po $(DOMAIN).pot;; \ + *) \ + $(MSGMERGE_UPDATE) $(MSGMERGE_OPTIONS) --lang=$${lang} --previous $${lang}.po $(DOMAIN).pot;; \ + esac; \ + }; \ + else \ + $(MAKE) $${lang}.po-create; \ + fi + + +install: install-exec install-data +install-exec: +install-data: install-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ + for file in $(DISTFILES.common) Makevars.template; do \ + $(INSTALL_DATA) $(srcdir)/$$file \ + $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + for file in Makevars; do \ + rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + else \ + : ; \ + fi +install-data-no: all +install-data-yes: all + @catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + dir=$(localedir)/$$lang/LC_MESSAGES; \ + $(mkdir_p) $(DESTDIR)$$dir; \ + if test -r $$cat; then realcat=$$cat; else realcat=$(srcdir)/$$cat; fi; \ + $(INSTALL_DATA) $$realcat $(DESTDIR)$$dir/$(DOMAIN).mo; \ + echo "installing $$realcat as $(DESTDIR)$$dir/$(DOMAIN).mo"; \ + for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ + if test -n "$$lc"; then \ + if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | grep ' -> ' >/dev/null; then \ + link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ + mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ + for file in *; do \ + if test -f $$file; then \ + ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ + fi; \ + done); \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + else \ + if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ + :; \ + else \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + fi; \ + fi; \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + ln -s ../LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ + ln $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo 2>/dev/null || \ + cp -p $(DESTDIR)$(localedir)/$$lang/LC_MESSAGES/$(DOMAIN).mo $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + echo "installing $$realcat link as $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo"; \ + fi; \ + done; \ + done + +install-strip: install + +installdirs: installdirs-exec installdirs-data +installdirs-exec: +installdirs-data: installdirs-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + $(mkdir_p) $(DESTDIR)$(gettextsrcdir); \ + else \ + : ; \ + fi +installdirs-data-no: +installdirs-data-yes: + @catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + dir=$(localedir)/$$lang/LC_MESSAGES; \ + $(mkdir_p) $(DESTDIR)$$dir; \ + for lc in '' $(EXTRA_LOCALE_CATEGORIES); do \ + if test -n "$$lc"; then \ + if (cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc 2>/dev/null) | 
grep ' -> ' >/dev/null; then \ + link=`cd $(DESTDIR)$(localedir)/$$lang && LC_ALL=C ls -l -d $$lc | sed -e 's/^.* -> //'`; \ + mv $(DESTDIR)$(localedir)/$$lang/$$lc $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + (cd $(DESTDIR)$(localedir)/$$lang/$$lc.old && \ + for file in *; do \ + if test -f $$file; then \ + ln -s ../$$link/$$file $(DESTDIR)$(localedir)/$$lang/$$lc/$$file; \ + fi; \ + done); \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc.old; \ + else \ + if test -d $(DESTDIR)$(localedir)/$$lang/$$lc; then \ + :; \ + else \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc; \ + mkdir $(DESTDIR)$(localedir)/$$lang/$$lc; \ + fi; \ + fi; \ + fi; \ + done; \ + done + +# Define this as empty until I found a useful application. +installcheck: + +uninstall: uninstall-exec uninstall-data +uninstall-exec: +uninstall-data: uninstall-data-@USE_NLS@ + if test "$(PACKAGE)" = "gettext-tools"; then \ + for file in $(DISTFILES.common) Makevars.template; do \ + rm -f $(DESTDIR)$(gettextsrcdir)/$$file; \ + done; \ + else \ + : ; \ + fi +uninstall-data-no: +uninstall-data-yes: + catalogs='$(CATALOGS)'; \ + for cat in $$catalogs; do \ + cat=`basename $$cat`; \ + lang=`echo $$cat | sed -e 's/\.gmo$$//'`; \ + for lc in LC_MESSAGES $(EXTRA_LOCALE_CATEGORIES); do \ + rm -f $(DESTDIR)$(localedir)/$$lang/$$lc/$(DOMAIN).mo; \ + done; \ + done + +check: all + +info dvi ps pdf html tags TAGS ctags CTAGS ID: + +install-dvi install-ps install-pdf install-html: + +mostlyclean: + rm -f remove-potcdate.sed + rm -f $(srcdir)/stamp-poT + rm -f core core.* $(DOMAIN).po $(DOMAIN).1po $(DOMAIN).2po *.new.po + rm -fr *.o + +clean: mostlyclean + +distclean: clean + rm -f Makefile Makefile.in POTFILES + +maintainer-clean: distclean + @echo "This command is intended for maintainers to use;" + @echo "it deletes files that may require special tools to rebuild." + rm -f $(srcdir)/$(DOMAIN).pot $(srcdir)/stamp-po $(GMOFILES) + +distdir = $(top_builddir)/$(PACKAGE)-$(VERSION)/$(subdir) +dist distdir: + test -z "$(DISTFILESDEPS)" || $(MAKE) $(DISTFILESDEPS) + @$(MAKE) dist2 +# This is a separate target because 'update-po' must be executed before. +dist2: $(srcdir)/stamp-po $(DISTFILES) + @dists="$(DISTFILES)"; \ + if test "$(PACKAGE)" = "gettext-tools"; then \ + dists="$$dists Makevars.template"; \ + fi; \ + if test -f $(srcdir)/$(DOMAIN).pot; then \ + dists="$$dists $(DOMAIN).pot stamp-po"; \ + else \ + case $(XGETTEXT) in \ + :) echo "Warning: Creating a tarball without '$(DOMAIN).pot', because a suitable 'xgettext' program was not found in PATH." 1>&2;; \ + *) echo "Warning: Creating a tarball without '$(DOMAIN).pot', because 'xgettext' found no strings to extract. Check the contents of the POTFILES.in file and the XGETTEXT_OPTIONS in the Makevars file." 1>&2;; \ + esac; \ + fi; \ + if test -f $(srcdir)/ChangeLog; then \ + dists="$$dists ChangeLog"; \ + fi; \ + for i in 0 1 2 3 4 5 6 7 8 9; do \ + if test -f $(srcdir)/ChangeLog.$$i; then \ + dists="$$dists ChangeLog.$$i"; \ + fi; \ + done; \ + if test -f $(srcdir)/LINGUAS; then dists="$$dists LINGUAS"; fi; \ + for file in $$dists; do \ + if test -f $$file; then \ + cp -p $$file $(distdir) || exit 1; \ + else \ + cp -p $(srcdir)/$$file $(distdir) || exit 1; \ + fi; \ + done + +update-po: Makefile + $(MAKE) $(DOMAIN).pot-update + test -z "$(UPDATEPOFILES)" || $(MAKE) $(UPDATEPOFILES) + $(MAKE) update-gmo + +# General rule for creating PO files. + +.nop.po-create: + @lang=`echo $@ | sed -e 's/\.po-create$$//'`; \ + echo "File $$lang.po does not exist. 
If you are a translator, you can create it through 'msginit'." 1>&2; \ + exit 1 + +# General rule for updating PO files. + +.nop.po-update: + @lang=`echo $@ | sed -e 's/\.po-update$$//'`; \ + if test "$(PACKAGE)" = "gettext-tools" && test "$(CROSS_COMPILING)" != "yes"; then PATH=`pwd`/../src:$$PATH; fi; \ + tmpdir=`pwd`; \ + echo "$$lang:"; \ + test "$(srcdir)" = . && cdcmd="" || cdcmd="cd $(srcdir) && "; \ + echo "$${cdcmd}$(MSGMERGE) $(MSGMERGE_OPTIONS) --lang=$$lang --previous $$lang.po $(DOMAIN).pot -o $$lang.new.po"; \ + cd $(srcdir); \ + if { case `$(MSGMERGE) --version | sed 1q | sed -e 's,^[^0-9]*,,'` in \ + '' | 0.[0-9] | 0.[0-9].* | 0.1[0-5] | 0.1[0-5].*) \ + $(MSGMERGE) $(MSGMERGE_OPTIONS) -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ + 0.1[6-7] | 0.1[6-7].*) \ + $(MSGMERGE) $(MSGMERGE_OPTIONS) --previous -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ + *) \ + $(MSGMERGE) $(MSGMERGE_OPTIONS) --lang=$$lang --previous -o $$tmpdir/$$lang.new.po $$lang.po $(DOMAIN).pot;; \ + esac; \ + }; then \ + if cmp $$lang.po $$tmpdir/$$lang.new.po >/dev/null 2>&1; then \ + rm -f $$tmpdir/$$lang.new.po; \ + else \ + if mv -f $$tmpdir/$$lang.new.po $$lang.po; then \ + :; \ + else \ + echo "msgmerge for $$lang.po failed: cannot move $$tmpdir/$$lang.new.po to $$lang.po" 1>&2; \ + exit 1; \ + fi; \ + fi; \ + else \ + echo "msgmerge for $$lang.po failed!" 1>&2; \ + rm -f $$tmpdir/$$lang.new.po; \ + fi + +$(DUMMYPOFILES): + +update-gmo: Makefile $(GMOFILES) + @: + +# Recreate Makefile by invoking config.status. Explicitly invoke the shell, +# because execution permission bits may not work on the current file system. +# Use @SHELL@, which is the shell determined by autoconf for the use by its +# scripts, not $(SHELL) which is hardwired to /bin/sh and may be deficient. +Makefile: Makefile.in.in Makevars $(top_builddir)/config.status @POMAKEFILEDEPS@ + cd $(top_builddir) \ + && @SHELL@ ./config.status $(subdir)/$@.in po-directories + +force: + +# Tell versions [3.59,3.63) of GNU make not to export all variables. +# Otherwise a system limit (for SysV at least) may be exceeded. +.NOEXPORT: diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/gettext.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/gettext.m4 new file mode 100644 index 000000000..4f25a27d9 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/gettext.m4 @@ -0,0 +1,386 @@ +# gettext.m4 serial 71 (gettext-0.20.2) +dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can be used in projects which are not available under +dnl the GNU General Public License or the GNU Lesser General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Lesser General Public License, and the rest of the GNU +dnl gettext package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2006, 2008-2010. + +dnl Macro to add for using GNU gettext. + +dnl Usage: AM_GNU_GETTEXT([INTLSYMBOL], [NEEDSYMBOL], [INTLDIR]). +dnl INTLSYMBOL must be one of 'external', 'use-libtool'. 
+dnl INTLSYMBOL should be 'external' for packages other than GNU gettext, and +dnl 'use-libtool' for the packages 'gettext-runtime' and 'gettext-tools'. +dnl If INTLSYMBOL is 'use-libtool', then a libtool library +dnl $(top_builddir)/intl/libintl.la will be created (shared and/or static, +dnl depending on --{enable,disable}-{shared,static} and on the presence of +dnl AM-DISABLE-SHARED). +dnl If NEEDSYMBOL is specified and is 'need-ngettext', then GNU gettext +dnl implementations (in libc or libintl) without the ngettext() function +dnl will be ignored. If NEEDSYMBOL is specified and is +dnl 'need-formatstring-macros', then GNU gettext implementations that don't +dnl support the ISO C 99 formatstring macros will be ignored. +dnl INTLDIR is used to find the intl libraries. If empty, +dnl the value '$(top_builddir)/intl/' is used. +dnl +dnl The result of the configuration is one of three cases: +dnl 1) GNU gettext, as included in the intl subdirectory, will be compiled +dnl and used. +dnl Catalog format: GNU --> install in $(datadir) +dnl Catalog extension: .mo after installation, .gmo in source tree +dnl 2) GNU gettext has been found in the system's C library. +dnl Catalog format: GNU --> install in $(datadir) +dnl Catalog extension: .mo after installation, .gmo in source tree +dnl 3) No internationalization, always use English msgid. +dnl Catalog format: none +dnl Catalog extension: none +dnl If INTLSYMBOL is 'external', only cases 2 and 3 can occur. +dnl The use of .gmo is historical (it was needed to avoid overwriting the +dnl GNU format catalogs when building on a platform with an X/Open gettext), +dnl but we keep it in order not to force irrelevant filename changes on the +dnl maintainers. +dnl +AC_DEFUN([AM_GNU_GETTEXT], +[ + dnl Argument checking. + ifelse([$1], [], , [ifelse([$1], [external], , [ifelse([$1], [use-libtool], , + [errprint([ERROR: invalid first argument to AM_GNU_GETTEXT +])])])]) + ifelse(ifelse([$1], [], [old])[]ifelse([$1], [no-libtool], [old]), [old], + [errprint([ERROR: Use of AM_GNU_GETTEXT without [external] argument is no longer supported. +])]) + ifelse([$2], [], , [ifelse([$2], [need-ngettext], , [ifelse([$2], [need-formatstring-macros], , + [errprint([ERROR: invalid second argument to AM_GNU_GETTEXT +])])])]) + define([gt_included_intl], + ifelse([$1], [external], [no], [yes])) + gt_NEEDS_INIT + AM_GNU_GETTEXT_NEED([$2]) + + AC_REQUIRE([AM_PO_SUBDIRS])dnl + ifelse(gt_included_intl, yes, [ + AC_REQUIRE([AM_INTL_SUBDIR])dnl + ]) + + dnl Prerequisites of AC_LIB_LINKFLAGS_BODY. + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + AC_REQUIRE([AC_LIB_RPATH]) + + dnl Sometimes libintl requires libiconv, so first search for libiconv. + dnl Ideally we would do this search only after the + dnl if test "$USE_NLS" = "yes"; then + dnl if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then + dnl tests. But if configure.in invokes AM_ICONV after AM_GNU_GETTEXT + dnl the configure script would need to contain the same shell code + dnl again, outside any 'if'. There are two solutions: + dnl - Invoke AM_ICONV_LINKFLAGS_BODY here, outside any 'if'. + dnl - Control the expansions in more detail using AC_PROVIDE_IFELSE. + dnl Since AC_PROVIDE_IFELSE is not documented, we avoid it. + ifelse(gt_included_intl, yes, , [ + AC_REQUIRE([AM_ICONV_LINKFLAGS_BODY]) + ]) + + dnl Sometimes, on Mac OS X, libintl requires linking with CoreFoundation. + gt_INTL_MACOSX + + dnl Set USE_NLS. 
+ AC_REQUIRE([AM_NLS]) + + ifelse(gt_included_intl, yes, [ + BUILD_INCLUDED_LIBINTL=no + USE_INCLUDED_LIBINTL=no + ]) + LIBINTL= + LTLIBINTL= + POSUB= + + dnl Add a version number to the cache macros. + case " $gt_needs " in + *" need-formatstring-macros "*) gt_api_version=3 ;; + *" need-ngettext "*) gt_api_version=2 ;; + *) gt_api_version=1 ;; + esac + gt_func_gnugettext_libc="gt_cv_func_gnugettext${gt_api_version}_libc" + gt_func_gnugettext_libintl="gt_cv_func_gnugettext${gt_api_version}_libintl" + + dnl If we use NLS figure out what method + if test "$USE_NLS" = "yes"; then + gt_use_preinstalled_gnugettext=no + ifelse(gt_included_intl, yes, [ + AC_MSG_CHECKING([whether included gettext is requested]) + AC_ARG_WITH([included-gettext], + [ --with-included-gettext use the GNU gettext library included here], + nls_cv_force_use_gnu_gettext=$withval, + nls_cv_force_use_gnu_gettext=no) + AC_MSG_RESULT([$nls_cv_force_use_gnu_gettext]) + + nls_cv_use_gnu_gettext="$nls_cv_force_use_gnu_gettext" + if test "$nls_cv_force_use_gnu_gettext" != "yes"; then + ]) + dnl User does not insist on using GNU NLS library. Figure out what + dnl to use. If GNU gettext is available we use this. Else we have + dnl to fall back to GNU NLS library. + + if test $gt_api_version -ge 3; then + gt_revision_test_code=' +#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +#define __GNU_GETTEXT_SUPPORTED_REVISION(major) ((major) == 0 ? 0 : -1) +#endif +changequote(,)dnl +typedef int array [2 * (__GNU_GETTEXT_SUPPORTED_REVISION(0) >= 1) - 1]; +changequote([,])dnl +' + else + gt_revision_test_code= + fi + if test $gt_api_version -ge 2; then + gt_expression_test_code=' + * ngettext ("", "", 0)' + else + gt_expression_test_code= + fi + + AC_CACHE_CHECK([for GNU gettext in libc], [$gt_func_gnugettext_libc], + [AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +extern int _nl_msg_cat_cntr; +extern int *_nl_domain_bindings; +#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_domain_bindings) +#else +#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 +#endif +$gt_revision_test_code + ]], + [[ +bindtextdomain ("", ""); +return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION + ]])], + [eval "$gt_func_gnugettext_libc=yes"], + [eval "$gt_func_gnugettext_libc=no"])]) + + if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" != "yes"; }; then + dnl Sometimes libintl requires libiconv, so first search for libiconv. + ifelse(gt_included_intl, yes, , [ + AM_ICONV_LINK + ]) + dnl Search for libintl and define LIBINTL, LTLIBINTL and INCINTL + dnl accordingly. Don't use AC_LIB_LINKFLAGS_BODY([intl],[iconv]) + dnl because that would add "-liconv" to LIBINTL and LTLIBINTL + dnl even if libiconv doesn't exist. + AC_LIB_LINKFLAGS_BODY([intl]) + AC_CACHE_CHECK([for GNU gettext in libintl], + [$gt_func_gnugettext_libintl], + [gt_save_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $INCINTL" + gt_save_LIBS="$LIBS" + LIBS="$LIBS $LIBINTL" + dnl Now see whether libintl exists and does not depend on libiconv. 
+ AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) +#else +#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 +#endif +$gt_revision_test_code + ]], + [[ +bindtextdomain ("", ""); +return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION + ]])], + [eval "$gt_func_gnugettext_libintl=yes"], + [eval "$gt_func_gnugettext_libintl=no"]) + dnl Now see whether libintl exists and depends on libiconv. + if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" != yes; } && test -n "$LIBICONV"; then + LIBS="$LIBS $LIBICONV" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#ifndef __GNU_GETTEXT_SUPPORTED_REVISION +extern int _nl_msg_cat_cntr; +extern +#ifdef __cplusplus +"C" +#endif +const char *_nl_expand_alias (const char *); +#define __GNU_GETTEXT_SYMBOL_EXPRESSION (_nl_msg_cat_cntr + *_nl_expand_alias ("")) +#else +#define __GNU_GETTEXT_SYMBOL_EXPRESSION 0 +#endif +$gt_revision_test_code + ]], + [[ +bindtextdomain ("", ""); +return * gettext ("")$gt_expression_test_code + __GNU_GETTEXT_SYMBOL_EXPRESSION + ]])], + [LIBINTL="$LIBINTL $LIBICONV" + LTLIBINTL="$LTLIBINTL $LTLIBICONV" + eval "$gt_func_gnugettext_libintl=yes" + ]) + fi + CPPFLAGS="$gt_save_CPPFLAGS" + LIBS="$gt_save_LIBS"]) + fi + + dnl If an already present or preinstalled GNU gettext() is found, + dnl use it. But if this macro is used in GNU gettext, and GNU + dnl gettext is already preinstalled in libintl, we update this + dnl libintl. (Cf. the install rule in intl/Makefile.in.) + if { eval "gt_val=\$$gt_func_gnugettext_libc"; test "$gt_val" = "yes"; } \ + || { { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; } \ + && test "$PACKAGE" != gettext-runtime \ + && test "$PACKAGE" != gettext-tools; }; then + gt_use_preinstalled_gnugettext=yes + else + dnl Reset the values set by searching for libintl. + LIBINTL= + LTLIBINTL= + INCINTL= + fi + + ifelse(gt_included_intl, yes, [ + if test "$gt_use_preinstalled_gnugettext" != "yes"; then + dnl GNU gettext is not found in the C library. + dnl Fall back on included GNU gettext library. + nls_cv_use_gnu_gettext=yes + fi + fi + + if test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Mark actions used to generate GNU NLS library. + BUILD_INCLUDED_LIBINTL=yes + USE_INCLUDED_LIBINTL=yes + LIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LIBICONV $LIBTHREAD" + LTLIBINTL="ifelse([$3],[],\${top_builddir}/intl,[$3])/libintl.la $LTLIBICONV $LTLIBTHREAD" + LIBS=`echo " $LIBS " | sed -e 's/ -lintl / /' -e 's/^ //' -e 's/ $//'` + fi + + CATOBJEXT= + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Mark actions to use GNU gettext tools. + CATOBJEXT=.gmo + fi + ]) + + if test -n "$INTL_MACOSX_LIBS"; then + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + dnl Some extra flags are needed during linking. 
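
The two link probes above ("for GNU gettext in libc", then "in libintl") reduce to: does this program link against libc alone, and if not, does it link with -lintl. A rough standalone equivalent, assuming a glibc-style toolchain and plain cc, and omitting the ngettext/format-string revision refinements:

cat > conftest.c <<'EOF'
#include <libintl.h>
extern int _nl_msg_cat_cntr;   /* internal symbol only GNU gettext provides */
int main (void)
{
  bindtextdomain ("", "");
  return *gettext ("") + _nl_msg_cat_cntr;
}
EOF
if cc conftest.c -o conftest 2>/dev/null; then
  echo "GNU gettext in libc: yes"
elif cc conftest.c -lintl -o conftest 2>/dev/null; then
  echo "GNU gettext in libintl: yes"
else
  echo "no GNU gettext found"
fi
rm -f conftest.c conftest
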
+ LIBINTL="$LIBINTL $INTL_MACOSX_LIBS" + LTLIBINTL="$LTLIBINTL $INTL_MACOSX_LIBS" + fi + fi + + if test "$gt_use_preinstalled_gnugettext" = "yes" \ + || test "$nls_cv_use_gnu_gettext" = "yes"; then + AC_DEFINE([ENABLE_NLS], [1], + [Define to 1 if translation of program messages to the user's native language + is requested.]) + else + USE_NLS=no + fi + fi + + AC_MSG_CHECKING([whether to use NLS]) + AC_MSG_RESULT([$USE_NLS]) + if test "$USE_NLS" = "yes"; then + AC_MSG_CHECKING([where the gettext function comes from]) + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then + gt_source="external libintl" + else + gt_source="libc" + fi + else + gt_source="included intl directory" + fi + AC_MSG_RESULT([$gt_source]) + fi + + if test "$USE_NLS" = "yes"; then + + if test "$gt_use_preinstalled_gnugettext" = "yes"; then + if { eval "gt_val=\$$gt_func_gnugettext_libintl"; test "$gt_val" = "yes"; }; then + AC_MSG_CHECKING([how to link with libintl]) + AC_MSG_RESULT([$LIBINTL]) + AC_LIB_APPENDTOVAR([CPPFLAGS], [$INCINTL]) + fi + + dnl For backward compatibility. Some packages may be using this. + AC_DEFINE([HAVE_GETTEXT], [1], + [Define if the GNU gettext() function is already present or preinstalled.]) + AC_DEFINE([HAVE_DCGETTEXT], [1], + [Define if the GNU dcgettext() function is already present or preinstalled.]) + fi + + dnl We need to process the po/ directory. + POSUB=po + fi + + ifelse(gt_included_intl, yes, [ + dnl In GNU gettext we have to set BUILD_INCLUDED_LIBINTL to 'yes' + dnl because some of the testsuite requires it. + BUILD_INCLUDED_LIBINTL=yes + + dnl Make all variables we use known to autoconf. + AC_SUBST([BUILD_INCLUDED_LIBINTL]) + AC_SUBST([USE_INCLUDED_LIBINTL]) + AC_SUBST([CATOBJEXT]) + ]) + + dnl For backward compatibility. Some Makefiles may be using this. + INTLLIBS="$LIBINTL" + AC_SUBST([INTLLIBS]) + + dnl Make all documented variables known to autoconf. + AC_SUBST([LIBINTL]) + AC_SUBST([LTLIBINTL]) + AC_SUBST([POSUB]) +]) + + +dnl gt_NEEDS_INIT ensures that the gt_needs variable is initialized. +m4_define([gt_NEEDS_INIT], +[ + m4_divert_text([DEFAULTS], [gt_needs=]) + m4_define([gt_NEEDS_INIT], []) +]) + + +dnl Usage: AM_GNU_GETTEXT_NEED([NEEDSYMBOL]) +AC_DEFUN([AM_GNU_GETTEXT_NEED], +[ + m4_divert_text([INIT_PREPARE], [gt_needs="$gt_needs $1"]) +]) + + +dnl Usage: AM_GNU_GETTEXT_VERSION([gettext-version]) +AC_DEFUN([AM_GNU_GETTEXT_VERSION], []) + + +dnl Usage: AM_GNU_GETTEXT_REQUIRE_VERSION([gettext-version]) +AC_DEFUN([AM_GNU_GETTEXT_REQUIRE_VERSION], []) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/host-cpu-c-abi.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/host-cpu-c-abi.m4 new file mode 100644 index 000000000..6db2aa25a --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/host-cpu-c-abi.m4 @@ -0,0 +1,675 @@ +# host-cpu-c-abi.m4 serial 13 +dnl Copyright (C) 2002-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible and Sam Steingold. + +dnl Sets the HOST_CPU variable to the canonical name of the CPU. +dnl Sets the HOST_CPU_C_ABI variable to the canonical name of the CPU with its +dnl C language ABI (application binary interface). 
+dnl Also defines __${HOST_CPU}__ and __${HOST_CPU_C_ABI}__ as C macros in +dnl config.h. +dnl +dnl This canonical name can be used to select a particular assembly language +dnl source file that will interoperate with C code on the given host. +dnl +dnl For example: +dnl * 'i386' and 'sparc' are different canonical names, because code for i386 +dnl will not run on SPARC CPUs and vice versa. They have different +dnl instruction sets. +dnl * 'sparc' and 'sparc64' are different canonical names, because code for +dnl 'sparc' and code for 'sparc64' cannot be linked together: 'sparc' code +dnl contains 32-bit instructions, whereas 'sparc64' code contains 64-bit +dnl instructions. A process on a SPARC CPU can be in 32-bit mode or in 64-bit +dnl mode, but not both. +dnl * 'mips' and 'mipsn32' are different canonical names, because they use +dnl different argument passing and return conventions for C functions, and +dnl although the instruction set of 'mips' is a large subset of the +dnl instruction set of 'mipsn32'. +dnl * 'mipsn32' and 'mips64' are different canonical names, because they use +dnl different sizes for the C types like 'int' and 'void *', and although +dnl the instruction sets of 'mipsn32' and 'mips64' are the same. +dnl * The same canonical name is used for different endiannesses. You can +dnl determine the endianness through preprocessor symbols: +dnl - 'arm': test __ARMEL__. +dnl - 'mips', 'mipsn32', 'mips64': test _MIPSEB vs. _MIPSEL. +dnl - 'powerpc64': test _BIG_ENDIAN vs. _LITTLE_ENDIAN. +dnl * The same name 'i386' is used for CPUs of type i386, i486, i586 +dnl (Pentium), AMD K7, Pentium II, Pentium IV, etc., because +dnl - Instructions that do not exist on all of these CPUs (cmpxchg, +dnl MMX, SSE, SSE2, 3DNow! etc.) are not frequently used. If your +dnl assembly language source files use such instructions, you will +dnl need to make the distinction. +dnl - Speed of execution of the common instruction set is reasonable across +dnl the entire family of CPUs. If you have assembly language source files +dnl that are optimized for particular CPU types (like GNU gmp has), you +dnl will need to make the distinction. +dnl See . +AC_DEFUN([gl_HOST_CPU_C_ABI], +[ + AC_REQUIRE([AC_CANONICAL_HOST]) + AC_REQUIRE([gl_C_ASM]) + AC_CACHE_CHECK([host CPU and C ABI], [gl_cv_host_cpu_c_abi], + [case "$host_cpu" in + +changequote(,)dnl + i[34567]86 ) +changequote([,])dnl + gl_cv_host_cpu_c_abi=i386 + ;; + + x86_64 ) + # On x86_64 systems, the C compiler may be generating code in one of + # these ABIs: + # - 64-bit instruction set, 64-bit pointers, 64-bit 'long': x86_64. + # - 64-bit instruction set, 64-bit pointers, 32-bit 'long': x86_64 + # with native Windows (mingw, MSVC). + # - 64-bit instruction set, 32-bit pointers, 32-bit 'long': x86_64-x32. + # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': i386. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if (defined __x86_64__ || defined __amd64__ \ + || defined _M_X64 || defined _M_AMD64) + int ok; + #else + error fail + #endif + ]])], + [AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __ILP32__ || defined _ILP32 + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=x86_64-x32], + [gl_cv_host_cpu_c_abi=x86_64])], + [gl_cv_host_cpu_c_abi=i386]) + ;; + +changequote(,)dnl + alphaev[4-8] | alphaev56 | alphapca5[67] | alphaev6[78] ) +changequote([,])dnl + gl_cv_host_cpu_c_abi=alpha + ;; + + arm* | aarch64 ) + # Assume arm with EABI. 
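
The x86_64 branch above distinguishes i386, x86_64 and x32 purely from predefined macros; a hand-run sketch using the preprocessor, assuming cc is the compiler being probed:

cpp_ok () {
  printf '#if %s\nok\n#endif\n' "$1" | cc -x c -E - 2>/dev/null | grep -q '^ok$'
}
if cpp_ok 'defined __x86_64__ || defined __amd64__'; then
  if cpp_ok 'defined __ILP32__ || defined _ILP32'; then
    echo "host_cpu_c_abi=x86_64-x32"   # 64-bit instruction set, 32-bit pointers
  else
    echo "host_cpu_c_abi=x86_64"
  fi
else
  echo "host_cpu_c_abi=i386"           # or a non-x86 CPU altogether
fi
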
+ # On arm64 systems, the C compiler may be generating code in one of + # these ABIs: + # - aarch64 instruction set, 64-bit pointers, 64-bit 'long': arm64. + # - aarch64 instruction set, 32-bit pointers, 32-bit 'long': arm64-ilp32. + # - 32-bit instruction set, 32-bit pointers, 32-bit 'long': arm or armhf. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#ifdef __aarch64__ + int ok; + #else + error fail + #endif + ]])], + [AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __ILP32__ || defined _ILP32 + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=arm64-ilp32], + [gl_cv_host_cpu_c_abi=arm64])], + [# Don't distinguish little-endian and big-endian arm, since they + # don't require different machine code for simple operations and + # since the user can distinguish them through the preprocessor + # defines __ARMEL__ vs. __ARMEB__. + # But distinguish arm which passes floating-point arguments and + # return values in integer registers (r0, r1, ...) - this is + # gcc -mfloat-abi=soft or gcc -mfloat-abi=softfp - from arm which + # passes them in float registers (s0, s1, ...) and double registers + # (d0, d1, ...) - this is gcc -mfloat-abi=hard. GCC 4.6 or newer + # sets the preprocessor defines __ARM_PCS (for the first case) and + # __ARM_PCS_VFP (for the second case), but older GCC does not. + echo 'double ddd; void func (double dd) { ddd = dd; }' > conftest.c + # Look for a reference to the register d0 in the .s file. + AC_TRY_COMMAND(${CC-cc} $CFLAGS $CPPFLAGS $gl_c_asm_opt conftest.c) >/dev/null 2>&1 + if LC_ALL=C grep 'd0,' conftest.$gl_asmext >/dev/null; then + gl_cv_host_cpu_c_abi=armhf + else + gl_cv_host_cpu_c_abi=arm + fi + rm -f conftest* + ]) + ;; + + hppa1.0 | hppa1.1 | hppa2.0* | hppa64 ) + # On hppa, the C compiler may be generating 32-bit code or 64-bit + # code. In the latter case, it defines _LP64 and __LP64__. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#ifdef __LP64__ + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=hppa64], + [gl_cv_host_cpu_c_abi=hppa]) + ;; + + ia64* ) + # On ia64 on HP-UX, the C compiler may be generating 64-bit code or + # 32-bit code. In the latter case, it defines _ILP32. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#ifdef _ILP32 + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=ia64-ilp32], + [gl_cv_host_cpu_c_abi=ia64]) + ;; + + mips* ) + # We should also check for (_MIPS_SZPTR == 64), but gcc keeps this + # at 32. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined _MIPS_SZLONG && (_MIPS_SZLONG == 64) + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=mips64], + [# In the n32 ABI, _ABIN32 is defined, _ABIO32 is not defined (but + # may later get defined by ), and _MIPS_SIM == _ABIN32. + # In the 32 ABI, _ABIO32 is defined, _ABIN32 is not defined (but + # may later get defined by ), and _MIPS_SIM == _ABIO32. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if (_MIPS_SIM == _ABIN32) + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=mipsn32], + [gl_cv_host_cpu_c_abi=mips])]) + ;; + + powerpc* ) + # Different ABIs are in use on AIX vs. Mac OS X vs. Linux,*BSD. + # No need to distinguish them here; the caller may distinguish + # them based on the OS. + # On powerpc64 systems, the C compiler may still be generating + # 32-bit code. And on powerpc-ibm-aix systems, the C compiler may + # be generating 64-bit code. 
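
The armhf/arm split above (hard-float vs. soft-float calling convention) can be reproduced outside configure by compiling a function that takes a double and grepping the assembly for a VFP register; a sketch, assuming an arm cross compiler (the tool name below is illustrative):

CC=arm-linux-gnueabihf-gcc
echo 'double ddd; void func (double dd) { ddd = dd; }' > conftest.c
$CC -S conftest.c -o conftest.s 2>/dev/null
if LC_ALL=C grep 'd0,' conftest.s >/dev/null 2>&1; then
  echo "host_cpu_c_abi=armhf"   # the double arrived in d0: hard-float ABI
else
  echo "host_cpu_c_abi=arm"     # the double arrived in r0/r1: soft-float ABI
fi
rm -f conftest.c conftest.s
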
+ AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __powerpc64__ || defined _ARCH_PPC64 + int ok; + #else + error fail + #endif + ]])], + [# On powerpc64, there are two ABIs on Linux: The AIX compatible + # one and the ELFv2 one. The latter defines _CALL_ELF=2. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined _CALL_ELF && _CALL_ELF == 2 + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=powerpc64-elfv2], + [gl_cv_host_cpu_c_abi=powerpc64]) + ], + [gl_cv_host_cpu_c_abi=powerpc]) + ;; + + rs6000 ) + gl_cv_host_cpu_c_abi=powerpc + ;; + + riscv32 | riscv64 ) + # There are 2 architectures (with variants): rv32* and rv64*. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if __riscv_xlen == 64 + int ok; + #else + error fail + #endif + ]])], + [cpu=riscv64], + [cpu=riscv32]) + # There are 6 ABIs: ilp32, ilp32f, ilp32d, lp64, lp64f, lp64d. + # Size of 'long' and 'void *': + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __LP64__ + int ok; + #else + error fail + #endif + ]])], + [main_abi=lp64], + [main_abi=ilp32]) + # Float ABIs: + # __riscv_float_abi_double: + # 'float' and 'double' are passed in floating-point registers. + # __riscv_float_abi_single: + # 'float' are passed in floating-point registers. + # __riscv_float_abi_soft: + # No values are passed in floating-point registers. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __riscv_float_abi_double + int ok; + #else + error fail + #endif + ]])], + [float_abi=d], + [AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __riscv_float_abi_single + int ok; + #else + error fail + #endif + ]])], + [float_abi=f], + [float_abi='']) + ]) + gl_cv_host_cpu_c_abi="${cpu}-${main_abi}${float_abi}" + ;; + + s390* ) + # On s390x, the C compiler may be generating 64-bit (= s390x) code + # or 31-bit (= s390) code. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __LP64__ || defined __s390x__ + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=s390x], + [gl_cv_host_cpu_c_abi=s390]) + ;; + + sparc | sparc64 ) + # UltraSPARCs running Linux have `uname -m` = "sparc64", but the + # C compiler still generates 32-bit code. + AC_COMPILE_IFELSE( + [AC_LANG_SOURCE( + [[#if defined __sparcv9 || defined __arch64__ + int ok; + #else + error fail + #endif + ]])], + [gl_cv_host_cpu_c_abi=sparc64], + [gl_cv_host_cpu_c_abi=sparc]) + ;; + + *) + gl_cv_host_cpu_c_abi="$host_cpu" + ;; + esac + ]) + + dnl In most cases, $HOST_CPU and $HOST_CPU_C_ABI are the same. + HOST_CPU=`echo "$gl_cv_host_cpu_c_abi" | sed -e 's/-.*//'` + HOST_CPU_C_ABI="$gl_cv_host_cpu_c_abi" + AC_SUBST([HOST_CPU]) + AC_SUBST([HOST_CPU_C_ABI]) + + # This was + # AC_DEFINE_UNQUOTED([__${HOST_CPU}__]) + # AC_DEFINE_UNQUOTED([__${HOST_CPU_C_ABI}__]) + # earlier, but KAI C++ 3.2d doesn't like this. + sed -e 's/-/_/g' >> confdefs.h < +#include + ]], + [[iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd);]])], + [am_cv_func_iconv=yes]) + if test "$am_cv_func_iconv" != yes; then + am_save_LIBS="$LIBS" + LIBS="$LIBS $LIBICONV" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#include + ]], + [[iconv_t cd = iconv_open("",""); + iconv(cd,NULL,NULL,NULL,NULL); + iconv_close(cd);]])], + [am_cv_lib_iconv=yes] + [am_cv_func_iconv=yes]) + LIBS="$am_save_LIBS" + fi + ]) + if test "$am_cv_func_iconv" = yes; then + AC_CACHE_CHECK([for working iconv], [am_cv_func_iconv_works], [ + dnl This tests against bugs in AIX 5.1, AIX 6.1..7.1, HP-UX 11.11, + dnl Solaris 10. 
+ am_save_LIBS="$LIBS" + if test $am_cv_lib_iconv = yes; then + LIBS="$LIBS $LIBICONV" + fi + am_cv_func_iconv_works=no + for ac_iconv_const in '' 'const'; do + AC_RUN_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#include + +#ifndef ICONV_CONST +# define ICONV_CONST $ac_iconv_const +#endif + ]], + [[int result = 0; + /* Test against AIX 5.1 bug: Failures are not distinguishable from successful + returns. */ + { + iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); + if (cd_utf8_to_88591 != (iconv_t)(-1)) + { + static ICONV_CONST char input[] = "\342\202\254"; /* EURO SIGN */ + char buf[10]; + ICONV_CONST char *inptr = input; + size_t inbytesleft = strlen (input); + char *outptr = buf; + size_t outbytesleft = sizeof (buf); + size_t res = iconv (cd_utf8_to_88591, + &inptr, &inbytesleft, + &outptr, &outbytesleft); + if (res == 0) + result |= 1; + iconv_close (cd_utf8_to_88591); + } + } + /* Test against Solaris 10 bug: Failures are not distinguishable from + successful returns. */ + { + iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); + if (cd_ascii_to_88591 != (iconv_t)(-1)) + { + static ICONV_CONST char input[] = "\263"; + char buf[10]; + ICONV_CONST char *inptr = input; + size_t inbytesleft = strlen (input); + char *outptr = buf; + size_t outbytesleft = sizeof (buf); + size_t res = iconv (cd_ascii_to_88591, + &inptr, &inbytesleft, + &outptr, &outbytesleft); + if (res == 0) + result |= 2; + iconv_close (cd_ascii_to_88591); + } + } + /* Test against AIX 6.1..7.1 bug: Buffer overrun. */ + { + iconv_t cd_88591_to_utf8 = iconv_open ("UTF-8", "ISO-8859-1"); + if (cd_88591_to_utf8 != (iconv_t)(-1)) + { + static ICONV_CONST char input[] = "\304"; + static char buf[2] = { (char)0xDE, (char)0xAD }; + ICONV_CONST char *inptr = input; + size_t inbytesleft = 1; + char *outptr = buf; + size_t outbytesleft = 1; + size_t res = iconv (cd_88591_to_utf8, + &inptr, &inbytesleft, + &outptr, &outbytesleft); + if (res != (size_t)(-1) || outptr - buf > 1 || buf[1] != (char)0xAD) + result |= 4; + iconv_close (cd_88591_to_utf8); + } + } +#if 0 /* This bug could be worked around by the caller. */ + /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ + { + iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); + if (cd_88591_to_utf8 != (iconv_t)(-1)) + { + static ICONV_CONST char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; + char buf[50]; + ICONV_CONST char *inptr = input; + size_t inbytesleft = strlen (input); + char *outptr = buf; + size_t outbytesleft = sizeof (buf); + size_t res = iconv (cd_88591_to_utf8, + &inptr, &inbytesleft, + &outptr, &outbytesleft); + if ((int)res > 0) + result |= 8; + iconv_close (cd_88591_to_utf8); + } + } +#endif + /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is + provided. */ + { + /* Try standardized names. */ + iconv_t cd1 = iconv_open ("UTF-8", "EUC-JP"); + /* Try IRIX, OSF/1 names. */ + iconv_t cd2 = iconv_open ("UTF-8", "eucJP"); + /* Try AIX names. */ + iconv_t cd3 = iconv_open ("UTF-8", "IBM-eucJP"); + /* Try HP-UX names. 
*/ + iconv_t cd4 = iconv_open ("utf8", "eucJP"); + if (cd1 == (iconv_t)(-1) && cd2 == (iconv_t)(-1) + && cd3 == (iconv_t)(-1) && cd4 == (iconv_t)(-1)) + result |= 16; + if (cd1 != (iconv_t)(-1)) + iconv_close (cd1); + if (cd2 != (iconv_t)(-1)) + iconv_close (cd2); + if (cd3 != (iconv_t)(-1)) + iconv_close (cd3); + if (cd4 != (iconv_t)(-1)) + iconv_close (cd4); + } + return result; +]])], + [am_cv_func_iconv_works=yes], , + [case "$host_os" in + aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; + *) am_cv_func_iconv_works="guessing yes" ;; + esac]) + test "$am_cv_func_iconv_works" = no || break + done + LIBS="$am_save_LIBS" + ]) + case "$am_cv_func_iconv_works" in + *no) am_func_iconv=no am_cv_lib_iconv=no ;; + *) am_func_iconv=yes ;; + esac + else + am_func_iconv=no am_cv_lib_iconv=no + fi + if test "$am_func_iconv" = yes; then + AC_DEFINE([HAVE_ICONV], [1], + [Define if you have the iconv() function and it works.]) + fi + if test "$am_cv_lib_iconv" = yes; then + AC_MSG_CHECKING([how to link with libiconv]) + AC_MSG_RESULT([$LIBICONV]) + else + dnl If $LIBICONV didn't lead to a usable library, we don't need $INCICONV + dnl either. + CPPFLAGS="$am_save_CPPFLAGS" + LIBICONV= + LTLIBICONV= + fi + AC_SUBST([LIBICONV]) + AC_SUBST([LTLIBICONV]) +]) + +dnl Define AM_ICONV using AC_DEFUN_ONCE for Autoconf >= 2.64, in order to +dnl avoid warnings like +dnl "warning: AC_REQUIRE: `AM_ICONV' was expanded before it was required". +dnl This is tricky because of the way 'aclocal' is implemented: +dnl - It requires defining an auxiliary macro whose name ends in AC_DEFUN. +dnl Otherwise aclocal's initial scan pass would miss the macro definition. +dnl - It requires a line break inside the AC_DEFUN_ONCE and AC_DEFUN expansions. +dnl Otherwise aclocal would emit many "Use of uninitialized value $1" +dnl warnings. +m4_define([gl_iconv_AC_DEFUN], + m4_version_prereq([2.64], + [[AC_DEFUN_ONCE( + [$1], [$2])]], + [m4_ifdef([gl_00GNULIB], + [[AC_DEFUN_ONCE( + [$1], [$2])]], + [[AC_DEFUN( + [$1], [$2])]])])) +gl_iconv_AC_DEFUN([AM_ICONV], +[ + AM_ICONV_LINK + if test "$am_cv_func_iconv" = yes; then + AC_MSG_CHECKING([for iconv declaration]) + AC_CACHE_VAL([am_cv_proto_iconv], [ + AC_COMPILE_IFELSE( + [AC_LANG_PROGRAM( + [[ +#include +#include +extern +#ifdef __cplusplus +"C" +#endif +#if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) +size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); +#else +size_t iconv(); +#endif + ]], + [[]])], + [am_cv_proto_iconv_arg1=""], + [am_cv_proto_iconv_arg1="const"]) + am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);"]) + am_cv_proto_iconv=`echo "[$]am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` + AC_MSG_RESULT([ + $am_cv_proto_iconv]) + else + dnl When compiling GNU libiconv on a system that does not have iconv yet, + dnl pick the POSIX compliant declaration without 'const'. + am_cv_proto_iconv_arg1="" + fi + AC_DEFINE_UNQUOTED([ICONV_CONST], [$am_cv_proto_iconv_arg1], + [Define as const if the declaration of iconv() needs const.]) + dnl Also substitute ICONV_CONST in the gnulib generated . 
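
The iconv declaration check above asks one question: does a redeclaration of iconv() with plain char ** compile? A hand-run sketch, assuming cc and an installed iconv.h (on glibc this reports the non-const form):

cat > conftest.c <<'EOF'
#include <stdlib.h>
#include <iconv.h>
extern size_t iconv (iconv_t cd, char **inbuf, size_t *inbytesleft,
                     char **outbuf, size_t *outbytesleft);
int main (void) { return 0; }
EOF
if cc -c conftest.c -o conftest.o 2>/dev/null; then
  echo "ICONV_CONST="        # system declares iconv() with char **
else
  echo "ICONV_CONST=const"   # system declares iconv() with const char **
fi
rm -f conftest.c conftest.o
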
+ m4_ifdef([gl_ICONV_H_DEFAULTS], + [AC_REQUIRE([gl_ICONV_H_DEFAULTS]) + if test -n "$am_cv_proto_iconv_arg1"; then + ICONV_CONST="const" + fi + ]) +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/intlmacosx.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/intlmacosx.m4 new file mode 100644 index 000000000..ebd9937c1 --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/intlmacosx.m4 @@ -0,0 +1,65 @@ +# intlmacosx.m4 serial 8 (gettext-0.20.2) +dnl Copyright (C) 2004-2014, 2016, 2019-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can be used in projects which are not available under +dnl the GNU General Public License or the GNU Lesser General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Lesser General Public License, and the rest of the GNU +dnl gettext package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Checks for special options needed on Mac OS X. +dnl Defines INTL_MACOSX_LIBS. +AC_DEFUN([gt_INTL_MACOSX], +[ + dnl Check for API introduced in Mac OS X 10.4. + AC_CACHE_CHECK([for CFPreferencesCopyAppValue], + [gt_cv_func_CFPreferencesCopyAppValue], + [gt_save_LIBS="$LIBS" + LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[#include ]], + [[CFPreferencesCopyAppValue(NULL, NULL)]])], + [gt_cv_func_CFPreferencesCopyAppValue=yes], + [gt_cv_func_CFPreferencesCopyAppValue=no]) + LIBS="$gt_save_LIBS"]) + if test $gt_cv_func_CFPreferencesCopyAppValue = yes; then + AC_DEFINE([HAVE_CFPREFERENCESCOPYAPPVALUE], [1], + [Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in the CoreFoundation framework.]) + fi + dnl Don't check for the API introduced in Mac OS X 10.5, CFLocaleCopyCurrent, + dnl because in macOS 10.13.4 it has the following behaviour: + dnl When two or more languages are specified in the + dnl "System Preferences > Language & Region > Preferred Languages" panel, + dnl it returns en_CC where CC is the territory (even when English is not among + dnl the preferred languages!). What we want instead is what + dnl CFLocaleCopyCurrent returned in earlier macOS releases and what + dnl CFPreferencesCopyAppValue still returns, namely ll_CC where ll is the + dnl first among the preferred languages and CC is the territory. 
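
The CFPreferencesCopyAppValue probe above is just a link test against the CoreFoundation framework; a macOS-only sketch, assuming the Xcode command line tools are installed (anywhere else it simply fails and INTL_MACOSX_LIBS stays empty):

cat > conftest.c <<'EOF'
#include <CoreFoundation/CFPreferences.h>
int main (void) { CFPreferencesCopyAppValue (NULL, NULL); return 0; }
EOF
if cc conftest.c -Wl,-framework -Wl,CoreFoundation -o conftest 2>/dev/null; then
  echo "INTL_MACOSX_LIBS=-Wl,-framework -Wl,CoreFoundation"
else
  echo "INTL_MACOSX_LIBS="
fi
rm -f conftest.c conftest
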
+ AC_CACHE_CHECK([for CFLocaleCopyPreferredLanguages], [gt_cv_func_CFLocaleCopyPreferredLanguages], + [gt_save_LIBS="$LIBS" + LIBS="$LIBS -Wl,-framework -Wl,CoreFoundation" + AC_LINK_IFELSE( + [AC_LANG_PROGRAM( + [[#include ]], + [[CFLocaleCopyPreferredLanguages();]])], + [gt_cv_func_CFLocaleCopyPreferredLanguages=yes], + [gt_cv_func_CFLocaleCopyPreferredLanguages=no]) + LIBS="$gt_save_LIBS"]) + if test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then + AC_DEFINE([HAVE_CFLOCALECOPYPREFERREDLANGUAGES], [1], + [Define to 1 if you have the Mac OS X function CFLocaleCopyPreferredLanguages in the CoreFoundation framework.]) + fi + INTL_MACOSX_LIBS= + if test $gt_cv_func_CFPreferencesCopyAppValue = yes \ + || test $gt_cv_func_CFLocaleCopyPreferredLanguages = yes; then + INTL_MACOSX_LIBS="-Wl,-framework -Wl,CoreFoundation" + fi + AC_SUBST([INTL_MACOSX_LIBS]) +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-ld.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-ld.m4 new file mode 100644 index 000000000..98c348faf --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-ld.m4 @@ -0,0 +1,168 @@ +# lib-ld.m4 serial 9 +dnl Copyright (C) 1996-2003, 2009-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl Subroutines of libtool.m4, +dnl with replacements s/_*LT_PATH/AC_LIB_PROG/ and s/lt_/acl_/ to avoid +dnl collision with libtool.m4. + +dnl From libtool-2.4. Sets the variable with_gnu_ld to yes or no. +AC_DEFUN([AC_LIB_PROG_LD_GNU], +[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], [acl_cv_prog_gnu_ld], +[# I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 /dev/null 2>&1 \ + && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ + || PATH_SEPARATOR=';' + } +fi + +if test -n "$LD"; then + AC_MSG_CHECKING([for ld]) +elif test "$GCC" = yes; then + AC_MSG_CHECKING([for ld used by $CC]) +elif test "$with_gnu_ld" = yes; then + AC_MSG_CHECKING([for GNU ld]) +else + AC_MSG_CHECKING([for non-GNU ld]) +fi +if test -n "$LD"; then + # Let the user override the test with a path. + : +else + AC_CACHE_VAL([acl_cv_path_LD], + [ + acl_cv_path_LD= # Final result of this test + ac_prog=ld # Program to search in $PATH + if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + acl_output=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + acl_output=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $acl_output in + # Accept absolute paths. + [[\\/]]* | ?:[[\\/]]*) + re_direlt='/[[^/]][[^/]]*/\.\./' + # Canonicalize the pathname of ld + acl_output=`echo "$acl_output" | sed 's%\\\\%/%g'` + while echo "$acl_output" | grep "$re_direlt" > /dev/null 2>&1; do + acl_output=`echo $acl_output | sed "s%$re_direlt%/%"` + done + # Got the pathname. No search in PATH is needed. + acl_cv_path_LD="$acl_output" + ac_prog= + ;; + "") + # If it fails, then pretend we aren't using GCC. + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac + fi + if test -n "$ac_prog"; then + # Search for $ac_prog in $PATH. + acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$acl_save_ifs" + test -z "$ac_dir" && ac_dir=. 
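
The ld discovery in lib-ld.m4 above amounts to two questions that can be asked interactively, assuming gcc: which ld does the compiler use, and does that ld identify itself as GNU ld? (The *GNU* / 'with BFD' patterns are the ones lib-ld.m4 matches on.)

LD=`gcc -print-prog-name=ld 2>/dev/null`
echo "ld used by gcc: $LD"
case `"$LD" -v 2>&1 </dev/null` in
  *GNU* | *'with BFD'*) echo "with_gnu_ld=yes" ;;
  *)                    echo "with_gnu_ld=no" ;;
esac
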
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + acl_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$acl_cv_path_LD" -v 2>&1 conftest.sh + . ./conftest.sh + rm -f ./conftest.sh + acl_cv_rpath=done + ]) + wl="$acl_cv_wl" + acl_libext="$acl_cv_libext" + acl_shlibext="$acl_cv_shlibext" + acl_libname_spec="$acl_cv_libname_spec" + acl_library_names_spec="$acl_cv_library_names_spec" + acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" + acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" + acl_hardcode_direct="$acl_cv_hardcode_direct" + acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" + dnl Determine whether the user wants rpath handling at all. + AC_ARG_ENABLE([rpath], + [ --disable-rpath do not hardcode runtime library paths], + :, enable_rpath=yes) +]) + +dnl AC_LIB_FROMPACKAGE(name, package) +dnl declares that libname comes from the given package. The configure file +dnl will then not have a --with-libname-prefix option but a +dnl --with-package-prefix option. Several libraries can come from the same +dnl package. This declaration must occur before an AC_LIB_LINKFLAGS or similar +dnl macro call that searches for libname. +AC_DEFUN([AC_LIB_FROMPACKAGE], +[ + pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) + define([acl_frompackage_]NAME, [$2]) + popdef([NAME]) + pushdef([PACK],[$2]) + pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) + define([acl_libsinpackage_]PACKUP, + m4_ifdef([acl_libsinpackage_]PACKUP, [m4_defn([acl_libsinpackage_]PACKUP)[, ]],)[lib$1]) + popdef([PACKUP]) + popdef([PACK]) +]) + +dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and +dnl the libraries corresponding to explicit and implicit dependencies. +dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. +dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found +dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. +AC_DEFUN([AC_LIB_LINKFLAGS_BODY], +[ + AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) + pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) + pushdef([PACK],[m4_ifdef([acl_frompackage_]NAME, [acl_frompackage_]NAME, lib[$1])]) + pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], + [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) + pushdef([PACKLIBS],[m4_ifdef([acl_frompackage_]NAME, [acl_libsinpackage_]PACKUP, lib[$1])]) + dnl By default, look in $includedir and $libdir. 
+ use_additional=yes + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" + eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" + ]) + AC_ARG_WITH(PACK[-prefix], +[[ --with-]]PACK[[-prefix[=DIR] search for ]PACKLIBS[ in DIR/include and DIR/lib + --without-]]PACK[[-prefix don't search for ]PACKLIBS[ in includedir and libdir]], +[ + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + eval additional_libdir2=\"$exec_prefix/$acl_libdirstem2\" + eval additional_libdir3=\"$exec_prefix/$acl_libdirstem3\" + ]) + else + additional_includedir="$withval/include" + additional_libdir="$withval/$acl_libdirstem" + additional_libdir2="$withval/$acl_libdirstem2" + additional_libdir3="$withval/$acl_libdirstem3" + fi + fi +]) + if test "X$additional_libdir2" = "X$additional_libdir"; then + additional_libdir2= + fi + if test "X$additional_libdir3" = "X$additional_libdir"; then + additional_libdir3= + fi + dnl Search the library and its dependencies in $additional_libdir and + dnl $LDFLAGS. Using breadth-first-seach. + LIB[]NAME= + LTLIB[]NAME= + INC[]NAME= + LIB[]NAME[]_PREFIX= + dnl HAVE_LIB${NAME} is an indicator that LIB${NAME}, LTLIB${NAME} have been + dnl computed. So it has to be reset here. + HAVE_LIB[]NAME= + rpathdirs= + ltrpathdirs= + names_already_handled= + names_next_round='$1 $2' + while test -n "$names_next_round"; do + names_this_round="$names_next_round" + names_next_round= + for name in $names_this_round; do + already_handled= + for n in $names_already_handled; do + if test "$n" = "$name"; then + already_handled=yes + break + fi + done + if test -z "$already_handled"; then + names_already_handled="$names_already_handled $name" + dnl See if it was already located by an earlier AC_LIB_LINKFLAGS + dnl or AC_LIB_HAVE_LINKFLAGS call. + uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` + eval value=\"\$HAVE_LIB$uppername\" + if test -n "$value"; then + if test "$value" = yes; then + eval value=\"\$LIB$uppername\" + test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" + eval value=\"\$LTLIB$uppername\" + test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" + else + dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined + dnl that this library doesn't exist. So just drop it. + : + fi + else + dnl Search the library lib$name in $additional_libdir and $LDFLAGS + dnl and the already constructed $LIBNAME/$LTLIBNAME. + found_dir= + found_la= + found_so= + found_a= + eval libname=\"$acl_libname_spec\" # typically: libname=lib$name + if test -n "$acl_shlibext"; then + shrext=".$acl_shlibext" # typically: shrext=.so + else + shrext= + fi + if test $use_additional = yes; then + for additional_libdir_variable in additional_libdir additional_libdir2 additional_libdir3; do + if test "X$found_dir" = "X"; then + eval dir=\$$additional_libdir_variable + if test -n "$dir"; then + dnl The same code as in the loop below: + dnl First look for a shared library. 
+ if test -n "$acl_shlibext"; then + if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then + found_dir="$dir" + found_so="$dir/$libname$shrext" + else + if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then + ver=`(cd "$dir" && \ + for f in "$libname$shrext".*; do echo "$f"; done \ + | sed -e "s,^$libname$shrext\\\\.,," \ + | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ + | sed 1q ) 2>/dev/null` + if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then + found_dir="$dir" + found_so="$dir/$libname$shrext.$ver" + fi + else + eval library_names=\"$acl_library_names_spec\" + for f in $library_names; do + if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then + found_dir="$dir" + found_so="$dir/$f" + break + fi + done + fi + fi + fi + dnl Then look for a static library. + if test "X$found_dir" = "X"; then + if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then + found_dir="$dir" + found_a="$dir/$libname.$acl_libext" + fi + fi + if test "X$found_dir" != "X"; then + if test -f "$dir/$libname.la"; then + found_la="$dir/$libname.la" + fi + fi + fi + fi + done + fi + if test "X$found_dir" = "X"; then + for x in $LDFLAGS $LTLIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + case "$x" in + -L*) + dir=`echo "X$x" | sed -e 's/^X-L//'` + dnl First look for a shared library. + if test -n "$acl_shlibext"; then + if test -f "$dir/$libname$shrext" && acl_is_expected_elfclass < "$dir/$libname$shrext"; then + found_dir="$dir" + found_so="$dir/$libname$shrext" + else + if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then + ver=`(cd "$dir" && \ + for f in "$libname$shrext".*; do echo "$f"; done \ + | sed -e "s,^$libname$shrext\\\\.,," \ + | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ + | sed 1q ) 2>/dev/null` + if test -n "$ver" && test -f "$dir/$libname$shrext.$ver" && acl_is_expected_elfclass < "$dir/$libname$shrext.$ver"; then + found_dir="$dir" + found_so="$dir/$libname$shrext.$ver" + fi + else + eval library_names=\"$acl_library_names_spec\" + for f in $library_names; do + if test -f "$dir/$f" && acl_is_expected_elfclass < "$dir/$f"; then + found_dir="$dir" + found_so="$dir/$f" + break + fi + done + fi + fi + fi + dnl Then look for a static library. + if test "X$found_dir" = "X"; then + if test -f "$dir/$libname.$acl_libext" && ${AR-ar} -p "$dir/$libname.$acl_libext" | acl_is_expected_elfclass; then + found_dir="$dir" + found_a="$dir/$libname.$acl_libext" + fi + fi + if test "X$found_dir" != "X"; then + if test -f "$dir/$libname.la"; then + found_la="$dir/$libname.la" + fi + fi + ;; + esac + if test "X$found_dir" != "X"; then + break + fi + done + fi + if test "X$found_dir" != "X"; then + dnl Found the library. + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" + if test "X$found_so" != "X"; then + dnl Linking with a shared library. We attempt to hardcode its + dnl directory into the executable's runpath, unless it's the + dnl standard /usr/lib. + if test "$enable_rpath" = no \ + || test "X$found_dir" = "X/usr/$acl_libdirstem" \ + || test "X$found_dir" = "X/usr/$acl_libdirstem2" \ + || test "X$found_dir" = "X/usr/$acl_libdirstem3"; then + dnl No hardcoding is needed. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + dnl Use an explicit option to hardcode DIR into the resulting + dnl binary. + dnl Potentially add DIR to ltrpathdirs. 
+ dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $found_dir" + fi + dnl The hardcoding into $LIBNAME is system dependent. + if test "$acl_hardcode_direct" = yes; then + dnl Using DIR/libNAME.so during linking hardcodes DIR into the + dnl resulting binary. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then + dnl Use an explicit option to hardcode DIR into the resulting + dnl binary. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + dnl Potentially add DIR to rpathdirs. + dnl The rpathdirs will be appended to $LIBNAME at the end. + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $found_dir" + fi + else + dnl Rely on "-L$found_dir". + dnl But don't add it if it's already contained in the LDFLAGS + dnl or the already constructed $LIBNAME + haveit= + for x in $LDFLAGS $LIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$found_dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" + fi + if test "$acl_hardcode_minus_L" != no; then + dnl FIXME: Not sure whether we should use + dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" + dnl here. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" + else + dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH + dnl here, because this doesn't fit in flags passed to the + dnl compiler. So give up. No hardcoding. This affects only + dnl very old systems. + dnl FIXME: Not sure whether we should use + dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" + dnl here. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" + fi + fi + fi + fi + else + if test "X$found_a" != "X"; then + dnl Linking with a static library. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" + else + dnl We shouldn't come here, but anyway it's good to have a + dnl fallback. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" + fi + fi + dnl Assume the include files are nearby. + additional_includedir= + case "$found_dir" in + */$acl_libdirstem | */$acl_libdirstem/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` + if test "$name" = '$1'; then + LIB[]NAME[]_PREFIX="$basedir" + fi + additional_includedir="$basedir/include" + ;; + */$acl_libdirstem2 | */$acl_libdirstem2/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` + if test "$name" = '$1'; then + LIB[]NAME[]_PREFIX="$basedir" + fi + additional_includedir="$basedir/include" + ;; + */$acl_libdirstem3 | */$acl_libdirstem3/) + basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem3/"'*$,,'` + if test "$name" = '$1'; then + LIB[]NAME[]_PREFIX="$basedir" + fi + additional_includedir="$basedir/include" + ;; + esac + if test "X$additional_includedir" != "X"; then + dnl Potentially add $additional_includedir to $INCNAME. + dnl But don't add it + dnl 1. if it's the standard /usr/include, + dnl 2. if it's /usr/local/include and we are using GCC on Linux, + dnl 3. if it's already present in $CPPFLAGS or the already + dnl constructed $INCNAME, + dnl 4. if it doesn't exist as a directory. 
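
On a typical GNU toolchain the hardcoding machinery above resolves to a -Wl,-rpath pair per extra directory; a sketch with an illustrative directory (the flag spec would really come from config.rpath, and the directory from the library search):

wl='-Wl,'
acl_hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'   # typical GNU ld value
found_dir=/opt/foo/lib                                     # hypothetical non-standard dir
libdir="$found_dir"
eval flag=\"$acl_hardcode_libdir_flag_spec\"
LIBFOO="-L$found_dir -lfoo $flag"
echo "$LIBFOO"    # -> -L/opt/foo/lib -lfoo -Wl,-rpath -Wl,/opt/foo/lib
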
+ if test "X$additional_includedir" != "X/usr/include"; then + haveit= + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + for x in $CPPFLAGS $INC[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + dnl Really add $additional_includedir to $INCNAME. + INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" + fi + fi + fi + fi + fi + dnl Look for dependencies. + if test -n "$found_la"; then + dnl Read the .la file. It defines the variables + dnl dlname, library_names, old_library, dependency_libs, current, + dnl age, revision, installed, dlopen, dlpreopen, libdir. + save_libdir="$libdir" + case "$found_la" in + */* | *\\*) . "$found_la" ;; + *) . "./$found_la" ;; + esac + libdir="$save_libdir" + dnl We use only dependency_libs. + for dep in $dependency_libs; do + case "$dep" in + -L*) + dependency_libdir=`echo "X$dep" | sed -e 's/^X-L//'` + dnl Potentially add $dependency_libdir to $LIBNAME and $LTLIBNAME. + dnl But don't add it + dnl 1. if it's the standard /usr/lib, + dnl 2. if it's /usr/local/lib and we are using GCC on Linux, + dnl 3. if it's already present in $LDFLAGS or the already + dnl constructed $LIBNAME, + dnl 4. if it doesn't exist as a directory. + if test "X$dependency_libdir" != "X/usr/$acl_libdirstem" \ + && test "X$dependency_libdir" != "X/usr/$acl_libdirstem2" \ + && test "X$dependency_libdir" != "X/usr/$acl_libdirstem3"; then + haveit= + if test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem" \ + || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem2" \ + || test "X$dependency_libdir" = "X/usr/local/$acl_libdirstem3"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + haveit= + for x in $LDFLAGS $LIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$dependency_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$dependency_libdir"; then + dnl Really add $dependency_libdir to $LIBNAME. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$dependency_libdir" + fi + fi + haveit= + for x in $LDFLAGS $LTLIB[]NAME; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$dependency_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test -d "$dependency_libdir"; then + dnl Really add $dependency_libdir to $LTLIBNAME. + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$dependency_libdir" + fi + fi + fi + fi + ;; + -R*) + dir=`echo "X$dep" | sed -e 's/^X-R//'` + if test "$enable_rpath" != no; then + dnl Potentially add DIR to rpathdirs. + dnl The rpathdirs will be appended to $LIBNAME at the end. + haveit= + for x in $rpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + rpathdirs="$rpathdirs $dir" + fi + dnl Potentially add DIR to ltrpathdirs. + dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. + haveit= + for x in $ltrpathdirs; do + if test "X$x" = "X$dir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + ltrpathdirs="$ltrpathdirs $dir" + fi + fi + ;; + -l*) + dnl Handle this in the next round. 
+ names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` + ;; + *.la) + dnl Handle this in the next round. Throw away the .la's + dnl directory; it is already contained in a preceding -L + dnl option. + names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` + ;; + *) + dnl Most likely an immediate library name. + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" + ;; + esac + done + fi + else + dnl Didn't find the library; assume it is in the system directories + dnl known to the linker and runtime loader. (All the system + dnl directories known to the linker should also be known to the + dnl runtime loader, otherwise the system is severely misconfigured.) + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" + fi + fi + fi + done + done + if test "X$rpathdirs" != "X"; then + if test -n "$acl_hardcode_libdir_separator"; then + dnl Weird platform: only the last -rpath option counts, the user must + dnl pass all path elements in one option. We can arrange that for a + dnl single library, but not when more than one $LIBNAMEs are used. + alldirs= + for found_dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" + done + dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. + acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$acl_hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" + else + dnl The -rpath options are cumulative. + for found_dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$found_dir" + eval flag=\"$acl_hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" + done + fi + fi + if test "X$ltrpathdirs" != "X"; then + dnl When using libtool, the option that works for both libraries and + dnl executables is -R. The -R options are cumulative. + for found_dir in $ltrpathdirs; do + LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" + done + fi + popdef([PACKLIBS]) + popdef([PACKUP]) + popdef([PACK]) + popdef([NAME]) +]) + +dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, +dnl unless already present in VAR. +dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes +dnl contains two or three consecutive elements that belong together. +AC_DEFUN([AC_LIB_APPENDTOVAR], +[ + for element in [$2]; do + haveit= + for x in $[$1]; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X$element"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + [$1]="${[$1]}${[$1]:+ }$element" + fi + done +]) + +dnl For those cases where a variable contains several -L and -l options +dnl referring to unknown libraries and directories, this macro determines the +dnl necessary additional linker options for the runtime path. +dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) +dnl sets LDADDVAR to linker options needed together with LIBSVALUE. +dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, +dnl otherwise linking without libtool is assumed. 
+AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], +[ + AC_REQUIRE([AC_LIB_RPATH]) + AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) + $1= + if test "$enable_rpath" != no; then + if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then + dnl Use an explicit option to hardcode directories into the resulting + dnl binary. + rpathdirs= + next= + for opt in $2; do + if test -n "$next"; then + dir="$next" + dnl No need to hardcode the standard /usr/lib. + if test "X$dir" != "X/usr/$acl_libdirstem" \ + && test "X$dir" != "X/usr/$acl_libdirstem2" \ + && test "X$dir" != "X/usr/$acl_libdirstem3"; then + rpathdirs="$rpathdirs $dir" + fi + next= + else + case $opt in + -L) next=yes ;; + -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` + dnl No need to hardcode the standard /usr/lib. + if test "X$dir" != "X/usr/$acl_libdirstem" \ + && test "X$dir" != "X/usr/$acl_libdirstem2" \ + && test "X$dir" != "X/usr/$acl_libdirstem3"; then + rpathdirs="$rpathdirs $dir" + fi + next= ;; + *) next= ;; + esac + fi + done + if test "X$rpathdirs" != "X"; then + if test -n ""$3""; then + dnl libtool is used for linking. Use -R options. + for dir in $rpathdirs; do + $1="${$1}${$1:+ }-R$dir" + done + else + dnl The linker is used for linking directly. + if test -n "$acl_hardcode_libdir_separator"; then + dnl Weird platform: only the last -rpath option counts, the user + dnl must pass all path elements in one option. + alldirs= + for dir in $rpathdirs; do + alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" + done + acl_save_libdir="$libdir" + libdir="$alldirs" + eval flag=\"$acl_hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + $1="$flag" + else + dnl The -rpath options are cumulative. + for dir in $rpathdirs; do + acl_save_libdir="$libdir" + libdir="$dir" + eval flag=\"$acl_hardcode_libdir_flag_spec\" + libdir="$acl_save_libdir" + $1="${$1}${$1:+ }$flag" + done + fi + fi + fi + fi + fi + AC_SUBST([$1]) +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-prefix.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-prefix.m4 new file mode 100644 index 000000000..c8a0b464c --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/lib-prefix.m4 @@ -0,0 +1,320 @@ +# lib-prefix.m4 serial 17 +dnl Copyright (C) 2001-2005, 2008-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. + +dnl From Bruno Haible. + +dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed +dnl to access previously installed libraries. The basic assumption is that +dnl a user will want packages to use other packages he previously installed +dnl with the same --prefix option. +dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate +dnl libraries, but is otherwise very convenient. +AC_DEFUN([AC_LIB_PREFIX], +[ + AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) + AC_REQUIRE([AC_PROG_CC]) + AC_REQUIRE([AC_CANONICAL_HOST]) + AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) + AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) + dnl By default, look in $includedir and $libdir. 
+ use_additional=yes + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + AC_ARG_WITH([lib-prefix], +[[ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib + --without-lib-prefix don't search for libraries in includedir and libdir]], +[ + if test "X$withval" = "Xno"; then + use_additional=no + else + if test "X$withval" = "X"; then + AC_LIB_WITH_FINAL_PREFIX([ + eval additional_includedir=\"$includedir\" + eval additional_libdir=\"$libdir\" + ]) + else + additional_includedir="$withval/include" + additional_libdir="$withval/$acl_libdirstem" + fi + fi +]) + if test $use_additional = yes; then + dnl Potentially add $additional_includedir to $CPPFLAGS. + dnl But don't add it + dnl 1. if it's the standard /usr/include, + dnl 2. if it's already present in $CPPFLAGS, + dnl 3. if it's /usr/local/include and we are using GCC on Linux, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_includedir" != "X/usr/include"; then + haveit= + for x in $CPPFLAGS; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-I$additional_includedir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test "X$additional_includedir" = "X/usr/local/include"; then + if test -n "$GCC"; then + case $host_os in + linux* | gnu* | k*bsd*-gnu) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + if test -d "$additional_includedir"; then + dnl Really add $additional_includedir to $CPPFLAGS. + CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" + fi + fi + fi + fi + dnl Potentially add $additional_libdir to $LDFLAGS. + dnl But don't add it + dnl 1. if it's the standard /usr/lib, + dnl 2. if it's already present in $LDFLAGS, + dnl 3. if it's /usr/local/lib and we are using GCC on Linux, + dnl 4. if it doesn't exist as a directory. + if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then + haveit= + for x in $LDFLAGS; do + AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) + if test "X$x" = "X-L$additional_libdir"; then + haveit=yes + break + fi + done + if test -z "$haveit"; then + if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then + if test -n "$GCC"; then + case $host_os in + linux*) haveit=yes;; + esac + fi + fi + if test -z "$haveit"; then + if test -d "$additional_libdir"; then + dnl Really add $additional_libdir to $LDFLAGS. + LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" + fi + fi + fi + fi + fi +]) + +dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, +dnl acl_final_exec_prefix, containing the values to which $prefix and +dnl $exec_prefix will expand at the end of the configure script. +AC_DEFUN([AC_LIB_PREPARE_PREFIX], +[ + dnl Unfortunately, prefix and exec_prefix get only finally determined + dnl at the end of configure. + if test "X$prefix" = "XNONE"; then + acl_final_prefix="$ac_default_prefix" + else + acl_final_prefix="$prefix" + fi + if test "X$exec_prefix" = "XNONE"; then + acl_final_exec_prefix='${prefix}' + else + acl_final_exec_prefix="$exec_prefix" + fi + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" + prefix="$acl_save_prefix" +]) + +dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the +dnl variables prefix and exec_prefix bound to the values they will have +dnl at the end of the configure script. 
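
The --with-lib-prefix handling above is, in effect, a guarded extension of CPPFLAGS and LDFLAGS; roughly (the directory is hypothetical, and the real macro also skips /usr, /usr/local-with-GCC and duplicate entries):

withval=/opt/deps
additional_includedir="$withval/include"
additional_libdir="$withval/lib"
test -d "$additional_includedir" && CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir"
test -d "$additional_libdir" && LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir"
echo "CPPFLAGS=$CPPFLAGS"
echo "LDFLAGS=$LDFLAGS"
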
+AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], +[ + acl_save_prefix="$prefix" + prefix="$acl_final_prefix" + acl_save_exec_prefix="$exec_prefix" + exec_prefix="$acl_final_exec_prefix" + $1 + exec_prefix="$acl_save_exec_prefix" + prefix="$acl_save_prefix" +]) + +dnl AC_LIB_PREPARE_MULTILIB creates +dnl - a function acl_is_expected_elfclass, that tests whether standard input +dn; has a 32-bit or 64-bit ELF header, depending on the host CPU ABI, +dnl - 3 variables acl_libdirstem, acl_libdirstem2, acl_libdirstem3, containing +dnl the basename of the libdir to try in turn, either "lib" or "lib64" or +dnl "lib/64" or "lib32" or "lib/sparcv9" or "lib/amd64" or similar. +AC_DEFUN([AC_LIB_PREPARE_MULTILIB], +[ + dnl There is no formal standard regarding lib, lib32, and lib64. + dnl On most glibc systems, the current practice is that on a system supporting + dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under + dnl $prefix/lib64 and 32-bit libraries go under $prefix/lib. However, on + dnl Arch Linux based distributions, it's the opposite: 32-bit libraries go + dnl under $prefix/lib32 and 64-bit libraries go under $prefix/lib. + dnl We determine the compiler's default mode by looking at the compiler's + dnl library search path. If at least one of its elements ends in /lib64 or + dnl points to a directory whose absolute pathname ends in /lib64, we use that + dnl for 64-bit ABIs. Similarly for 32-bit ABIs. Otherwise we use the default, + dnl namely "lib". + dnl On Solaris systems, the current practice is that on a system supporting + dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under + dnl $prefix/lib/64 (which is a symlink to either $prefix/lib/sparcv9 or + dnl $prefix/lib/amd64) and 32-bit libraries go under $prefix/lib. + AC_REQUIRE([AC_CANONICAL_HOST]) + AC_REQUIRE([gl_HOST_CPU_C_ABI_32BIT]) + + AC_CACHE_CHECK([for ELF binary format], [gl_cv_elf], + [AC_EGREP_CPP([Extensible Linking Format], + [#ifdef __ELF__ + Extensible Linking Format + #endif + ], + [gl_cv_elf=yes], + [gl_cv_elf=no]) + ]) + if test $gl_cv_elf; then + # Extract the ELF class of a file (5th byte) in decimal. + # Cf. https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + if od -A x < /dev/null >/dev/null 2>/dev/null; then + # Use POSIX od. + func_elfclass () + { + od -A n -t d1 -j 4 -N 1 + } + else + # Use BSD hexdump. + func_elfclass () + { + dd bs=1 count=1 skip=4 2>/dev/null | hexdump -e '1/1 "%3d "' + echo + } + fi +changequote(,)dnl + case $HOST_CPU_C_ABI_32BIT in + yes) + # 32-bit ABI. + acl_is_expected_elfclass () + { + test "`func_elfclass | sed -e 's/[ ]//g'`" = 1 + } + ;; + no) + # 64-bit ABI. + acl_is_expected_elfclass () + { + test "`func_elfclass | sed -e 's/[ ]//g'`" = 2 + } + ;; + *) + # Unknown. + acl_is_expected_elfclass () + { + : + } + ;; + esac +changequote([,])dnl + else + acl_is_expected_elfclass () + { + : + } + fi + + dnl Allow the user to override the result by setting acl_cv_libdirstems. + AC_CACHE_CHECK([for the common suffixes of directories in the library search path], + [acl_cv_libdirstems], + [dnl Try 'lib' first, because that's the default for libdir in GNU, see + dnl . + acl_libdirstem=lib + acl_libdirstem2= + acl_libdirstem3= + case "$host_os" in + solaris*) + dnl See Solaris 10 Software Developer Collection > Solaris 64-bit Developer's Guide > The Development Environment + dnl . + dnl "Portable Makefiles should refer to any library directories using the 64 symbolic link." 
+ dnl But we want to recognize the sparcv9 or amd64 subdirectory also if the + dnl symlink is missing, so we set acl_libdirstem2 too. + if test $HOST_CPU_C_ABI_32BIT = no; then + acl_libdirstem2=lib/64 + case "$host_cpu" in + sparc*) acl_libdirstem3=lib/sparcv9 ;; + i*86 | x86_64) acl_libdirstem3=lib/amd64 ;; + esac + fi + ;; + *) + dnl If $CC generates code for a 32-bit ABI, the libraries are + dnl surely under $prefix/lib or $prefix/lib32, not $prefix/lib64. + dnl Similarly, if $CC generates code for a 64-bit ABI, the libraries + dnl are surely under $prefix/lib or $prefix/lib64, not $prefix/lib32. + dnl Find the compiler's search path. However, non-system compilers + dnl sometimes have odd library search paths. But we can't simply invoke + dnl '/usr/bin/gcc -print-search-dirs' because that would not take into + dnl account the -m32/-m31 or -m64 options from the $CC or $CFLAGS. + searchpath=`(LC_ALL=C $CC $CPPFLAGS $CFLAGS -print-search-dirs) 2>/dev/null \ + | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` + if test $HOST_CPU_C_ABI_32BIT != no; then + # 32-bit or unknown ABI. + if test -d /usr/lib32; then + acl_libdirstem2=lib32 + fi + fi + if test $HOST_CPU_C_ABI_32BIT != yes; then + # 64-bit or unknown ABI. + if test -d /usr/lib64; then + acl_libdirstem3=lib64 + fi + fi + if test -n "$searchpath"; then + acl_save_IFS="${IFS= }"; IFS=":" + for searchdir in $searchpath; do + if test -d "$searchdir"; then + case "$searchdir" in + */lib32/ | */lib32 ) acl_libdirstem2=lib32 ;; + */lib64/ | */lib64 ) acl_libdirstem3=lib64 ;; + */../ | */.. ) + # Better ignore directories of this form. They are misleading. + ;; + *) searchdir=`cd "$searchdir" && pwd` + case "$searchdir" in + */lib32 ) acl_libdirstem2=lib32 ;; + */lib64 ) acl_libdirstem3=lib64 ;; + esac ;; + esac + fi + done + IFS="$acl_save_IFS" + if test $HOST_CPU_C_ABI_32BIT = yes; then + # 32-bit ABI. + acl_libdirstem3= + fi + if test $HOST_CPU_C_ABI_32BIT = no; then + # 64-bit ABI. + acl_libdirstem2= + fi + fi + ;; + esac + test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" + test -n "$acl_libdirstem3" || acl_libdirstem3="$acl_libdirstem" + acl_cv_libdirstems="$acl_libdirstem,$acl_libdirstem2,$acl_libdirstem3" + ]) + dnl Decompose acl_cv_libdirstems into acl_libdirstem, acl_libdirstem2, and + dnl acl_libdirstem3. +changequote(,)dnl + acl_libdirstem=`echo "$acl_cv_libdirstems" | sed -e 's/,.*//'` + acl_libdirstem2=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,//' -e 's/,.*//'` + acl_libdirstem3=`echo "$acl_cv_libdirstems" | sed -e 's/^[^,]*,[^,]*,//' -e 's/,.*//'` +changequote([,])dnl +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/nls.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/nls.m4 new file mode 100644 index 000000000..5a506fc4b --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/nls.m4 @@ -0,0 +1,32 @@ +# nls.m4 serial 6 (gettext-0.20.2) +dnl Copyright (C) 1995-2003, 2005-2006, 2008-2014, 2016, 2019-2020 Free +dnl Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can be used in projects which are not available under +dnl the GNU General Public License or the GNU Lesser General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. 
+dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Lesser General Public License, and the rest of the GNU +dnl gettext package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2003. + +AC_PREREQ([2.50]) + +AC_DEFUN([AM_NLS], +[ + AC_MSG_CHECKING([whether NLS is requested]) + dnl Default is enabled NLS + AC_ARG_ENABLE([nls], + [ --disable-nls do not use Native Language Support], + USE_NLS=$enableval, USE_NLS=yes) + AC_MSG_RESULT([$USE_NLS]) + AC_SUBST([USE_NLS]) +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/po.m4 b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/po.m4 new file mode 100644 index 000000000..3778fd7aa --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/aclocal/po.m4 @@ -0,0 +1,450 @@ +# po.m4 serial 31 (gettext-0.20.2) +dnl Copyright (C) 1995-2014, 2016, 2018-2020 Free Software Foundation, Inc. +dnl This file is free software; the Free Software Foundation +dnl gives unlimited permission to copy and/or distribute it, +dnl with or without modifications, as long as this notice is preserved. +dnl +dnl This file can be used in projects which are not available under +dnl the GNU General Public License or the GNU Lesser General Public +dnl License but which still want to provide support for the GNU gettext +dnl functionality. +dnl Please note that the actual code of the GNU gettext library is covered +dnl by the GNU Lesser General Public License, and the rest of the GNU +dnl gettext package is covered by the GNU General Public License. +dnl They are *not* in the public domain. + +dnl Authors: +dnl Ulrich Drepper , 1995-2000. +dnl Bruno Haible , 2000-2003. + +AC_PREREQ([2.60]) + +dnl Checks for all prerequisites of the po subdirectory. +AC_DEFUN([AM_PO_SUBDIRS], +[ + AC_REQUIRE([AC_PROG_MAKE_SET])dnl + AC_REQUIRE([AC_PROG_INSTALL])dnl + AC_REQUIRE([AC_PROG_MKDIR_P])dnl + AC_REQUIRE([AC_PROG_SED])dnl + AC_REQUIRE([AM_NLS])dnl + + dnl Release version of the gettext macros. This is used to ensure that + dnl the gettext macros and po/Makefile.in.in are in sync. + AC_SUBST([GETTEXT_MACRO_VERSION], [0.20]) + + dnl Perform the following tests also if --disable-nls has been given, + dnl because they are needed for "make dist" to work. + + dnl Search for GNU msgfmt in the PATH. + dnl The first test excludes Solaris msgfmt and early GNU msgfmt versions. + dnl The second test excludes FreeBSD msgfmt. + AM_PATH_PROG_WITH_TEST(MSGFMT, msgfmt, + [$ac_dir/$ac_word --statistics /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && + (if $ac_dir/$ac_word --statistics /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], + :) + AC_PATH_PROG([GMSGFMT], [gmsgfmt], [$MSGFMT]) + + dnl Test whether it is GNU msgfmt >= 0.15. +changequote(,)dnl + case `$GMSGFMT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in + '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) GMSGFMT_015=: ;; + *) GMSGFMT_015=$GMSGFMT ;; + esac +changequote([,])dnl + AC_SUBST([GMSGFMT_015]) + + dnl Search for GNU xgettext 0.12 or newer in the PATH. + dnl The first test excludes Solaris xgettext and early GNU xgettext versions. + dnl The second test excludes FreeBSD xgettext. 
+ AM_PATH_PROG_WITH_TEST(XGETTEXT, xgettext, + [$ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1 && + (if $ac_dir/$ac_word --omit-header --copyright-holder= --msgid-bugs-address= /dev/null 2>&1 >/dev/null | grep usage >/dev/null; then exit 1; else exit 0; fi)], + :) + dnl Remove leftover from FreeBSD xgettext call. + rm -f messages.po + + dnl Test whether it is GNU xgettext >= 0.15. +changequote(,)dnl + case `$XGETTEXT --version | sed 1q | sed -e 's,^[^0-9]*,,'` in + '' | 0.[0-9] | 0.[0-9].* | 0.1[0-4] | 0.1[0-4].*) XGETTEXT_015=: ;; + *) XGETTEXT_015=$XGETTEXT ;; + esac +changequote([,])dnl + AC_SUBST([XGETTEXT_015]) + + dnl Search for GNU msgmerge 0.11 or newer in the PATH. + AM_PATH_PROG_WITH_TEST(MSGMERGE, msgmerge, + [$ac_dir/$ac_word --update -q /dev/null /dev/null >&]AS_MESSAGE_LOG_FD[ 2>&1], :) + + dnl Test whether it is GNU msgmerge >= 0.20. + if LC_ALL=C $MSGMERGE --help | grep ' --for-msgfmt ' >/dev/null; then + MSGMERGE_FOR_MSGFMT_OPTION='--for-msgfmt' + else + dnl Test whether it is GNU msgmerge >= 0.12. + if LC_ALL=C $MSGMERGE --help | grep ' --no-fuzzy-matching ' >/dev/null; then + MSGMERGE_FOR_MSGFMT_OPTION='--no-fuzzy-matching --no-location --quiet' + else + dnl With these old versions, $(MSGMERGE) $(MSGMERGE_FOR_MSGFMT_OPTION) is + dnl slow. But this is not a big problem, as such old gettext versions are + dnl hardly in use any more. + MSGMERGE_FOR_MSGFMT_OPTION='--no-location --quiet' + fi + fi + AC_SUBST([MSGMERGE_FOR_MSGFMT_OPTION]) + + dnl Support for AM_XGETTEXT_OPTION. + test -n "${XGETTEXT_EXTRA_OPTIONS+set}" || XGETTEXT_EXTRA_OPTIONS= + AC_SUBST([XGETTEXT_EXTRA_OPTIONS]) + + AC_CONFIG_COMMANDS([po-directories], [[ + for ac_file in $CONFIG_FILES; do + # Support "outfile[:infile[:infile...]]" + case "$ac_file" in + *:*) ac_file=`echo "$ac_file"|sed 's%:.*%%'` ;; + esac + # PO directories have a Makefile.in generated from Makefile.in.in. + case "$ac_file" in */Makefile.in) + # Adjust a relative srcdir. + ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` + ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` + ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` + # In autoconf-2.13 it is called $ac_given_srcdir. + # In autoconf-2.50 it is called $srcdir. + test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" + case "$ac_given_srcdir" in + .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; + /*) top_srcdir="$ac_given_srcdir" ;; + *) top_srcdir="$ac_dots$ac_given_srcdir" ;; + esac + # Treat a directory as a PO directory if and only if it has a + # POTFILES.in file. This allows packages to have multiple PO + # directories under different names or in different locations. + if test -f "$ac_given_srcdir/$ac_dir/POTFILES.in"; then + rm -f "$ac_dir/POTFILES" + test -n "$as_me" && echo "$as_me: creating $ac_dir/POTFILES" || echo "creating $ac_dir/POTFILES" + gt_tab=`printf '\t'` + cat "$ac_given_srcdir/$ac_dir/POTFILES.in" | sed -e "/^#/d" -e "/^[ ${gt_tab}]*\$/d" -e "s,.*, $top_srcdir/& \\\\," | sed -e "\$s/\(.*\) \\\\/\1/" > "$ac_dir/POTFILES" + POMAKEFILEDEPS="POTFILES.in" + # ALL_LINGUAS, POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES depend + # on $ac_dir but don't depend on user-specified configuration + # parameters. + if test -f "$ac_given_srcdir/$ac_dir/LINGUAS"; then + # The LINGUAS file contains the set of available languages. 
+ if test -n "$OBSOLETE_ALL_LINGUAS"; then + test -n "$as_me" && echo "$as_me: setting ALL_LINGUAS in configure.in is obsolete" || echo "setting ALL_LINGUAS in configure.in is obsolete" + fi + ALL_LINGUAS=`sed -e "/^#/d" -e "s/#.*//" "$ac_given_srcdir/$ac_dir/LINGUAS"` + POMAKEFILEDEPS="$POMAKEFILEDEPS LINGUAS" + else + # The set of available languages was given in configure.in. + ALL_LINGUAS=$OBSOLETE_ALL_LINGUAS + fi + # Compute POFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).po) + # Compute UPDATEPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).po-update) + # Compute DUMMYPOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(lang).nop) + # Compute GMOFILES + # as $(foreach lang, $(ALL_LINGUAS), $(srcdir)/$(lang).gmo) + case "$ac_given_srcdir" in + .) srcdirpre= ;; + *) srcdirpre='$(srcdir)/' ;; + esac + POFILES= + UPDATEPOFILES= + DUMMYPOFILES= + GMOFILES= + for lang in $ALL_LINGUAS; do + POFILES="$POFILES $srcdirpre$lang.po" + UPDATEPOFILES="$UPDATEPOFILES $lang.po-update" + DUMMYPOFILES="$DUMMYPOFILES $lang.nop" + GMOFILES="$GMOFILES $srcdirpre$lang.gmo" + done + # CATALOGS depends on both $ac_dir and the user's LINGUAS + # environment variable. + INST_LINGUAS= + if test -n "$ALL_LINGUAS"; then + for presentlang in $ALL_LINGUAS; do + useit=no + if test "%UNSET%" != "$LINGUAS"; then + desiredlanguages="$LINGUAS" + else + desiredlanguages="$ALL_LINGUAS" + fi + for desiredlang in $desiredlanguages; do + # Use the presentlang catalog if desiredlang is + # a. equal to presentlang, or + # b. a variant of presentlang (because in this case, + # presentlang can be used as a fallback for messages + # which are not translated in the desiredlang catalog). + case "$desiredlang" in + "$presentlang"*) useit=yes;; + esac + done + if test $useit = yes; then + INST_LINGUAS="$INST_LINGUAS $presentlang" + fi + done + fi + CATALOGS= + if test -n "$INST_LINGUAS"; then + for lang in $INST_LINGUAS; do + CATALOGS="$CATALOGS $lang.gmo" + done + fi + test -n "$as_me" && echo "$as_me: creating $ac_dir/Makefile" || echo "creating $ac_dir/Makefile" + sed -e "/^POTFILES =/r $ac_dir/POTFILES" -e "/^# Makevars/r $ac_given_srcdir/$ac_dir/Makevars" -e "s|@POFILES@|$POFILES|g" -e "s|@UPDATEPOFILES@|$UPDATEPOFILES|g" -e "s|@DUMMYPOFILES@|$DUMMYPOFILES|g" -e "s|@GMOFILES@|$GMOFILES|g" -e "s|@CATALOGS@|$CATALOGS|g" -e "s|@POMAKEFILEDEPS@|$POMAKEFILEDEPS|g" "$ac_dir/Makefile.in" > "$ac_dir/Makefile" + for f in "$ac_given_srcdir/$ac_dir"/Rules-*; do + if test -f "$f"; then + case "$f" in + *.orig | *.bak | *~) ;; + *) cat "$f" >> "$ac_dir/Makefile" ;; + esac + fi + done + fi + ;; + esac + done]], + [# Capture the value of obsolete ALL_LINGUAS because we need it to compute + # POFILES, UPDATEPOFILES, DUMMYPOFILES, GMOFILES, CATALOGS. + OBSOLETE_ALL_LINGUAS="$ALL_LINGUAS" + # Capture the value of LINGUAS because we need it to compute CATALOGS. + LINGUAS="${LINGUAS-%UNSET%}" + ]) +]) + +dnl Postprocesses a Makefile in a directory containing PO files. +AC_DEFUN([AM_POSTPROCESS_PO_MAKEFILE], +[ + # When this code is run, in config.status, two variables have already been + # set: + # - OBSOLETE_ALL_LINGUAS is the value of LINGUAS set in configure.in, + # - LINGUAS is the value of the environment variable LINGUAS at configure + # time. + +changequote(,)dnl + # Adjust a relative srcdir. + ac_dir=`echo "$ac_file"|sed 's%/[^/][^/]*$%%'` + ac_dir_suffix=/`echo "$ac_dir"|sed 's%^\./%%'` + ac_dots=`echo "$ac_dir_suffix"|sed 's%/[^/]*%../%g'` + # In autoconf-2.13 it is called $ac_given_srcdir. 
+ # In autoconf-2.50 it is called $srcdir. + test -n "$ac_given_srcdir" || ac_given_srcdir="$srcdir" + case "$ac_given_srcdir" in + .) top_srcdir=`echo $ac_dots|sed 's%/$%%'` ;; + /*) top_srcdir="$ac_given_srcdir" ;; + *) top_srcdir="$ac_dots$ac_given_srcdir" ;; + esac + + # Find a way to echo strings without interpreting backslash. + if test "X`(echo '\t') 2>/dev/null`" = 'X\t'; then + gt_echo='echo' + else + if test "X`(printf '%s\n' '\t') 2>/dev/null`" = 'X\t'; then + gt_echo='printf %s\n' + else + echo_func () { + cat < "$ac_file.tmp" + tab=`printf '\t'` + if grep -l '@TCLCATALOGS@' "$ac_file" > /dev/null; then + # Add dependencies that cannot be formulated as a simple suffix rule. + for lang in $ALL_LINGUAS; do + frobbedlang=`echo $lang | sed -e 's/\..*$//' -e 'y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/'` + cat >> "$ac_file.tmp" < /dev/null; then + # Add dependencies that cannot be formulated as a simple suffix rule. + for lang in $ALL_LINGUAS; do + frobbedlang=`echo $lang | sed -e 's/_/-/g' -e 's/^sr-CS/sr-SP/' -e 's/@latin$/-Latn/' -e 's/@cyrillic$/-Cyrl/' -e 's/^sr-SP$/sr-SP-Latn/' -e 's/^uz-UZ$/uz-UZ-Latn/'` + cat >> "$ac_file.tmp" <> "$ac_file.tmp" <, 1996. + +AC_PREREQ([2.50]) + +# Search path for a program which passes the given test. + +dnl AM_PATH_PROG_WITH_TEST(VARIABLE, PROG-TO-CHECK-FOR, +dnl TEST-PERFORMED-ON-FOUND_PROGRAM [, VALUE-IF-NOT-FOUND [, PATH]]) +AC_DEFUN([AM_PATH_PROG_WITH_TEST], +[ +# Prepare PATH_SEPARATOR. +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which + # contains only /bin. Note that ksh looks also at the FPATH variable, + # so we have to set that as well for the test. + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ + && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ + || PATH_SEPARATOR=';' + } +fi + +# Find out how to test for executable files. Don't use a zero-byte file, +# as systems may use methods other than mode bits to determine executability. +cat >conf$$.file <<_ASEOF +#! /bin/sh +exit 0 +_ASEOF +chmod +x conf$$.file +if test -x conf$$.file >/dev/null 2>&1; then + ac_executable_p="test -x" +else + ac_executable_p="test -f" +fi +rm -f conf$$.file + +# Extract the first word of "$2", so it can be a program name with args. +set dummy $2; ac_word=[$]2 +AC_MSG_CHECKING([for $ac_word]) +AC_CACHE_VAL([ac_cv_path_$1], +[case "[$]$1" in + [[\\/]]* | ?:[[\\/]]*) + ac_cv_path_$1="[$]$1" # Let the user override the test with a path. + ;; + *) + ac_save_IFS="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in ifelse([$5], , $PATH, [$5]); do + IFS="$ac_save_IFS" + test -z "$ac_dir" && ac_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if $ac_executable_p "$ac_dir/$ac_word$ac_exec_ext"; then + echo "$as_me: trying $ac_dir/$ac_word..." >&AS_MESSAGE_LOG_FD + if [$3]; then + ac_cv_path_$1="$ac_dir/$ac_word$ac_exec_ext" + break 2 + fi + fi + done + done + IFS="$ac_save_IFS" +dnl If no 4th arg is given, leave the cache variable unset, +dnl so AC_PATH_PROGS will keep looking. 
+ifelse([$4], , , [ test -z "[$]ac_cv_path_$1" && ac_cv_path_$1="$4" +])dnl + ;; +esac])dnl +$1="$ac_cv_path_$1" +if test ifelse([$4], , [-n "[$]$1"], ["[$]$1" != "$4"]); then + AC_MSG_RESULT([$][$1]) +else + AC_MSG_RESULT([no]) +fi +AC_SUBST([$1])dnl +]) diff --git a/poky/meta/recipes-core/gettext/gettext-minimal-0.21/config.rpath b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/config.rpath new file mode 100755 index 000000000..24be79cfb --- /dev/null +++ b/poky/meta/recipes-core/gettext/gettext-minimal-0.21/config.rpath @@ -0,0 +1,684 @@ +#! /bin/sh +# Output a system dependent set of variables, describing how to set the +# run time search path of shared libraries in an executable. +# +# Copyright 1996-2020 Free Software Foundation, Inc. +# Taken from GNU libtool, 2001 +# Originally by Gordon Matzigkeit , 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. +# +# The first argument passed to this file is the canonical host specification, +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# The environment variables CC, GCC, LDFLAGS, LD, with_gnu_ld +# should be set by the caller. +# +# The set of defined variables is at the end of this script. + +# Known limitations: +# - On IRIX 6.5 with CC="cc", the run time search patch must not be longer +# than 256 bytes, otherwise the compiler driver will dump core. The only +# known workaround is to choose shorter directory names for the build +# directory and/or the installation directory. + +# All known linkers require a '.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a +shrext=.so + +host="$1" +host_cpu=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\1/'` +host_vendor=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\2/'` +host_os=`echo "$host" | sed 's/^\([^-]*\)-\([^-]*\)-\(.*\)$/\3/'` + +# Code taken from libtool.m4's _LT_CC_BASENAME. + +for cc_temp in $CC""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`echo "$cc_temp" | sed -e 's%^.*/%%'` + +# Code taken from libtool.m4's _LT_COMPILER_PIC. 
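For orientation: config.rpath is not run by make; the gettext macros (AC_LIB_RPATH in lib-link.m4) invoke it once at configure time with the environment described in the script's header, then source its output, which is a list of acl_cv_* shell assignments. Roughly as below; the host triple and temporary file name are only illustrative assumptions, not taken from this patch:

    CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \
      /bin/sh ./config.rpath x86_64-poky-linux > conftest.sh
    . ./conftest.sh      # now acl_cv_wl, acl_cv_libext, acl_cv_shlibext,
                         # acl_cv_hardcode_libdir_flag_spec, acl_cv_hardcode_direct,
                         # acl_cv_hardcode_minus_L, acl_cv_library_names_spec are set
    rm -f ./conftest.sh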
+ +wl= +if test "$GCC" = yes; then + wl='-Wl,' +else + case "$host_os" in + aix*) + wl='-Wl,' + ;; + mingw* | cygwin* | pw32* | os2* | cegcc*) + ;; + hpux9* | hpux10* | hpux11*) + wl='-Wl,' + ;; + irix5* | irix6* | nonstopux*) + wl='-Wl,' + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + ecc*) + wl='-Wl,' + ;; + icc* | ifort*) + wl='-Wl,' + ;; + lf95*) + wl='-Wl,' + ;; + nagfor*) + wl='-Wl,-Wl,,' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + wl='-Wl,' + ;; + ccc*) + wl='-Wl,' + ;; + xl* | bgxl* | bgf* | mpixl*) + wl='-Wl,' + ;; + como) + wl='-lopt=' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ F* | *Sun*Fortran*) + wl= + ;; + *Sun\ C*) + wl='-Wl,' + ;; + esac + ;; + esac + ;; + newsos6) + ;; + *nto* | *qnx*) + ;; + osf3* | osf4* | osf5*) + wl='-Wl,' + ;; + rdos*) + ;; + solaris*) + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + wl='-Qoption ld ' + ;; + *) + wl='-Wl,' + ;; + esac + ;; + sunos4*) + wl='-Qoption ld ' + ;; + sysv4 | sysv4.2uw2* | sysv4.3*) + wl='-Wl,' + ;; + sysv4*MP*) + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + wl='-Wl,' + ;; + unicos*) + wl='-Wl,' + ;; + uts4*) + ;; + esac +fi + +# Code taken from libtool.m4's _LT_LINKER_SHLIBS. + +hardcode_libdir_flag_spec= +hardcode_libdir_separator= +hardcode_direct=no +hardcode_minus_L=no + +case "$host_os" in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; +esac + +ld_shlibs=yes +if test "$with_gnu_ld" = yes; then + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + # Unlike libtool, we use -rpath here, not --rpath, since the documented + # option of GNU ld is called -rpath, not --rpath. + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + case "$host_os" in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + fi + ;; + amigaos*) + case "$host_cpu" in + powerpc) + ;; + m68k) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + beos*) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + cygwin* | mingw* | pw32* | cegcc*) + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + if $LD --help 2>&1 | grep 'auto-import' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + haiku*) + ;; + interix[3-9]*) + hardcode_direct=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + netbsd*) + ;; + solaris*) + if $LD -v 2>&1 | grep 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + elif $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-rpath,$libdir`' + else + ld_shlibs=no + fi + ;; + esac + ;; + sunos4*) + hardcode_direct=yes + ;; + *) + if $LD --help 2>&1 | grep ': supported targets:.* elf' > /dev/null; then + : + else + ld_shlibs=no + fi + ;; + esac + if test "$ld_shlibs" = no; then + hardcode_libdir_flag_spec= + fi +else + case "$host_os" in + aix3*) + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + else + aix_use_runtimelinking=no + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + fi + hardcode_direct=yes + hardcode_libdir_separator=':' + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && \ + strings "$collect2name" | grep resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + fi + # Begin _LT_AC_SYS_LIBPATH_AIX. + echo 'int main () { return 0; }' > conftest.c + ${CC} ${LDFLAGS} conftest.c -o conftest + aix_libpath=`dump -H conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` + if test -z "$aix_libpath"; then + aix_libpath=`dump -HX64 conftest 2>/dev/null | sed -n -e '/Import File Strings/,/^$/ { /^0/ { s/^0 *\(.*\)$/\1/; p; } +}'` + fi + if test -z "$aix_libpath"; then + aix_libpath="/usr/lib:/lib" + fi + rm -f conftest.c conftest + # End _LT_AC_SYS_LIBPATH_AIX. 
+ if test "$aix_use_runtimelinking" = yes; then + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + else + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + fi + fi + ;; + amigaos*) + case "$host_cpu" in + powerpc) + ;; + m68k) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + bsdi[45]*) + ;; + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec=' ' + libext=lib + ;; + darwin* | rhapsody*) + hardcode_direct=no + if { case $cc_basename in ifort*) true;; *) test "$GCC" = yes;; esac; }; then + : + else + ld_shlibs=no + fi + ;; + dgux*) + hardcode_libdir_flag_spec='-L$libdir' + ;; + freebsd2.[01]*) + hardcode_direct=yes + hardcode_minus_L=yes + ;; + freebsd* | dragonfly*) + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + ;; + hpux9*) + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + hpux10*) + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + hpux11*) + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + ;; + *) + hardcode_direct=yes + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + irix5* | irix6* | nonstopux*) + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + netbsd*) + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + ;; + newsos6) + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + *nto* | *qnx*) + ;; + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + else + case "$host_os" in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + osf3*) + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + osf4* | osf5*) + if test "$GCC" = yes; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + # Both cc and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + hardcode_libdir_separator=: + ;; + solaris*) + hardcode_libdir_flag_spec='-R$libdir' + ;; + sunos4*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + ;; + sysv4) + case $host_vendor in + sni) + hardcode_direct=yes # is this really true??? 
+ ;; + siemens) + hardcode_direct=no + ;; + motorola) + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + ;; + sysv4.3*) + ;; + sysv4*MP*) + if test -d /usr/nec; then + ld_shlibs=yes + fi + ;; + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + ;; + sysv5* | sco3.2v5* | sco5v6*) + hardcode_libdir_flag_spec='`test -z "$SCOABSPATH" && echo ${wl}-R,$libdir`' + hardcode_libdir_separator=':' + ;; + uts4*) + hardcode_libdir_flag_spec='-L$libdir' + ;; + *) + ld_shlibs=no + ;; + esac +fi + +# Check dynamic linker characteristics +# Code taken from libtool.m4's _LT_SYS_DYNAMIC_LINKER. +# Unlike libtool.m4, here we don't care about _all_ names of the library, but +# only about the one the linker finds when passed -lNAME. This is the last +# element of library_names_spec in libtool.m4, or possibly two of them if the +# linker has special search rules. +library_names_spec= # the last element of library_names_spec in libtool.m4 +libname_spec='lib$name' +case "$host_os" in + aix3*) + library_names_spec='$libname.a' + ;; + aix[4-9]*) + library_names_spec='$libname$shrext' + ;; + amigaos*) + case "$host_cpu" in + powerpc*) + library_names_spec='$libname$shrext' ;; + m68k) + library_names_spec='$libname.a' ;; + esac + ;; + beos*) + library_names_spec='$libname$shrext' + ;; + bsdi[45]*) + library_names_spec='$libname$shrext' + ;; + cygwin* | mingw* | pw32* | cegcc*) + shrext=.dll + library_names_spec='$libname.dll.a $libname.lib' + ;; + darwin* | rhapsody*) + shrext=.dylib + library_names_spec='$libname$shrext' + ;; + dgux*) + library_names_spec='$libname$shrext' + ;; + freebsd[23].*) + library_names_spec='$libname$shrext$versuffix' + ;; + freebsd* | dragonfly*) + library_names_spec='$libname$shrext' + ;; + gnu*) + library_names_spec='$libname$shrext' + ;; + haiku*) + library_names_spec='$libname$shrext' + ;; + hpux9* | hpux10* | hpux11*) + case $host_cpu in + ia64*) + shrext=.so + ;; + hppa*64*) + shrext=.sl + ;; + *) + shrext=.sl + ;; + esac + library_names_spec='$libname$shrext' + ;; + interix[3-9]*) + library_names_spec='$libname$shrext' + ;; + irix5* | irix6* | nonstopux*) + library_names_spec='$libname$shrext' + case "$host_os" in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= ;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 ;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 ;; + *) libsuff= shlibsuff= ;; + esac + ;; + esac + ;; + linux*oldld* | linux*aout* | linux*coff*) + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + library_names_spec='$libname$shrext' + ;; + knetbsd*-gnu) + library_names_spec='$libname$shrext' + ;; + netbsd*) + library_names_spec='$libname$shrext' + ;; + newsos6) + library_names_spec='$libname$shrext' + ;; + *nto* | *qnx*) + library_names_spec='$libname$shrext' + ;; + openbsd*) + library_names_spec='$libname$shrext$versuffix' + ;; + os2*) + libname_spec='$name' + shrext=.dll + library_names_spec='$libname.a' + ;; + osf3* | osf4* | osf5*) + library_names_spec='$libname$shrext' + ;; + rdos*) + ;; + solaris*) + library_names_spec='$libname$shrext' + ;; + sunos4*) + library_names_spec='$libname$shrext$versuffix' + ;; + sysv4 | sysv4.3*) + library_names_spec='$libname$shrext' + ;; + sysv4*MP*) + library_names_spec='$libname$shrext' + ;; + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + library_names_spec='$libname$shrext' + ;; + tpf*) + 
library_names_spec='$libname$shrext' + ;; + uts4*) + library_names_spec='$libname$shrext' + ;; +esac + +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' +escaped_wl=`echo "X$wl" | sed -e 's/^X//' -e "$sed_quote_subst"` +shlibext=`echo "$shrext" | sed -e 's,^\.,,'` +escaped_libname_spec=`echo "X$libname_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` +escaped_library_names_spec=`echo "X$library_names_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` +escaped_hardcode_libdir_flag_spec=`echo "X$hardcode_libdir_flag_spec" | sed -e 's/^X//' -e "$sed_quote_subst"` + +LC_ALL=C sed -e 's/^\([a-zA-Z0-9_]*\)=/acl_cv_\1=/' < found - files = collections.OrderedDict() - for path in d.getVar("FILESPATH").split(":"): - for element in sitedata: - filename = os.path.normpath(os.path.join(path, "meson.cross.d", element)) - files[filename.replace(corebase, "${COREBASE}")] = os.path.exists(filename) - - items = ["--cross-file=" + k for k,v in files.items() if v] - d.appendVar("EXTRA_OEMESON", " " + " ".join(items)) - items = ["%s:%s" % (k, "True" if v else "False") for k,v in files.items()] - d.appendVarFlag("do_configure", "file-checksums", " " + " ".join(items)) - -python () { - find_meson_cross_files(d) -} diff --git a/poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.5.bb b/poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.5.bb new file mode 100644 index 000000000..a1233e692 --- /dev/null +++ b/poky/meta/recipes-core/glib-2.0/glib-2.0_2.64.5.bb @@ -0,0 +1,49 @@ +require glib.inc + +PE = "1" + +SHRT_VER = "${@oe.utils.trim_version("${PV}", 2)}" + +SRC_URI = "${GNOME_MIRROR}/glib/${SHRT_VER}/glib-${PV}.tar.xz \ + file://run-ptest \ + file://0001-Fix-DATADIRNAME-on-uclibc-Linux.patch \ + file://Enable-more-tests-while-cross-compiling.patch \ + file://0001-Remove-the-warning-about-deprecated-paths-in-schemas.patch \ + file://0001-Install-gio-querymodules-as-libexec_PROGRAM.patch \ + file://0001-Do-not-ignore-return-value-of-write.patch \ + file://0010-Do-not-hardcode-python-path-into-various-tools.patch \ + file://0001-Set-host_machine-correctly-when-building-with-mingw3.patch \ + file://0001-Do-not-write-bindir-into-pkg-config-files.patch \ + file://0001-meson-Run-atomics-test-on-clang-as-well.patch \ + file://0001-gio-tests-resources.c-comment-out-a-build-host-only-.patch \ + " + +SRC_URI_append_class-native = " file://relocate-modules.patch" + +SRC_URI[sha256sum] = "9cbd5bd2715ead1c28d53c46f7b7b6ff6166f5887b772c1a9e3bf2910cfecc11" + +# Find any meson cross files in FILESPATH that are relevant for the current +# build (using siteinfo) and add them to EXTRA_OEMESON. 
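For orientation: the comment above introduces the find_meson_cross_files() helper that follows. Its effect is easiest to see from the variables it touches; the layer path and machine name below are purely illustrative assumptions, not taken from this recipe:

    # with an in-tree layer shipping .../glib-2.0/meson.cross.d/armv7a in FILESPATH:
    #   EXTRA_OEMESON gains          " --cross-file=${COREBASE}/.../meson.cross.d/armv7a"
    #   do_configure[file-checksums] gains " ${COREBASE}/.../meson.cross.d/armv7a:True"
    # candidate paths that do not exist are still recorded with ":False", so
    # do_configure is re-run if such a cross file appears in a later build.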
+inherit siteinfo +def find_meson_cross_files(d): + if bb.data.inherits_class('native', d): + return "" + + corebase = d.getVar("COREBASE") + import collections + sitedata = siteinfo_data(d) + # filename -> found + files = collections.OrderedDict() + for path in d.getVar("FILESPATH").split(":"): + for element in sitedata: + filename = os.path.normpath(os.path.join(path, "meson.cross.d", element)) + files[filename.replace(corebase, "${COREBASE}")] = os.path.exists(filename) + + items = ["--cross-file=" + k for k,v in files.items() if v] + d.appendVar("EXTRA_OEMESON", " " + " ".join(items)) + items = ["%s:%s" % (k, "True" if v else "False") for k,v in files.items()] + d.appendVarFlag("do_configure", "file-checksums", " " + " ".join(items)) + +python () { + find_meson_cross_files(d) +} diff --git a/poky/meta/recipes-core/glib-networking/glib-networking/run-ptest b/poky/meta/recipes-core/glib-networking/glib-networking/run-ptest new file mode 100644 index 000000000..6d520f94f --- /dev/null +++ b/poky/meta/recipes-core/glib-networking/glib-networking/run-ptest @@ -0,0 +1,3 @@ +#! /bin/sh + +gnome-desktop-testing-runner glib-networking diff --git a/poky/meta/recipes-core/glib-networking/glib-networking_2.64.3.bb b/poky/meta/recipes-core/glib-networking/glib-networking_2.64.3.bb index c34ae508f..074389f55 100644 --- a/poky/meta/recipes-core/glib-networking/glib-networking_2.64.3.bb +++ b/poky/meta/recipes-core/glib-networking/glib-networking_2.64.3.bb @@ -12,16 +12,19 @@ DEPENDS = "glib-2.0" SRC_URI[archive.md5sum] = "eb382907ec941fe2fb1a9676b75acf7a" SRC_URI[archive.sha256sum] = "937a06b124052813bfc0b0b86bff42016ff01067582e1aca65bb6dbe0845a168" -PACKAGECONFIG ??= "gnutls" +PACKAGECONFIG ??= "gnutls ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}" PACKAGECONFIG[gnutls] = "-Dgnutls=enabled,-Dgnutls=disabled,gnutls" PACKAGECONFIG[openssl] = "-Dopenssl=enabled,-Dopenssl=disabled,openssl" PACKAGECONFIG[libproxy] = "-Dlibproxy=enabled,-Dlibproxy=disabled,libproxy" +PACKAGECONFIG[tests] = "-Dinstalled_tests=true,-Dinstalled_tests=false" EXTRA_OEMESON = "-Dgnome_proxy=disabled" GNOMEBASEBUILDCLASS = "meson" -inherit gnomebase gettext upstream-version-is-even gio-module-cache +inherit gnomebase gettext upstream-version-is-even gio-module-cache ptest-gnome + +SRC_URI += "file://run-ptest" FILES_${PN} += "\ ${libdir}/gio/modules/libgio*.so \ diff --git a/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb b/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb index c9367dd0c..134e1da99 100644 --- a/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb +++ b/poky/meta/recipes-core/images/build-appliance-image_15.0.0.bb @@ -6,7 +6,7 @@ LICENSE = "MIT" LIC_FILES_CHKSUM = "file://${COREBASE}/meta/COPYING.MIT;md5=3da9cfbcb788c80a0384361b4de20420" IMAGE_INSTALL = "packagegroup-core-boot packagegroup-core-ssh-openssh packagegroup-self-hosted \ - kernel-dev kernel-devsrc connman connman-plugin-ethernet dhcp-client \ + kernel-dev kernel-devsrc connman connman-plugin-ethernet dhcpcd \ tzdata python3-pip perl-misc" IMAGE_FEATURES += "x11-base package-management splash" @@ -24,7 +24,7 @@ IMAGE_FSTYPES = "wic.vmdk" inherit core-image module-base setuptools3 -SRCREV ?= "33903932ad87bfa8e8bf7baf2256781714993b79" +SRCREV ?= "e9f2c011573ec906756e5e2b767b36c4f9795623" SRC_URI = "git://git.yoctoproject.org/poky \ file://Yocto_Build_Appliance.vmx \ file://Yocto_Build_Appliance.vmxf \ diff --git a/poky/meta/recipes-core/images/core-image-minimal-initramfs.bb 
b/poky/meta/recipes-core/images/core-image-minimal-initramfs.bb index 83d0eaa8d..664fe7310 100644 --- a/poky/meta/recipes-core/images/core-image-minimal-initramfs.bb +++ b/poky/meta/recipes-core/images/core-image-minimal-initramfs.bb @@ -17,6 +17,7 @@ PACKAGE_INSTALL = "${INITRAMFS_SCRIPTS} ${VIRTUAL-RUNTIME_base-utils} udev base- IMAGE_FEATURES = "" export IMAGE_BASENAME = "${MLPREFIX}core-image-minimal-initramfs" +IMAGE_NAME_SUFFIX ?= "" IMAGE_LINGUAS = "" LICENSE = "MIT" diff --git a/poky/meta/recipes-core/images/core-image-tiny-initramfs.bb b/poky/meta/recipes-core/images/core-image-tiny-initramfs.bb index 0eca6d994..584990074 100644 --- a/poky/meta/recipes-core/images/core-image-tiny-initramfs.bb +++ b/poky/meta/recipes-core/images/core-image-tiny-initramfs.bb @@ -13,6 +13,7 @@ PACKAGE_INSTALL = "initramfs-live-boot-tiny packagegroup-core-boot dropbear ${VI IMAGE_FEATURES = "" export IMAGE_BASENAME = "core-image-tiny-initramfs" +IMAGE_NAME_SUFFIX ?= "" IMAGE_LINGUAS = "" LICENSE = "MIT" diff --git a/poky/meta/recipes-core/initrdscripts/initramfs-framework/init b/poky/meta/recipes-core/initrdscripts/initramfs-framework/init index c71ce0ce8..567694aff 100755 --- a/poky/meta/recipes-core/initrdscripts/initramfs-framework/init +++ b/poky/meta/recipes-core/initrdscripts/initramfs-framework/init @@ -88,12 +88,25 @@ fi # populate bootparam environment for p in `cat /proc/cmdline`; do + if [ -n "$quoted" ]; then + value="$value $p" + if [ "`echo $p | sed -e 's/\"$//'`" != "$p" ]; then + eval "bootparam_${quoted}=${value}" + unset quoted + fi + continue + fi + opt=`echo $p | cut -d'=' -f1` opt=`echo $opt | sed -e 'y/.-/__/'` if [ "`echo $p | cut -d'=' -f1`" = "$p" ]; then eval "bootparam_${opt}=true" else value="`echo $p | cut -d'=' -f2-`" + if [ "`echo $value | sed -e 's/^\"//'`" != "$value" ]; then + quoted=${opt} + continue + fi eval "bootparam_${opt}=\"${value}\"" fi done diff --git a/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.16.bb b/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.16.bb deleted file mode 100644 index ba74eb1f9..000000000 --- a/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.16.bb +++ /dev/null @@ -1,18 +0,0 @@ -# -# This provides libcrypto.so.1 which contains obsolete APIs, needed for uninative in particular -# - -require libxcrypt.inc - -PROVIDES = "" -AUTO_LIBNAME_PKGS = "" -EXCLUDE_FROM_WORLD = "1" - -API = "--enable-obsolete-api" - -do_install_append () { - rm -rf ${D}${includedir} - rm -rf ${D}${libdir}/pkgconfig - rm -rf ${D}${datadir} -} - diff --git a/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.17.bb b/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.17.bb new file mode 100644 index 000000000..ba74eb1f9 --- /dev/null +++ b/poky/meta/recipes-core/libxcrypt/libxcrypt-compat_4.4.17.bb @@ -0,0 +1,18 @@ +# +# This provides libcrypto.so.1 which contains obsolete APIs, needed for uninative in particular +# + +require libxcrypt.inc + +PROVIDES = "" +AUTO_LIBNAME_PKGS = "" +EXCLUDE_FROM_WORLD = "1" + +API = "--enable-obsolete-api" + +do_install_append () { + rm -rf ${D}${includedir} + rm -rf ${D}${libdir}/pkgconfig + rm -rf ${D}${datadir} +} + diff --git a/poky/meta/recipes-core/libxcrypt/libxcrypt.inc b/poky/meta/recipes-core/libxcrypt/libxcrypt.inc index da7607aef..104a2af4d 100644 --- a/poky/meta/recipes-core/libxcrypt/libxcrypt.inc +++ b/poky/meta/recipes-core/libxcrypt/libxcrypt.inc @@ -3,14 +3,14 @@ DESCRIPTION = "Forked code from glibc libary to extract only crypto part." 
HOMEPAGE = "https://github.com/besser82/libxcrypt" SECTION = "libs" LICENSE = "LGPLv2.1" -LIC_FILES_CHKSUM ?= "file://LICENSING;md5=3bb6614cf5880cbf1b9dbd9e3d145e2c \ - file://COPYING.LIB;md5=4fbd65380cdd255951079008b364516c \ -" +LIC_FILES_CHKSUM = "file://LICENSING;md5=102923b6e1b02a85c5a1203fa87d151d \ + file://COPYING.LIB;md5=4fbd65380cdd255951079008b364516c \ + " inherit autotools pkgconfig SRC_URI = "git://github.com/besser82/libxcrypt.git;branch=${SRCBRANCH}" -SRCREV = "4ffa4d38396c334a1e002427c22281b047a3d6a6" +SRCREV = "6b110bcd4f4caa61fc39c7339d30adc20a7dd177" SRCBRANCH ?= "develop" PROVIDES = "virtual/crypt" diff --git a/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.16.bb b/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.16.bb deleted file mode 100644 index 79dba2f6d..000000000 --- a/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.16.bb +++ /dev/null @@ -1,2 +0,0 @@ -require libxcrypt.inc - diff --git a/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.17.bb b/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.17.bb new file mode 100644 index 000000000..79dba2f6d --- /dev/null +++ b/poky/meta/recipes-core/libxcrypt/libxcrypt_4.4.17.bb @@ -0,0 +1,2 @@ +require libxcrypt.inc + diff --git a/poky/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch b/poky/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch new file mode 100644 index 000000000..822434666 --- /dev/null +++ b/poky/meta/recipes-core/libxml/libxml2/CVE-2020-24977.patch @@ -0,0 +1,41 @@ +From 50f06b3efb638efb0abd95dc62dca05ae67882c2 Mon Sep 17 00:00:00 2001 +From: Nick Wellnhofer +Date: Fri, 7 Aug 2020 21:54:27 +0200 +Subject: [PATCH] Fix out-of-bounds read with 'xmllint --htmlout' + +Make sure that truncated UTF-8 sequences don't cause an out-of-bounds +array access. + +Thanks to @SuhwanSong and the Agency for Defense Development (ADD) for +the report. + +Fixes #178. + +CVE: CVE-2020-24977 +Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/libxml2/-/commit/50f06b3efb638efb0abd95dc62dca05ae67882c2] + +Signed-off-by: Ovidiu Panait +--- + xmllint.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/xmllint.c b/xmllint.c +index f6a8e463..c647486f 100644 +--- a/xmllint.c ++++ b/xmllint.c +@@ -528,6 +528,12 @@ static void + xmlHTMLEncodeSend(void) { + char *result; + ++ /* ++ * xmlEncodeEntitiesReentrant assumes valid UTF-8, but the buffer might ++ * end with a truncated UTF-8 sequence. This is a hack to at least avoid ++ * an out-of-bounds read. 
++ */ ++ memset(&buffer[sizeof(buffer)-4], 0, 4); + result = (char *) xmlEncodeEntitiesReentrant(NULL, BAD_CAST buffer); + if (result) { + xmlGenericError(xmlGenericErrorContext, "%s", result); +-- +2.17.1 + diff --git a/poky/meta/recipes-core/libxml/libxml2_2.9.10.bb b/poky/meta/recipes-core/libxml/libxml2_2.9.10.bb index d11b083e8..90890ffae 100644 --- a/poky/meta/recipes-core/libxml/libxml2_2.9.10.bb +++ b/poky/meta/recipes-core/libxml/libxml2_2.9.10.bb @@ -22,6 +22,7 @@ SRC_URI = "http://www.xmlsoft.org/sources/libxml2-${PV}.tar.gz;name=libtar \ file://fix-execution-of-ptests.patch \ file://CVE-2020-7595.patch \ file://CVE-2019-20388.patch \ + file://CVE-2020-24977.patch \ " SRC_URI[libtar.md5sum] = "10942a1dc23137a8aa07f0639cbfece5" diff --git a/poky/meta/recipes-core/meta/buildtools-tarball.bb b/poky/meta/recipes-core/meta/buildtools-tarball.bb index 3785941c2..75b71f553 100644 --- a/poky/meta/recipes-core/meta/buildtools-tarball.bb +++ b/poky/meta/recipes-core/meta/buildtools-tarball.bb @@ -6,6 +6,7 @@ LICENSE = "MIT" TOOLCHAIN_TARGET_TASK ?= "" TOOLCHAIN_HOST_TASK ?= "\ + nativesdk-sdk-provides-dummy \ nativesdk-python3-core \ nativesdk-python3-modules \ nativesdk-python3-misc \ diff --git a/poky/meta/recipes-core/meta/cve-update-db-native.bb b/poky/meta/recipes-core/meta/cve-update-db-native.bb index 32d6dbdff..cf2b251e2 100644 --- a/poky/meta/recipes-core/meta/cve-update-db-native.bb +++ b/poky/meta/recipes-core/meta/cve-update-db-native.bb @@ -13,23 +13,17 @@ deltask do_install deltask do_populate_sysroot python () { - cve_check_db_file = d.getVar("CVE_CHECK_DB_FILE") - if not cve_check_db_file: + if not bb.data.inherits_class("cve-check", d): raise bb.parse.SkipRecipe("Skip recipe when cve-check class is not loaded.") - - if os.path.exists("%s-journal" % cve_check_db_file ): - os.remove("%s-journal" % cve_check_db_file) - - if os.path.exists(cve_check_db_file): - os.remove(cve_check_db_file) } -python do_populate_cve_db() { +python do_fetch() { """ Update NVD database with json data feed """ import bb.utils - import sqlite3, urllib, urllib.parse, shutil, gzip + import bb.progress + import sqlite3, urllib, urllib.parse, gzip from datetime import date bb.utils.export_proxies(d) @@ -39,20 +33,25 @@ python do_populate_cve_db() { db_file = d.getVar("CVE_CHECK_DB_FILE") db_dir = os.path.dirname(db_file) - json_tmpfile = os.path.join(db_dir, 'nvd.json.gz') + + if os.path.exists("{0}-journal".format(db_file)): + # If a journal is present the last update might have been interrupted. In that case, + # just wipe any leftovers and force the DB to be recreated. 
+ os.remove("{0}-journal".format(db_file)) + + if os.path.exists(db_file): + os.remove(db_file) # Don't refresh the database more than once an hour try: import time if time.time() - os.path.getmtime(db_file) < (60*60): + bb.debug(2, "Recently updated, skipping") return except OSError: pass - cve_f = open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') - - if not os.path.isdir(db_dir): - os.mkdir(db_dir) + bb.utils.mkdirhier(db_dir) # Connect to database conn = sqlite3.connect(db_file) @@ -60,56 +59,65 @@ python do_populate_cve_db() { initialize_db(c) - for year in range(YEAR_START, date.today().year + 1): - year_url = BASE_URL + str(year) - meta_url = year_url + ".meta" - json_url = year_url + ".json.gz" - - # Retrieve meta last modified date - try: - response = urllib.request.urlopen(meta_url) - except urllib.error.URLError as e: - cve_f.write('Warning: CVE db update error, Unable to fetch CVE data.\n\n') - bb.warn("Failed to fetch CVE data (%s)" % e.reason) - return - - if response: - for l in response.read().decode("utf-8").splitlines(): - key, value = l.split(":", 1) - if key == "lastModifiedDate": - last_modified = value - break - else: - bb.warn("Cannot parse CVE metadata, update failed") - return + with bb.progress.ProgressHandler(d) as ph, open(os.path.join(d.getVar("TMPDIR"), 'cve_check'), 'a') as cve_f: + total_years = date.today().year + 1 - YEAR_START + for i, year in enumerate(range(YEAR_START, date.today().year + 1)): + bb.debug(2, "Updating %d" % year) + ph.update((float(i + 1) / total_years) * 100) + year_url = BASE_URL + str(year) + meta_url = year_url + ".meta" + json_url = year_url + ".json.gz" - # Compare with current db last modified date - c.execute("select DATE from META where YEAR = ?", (year,)) - meta = c.fetchone() - if not meta or meta[0] != last_modified: - # Clear products table entries corresponding to current year - c.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,)) - - # Update db with current year json file + # Retrieve meta last modified date try: - response = urllib.request.urlopen(json_url) - if response: - update_db(c, gzip.decompress(response.read()).decode('utf-8')) - c.execute("insert or replace into META values (?, ?)", [year, last_modified]) + response = urllib.request.urlopen(meta_url) except urllib.error.URLError as e: - cve_f.write('Warning: CVE db update error, CVE data is outdated.\n\n') - bb.warn("Cannot parse CVE data (%s), update failed" % e.reason) + cve_f.write('Warning: CVE db update error, Unable to fetch CVE data.\n\n') + bb.warn("Failed to fetch CVE data (%s)" % e.reason) return - # Update success, set the date to cve_check file. 
- if year == date.today().year: - cve_f.write('CVE database update : %s\n\n' % date.today()) + if response: + for l in response.read().decode("utf-8").splitlines(): + key, value = l.split(":", 1) + if key == "lastModifiedDate": + last_modified = value + break + else: + bb.warn("Cannot parse CVE metadata, update failed") + return + + # Compare with current db last modified date + c.execute("select DATE from META where YEAR = ?", (year,)) + meta = c.fetchone() + if not meta or meta[0] != last_modified: + bb.debug(2, "Updating entries") + # Clear products table entries corresponding to current year + c.execute("delete from PRODUCTS where ID like ?", ('CVE-%d%%' % year,)) + + # Update db with current year json file + try: + response = urllib.request.urlopen(json_url) + if response: + update_db(c, gzip.decompress(response.read()).decode('utf-8')) + c.execute("insert or replace into META values (?, ?)", [year, last_modified]) + except urllib.error.URLError as e: + cve_f.write('Warning: CVE db update error, CVE data is outdated.\n\n') + bb.warn("Cannot parse CVE data (%s), update failed" % e.reason) + return + else: + bb.debug(2, "Already up to date (last modified %s)" % last_modified) + # Update success, set the date to cve_check file. + if year == date.today().year: + cve_f.write('CVE database update : %s\n\n' % date.today()) - cve_f.close() - conn.commit() - conn.close() + conn.commit() + conn.close() } +do_fetch[lockfiles] += "${CVE_CHECK_DB_FILE_LOCK}" +do_fetch[file-checksums] = "" +do_fetch[vardeps] = "" + def initialize_db(c): c.execute("CREATE TABLE IF NOT EXISTS META (YEAR INTEGER UNIQUE, DATE TEXT)") @@ -200,7 +208,6 @@ def update_db(c, jsondata): parse_node_and_insert(c, config, cveId) -addtask do_populate_cve_db before do_fetch -do_populate_cve_db[nostamp] = "1" +do_fetch[nostamp] = "1" EXCLUDE_FROM_WORLD = "1" diff --git a/poky/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb b/poky/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb index 29f4dd363..1d71f373a 100644 --- a/poky/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb +++ b/poky/meta/recipes-core/meta/nativesdk-sdk-provides-dummy.bb @@ -4,8 +4,8 @@ DUMMYPROVIDES_PACKAGES = "\ pkgconfig \ " -# Add /bin/sh? 
DUMMYPROVIDES = "\ + /bin/sh \ /bin/bash \ /usr/bin/env \ libGL.so()(64bit) \ diff --git a/poky/meta/recipes-core/meta/testexport-tarball.bb b/poky/meta/recipes-core/meta/testexport-tarball.bb index c38ac902a..daedd78cb 100644 --- a/poky/meta/recipes-core/meta/testexport-tarball.bb +++ b/poky/meta/recipes-core/meta/testexport-tarball.bb @@ -8,7 +8,7 @@ TEST_EXPORT_SDK_PACKAGES ??= "" TOOLCHAIN_TARGET_TASK ?= "" -TOOLCHAIN_HOST_TASK ?= "${TEST_EXPORT_SDK_PACKAGES}" +TOOLCHAIN_HOST_TASK ?= "${TEST_EXPORT_SDK_PACKAGES} nativesdk-sdk-provides-dummy" MULTIMACH_TARGET_SYS = "${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}" PACKAGE_ARCH = "${SDK_ARCH}_${SDK_OS}" diff --git a/poky/meta/recipes-core/meta/uninative-tarball.bb b/poky/meta/recipes-core/meta/uninative-tarball.bb index 39638eb8c..c4a6c96b4 100644 --- a/poky/meta/recipes-core/meta/uninative-tarball.bb +++ b/poky/meta/recipes-core/meta/uninative-tarball.bb @@ -18,6 +18,7 @@ TOOLCHAIN_HOST_TASK = "\ nativesdk-libxcrypt \ nativesdk-libxcrypt-compat \ nativesdk-libnss-nis \ + nativesdk-sdk-provides-dummy \ " INHIBIT_DEFAULT_DEPS = "1" diff --git a/poky/meta/recipes-core/musl/libucontext_git.bb b/poky/meta/recipes-core/musl/libucontext_git.bb index ec988f192..734ad9c95 100644 --- a/poky/meta/recipes-core/musl/libucontext_git.bb +++ b/poky/meta/recipes-core/musl/libucontext_git.bb @@ -43,6 +43,7 @@ def map_kernel_arch(a, d): elif re.match('p(pc|owerpc)', a): return 'ppc' elif re.match('p(pc64|owerpc64)', a): return 'ppc64' elif re.match('riscv64$', a): return 'riscv64' + elif re.match('riscv32$', a): return 'riscv32' else: if not d.getVar("TARGET_OS").startswith("linux"): return a diff --git a/poky/meta/recipes-core/musl/musl_git.bb b/poky/meta/recipes-core/musl/musl_git.bb index 51b19e89a..e72b05a36 100644 --- a/poky/meta/recipes-core/musl/musl_git.bb +++ b/poky/meta/recipes-core/musl/musl_git.bb @@ -4,7 +4,7 @@ require musl.inc inherit linuxloader -SRCREV = "73cc775bee53300c7cf759f37580220b18ac13d3" +SRCREV = "ffac0c229986725c0d0f3c806bafa7e3ca409f3b" BASEVER = "1.2.1" diff --git a/poky/meta/recipes-core/ncurses/files/config.cache b/poky/meta/recipes-core/ncurses/files/config.cache deleted file mode 100644 index 6a9217d5b..000000000 --- a/poky/meta/recipes-core/ncurses/files/config.cache +++ /dev/null @@ -1,4 +0,0 @@ -#! 
/bin/sh - -cf_cv_func_nanosleep=yes -cf_cv_func_mkstemp=yes diff --git a/poky/meta/recipes-core/ncurses/ncurses.inc b/poky/meta/recipes-core/ncurses/ncurses.inc index 4156bf4f7..1627fb91d 100644 --- a/poky/meta/recipes-core/ncurses/ncurses.inc +++ b/poky/meta/recipes-core/ncurses/ncurses.inc @@ -16,7 +16,8 @@ inherit autotools binconfig-disabled multilib_header pkgconfig SRC_URI = "git://salsa.debian.org/debian/ncurses.git;protocol=https" EXTRA_AUTORECONF = "-I m4" -CONFIG_SITE =+ "${WORKDIR}/config.cache" + +CACHED_CONFIGUREVARS = "cf_cv_func_nanosleep=yes" EXTRASITECONFIG = "CFLAGS='${CFLAGS} -I${SYSROOT_DESTDIR}${includedir}'" diff --git a/poky/meta/recipes-core/ncurses/ncurses_6.2.bb b/poky/meta/recipes-core/ncurses/ncurses_6.2.bb index 723e685a9..5c02db854 100644 --- a/poky/meta/recipes-core/ncurses/ncurses_6.2.bb +++ b/poky/meta/recipes-core/ncurses/ncurses_6.2.bb @@ -2,12 +2,11 @@ require ncurses.inc SRC_URI += "file://0001-tic-hang.patch \ file://0002-configure-reproducible.patch \ - file://config.cache \ " # commit id corresponds to the revision in package version SRCREV = "a669013cd5e9d6434e5301348ea51baf306c93c4" S = "${WORKDIR}/git" -EXTRA_OECONF += "--with-abi-version=5 --cache-file=${B}/config.cache" +EXTRA_OECONF += "--with-abi-version=5" UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+(\.\d+)+(\+\d+)*)" # This is needed when using patchlevel versions like 6.1+20181013 diff --git a/poky/meta/recipes-core/packagegroups/packagegroup-core-tools-profile.bb b/poky/meta/recipes-core/packagegroups/packagegroup-core-tools-profile.bb index 8aed1e845..17b1391a4 100644 --- a/poky/meta/recipes-core/packagegroups/packagegroup-core-tools-profile.bb +++ b/poky/meta/recipes-core/packagegroups/packagegroup-core-tools-profile.bb @@ -35,16 +35,9 @@ SYSTEMTAP_libc-musl = "" SYSTEMTAP_nios2 = "" SYSTEMTAP_riscv64 = "" -# lttng-ust uses sched_getcpu() which is not there on for some platforms. -LTTNGUST = "lttng-ust" -LTTNGUST_arc = "" - LTTNGTOOLS = "lttng-tools" LTTNGTOOLS_arc = "" -LTTNGMODULES = "lttng-modules" -LTTNGMODULES_arc = "" - BABELTRACE = "babeltrace" BABELTRACE2 = "babeltrace2" @@ -67,9 +60,7 @@ VALGRIND_linux-gnun32 = "" RDEPENDS_${PN} = "\ ${PROFILETOOLS} \ - ${LTTNGUST} \ ${LTTNGTOOLS} \ - ${LTTNGMODULES} \ ${BABELTRACE} \ ${BABELTRACE2} \ ${SYSTEMTAP} \ diff --git a/poky/meta/recipes-core/systemd/systemd-boot_246.1.bb b/poky/meta/recipes-core/systemd/systemd-boot_246.1.bb deleted file mode 100644 index f92c63981..000000000 --- a/poky/meta/recipes-core/systemd/systemd-boot_246.1.bb +++ /dev/null @@ -1,70 +0,0 @@ -require systemd.inc -FILESEXTRAPATHS =. "${FILE_DIRNAME}/systemd:" - -require conf/image-uefi.conf - -DEPENDS = "intltool-native libcap util-linux gnu-efi gperf-native" - -inherit meson pkgconfig gettext -inherit deploy - -LDFLAGS_prepend = "${@ " ".join(d.getVar('LD').split()[1:])} " - -do_write_config[vardeps] += "CC OBJCOPY" -do_write_config_append() { - cat >${WORKDIR}/meson-${PN}.cross <${WORKDIR}/meson-${PN}.cross </dev/null 2>&1; then - # TMPDIR=/mnt/.psplash psplash-write "PROGRESS $progress" || true - #fi - if [ -e /mnt/.psplash/psplash_fifo ]; then - echo "PROGRESS $progress" > /mnt/.psplash/psplash_fifo + if type psplash-write >/dev/null 2>&1; then + PSPLASH_FIFO_DIR=/mnt/.psplash psplash-write "PROGRESS $progress" || true fi } @@ -176,7 +173,7 @@ startup() { #Uncomment to cause psplash to exit manually, otherwise it exits when it sees a VC switch if [ "x$runlevel" != "xS" ] && [ ! 
-x /etc/rc${runlevel}.d/S??xserver-nodm ]; then if type psplash-write >/dev/null 2>&1; then - TMPDIR=/mnt/.psplash psplash-write "QUIT" || true + PSPLASH_FIFO_DIR=/mnt/.psplash psplash-write "QUIT" || true umount -l /mnt/.psplash fi fi diff --git a/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb b/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb index 80ab9d70e..98916f7f1 100644 --- a/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb +++ b/poky/meta/recipes-core/sysvinit/sysvinit_2.97.bb @@ -24,7 +24,6 @@ SRC_URI = "${SAVANNAH_GNU_MIRROR}/sysvinit/sysvinit-${PV}.tar.xz \ SRC_URI[sha256sum] = "2d5996857519bfd8634d2e1debabb3238fb38440f65fbfdc46420ee8bdf25110" S = "${WORKDIR}/sysvinit-${PV}" -B = "${S}/src" inherit update-alternatives features_check DEPENDS_append = " update-rc.d-native base-passwd virtual/crypt" diff --git a/poky/meta/recipes-core/util-linux/util-linux.inc b/poky/meta/recipes-core/util-linux/util-linux.inc deleted file mode 100644 index 06fbf7a22..000000000 --- a/poky/meta/recipes-core/util-linux/util-linux.inc +++ /dev/null @@ -1,348 +0,0 @@ -SUMMARY = "A suite of basic system administration utilities" -HOMEPAGE = "http://userweb.kernel.org/~kzak/util-linux/" -DESCRIPTION = "Util-linux includes a suite of basic system administration utilities \ -commonly found on most Linux systems. Some of the more important utilities include \ -disk partitioning, kernel message management, filesystem creation, and system login." - -SECTION = "base" - -LICENSE = "GPLv2+ & LGPLv2.1+ & BSD-3-Clause & BSD-4-Clause" -LICENSE_${PN}-libblkid = "LGPLv2.1+" -LICENSE_${PN}-libfdisk = "LGPLv2.1+" -LICENSE_${PN}-libmount = "LGPLv2.1+" -LICENSE_${PN}-libsmartcols = "LGPLv2.1+" -LICENSE_${PN}-libuuid = "BSD-3-Clause" - -LIC_FILES_CHKSUM = "file://README.licensing;md5=0fd5c050c6187d2bf0a4492b7f4e33da \ - file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://Documentation/licenses/COPYING.GPL-2.0-or-later;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://Documentation/licenses/COPYING.LGPL-2.1-or-later;md5=4fbd65380cdd255951079008b364516c \ - file://Documentation/licenses/COPYING.BSD-3-Clause;md5=58dcd8452651fc8b07d1f65ce07ca8af \ - file://Documentation/licenses/COPYING.BSD-4-Clause-UC;md5=263860f8968d8bafa5392cab74285262 \ - file://libuuid/COPYING;md5=6d2cafc999feb2c2de84d4d24b23290c \ - file://libmount/COPYING;md5=7c7e39fb7d70ffe5d693a643e29987c2 \ - file://libblkid/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ - file://libfdisk/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ - file://libsmartcols/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ -" - -#gtk-doc is not enabled as it requires xmlto which requires util-linux -inherit autotools gettext manpages pkgconfig systemd update-alternatives python3-dir bash-completion ptest -DEPENDS = "libcap-ng ncurses virtual/crypt zlib" - -MAJOR_VERSION = "${@'.'.join(d.getVar('PV').split('.')[0:2])}" -SRC_URI = "${KERNELORG_MIRROR}/linux/utils/${BPN}/v${MAJOR_VERSION}/${BP}.tar.xz \ - " - -PACKAGES =+ "${PN}-swaponoff" -PACKAGES += "${@bb.utils.contains('PACKAGECONFIG', 'pylibmount', '${PN}-pylibmount', '', d)}" - -python util_linux_binpackages () { - def pkg_hook(f, pkg, file_regex, output_pattern, modulename): - pn = d.getVar('PN') - d.appendVar('RRECOMMENDS_%s' % pn, ' %s' % pkg) - - if d.getVar('ALTERNATIVE_' + pkg): - return - if d.getVarFlag('ALTERNATIVE_LINK_NAME', modulename): - d.setVar('ALTERNATIVE_' + pkg, modulename) - - bindirs = sorted(list(set(d.expand("${base_sbindir} ${base_bindir} ${sbindir} ${bindir}").split()))) - for dir in bindirs: - 
do_split_packages(d, root=dir, - file_regex=r'(.*)', output_pattern='${PN}-%s', - description='${PN} %s', - hook=pkg_hook, extra_depends='') - - # There are some symlinks for some binaries which we have ignored - # above. Add them to the package owning the binary they are - # pointing to - extras = {} - dvar = d.getVar('PKGD') - for root in bindirs: - for walkroot, dirs, files in os.walk(dvar + root): - for f in files: - file = os.path.join(walkroot, f) - if not os.path.islink(file): - continue - - pkg = os.path.basename(os.readlink(file)) - extras[pkg] = extras.get(pkg, '') + ' ' + file.replace(dvar, '', 1) - - pn = d.getVar('PN') - for pkg, links in extras.items(): - of = d.getVar('FILES_' + pn + '-' + pkg) - links = of + links - d.setVar('FILES_' + pn + '-' + pkg, links) -} - -# we must execute before update-alternatives PACKAGE_PREPROCESS_FUNCS -PACKAGE_PREPROCESS_FUNCS =+ "util_linux_binpackages " - -python util_linux_libpackages() { - do_split_packages(d, root=d.getVar('UTIL_LINUX_LIBDIR'), file_regex=r'^lib(.*)\.so\..*$', - output_pattern='${PN}-lib%s', - description='${PN} lib%s', - extra_depends='', prepend=True, allow_links=True) -} - -PACKAGESPLITFUNCS =+ "util_linux_libpackages" - -PACKAGES_DYNAMIC = "^${PN}-.*" - -CACHED_CONFIGUREVARS += "scanf_cv_alloc_modifier=ms" -UTIL_LINUX_LIBDIR = "${libdir}" -UTIL_LINUX_LIBDIR_class-target = "${base_libdir}" -EXTRA_OECONF = "\ - --enable-libuuid --enable-libblkid \ - \ - --enable-fsck --enable-kill --enable-last --enable-mesg \ - --enable-mount --enable-partx --enable-raw --enable-rfkill \ - --enable-unshare --enable-write \ - \ - --disable-bfs --disable-chfn-chsh --disable-login \ - --disable-makeinstall-chown --disable-minix --disable-newgrp \ - --disable-use-tty-group --disable-vipw \ - \ - --without-udev \ - \ - usrsbin_execdir='${sbindir}' \ - --libdir='${UTIL_LINUX_LIBDIR}' \ -" - -EXTRA_OECONF_append_class-target = " --enable-setpriv" -EXTRA_OECONF_append_class-native = " --without-cap-ng --disable-setpriv" -EXTRA_OECONF_append_class-nativesdk = " --without-cap-ng --disable-setpriv" -EXTRA_OECONF_append = " --disable-hwclock-gplv3" - -# enable pcre2 for native/nativesdk to match host distros -# this helps to keep same expectations when using the SDK or -# build host versions during development -# -PACKAGECONFIG ?= "pcre2" -PACKAGECONFIG_class-target ?= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" -# inherit manpages requires this to be present, however util-linux does not have -# configuration options, and installs manpages always -PACKAGECONFIG[manpages] = "" -PACKAGECONFIG[pam] = "--enable-su --enable-runuser,--disable-su --disable-runuser, libpam," -# Respect the systemd feature for uuidd -PACKAGECONFIG[systemd] = "--with-systemd --with-systemdsystemunitdir=${systemd_system_unitdir}, --without-systemd --without-systemdsystemunitdir,systemd" -# Build python bindings for libmount -PACKAGECONFIG[pylibmount] = "--with-python=3 --enable-pylibmount,--without-python --disable-pylibmount,python3" -# Readline support -PACKAGECONFIG[readline] = "--with-readline,--without-readline,readline" -# PCRE support in hardlink -PACKAGECONFIG[pcre2] = ",,libpcre2" - -EXTRA_OEMAKE = "ARCH=${TARGET_ARCH} CPU= CPUOPT= 'OPT=${CFLAGS}'" - -ALLOW_EMPTY_${PN} = "1" -FILES_${PN} = "" -FILES_${PN}-doc += "${datadir}/getopt/getopt-*.*" -FILES_${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/libmount/pylibmount.la" -FILES_${PN}-mount = "${sysconfdir}/default/mountall" -FILES_${PN}-runuser = "${sysconfdir}/pam.d/runuser*" -FILES_${PN}-su = 
"${sysconfdir}/pam.d/su-l" -CONFFILES_${PN}-su = "${sysconfdir}/pam.d/su-l" -FILES_${PN}-pylibmount = "${PYTHON_SITEPACKAGES_DIR}/libmount/pylibmount.so \ - ${PYTHON_SITEPACKAGES_DIR}/libmount/__init__.* \ - ${PYTHON_SITEPACKAGES_DIR}/libmount/__pycache__/*" - -# Util-linux' blkid replaces the e2fsprogs one -RCONFLICTS_${PN}-blkid = "${MLPREFIX}e2fsprogs-blkid" -RREPLACES_${PN}-blkid = "${MLPREFIX}e2fsprogs-blkid" - -RRECOMMENDS_${PN}_class-native = "" -RRECOMMENDS_${PN}_class-nativesdk = "" -RDEPENDS_${PN}_class-native = "" -RDEPENDS_${PN}_class-nativesdk = "" - -RPROVIDES_${PN}-dev = "${PN}-libblkid-dev ${PN}-libmount-dev ${PN}-libuuid-dev" - -RDEPENDS_${PN}-bash-completion += "${PN}-lsblk" -RDEPENDS_${PN}-ptest += "bash bc btrfs-tools coreutils e2fsprogs grep iproute2 kmod mdadm procps sed socat which xz" -RRECOMMENDS_${PN}-ptest += "kernel-module-scsi-debug" -RDEPENDS_${PN}-swaponoff = "${PN}-swapon ${PN}-swapoff" -ALLOW_EMPTY_${PN}-swaponoff = "1" - -#SYSTEMD_PACKAGES = "${PN}-uuidd ${PN}-fstrim" -SYSTEMD_SERVICE_${PN}-uuidd = "uuidd.socket uuidd.service" -SYSTEMD_AUTO_ENABLE_${PN}-uuidd = "disable" -SYSTEMD_SERVICE_${PN}-fstrim = "fstrim.timer fstrim.service" -SYSTEMD_AUTO_ENABLE_${PN}-fstrim = "disable" - -do_install () { - # with ccache the timestamps on compiled files may - # end up earlier than on their inputs, this allows - # for the resultant compilation in the install step. - oe_runmake 'CC=${CC}' 'LD=${LD}' \ - 'LDFLAGS=${LDFLAGS}' 'DESTDIR=${D}' install - - mkdir -p ${D}${base_bindir} - - sbinprogs="agetty ctrlaltdel cfdisk vipw vigr" - sbinprogs_a="pivot_root hwclock mkswap losetup swapon swapoff fdisk fsck blkid blockdev fstrim sulogin switch_root nologin" - binprogs_a="dmesg getopt kill more umount mount login su mountpoint" - - if [ "${base_sbindir}" != "${sbindir}" ]; then - mkdir -p ${D}${base_sbindir} - for p in $sbinprogs $sbinprogs_a; do - if [ -f "${D}${sbindir}/$p" ]; then - mv "${D}${sbindir}/$p" "${D}${base_sbindir}/$p" - fi - done - fi - - if [ "${base_bindir}" != "${bindir}" ]; then - mkdir -p ${D}${base_bindir} - for p in $binprogs_a; do - if [ -f "${D}${bindir}/$p" ]; then - mv "${D}${bindir}/$p" "${D}${base_bindir}/$p" - fi - done - fi - - install -d ${D}${sysconfdir}/default/ - echo 'MOUNTALL="-t nonfs,nosmbfs,noncpfs"' > ${D}${sysconfdir}/default/mountall - - rm -f ${D}${bindir}/chkdupexe -} - -do_install_append_class-target () { - if [ "${@bb.utils.filter('PACKAGECONFIG', 'pam', d)}" ]; then - install -d ${D}${sysconfdir}/pam.d - install -m 0644 ${WORKDIR}/runuser.pamd ${D}${sysconfdir}/pam.d/runuser - install -m 0644 ${WORKDIR}/runuser-l.pamd ${D}${sysconfdir}/pam.d/runuser-l - # Required for "su -" aka "su --login" because - # otherwise it uses "other", which has "auth pam_deny.so" - # and thus prevents the operation. 
- ln -s su ${D}${sysconfdir}/pam.d/su-l - fi -} -# nologin causes a conflict with shadow-native -# kill causes a conflict with coreutils-native (if ${bindir}==${base_bindir}) -do_install_append_class-native () { - rm -f ${D}${base_sbindir}/nologin - rm -f ${D}${base_bindir}/kill -} - -ALTERNATIVE_PRIORITY = "80" - -ALTERNATIVE_LINK_NAME[blkid] = "${base_sbindir}/blkid" -ALTERNATIVE_LINK_NAME[blockdev] = "${base_sbindir}/blockdev" -ALTERNATIVE_LINK_NAME[cal] = "${bindir}/cal" -ALTERNATIVE_LINK_NAME[chrt] = "${bindir}/chrt" -ALTERNATIVE_LINK_NAME[dmesg] = "${base_bindir}/dmesg" -ALTERNATIVE_LINK_NAME[eject] = "${bindir}/eject" -ALTERNATIVE_LINK_NAME[fallocate] = "${bindir}/fallocate" -ALTERNATIVE_LINK_NAME[fdisk] = "${base_sbindir}/fdisk" -ALTERNATIVE_LINK_NAME[flock] = "${bindir}/flock" -ALTERNATIVE_LINK_NAME[fsck] = "${base_sbindir}/fsck" -ALTERNATIVE_LINK_NAME[fsfreeze] = "${sbindir}/fsfreeze" -ALTERNATIVE_LINK_NAME[fstrim] = "${base_sbindir}/fstrim" -ALTERNATIVE_LINK_NAME[getopt] = "${base_bindir}/getopt" -ALTERNATIVE_${PN}-agetty = "getty" -ALTERNATIVE_LINK_NAME[getty] = "${base_sbindir}/getty" -ALTERNATIVE_TARGET[getty] = "${base_sbindir}/agetty" -ALTERNATIVE_LINK_NAME[hexdump] = "${bindir}/hexdump" -ALTERNATIVE_LINK_NAME[hwclock] = "${base_sbindir}/hwclock" -ALTERNATIVE_LINK_NAME[ionice] = "${bindir}/ionice" -ALTERNATIVE_LINK_NAME[kill] = "${base_bindir}/kill" -ALTERNATIVE_${PN}-last = "last lastb" -ALTERNATIVE_LINK_NAME[last] = "${bindir}/last" -ALTERNATIVE_LINK_NAME[lastb] = "${bindir}/lastb" -ALTERNATIVE_LINK_NAME[logger] = "${bindir}/logger" -ALTERNATIVE_LINK_NAME[losetup] = "${base_sbindir}/losetup" -ALTERNATIVE_LINK_NAME[mesg] = "${bindir}/mesg" -ALTERNATIVE_LINK_NAME[mkswap] = "${base_sbindir}/mkswap" -ALTERNATIVE_LINK_NAME[more] = "${base_bindir}/more" -ALTERNATIVE_LINK_NAME[mount] = "${base_bindir}/mount" -ALTERNATIVE_LINK_NAME[mountpoint] = "${base_bindir}/mountpoint" -ALTERNATIVE_LINK_NAME[nologin] = "${base_sbindir}/nologin" -ALTERNATIVE_LINK_NAME[nsenter] = "${bindir}/nsenter" -ALTERNATIVE_LINK_NAME[pivot_root] = "${base_sbindir}/pivot_root" -ALTERNATIVE_LINK_NAME[readprofile] = "${sbindir}/readprofile" -ALTERNATIVE_LINK_NAME[renice] = "${bindir}/renice" -ALTERNATIVE_LINK_NAME[rev] = "${bindir}/rev" -ALTERNATIVE_LINK_NAME[rfkill] = "${sbindir}/rfkill" -ALTERNATIVE_LINK_NAME[rtcwake] = "${sbindir}/rtcwake" -ALTERNATIVE_LINK_NAME[setpriv] = "${bindir}/setpriv" -ALTERNATIVE_LINK_NAME[setsid] = "${bindir}/setsid" -ALTERNATIVE_LINK_NAME[su] = "${base_bindir}/su" -ALTERNATIVE_LINK_NAME[sulogin] = "${base_sbindir}/sulogin" -ALTERNATIVE_LINK_NAME[swapoff] = "${base_sbindir}/swapoff" -ALTERNATIVE_LINK_NAME[swapon] = "${base_sbindir}/swapon" -ALTERNATIVE_LINK_NAME[switch_root] = "${base_sbindir}/switch_root" -ALTERNATIVE_LINK_NAME[taskset] = "${bindir}/taskset" -ALTERNATIVE_LINK_NAME[umount] = "${base_bindir}/umount" -ALTERNATIVE_LINK_NAME[unshare] = "${bindir}/unshare" -ALTERNATIVE_LINK_NAME[utmpdump] = "${bindir}/utmpdump" -ALTERNATIVE_LINK_NAME[wall] = "${bindir}/wall" - -ALTERNATIVE_${PN}-doc = "\ -blkid.8 eject.1 findfs.8 fsck.8 kill.1 last.1 lastb.1 libblkid.3 logger.1 mesg.1 \ -mountpoint.1 nologin.8 rfkill.8 sulogin.8 utmpdump.1 uuid.3 wall.1\ -" -ALTERNATIVE_${PN}-doc += "${@bb.utils.contains('PACKAGECONFIG', 'pam', 'su.1', '', d)}" - -ALTERNATIVE_LINK_NAME[blkid.8] = "${mandir}/man8/blkid.8" -ALTERNATIVE_LINK_NAME[eject.1] = "${mandir}/man1/eject.1" -ALTERNATIVE_LINK_NAME[findfs.8] = "${mandir}/man8/findfs.8" -ALTERNATIVE_LINK_NAME[fsck.8] = "${mandir}/man8/fsck.8" 
-ALTERNATIVE_LINK_NAME[kill.1] = "${mandir}/man1/kill.1" -ALTERNATIVE_LINK_NAME[last.1] = "${mandir}/man1/last.1" -ALTERNATIVE_LINK_NAME[lastb.1] = "${mandir}/man1/lastb.1" -ALTERNATIVE_LINK_NAME[libblkid.3] = "${mandir}/man3/libblkid.3" -ALTERNATIVE_LINK_NAME[logger.1] = "${mandir}/man1/logger.1" -ALTERNATIVE_LINK_NAME[mesg.1] = "${mandir}/man1/mesg.1" -ALTERNATIVE_LINK_NAME[mountpoint.1] = "${mandir}/man1/mountpoint.1" -ALTERNATIVE_LINK_NAME[nologin.8] = "${mandir}/man8/nologin.8" -ALTERNATIVE_LINK_NAME[rfkill.8] = "${mandir}/man8/rfkill.8" -ALTERNATIVE_LINK_NAME[setpriv.1] = "${mandir}/man1/setpriv.1" -ALTERNATIVE_LINK_NAME[su.1] = "${mandir}/man1/su.1" -ALTERNATIVE_LINK_NAME[sulogin.8] = "${mandir}/man8/sulogin.8" -ALTERNATIVE_LINK_NAME[utmpdump.1] = "${mandir}/man1/utmpdump.1" -ALTERNATIVE_LINK_NAME[uuid.3] = "${mandir}/man3/uuid.3" -ALTERNATIVE_LINK_NAME[wall.1] = "${mandir}/man1/wall.1" - - -BBCLASSEXTEND = "native nativesdk" - -PTEST_BINDIR = "1" -do_compile_ptest() { - oe_runmake buildtest-TESTS -} - -do_install_ptest() { - mkdir -p ${D}${PTEST_PATH}/tests/ts - find . -name 'test*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; - find ./.libs -name 'sample*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; - find ./.libs -name 'test*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; - - cp ${S}/tests/*.sh ${D}${PTEST_PATH}/tests/ - cp -pR ${S}/tests/expected ${D}${PTEST_PATH}/tests/expected - cp -pR ${S}/tests/ts ${D}${PTEST_PATH}/tests/ - cp ${WORKDIR}/build/config.h ${D}${PTEST_PATH} - - # The original paths of executables to be tested point to a local folder containing - # the executables. We want to test the installed executables, not the local copies. - # So strip the paths, the executables will be located via "which" - sed -i \ - -e '/^TS_CMD/ s|$top_builddir/||g' \ - -e '/^TS_HELPER/ s|$top_builddir|${PTEST_PATH}|g' \ - ${D}${PTEST_PATH}/tests/commands.sh - - # Change 'if [ ! -x "$1" ]' to 'if [ ! 
-x "`which $1 2>/dev/null`"]' - sed -i -e \ - '/^\tif[[:space:]]\[[[:space:]]![[:space:]]-x[[:space:]]"$1"/s|$1|`which $1 2>/dev/null`|g' \ - ${D}${PTEST_PATH}/tests/functions.sh - - # Running "kill" without the the complete path would use the shell's built-in kill - sed -i -e \ - '/^TS_CMD_KILL/ s|kill|${PTEST_PATH}/bin/kill|g' \ - ${D}${PTEST_PATH}/tests/commands.sh - - - sed -i 's|@base_sbindir@|${base_sbindir}|g' ${D}${PTEST_PATH}/run-ptest - -} diff --git a/poky/meta/recipes-core/util-linux/util-linux/0001-include-cleanup-pidfd-inckudes.patch b/poky/meta/recipes-core/util-linux/util-linux/0001-include-cleanup-pidfd-inckudes.patch deleted file mode 100644 index 0ef6fb4ec..000000000 --- a/poky/meta/recipes-core/util-linux/util-linux/0001-include-cleanup-pidfd-inckudes.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0a4035ff2e4fd5b5ae0cf8f8665696c2aff53b75 Mon Sep 17 00:00:00 2001 -From: Karel Zak -Date: Tue, 10 Mar 2020 11:43:16 +0100 -Subject: [PATCH] include: cleanup pidfd inckudes - -Upstream-Status: Backport [https://github.com/karelzak/util-linux/commit/0a4035ff2e4fd5b5ae0cf8f8665696c2aff53b75] - -Signed-off-by: Karel Zak -Signed-off-by: Benjamin Fair ---- - include/pidfd-utils.h | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/include/pidfd-utils.h b/include/pidfd-utils.h -index 0baedd2c9..4a6c3a604 100644 ---- a/include/pidfd-utils.h -+++ b/include/pidfd-utils.h -@@ -3,10 +3,10 @@ - - #if defined(__linux__) - # include --# if defined(SYS_pidfd_send_signal) -+# if defined(SYS_pidfd_send_signal) && defined(SYS_pidfd_open) - # include - --# ifndef HAVE_PIDFD_OPEN -+# ifndef HAVE_PIDFD_SEND_SIGNAL - static inline int pidfd_send_signal(int pidfd, int sig, siginfo_t *info, - unsigned int flags) - { -@@ -14,7 +14,7 @@ static inline int pidfd_send_signal(int pidfd, int sig, siginfo_t *info, - } - # endif - --# ifndef HAVE_PIDFD_SEND_SIGNAL -+# ifndef HAVE_PIDFD_OPEN - static inline int pidfd_open(pid_t pid, unsigned int flags) - { - return syscall(SYS_pidfd_open, pid, flags); --- -2.26.1.301.g55bc3eb7cb9-goog - diff --git a/poky/meta/recipes-core/util-linux/util-linux_2.35.2.bb b/poky/meta/recipes-core/util-linux/util-linux_2.35.2.bb deleted file mode 100644 index 752a6fa21..000000000 --- a/poky/meta/recipes-core/util-linux/util-linux_2.35.2.bb +++ /dev/null @@ -1,13 +0,0 @@ -require util-linux.inc - -SRC_URI += "file://configure-sbindir.patch \ - file://runuser.pamd \ - file://runuser-l.pamd \ - file://ptest.patch \ - file://run-ptest \ - file://display_testname_for_subtest.patch \ - file://avoid_parallel_tests.patch \ - file://0001-include-cleanup-pidfd-inckudes.patch \ -" -SRC_URI[md5sum] = "248a4d0810c9193e0e9a4bb3f26b93d8" -SRC_URI[sha256sum] = "21b7431e82f6bcd9441a01beeec3d57ed33ee948f8a5b41da577073c372eb58a" diff --git a/poky/meta/recipes-core/util-linux/util-linux_2.36.bb b/poky/meta/recipes-core/util-linux/util-linux_2.36.bb new file mode 100644 index 000000000..2ad00ff0a --- /dev/null +++ b/poky/meta/recipes-core/util-linux/util-linux_2.36.bb @@ -0,0 +1,359 @@ +SUMMARY = "A suite of basic system administration utilities" +HOMEPAGE = "http://userweb.kernel.org/~kzak/util-linux/" +DESCRIPTION = "Util-linux includes a suite of basic system administration utilities \ +commonly found on most Linux systems. Some of the more important utilities include \ +disk partitioning, kernel message management, filesystem creation, and system login." 
+ +SECTION = "base" + +LICENSE = "GPLv2+ & LGPLv2.1+ & BSD-3-Clause & BSD-4-Clause" +LICENSE_${PN}-libblkid = "LGPLv2.1+" +LICENSE_${PN}-libfdisk = "LGPLv2.1+" +LICENSE_${PN}-libmount = "LGPLv2.1+" +LICENSE_${PN}-libsmartcols = "LGPLv2.1+" +LICENSE_${PN}-libuuid = "BSD-3-Clause" + +LIC_FILES_CHKSUM = "file://README.licensing;md5=0fd5c050c6187d2bf0a4492b7f4e33da \ + file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://Documentation/licenses/COPYING.GPL-2.0-or-later;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://Documentation/licenses/COPYING.LGPL-2.1-or-later;md5=4fbd65380cdd255951079008b364516c \ + file://Documentation/licenses/COPYING.BSD-3-Clause;md5=58dcd8452651fc8b07d1f65ce07ca8af \ + file://Documentation/licenses/COPYING.BSD-4-Clause-UC;md5=263860f8968d8bafa5392cab74285262 \ + file://libuuid/COPYING;md5=6d2cafc999feb2c2de84d4d24b23290c \ + file://libmount/COPYING;md5=7c7e39fb7d70ffe5d693a643e29987c2 \ + file://libblkid/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ + file://libfdisk/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ + file://libsmartcols/COPYING;md5=693bcbbe16d3a4a4b37bc906bc01cc04 \ +" + +#gtk-doc is not enabled as it requires xmlto which requires util-linux +inherit autotools gettext manpages pkgconfig systemd update-alternatives python3-dir bash-completion ptest +DEPENDS = "libcap-ng ncurses virtual/crypt zlib" + +MAJOR_VERSION = "${@'.'.join(d.getVar('PV').split('.')[0:2])}" +SRC_URI = "${KERNELORG_MIRROR}/linux/utils/${BPN}/v${MAJOR_VERSION}/${BP}.tar.xz \ + file://configure-sbindir.patch \ + file://runuser.pamd \ + file://runuser-l.pamd \ + file://ptest.patch \ + file://run-ptest \ + file://display_testname_for_subtest.patch \ + file://avoid_parallel_tests.patch \ + " +SRC_URI[sha256sum] = "9e4b1c67eb13b9b67feb32ae1dc0d50e08ce9e5d82e1cccd0ee771ad2fa9e0b1" + +PACKAGES =+ "${PN}-swaponoff" +PACKAGES += "${@bb.utils.contains('PACKAGECONFIG', 'pylibmount', '${PN}-pylibmount', '', d)}" + +python util_linux_binpackages () { + def pkg_hook(f, pkg, file_regex, output_pattern, modulename): + pn = d.getVar('PN') + d.appendVar('RRECOMMENDS_%s' % pn, ' %s' % pkg) + + if d.getVar('ALTERNATIVE_' + pkg): + return + if d.getVarFlag('ALTERNATIVE_LINK_NAME', modulename): + d.setVar('ALTERNATIVE_' + pkg, modulename) + + bindirs = sorted(list(set(d.expand("${base_sbindir} ${base_bindir} ${sbindir} ${bindir}").split()))) + for dir in bindirs: + do_split_packages(d, root=dir, + file_regex=r'(.*)', output_pattern='${PN}-%s', + description='${PN} %s', + hook=pkg_hook, extra_depends='') + + # There are some symlinks for some binaries which we have ignored + # above. 
Add them to the package owning the binary they are + # pointing to + extras = {} + dvar = d.getVar('PKGD') + for root in bindirs: + for walkroot, dirs, files in os.walk(dvar + root): + for f in files: + file = os.path.join(walkroot, f) + if not os.path.islink(file): + continue + + pkg = os.path.basename(os.readlink(file)) + extras[pkg] = extras.get(pkg, '') + ' ' + file.replace(dvar, '', 1) + + pn = d.getVar('PN') + for pkg, links in extras.items(): + of = d.getVar('FILES_' + pn + '-' + pkg) + links = of + links + d.setVar('FILES_' + pn + '-' + pkg, links) +} + +# we must execute before update-alternatives PACKAGE_PREPROCESS_FUNCS +PACKAGE_PREPROCESS_FUNCS =+ "util_linux_binpackages " + +python util_linux_libpackages() { + do_split_packages(d, root=d.getVar('UTIL_LINUX_LIBDIR'), file_regex=r'^lib(.*)\.so\..*$', + output_pattern='${PN}-lib%s', + description='${PN} lib%s', + extra_depends='', prepend=True, allow_links=True) +} + +PACKAGESPLITFUNCS =+ "util_linux_libpackages" + +PACKAGES_DYNAMIC = "^${PN}-.*" + +CACHED_CONFIGUREVARS += "scanf_cv_alloc_modifier=ms" +UTIL_LINUX_LIBDIR = "${libdir}" +UTIL_LINUX_LIBDIR_class-target = "${base_libdir}" +EXTRA_OECONF = "\ + --enable-libuuid --enable-libblkid \ + \ + --enable-fsck --enable-kill --enable-last --enable-mesg \ + --enable-mount --enable-partx --enable-raw --enable-rfkill \ + --enable-unshare --enable-write \ + \ + --disable-bfs --disable-chfn-chsh --disable-login \ + --disable-makeinstall-chown --disable-minix --disable-newgrp \ + --disable-use-tty-group --disable-vipw \ + \ + --without-udev \ + \ + usrsbin_execdir='${sbindir}' \ + --libdir='${UTIL_LINUX_LIBDIR}' \ +" + +EXTRA_OECONF_append_class-target = " --enable-setpriv" +EXTRA_OECONF_append_class-native = " --without-cap-ng --disable-setpriv" +EXTRA_OECONF_append_class-nativesdk = " --without-cap-ng --disable-setpriv" +EXTRA_OECONF_append = " --disable-hwclock-gplv3" + +# enable pcre2 for native/nativesdk to match host distros +# this helps to keep same expectations when using the SDK or +# build host versions during development +# +PACKAGECONFIG ?= "pcre2" +PACKAGECONFIG_class-target ?= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" +# inherit manpages requires this to be present, however util-linux does not have +# configuration options, and installs manpages always +PACKAGECONFIG[manpages] = "" +PACKAGECONFIG[pam] = "--enable-su --enable-runuser,--disable-su --disable-runuser, libpam," +# Respect the systemd feature for uuidd +PACKAGECONFIG[systemd] = "--with-systemd --with-systemdsystemunitdir=${systemd_system_unitdir}, --without-systemd --without-systemdsystemunitdir,systemd" +# Build python bindings for libmount +PACKAGECONFIG[pylibmount] = "--with-python=3 --enable-pylibmount,--without-python --disable-pylibmount,python3" +# Readline support +PACKAGECONFIG[readline] = "--with-readline,--without-readline,readline" +# PCRE support in hardlink +PACKAGECONFIG[pcre2] = ",,libpcre2" + +EXTRA_OEMAKE = "ARCH=${TARGET_ARCH} CPU= CPUOPT= 'OPT=${CFLAGS}'" + +ALLOW_EMPTY_${PN} = "1" +FILES_${PN} = "" +FILES_${PN}-doc += "${datadir}/getopt/getopt-*.*" +FILES_${PN}-dev += "${PYTHON_SITEPACKAGES_DIR}/libmount/pylibmount.la" +FILES_${PN}-mount = "${sysconfdir}/default/mountall" +FILES_${PN}-runuser = "${sysconfdir}/pam.d/runuser*" +FILES_${PN}-su = "${sysconfdir}/pam.d/su-l" +CONFFILES_${PN}-su = "${sysconfdir}/pam.d/su-l" +FILES_${PN}-pylibmount = "${PYTHON_SITEPACKAGES_DIR}/libmount/pylibmount.so \ + ${PYTHON_SITEPACKAGES_DIR}/libmount/__init__.* \ + 
${PYTHON_SITEPACKAGES_DIR}/libmount/__pycache__/*" + +# Util-linux' blkid replaces the e2fsprogs one +RCONFLICTS_${PN}-blkid = "${MLPREFIX}e2fsprogs-blkid" +RREPLACES_${PN}-blkid = "${MLPREFIX}e2fsprogs-blkid" + +RRECOMMENDS_${PN}_class-native = "" +RRECOMMENDS_${PN}_class-nativesdk = "" +RDEPENDS_${PN}_class-native = "" +RDEPENDS_${PN}_class-nativesdk = "" + +RPROVIDES_${PN}-dev = "${PN}-libblkid-dev ${PN}-libmount-dev ${PN}-libuuid-dev" + +RDEPENDS_${PN}-bash-completion += "${PN}-lsblk" +RDEPENDS_${PN}-ptest += "bash bc btrfs-tools coreutils e2fsprogs grep iproute2 kmod mdadm procps sed socat which xz" +RRECOMMENDS_${PN}-ptest += "kernel-module-scsi-debug" +RDEPENDS_${PN}-swaponoff = "${PN}-swapon ${PN}-swapoff" +ALLOW_EMPTY_${PN}-swaponoff = "1" + +#SYSTEMD_PACKAGES = "${PN}-uuidd ${PN}-fstrim" +SYSTEMD_SERVICE_${PN}-uuidd = "uuidd.socket uuidd.service" +SYSTEMD_AUTO_ENABLE_${PN}-uuidd = "disable" +SYSTEMD_SERVICE_${PN}-fstrim = "fstrim.timer fstrim.service" +SYSTEMD_AUTO_ENABLE_${PN}-fstrim = "disable" + +do_install () { + # with ccache the timestamps on compiled files may + # end up earlier than on their inputs, this allows + # for the resultant compilation in the install step. + oe_runmake 'CC=${CC}' 'LD=${LD}' \ + 'LDFLAGS=${LDFLAGS}' 'DESTDIR=${D}' install + + mkdir -p ${D}${base_bindir} + + sbinprogs="agetty ctrlaltdel cfdisk vipw vigr" + sbinprogs_a="pivot_root hwclock mkswap losetup swapon swapoff fdisk fsck blkid blockdev fstrim sulogin switch_root nologin" + binprogs_a="dmesg getopt kill more umount mount login su mountpoint" + + if [ "${base_sbindir}" != "${sbindir}" ]; then + mkdir -p ${D}${base_sbindir} + for p in $sbinprogs $sbinprogs_a; do + if [ -f "${D}${sbindir}/$p" ]; then + mv "${D}${sbindir}/$p" "${D}${base_sbindir}/$p" + fi + done + fi + + if [ "${base_bindir}" != "${bindir}" ]; then + mkdir -p ${D}${base_bindir} + for p in $binprogs_a; do + if [ -f "${D}${bindir}/$p" ]; then + mv "${D}${bindir}/$p" "${D}${base_bindir}/$p" + fi + done + fi + + install -d ${D}${sysconfdir}/default/ + echo 'MOUNTALL="-t nonfs,nosmbfs,noncpfs"' > ${D}${sysconfdir}/default/mountall + + rm -f ${D}${bindir}/chkdupexe +} + +do_install_append_class-target () { + if [ "${@bb.utils.filter('PACKAGECONFIG', 'pam', d)}" ]; then + install -d ${D}${sysconfdir}/pam.d + install -m 0644 ${WORKDIR}/runuser.pamd ${D}${sysconfdir}/pam.d/runuser + install -m 0644 ${WORKDIR}/runuser-l.pamd ${D}${sysconfdir}/pam.d/runuser-l + # Required for "su -" aka "su --login" because + # otherwise it uses "other", which has "auth pam_deny.so" + # and thus prevents the operation. 
+ ln -s su ${D}${sysconfdir}/pam.d/su-l + fi +} +# nologin causes a conflict with shadow-native +# kill causes a conflict with coreutils-native (if ${bindir}==${base_bindir}) +do_install_append_class-native () { + rm -f ${D}${base_sbindir}/nologin + rm -f ${D}${base_bindir}/kill +} + +ALTERNATIVE_PRIORITY = "80" + +ALTERNATIVE_LINK_NAME[blkid] = "${base_sbindir}/blkid" +ALTERNATIVE_LINK_NAME[blockdev] = "${base_sbindir}/blockdev" +ALTERNATIVE_LINK_NAME[cal] = "${bindir}/cal" +ALTERNATIVE_LINK_NAME[chrt] = "${bindir}/chrt" +ALTERNATIVE_LINK_NAME[dmesg] = "${base_bindir}/dmesg" +ALTERNATIVE_LINK_NAME[eject] = "${bindir}/eject" +ALTERNATIVE_LINK_NAME[fallocate] = "${bindir}/fallocate" +ALTERNATIVE_LINK_NAME[fdisk] = "${base_sbindir}/fdisk" +ALTERNATIVE_LINK_NAME[flock] = "${bindir}/flock" +ALTERNATIVE_LINK_NAME[fsck] = "${base_sbindir}/fsck" +ALTERNATIVE_LINK_NAME[fsfreeze] = "${sbindir}/fsfreeze" +ALTERNATIVE_LINK_NAME[fstrim] = "${base_sbindir}/fstrim" +ALTERNATIVE_LINK_NAME[getopt] = "${base_bindir}/getopt" +ALTERNATIVE_${PN}-agetty = "getty" +ALTERNATIVE_LINK_NAME[getty] = "${base_sbindir}/getty" +ALTERNATIVE_TARGET[getty] = "${base_sbindir}/agetty" +ALTERNATIVE_LINK_NAME[hexdump] = "${bindir}/hexdump" +ALTERNATIVE_LINK_NAME[hwclock] = "${base_sbindir}/hwclock" +ALTERNATIVE_LINK_NAME[ionice] = "${bindir}/ionice" +ALTERNATIVE_LINK_NAME[kill] = "${base_bindir}/kill" +ALTERNATIVE_${PN}-last = "last lastb" +ALTERNATIVE_LINK_NAME[last] = "${bindir}/last" +ALTERNATIVE_LINK_NAME[lastb] = "${bindir}/lastb" +ALTERNATIVE_LINK_NAME[logger] = "${bindir}/logger" +ALTERNATIVE_LINK_NAME[losetup] = "${base_sbindir}/losetup" +ALTERNATIVE_LINK_NAME[mesg] = "${bindir}/mesg" +ALTERNATIVE_LINK_NAME[mkswap] = "${base_sbindir}/mkswap" +ALTERNATIVE_LINK_NAME[mcookie] = "${bindir}/mcookie" +ALTERNATIVE_LINK_NAME[more] = "${base_bindir}/more" +ALTERNATIVE_LINK_NAME[mount] = "${base_bindir}/mount" +ALTERNATIVE_LINK_NAME[mountpoint] = "${base_bindir}/mountpoint" +ALTERNATIVE_LINK_NAME[nologin] = "${base_sbindir}/nologin" +ALTERNATIVE_LINK_NAME[nsenter] = "${bindir}/nsenter" +ALTERNATIVE_LINK_NAME[pivot_root] = "${base_sbindir}/pivot_root" +ALTERNATIVE_LINK_NAME[prlimit] = "${bindir}/prlimit" +ALTERNATIVE_LINK_NAME[readprofile] = "${sbindir}/readprofile" +ALTERNATIVE_LINK_NAME[renice] = "${bindir}/renice" +ALTERNATIVE_LINK_NAME[rev] = "${bindir}/rev" +ALTERNATIVE_LINK_NAME[rfkill] = "${sbindir}/rfkill" +ALTERNATIVE_LINK_NAME[rtcwake] = "${sbindir}/rtcwake" +ALTERNATIVE_LINK_NAME[setpriv] = "${bindir}/setpriv" +ALTERNATIVE_LINK_NAME[setsid] = "${bindir}/setsid" +ALTERNATIVE_LINK_NAME[su] = "${base_bindir}/su" +ALTERNATIVE_LINK_NAME[sulogin] = "${base_sbindir}/sulogin" +ALTERNATIVE_LINK_NAME[swapoff] = "${base_sbindir}/swapoff" +ALTERNATIVE_LINK_NAME[swapon] = "${base_sbindir}/swapon" +ALTERNATIVE_LINK_NAME[switch_root] = "${base_sbindir}/switch_root" +ALTERNATIVE_LINK_NAME[taskset] = "${bindir}/taskset" +ALTERNATIVE_LINK_NAME[umount] = "${base_bindir}/umount" +ALTERNATIVE_LINK_NAME[unshare] = "${bindir}/unshare" +ALTERNATIVE_LINK_NAME[utmpdump] = "${bindir}/utmpdump" +ALTERNATIVE_LINK_NAME[uuidgen] = "${bindir}/uuidgen" +ALTERNATIVE_LINK_NAME[wall] = "${bindir}/wall" + +ALTERNATIVE_${PN}-doc = "\ +blkid.8 eject.1 findfs.8 fsck.8 kill.1 last.1 lastb.1 libblkid.3 logger.1 mesg.1 \ +mountpoint.1 nologin.8 rfkill.8 sulogin.8 utmpdump.1 uuid.3 wall.1\ +" +ALTERNATIVE_${PN}-doc += "${@bb.utils.contains('PACKAGECONFIG', 'pam', 'su.1', '', d)}" + +ALTERNATIVE_LINK_NAME[blkid.8] = "${mandir}/man8/blkid.8" 
+ALTERNATIVE_LINK_NAME[eject.1] = "${mandir}/man1/eject.1" +ALTERNATIVE_LINK_NAME[findfs.8] = "${mandir}/man8/findfs.8" +ALTERNATIVE_LINK_NAME[fsck.8] = "${mandir}/man8/fsck.8" +ALTERNATIVE_LINK_NAME[kill.1] = "${mandir}/man1/kill.1" +ALTERNATIVE_LINK_NAME[last.1] = "${mandir}/man1/last.1" +ALTERNATIVE_LINK_NAME[lastb.1] = "${mandir}/man1/lastb.1" +ALTERNATIVE_LINK_NAME[libblkid.3] = "${mandir}/man3/libblkid.3" +ALTERNATIVE_LINK_NAME[logger.1] = "${mandir}/man1/logger.1" +ALTERNATIVE_LINK_NAME[mesg.1] = "${mandir}/man1/mesg.1" +ALTERNATIVE_LINK_NAME[mountpoint.1] = "${mandir}/man1/mountpoint.1" +ALTERNATIVE_LINK_NAME[nologin.8] = "${mandir}/man8/nologin.8" +ALTERNATIVE_LINK_NAME[rfkill.8] = "${mandir}/man8/rfkill.8" +ALTERNATIVE_LINK_NAME[setpriv.1] = "${mandir}/man1/setpriv.1" +ALTERNATIVE_LINK_NAME[su.1] = "${mandir}/man1/su.1" +ALTERNATIVE_LINK_NAME[sulogin.8] = "${mandir}/man8/sulogin.8" +ALTERNATIVE_LINK_NAME[utmpdump.1] = "${mandir}/man1/utmpdump.1" +ALTERNATIVE_LINK_NAME[uuid.3] = "${mandir}/man3/uuid.3" +ALTERNATIVE_LINK_NAME[wall.1] = "${mandir}/man1/wall.1" + + +BBCLASSEXTEND = "native nativesdk" + +PTEST_BINDIR = "1" +do_compile_ptest() { + oe_runmake buildtest-TESTS +} + +do_install_ptest() { + mkdir -p ${D}${PTEST_PATH}/tests/ts + find . -name 'test*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; + find ./.libs -name 'sample*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; + find ./.libs -name 'test*' -maxdepth 1 -type f -perm -111 -exec cp {} ${D}${PTEST_PATH} \; + + cp ${S}/tests/*.sh ${D}${PTEST_PATH}/tests/ + cp -pR ${S}/tests/expected ${D}${PTEST_PATH}/tests/expected + cp -pR ${S}/tests/ts ${D}${PTEST_PATH}/tests/ + cp ${WORKDIR}/build/config.h ${D}${PTEST_PATH} + + # The original paths of executables to be tested point to a local folder containing + # the executables. We want to test the installed executables, not the local copies. + # So strip the paths, the executables will be located via "which" + sed -i \ + -e '/^TS_CMD/ s|$top_builddir/||g' \ + -e '/^TS_HELPER/ s|$top_builddir|${PTEST_PATH}|g' \ + ${D}${PTEST_PATH}/tests/commands.sh + + # Change 'if [ ! -x "$1" ]' to 'if [ ! 
-x "`which $1 2>/dev/null`"]' + sed -i -e \ + '/^\tif[[:space:]]\[[[:space:]]![[:space:]]-x[[:space:]]"$1"/s|$1|`which $1 2>/dev/null`|g' \ + ${D}${PTEST_PATH}/tests/functions.sh + + # Running "kill" without the the complete path would use the shell's built-in kill + sed -i -e \ + '/^TS_CMD_KILL/ s|kill|${PTEST_PATH}/bin/kill|g' \ + ${D}${PTEST_PATH}/tests/commands.sh + + + sed -i 's|@base_sbindir@|${base_sbindir}|g' ${D}${PTEST_PATH}/run-ptest + +} diff --git a/poky/meta/recipes-devtools/autoconf/autoconf.inc b/poky/meta/recipes-devtools/autoconf/autoconf.inc index 2c87bf829..787f30a09 100644 --- a/poky/meta/recipes-devtools/autoconf/autoconf.inc +++ b/poky/meta/recipes-devtools/autoconf/autoconf.inc @@ -5,9 +5,8 @@ file that lists the operating system features that the package can use, in the f LICENSE = "GPLv3" HOMEPAGE = "http://www.gnu.org/software/autoconf/" SECTION = "devel" -DEPENDS += "m4-native" -DEPENDS_class-native = "m4-native gnu-config-native" -DEPENDS_class-nativesdk = "nativesdk-m4 nativesdk-gnu-config" +DEPENDS = "m4-native gnu-config-native" + RDEPENDS_${PN} = "m4 gnu-config \ perl \ perl-module-bytes \ @@ -62,11 +61,6 @@ RDEPENDS_${PN}_class-nativesdk = "\ nativesdk-perl-module-threads \ " - - -SRC_URI = "${GNU_MIRROR}/autoconf/autoconf-${PV}.tar.gz \ - file://program_prefix.patch" - inherit autotools texinfo PERL = "${USRBINPATH}/perl" diff --git a/poky/meta/recipes-devtools/autoconf/autoconf_2.69.bb b/poky/meta/recipes-devtools/autoconf/autoconf_2.69.bb index 8e67f4b82..2e1b9bdc9 100644 --- a/poky/meta/recipes-devtools/autoconf/autoconf_2.69.bb +++ b/poky/meta/recipes-devtools/autoconf/autoconf_2.69.bb @@ -5,17 +5,20 @@ PR = "r11" LICENSE = "GPLv2 & GPLv3" LIC_FILES_CHKSUM = "file://COPYING;md5=751419260aa954499f7abaabaa882bbe \ file://COPYINGv3;md5=d32239bcb673463ab874e80d47fae504" -SRC_URI += "file://check-automake-cross-warning.patch \ - file://autoreconf-exclude.patch \ - file://autoreconf-gnuconfigize.patch \ - file://config_site.patch \ - file://remove-usr-local-lib-from-m4.patch \ - file://preferbash.patch \ - file://autotest-automake-result-format.patch \ - file://add_musl_config.patch \ - file://performance.patch \ - file://AC_HEADER_MAJOR-port-to-glibc-2.25.patch \ - file://autoconf-replace-w-option-in-shebangs-with-modern-use-warnings.patch \ + +SRC_URI = "${GNU_MIRROR}/autoconf/autoconf-${PV}.tar.gz \ + file://program_prefix.patch \ + file://check-automake-cross-warning.patch \ + file://autoreconf-exclude.patch \ + file://autoreconf-gnuconfigize.patch \ + file://config_site.patch \ + file://remove-usr-local-lib-from-m4.patch \ + file://preferbash.patch \ + file://autotest-automake-result-format.patch \ + file://add_musl_config.patch \ + file://performance.patch \ + file://AC_HEADER_MAJOR-port-to-glibc-2.25.patch \ + file://autoconf-replace-w-option-in-shebangs-with-modern-use-warnings.patch \ " SRC_URI[md5sum] = "82d05e03b93e45f5a39b828dc9c6c29b" diff --git a/poky/meta/recipes-devtools/bison/bison/0001-bison-fix-the-parallel-build.patch b/poky/meta/recipes-devtools/bison/bison/0001-bison-fix-the-parallel-build.patch deleted file mode 100644 index c3be91a10..000000000 --- a/poky/meta/recipes-devtools/bison/bison/0001-bison-fix-the-parallel-build.patch +++ /dev/null @@ -1,63 +0,0 @@ -From e0dbcee6e25b3c0cb11a627bbfe3af45ef67ec30 Mon Sep 17 00:00:00 2001 -From: Mingli Yu -Date: Thu, 14 May 2020 15:23:16 +0800 -Subject: [PATCH] bison: fix the parallel build - -Explicitly make the BUILT_SOURCES which -are the generated headers such as stdio.h, -fcntl.h and etc 
to be the dependencies of -the gl_LIBOBJS such as libbison_a-sprintf.o, -libbison_a-printf.o and etc to guarantee the -BUILT_SOURCES is generated before begin to -compile EXTRA_lib_libbison_a_SOURCES such as -fprintf.c in parallel builid, otherwise there -may come below error: - | muscle-tab.c:(.text+0x77a): undefined reference to `rpl_sprintf' - -It does the same for src_bison_OBJECTS and -lib_libbison_a_OBJECTS to make sure BUILT_SOURCES -generated before begin to compile src_bison_SOURCES -which contains AnnotationList.c and etc. - -BTW, the MOSTLYCLEANFILES also contains the -generated header needs to be created early -in the build process, so add it also in to -avoid below error: - | ./lib/uniwidth/width.c:21:10: fatal error: uniwidth.h: No such file or directory - -Upstream-Status: Submitted [bison-patches@gnu.org maillist] - -Signed-off-by: Mingli Yu ---- - lib/gnulib.mk | 2 ++ - src/local.mk | 1 + - 2 files changed, 3 insertions(+) - -diff --git a/lib/gnulib.mk b/lib/gnulib.mk -index c21c656..27fb3dc 100644 ---- a/lib/gnulib.mk -+++ b/lib/gnulib.mk -@@ -120,6 +120,8 @@ lib_libbison_a_SOURCES = - lib_libbison_a_LIBADD = $(gl_LIBOBJS) - lib_libbison_a_DEPENDENCIES = $(gl_LIBOBJS) - EXTRA_lib_libbison_a_SOURCES = -+$(lib_libbison_a_OBJECTS): $(BUILT_SOURCES) $(MOSTLYCLEANFILES:%.h) -+$(gl_LIBOBJS): $(BUILT_SOURCES) $(MOSTLYCLEANFILES:%.h) - - lib_libbison_a_CPPFLAGS = $(AM_CPPFLAGS) -DDEFAULT_TEXT_DOMAIN=\"bison-gnulib\" - -diff --git a/src/local.mk b/src/local.mk -index 61dc573..b5b9079 100644 ---- a/src/local.mk -+++ b/src/local.mk -@@ -24,6 +24,7 @@ if RELOCATABLE_VIA_LD - src_bison_LDFLAGS = `$(RELOCATABLE_LDFLAGS) $(bindir)` - endif - -+$(src_bison_OBJECTS): $(BUILT_SOURCES) $(MOSTLYCLEANFILES:%.h) - src_bison_CFLAGS = $(AM_CFLAGS) $(WERROR_CFLAGS) - src_bison_SOURCES = \ - src/AnnotationList.c \ --- -2.17.1 - diff --git a/poky/meta/recipes-devtools/bison/bison_3.6.4.bb b/poky/meta/recipes-devtools/bison/bison_3.6.4.bb deleted file mode 100644 index f3d4d4244..000000000 --- a/poky/meta/recipes-devtools/bison/bison_3.6.4.bb +++ /dev/null @@ -1,44 +0,0 @@ -SUMMARY = "GNU Project parser generator (yacc replacement)" -DESCRIPTION = "Bison is a general-purpose parser generator that converts an annotated context-free grammar into \ -an LALR(1) or GLR parser for that grammar. Bison is upward compatible with Yacc: all properly-written Yacc \ -grammars ought to work with Bison with no change. Anyone familiar with Yacc should be able to use Bison with \ -little trouble." 
-HOMEPAGE = "http://www.gnu.org/software/bison/" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" -SECTION = "devel" -DEPENDS = "bison-native flex-native" - -SRC_URI = "${GNU_MIRROR}/bison/bison-${PV}.tar.xz \ - file://add-with-bisonlocaledir.patch \ - file://0001-bison-fix-the-parallel-build.patch \ - " -SRC_URI[sha256sum] = "8b13473b31ca7fcf65e5e8a74224368ffd5df19275602a9c9567ba393f18577d" - -# No point in hardcoding path to m4, just use PATH -EXTRA_OECONF += "M4=m4" - -# Reset any loadavg set via environment, it breaks parallel build -# | ../bison-3.5.2/lib/uniwidth/width.c:21:10: fatal error: uniwidth.h: No such file or directory -# | #include "uniwidth.h" -# | ^~~~~~~~~~~~ -EXTRA_OEMAKE_append = " -l" - -inherit autotools gettext texinfo - -# The automatic m4 path detection gets confused, so force the right value -acpaths = "-I ./m4" - -do_compile_prepend() { - for i in mfcalc calc++ rpcalc; do mkdir -p ${B}/examples/$i; done -} - -do_install_append_class-native() { - create_wrapper ${D}/${bindir}/bison \ - BISON_PKGDATADIR=${STAGING_DATADIR_NATIVE}/bison -} -do_install_append_class-nativesdk() { - create_wrapper ${D}/${bindir}/bison \ - BISON_PKGDATADIR=${datadir}/bison -} -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/bison/bison_3.7.2.bb b/poky/meta/recipes-devtools/bison/bison_3.7.2.bb new file mode 100644 index 000000000..ace4ea5c3 --- /dev/null +++ b/poky/meta/recipes-devtools/bison/bison_3.7.2.bb @@ -0,0 +1,37 @@ +SUMMARY = "GNU Project parser generator (yacc replacement)" +DESCRIPTION = "Bison is a general-purpose parser generator that converts an annotated context-free grammar into \ +an LALR(1) or GLR parser for that grammar. Bison is upward compatible with Yacc: all properly-written Yacc \ +grammars ought to work with Bison with no change. Anyone familiar with Yacc should be able to use Bison with \ +little trouble." +HOMEPAGE = "http://www.gnu.org/software/bison/" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" +SECTION = "devel" +DEPENDS = "bison-native flex-native" + +SRC_URI = "${GNU_MIRROR}/bison/bison-${PV}.tar.xz \ + file://add-with-bisonlocaledir.patch \ + " +SRC_URI[sha256sum] = "7948d193104d979c0fb0294a1854c73c89d72ae41acfc081826142578a78a91b" + +# No point in hardcoding path to m4, just use PATH +EXTRA_OECONF += "M4=m4" + +inherit autotools gettext texinfo + +# The automatic m4 path detection gets confused, so force the right value +acpaths = "-I ./m4" + +do_compile_prepend() { + for i in mfcalc calc++ rpcalc; do mkdir -p ${B}/examples/$i; done +} + +do_install_append_class-native() { + create_wrapper ${D}/${bindir}/bison \ + BISON_PKGDATADIR=${STAGING_DATADIR_NATIVE}/bison +} +do_install_append_class-nativesdk() { + create_wrapper ${D}/${bindir}/bison \ + BISON_PKGDATADIR=${datadir}/bison +} +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb b/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb deleted file mode 100644 index a938b2da4..000000000 --- a/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.8.bb +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright (c) 2013 LG Electronics, Inc. -# Copyright (C) 2014 Intel Corp. - -# This recipe creates packages for the bootchart2 system-wide profiler daemon -# and related utilities. Depending on the images you're building, additional -# configuration may be needed in order to use it. -# -# Packages: -# * bootchart2 - The daemon itself. 
-# * pybootchartgui - Python program to visualize and display the data -# collected by bootchart2 or compatible daemons such as the original -# bootchart. -# * bootchartd-stop-initscript - A SysV init script to stop data collection -# when booting completes (see below for details.) -# -# While bootchart2 is designed to stop collecting data roughly when the boot -# process completes, it is not exactly a stopwatch. It has a list of programs -# which are supposed signify that the boot process has completed (for example, -# openbox or gnome-shell,) but it waits a full 20 seconds after such a program -# is launched before stopping itself, to collect additional data. -# -# If you are using a window manager or GUI which isn't included in bootchart2's -# default configuration file, you should write bbappend file to amend -# bootchartd.conf and add it to EXIT_PROC. An example of this is shown in this -# recipe, where the Matchbox window manager (used by Sato) is added. -# -# If you want data collection to end at a certain point exactly, you should -# arrange for the following command to be run: -# bootchartd stop -# You might set this command to be launched by the desktop environment shipped -# on the image you're building after the other startup programs are complete. -# This will not incur the 20 second wait period and will cause bootchart2 to -# behave a bit more like a stopwatch. An example of this is shown in this -# recipe, specifically the bootchartd-stop-initscript package, which stops data -# collection as the last action when switching to runlevels 2 through 5. You can -# add bootchartd-stop-initscript to IMAGE_INSTALL if you need to use it. -# -# Unless you're doing something special, if your image does not launch an X -# window manager, you will need to add bootchartd-stop-initscript to your image. -# -# Bootchart2 can be started in two ways. Data collection can be initiated by -# running the following command: -# bootchartd start -# However, for the most complete data, the bootchart2 developers recommend -# running it as PID 1. This can be done by adding the following to the kernel -# command line parameters in the bootloader setup: -# init=/sbin/bootchartd -# When invoked this way, bootchart2 will set itself up and then automatically -# run /sbin/init. For example, when booting the default qemux86 image, one might -# use a command like this: -# runqemu qemux86 bootparams="initcall_debug printk.time=y quiet \ -# init=/sbin/bootchartd" -# -# Neither method is actually implemented here, choose what works for you. -# -# If you are building your image with systemd instead of SysV init, bootchart2 -# includes systemd service files to begin collection automatically at boot and -# end collection automatically 20 seconds after the boot process has completed. -# However, be aware that systemd tends to start bootchart2 relatively late into -# the boot process, so it's highly recommended to use bootchart2 as PID 1. If -# you're using systemd and you wish to use another method to stop data -# collection at a time of your choosing, you may do so as long as you get to it -# before the 20 second timeout of the systemd service files. Also, you may write -# a bbappend to patch bootchart2-done.timer.in to increase or decrease the -# timeout. Decreasing it to 0 will make it behave like -# bootchartd-stop-initscript. -# -# By default, when data collection is stopped, a file named bootchart.tgz will -# be created in /var/log. 
If pybootchartgui is included in your image, -# bootchart.png will also be created at the same time. However, this results in -# a noticeable hitch or pause at boot time, which may not be what you want on an -# embedded device. So you may prefer to omit pybootchartgui from your image. In -# that case, copy bootchart.tgz over to your development system and generate -# bootchart.png there. To get pybootchartgui on your development system, you can -# either install it directly from some other source, or build bootchart2-native -# and find pybootchartgui in the native sysroot: -# bitbake bootchart2-native -# ./tmp/sysroots/x86_64-linux/usr/bin/pybootchartgui /path/to/bootchart.tgz -# Note that, whether installed on your build system or on your image, the -# pybootchartgui provided by this recipe does not support the -i option. You -# will need to install pybootchartgui by other means in order to run it in -# interactive mode. - -SUMMARY = "Booting sequence and CPU,I/O usage monitor" -DESCRIPTION = "Monitors where the system spends its time at start, creating a graph of all processes, disk utilization, and wait time." -AUTHOR = "Wonhong Kwon " -HOMEPAGE = "https://github.com/mmeeks/bootchart" -LICENSE = "GPL-3.0" -LIC_FILES_CHKSUM = "file://COPYING;md5=44ac4678311254db62edf8fd39cb8124" - -UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+\.\d+(\.\d+)*)" - -SRC_URI = "git://github.com/xrmx/bootchart.git \ - file://bootchartd_stop.sh \ - file://0001-collector-Allocate-space-on-heap-for-chunks.patch \ - file://0001-bootchart2-support-usrmerge.patch \ - " - -S = "${WORKDIR}/git" -SRCREV = "331ada031f1d65f6d934d918f896e1c708c64bf7" -PV .= "+git${SRCPV}" - -inherit systemd update-rc.d python3native update-alternatives - -ALTERNATIVE_${PN} = "bootchartd" -ALTERNATIVE_LINK_NAME[bootchartd] = "${base_sbindir}/bootchartd" -ALTERNATIVE_PRIORITY = "100" - -# The only reason to build bootchart2-native is for a native pybootchartgui. -BBCLASSEXTEND = "native" - -SYSTEMD_SERVICE_${PN} = "bootchart2.service bootchart2-done.service bootchart2-done.timer" - -UPDATERCPN = "bootchartd-stop-initscript" -INITSCRIPT_NAME = "bootchartd_stop.sh" -INITSCRIPT_PARAMS = "start 99 2 3 4 5 ." 
- -EXTRA_OEMAKE = 'BASE_SBINDIR="${base_sbindir}"' - -do_compile_prepend () { - export PY_LIBDIR="${libdir}/${PYTHON_DIR}" - export BINDIR="${bindir}" - export LIBDIR="${base_libdir}" -} - -do_install () { - install -d ${D}${sysconfdir} # needed for -native - export PY_LIBDIR="${libdir}/${PYTHON_DIR}" - export BINDIR="${bindir}" - export DESTDIR="${D}" - export LIBDIR="${base_libdir}" - export PKGLIBDIR="${base_libdir}/bootchart" - export SYSTEMD_UNIT_DIR="${systemd_unitdir}/system" - - oe_runmake install - install -d ${D}${sysconfdir}/init.d - install -m 0755 ${WORKDIR}/bootchartd_stop.sh ${D}${sysconfdir}/init.d - - echo 'EXIT_PROC="$EXIT_PROC matchbox-window-manager"' >> ${D}${sysconfdir}/bootchartd.conf - - # Use python 3 instead of python 2 - sed -i -e '1s,#!.*python.*,#!${USRBINPATH}/env python3,' ${D}${bindir}/pybootchartgui -} - -PACKAGES =+ "pybootchartgui" -FILES_pybootchartgui += "${PYTHON_SITEPACKAGES_DIR}/pybootchartgui ${bindir}/pybootchartgui" -RDEPENDS_pybootchartgui = "python3-pycairo python3-compression python3-image python3-shell python3-compression python3-codecs" -RDEPENDS_${PN}_class-target += "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'sysvinit-pidof', 'procps', d)}" -RDEPENDS_${PN}_class-target += "lsb-release" -DEPENDS_append_class-native = " python3-pycairo-native" - -PACKAGES =+ "bootchartd-stop-initscript" -FILES_bootchartd-stop-initscript += "${sysconfdir}/init.d ${sysconfdir}/rc*.d" -RDEPENDS_bootchartd-stop-initscript = "${PN}" - -FILES_${PN} += "${base_libdir}/bootchart/bootchart-collector" -FILES_${PN} += "${base_libdir}/bootchart/tmpfs" -FILES_${PN} += "${libdir}" -FILES_${PN}-doc += "${datadir}/docs" - -RCONFLICTS_${PN} = "bootchart" diff --git a/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb b/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb new file mode 100644 index 000000000..6571c1993 --- /dev/null +++ b/poky/meta/recipes-devtools/bootchart2/bootchart2_0.14.9.bb @@ -0,0 +1,160 @@ +# Copyright (c) 2013 LG Electronics, Inc. +# Copyright (C) 2014 Intel Corp. + +# This recipe creates packages for the bootchart2 system-wide profiler daemon +# and related utilities. Depending on the images you're building, additional +# configuration may be needed in order to use it. +# +# Packages: +# * bootchart2 - The daemon itself. +# * pybootchartgui - Python program to visualize and display the data +# collected by bootchart2 or compatible daemons such as the original +# bootchart. +# * bootchartd-stop-initscript - A SysV init script to stop data collection +# when booting completes (see below for details.) +# +# While bootchart2 is designed to stop collecting data roughly when the boot +# process completes, it is not exactly a stopwatch. It has a list of programs +# which are supposed signify that the boot process has completed (for example, +# openbox or gnome-shell,) but it waits a full 20 seconds after such a program +# is launched before stopping itself, to collect additional data. +# +# If you are using a window manager or GUI which isn't included in bootchart2's +# default configuration file, you should write bbappend file to amend +# bootchartd.conf and add it to EXIT_PROC. An example of this is shown in this +# recipe, where the Matchbox window manager (used by Sato) is added. 
+# +# If you want data collection to end at a certain point exactly, you should +# arrange for the following command to be run: +# bootchartd stop +# You might set this command to be launched by the desktop environment shipped +# on the image you're building after the other startup programs are complete. +# This will not incur the 20 second wait period and will cause bootchart2 to +# behave a bit more like a stopwatch. An example of this is shown in this +# recipe, specifically the bootchartd-stop-initscript package, which stops data +# collection as the last action when switching to runlevels 2 through 5. You can +# add bootchartd-stop-initscript to IMAGE_INSTALL if you need to use it. +# +# Unless you're doing something special, if your image does not launch an X +# window manager, you will need to add bootchartd-stop-initscript to your image. +# +# Bootchart2 can be started in two ways. Data collection can be initiated by +# running the following command: +# bootchartd start +# However, for the most complete data, the bootchart2 developers recommend +# running it as PID 1. This can be done by adding the following to the kernel +# command line parameters in the bootloader setup: +# init=/sbin/bootchartd +# When invoked this way, bootchart2 will set itself up and then automatically +# run /sbin/init. For example, when booting the default qemux86 image, one might +# use a command like this: +# runqemu qemux86 bootparams="initcall_debug printk.time=y quiet \ +# init=/sbin/bootchartd" +# +# Neither method is actually implemented here, choose what works for you. +# +# If you are building your image with systemd instead of SysV init, bootchart2 +# includes systemd service files to begin collection automatically at boot and +# end collection automatically 20 seconds after the boot process has completed. +# However, be aware that systemd tends to start bootchart2 relatively late into +# the boot process, so it's highly recommended to use bootchart2 as PID 1. If +# you're using systemd and you wish to use another method to stop data +# collection at a time of your choosing, you may do so as long as you get to it +# before the 20 second timeout of the systemd service files. Also, you may write +# a bbappend to patch bootchart2-done.timer.in to increase or decrease the +# timeout. Decreasing it to 0 will make it behave like +# bootchartd-stop-initscript. +# +# By default, when data collection is stopped, a file named bootchart.tgz will +# be created in /var/log. If pybootchartgui is included in your image, +# bootchart.png will also be created at the same time. However, this results in +# a noticeable hitch or pause at boot time, which may not be what you want on an +# embedded device. So you may prefer to omit pybootchartgui from your image. In +# that case, copy bootchart.tgz over to your development system and generate +# bootchart.png there. To get pybootchartgui on your development system, you can +# either install it directly from some other source, or build bootchart2-native +# and find pybootchartgui in the native sysroot: +# bitbake bootchart2-native +# ./tmp/sysroots/x86_64-linux/usr/bin/pybootchartgui /path/to/bootchart.tgz +# Note that, whether installed on your build system or on your image, the +# pybootchartgui provided by this recipe does not support the -i option. You +# will need to install pybootchartgui by other means in order to run it in +# interactive mode. 
+ +SUMMARY = "Booting sequence and CPU,I/O usage monitor" +DESCRIPTION = "Monitors where the system spends its time at start, creating a graph of all processes, disk utilization, and wait time." +AUTHOR = "Wonhong Kwon " +HOMEPAGE = "https://github.com/mmeeks/bootchart" +LICENSE = "GPL-3.0" +LIC_FILES_CHKSUM = "file://COPYING;md5=44ac4678311254db62edf8fd39cb8124" + +UPSTREAM_CHECK_GITTAGREGEX = "(?P\d+\.\d+(\.\d+)*)" + +SRC_URI = "git://github.com/xrmx/bootchart.git \ + file://bootchartd_stop.sh \ + file://0001-collector-Allocate-space-on-heap-for-chunks.patch \ + file://0001-bootchart2-support-usrmerge.patch \ + " + +S = "${WORKDIR}/git" +SRCREV = "868a2afab9da34f32c007d773b77253c93104636" + +inherit systemd update-rc.d python3native update-alternatives + +ALTERNATIVE_${PN} = "bootchartd" +ALTERNATIVE_LINK_NAME[bootchartd] = "${base_sbindir}/bootchartd" +ALTERNATIVE_PRIORITY = "100" + +# The only reason to build bootchart2-native is for a native pybootchartgui. +BBCLASSEXTEND = "native" + +SYSTEMD_SERVICE_${PN} = "bootchart2.service bootchart2-done.service bootchart2-done.timer" + +UPDATERCPN = "bootchartd-stop-initscript" +INITSCRIPT_NAME = "bootchartd_stop.sh" +INITSCRIPT_PARAMS = "start 99 2 3 4 5 ." + +EXTRA_OEMAKE = 'BASE_SBINDIR="${base_sbindir}"' + +do_compile_prepend () { + export PY_LIBDIR="${libdir}/${PYTHON_DIR}" + export BINDIR="${bindir}" + export LIBDIR="${base_libdir}" +} + +do_install () { + install -d ${D}${sysconfdir} # needed for -native + export PY_LIBDIR="${libdir}/${PYTHON_DIR}" + export BINDIR="${bindir}" + export DESTDIR="${D}" + export LIBDIR="${base_libdir}" + export PKGLIBDIR="${base_libdir}/bootchart" + export SYSTEMD_UNIT_DIR="${systemd_unitdir}/system" + + oe_runmake install + install -d ${D}${sysconfdir}/init.d + install -m 0755 ${WORKDIR}/bootchartd_stop.sh ${D}${sysconfdir}/init.d + + echo 'EXIT_PROC="$EXIT_PROC matchbox-window-manager"' >> ${D}${sysconfdir}/bootchartd.conf + + # Use python 3 instead of python 2 + sed -i -e '1s,#!.*python.*,#!${USRBINPATH}/env python3,' ${D}${bindir}/pybootchartgui +} + +PACKAGES =+ "pybootchartgui" +FILES_pybootchartgui += "${PYTHON_SITEPACKAGES_DIR}/pybootchartgui ${bindir}/pybootchartgui" +RDEPENDS_pybootchartgui = "python3-pycairo python3-compression python3-image python3-shell python3-compression python3-codecs" +RDEPENDS_${PN}_class-target += "${@bb.utils.contains('DISTRO_FEATURES', 'sysvinit', 'sysvinit-pidof', 'procps', d)}" +RDEPENDS_${PN}_class-target += "lsb-release" +DEPENDS_append_class-native = " python3-pycairo-native" + +PACKAGES =+ "bootchartd-stop-initscript" +FILES_bootchartd-stop-initscript += "${sysconfdir}/init.d ${sysconfdir}/rc*.d" +RDEPENDS_bootchartd-stop-initscript = "${PN}" + +FILES_${PN} += "${base_libdir}/bootchart/bootchart-collector" +FILES_${PN} += "${base_libdir}/bootchart/tmpfs" +FILES_${PN} += "${libdir}" +FILES_${PN}-doc += "${datadir}/docs" + +RCONFLICTS_${PN} = "bootchart" diff --git a/poky/meta/recipes-devtools/cmake/cmake-native_3.17.3.bb b/poky/meta/recipes-devtools/cmake/cmake-native_3.17.3.bb deleted file mode 100644 index d91e42ef9..000000000 --- a/poky/meta/recipes-devtools/cmake/cmake-native_3.17.3.bb +++ /dev/null @@ -1,53 +0,0 @@ -require cmake.inc -inherit native - -DEPENDS += "bzip2-replacement-native xz-native zlib-native curl-native ncurses-native" - -SRC_URI += "file://OEToolchainConfig.cmake \ - file://environment.d-cmake.sh \ - file://0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch \ - 
file://0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch \ - " - - -B = "${WORKDIR}/build" -do_configure[cleandirs] = "${B}" - -CMAKE_EXTRACONF = "\ - -DCMAKE_LIBRARY_PATH=${STAGING_LIBDIR_NATIVE} \ - -DBUILD_CursesDialog=1 \ - -DCMAKE_USE_SYSTEM_LIBRARIES=1 \ - -DCMAKE_USE_SYSTEM_LIBRARY_JSONCPP=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_LIBARCHIVE=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_LIBUV=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_LIBRHASH=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_EXPAT=0 \ - -DENABLE_ACL=0 -DHAVE_ACL_LIBACL_H=0 \ - -DHAVE_SYS_ACL_H=0 \ -" - -do_configure () { - ${S}/configure --verbose --prefix=${prefix} \ - ${@oe.utils.parallel_make_argument(d, '--parallel=%d')} \ - ${@bb.utils.contains('CCACHE', 'ccache ', '--enable-ccache', '', d)} \ - -- ${CMAKE_EXTRACONF} -} - -do_compile() { - oe_runmake -} - -do_install() { - oe_runmake 'DESTDIR=${D}' install - - # The following codes are here because eSDK needs to provide compatibilty - # for SDK. That is, eSDK could also be used like traditional SDK. - mkdir -p ${D}${datadir}/cmake - install -m 644 ${WORKDIR}/OEToolchainConfig.cmake ${D}${datadir}/cmake/ - mkdir -p ${D}${base_prefix}/environment-setup.d - install -m 644 ${WORKDIR}/environment.d-cmake.sh ${D}${base_prefix}/environment-setup.d/cmake.sh -} - -do_compile[progress] = "percent" - -SYSROOT_DIRS_NATIVE += "${datadir}/cmake ${base_prefix}/environment-setup.d" diff --git a/poky/meta/recipes-devtools/cmake/cmake-native_3.18.2.bb b/poky/meta/recipes-devtools/cmake/cmake-native_3.18.2.bb new file mode 100644 index 000000000..d91e42ef9 --- /dev/null +++ b/poky/meta/recipes-devtools/cmake/cmake-native_3.18.2.bb @@ -0,0 +1,53 @@ +require cmake.inc +inherit native + +DEPENDS += "bzip2-replacement-native xz-native zlib-native curl-native ncurses-native" + +SRC_URI += "file://OEToolchainConfig.cmake \ + file://environment.d-cmake.sh \ + file://0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch \ + file://0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch \ + " + + +B = "${WORKDIR}/build" +do_configure[cleandirs] = "${B}" + +CMAKE_EXTRACONF = "\ + -DCMAKE_LIBRARY_PATH=${STAGING_LIBDIR_NATIVE} \ + -DBUILD_CursesDialog=1 \ + -DCMAKE_USE_SYSTEM_LIBRARIES=1 \ + -DCMAKE_USE_SYSTEM_LIBRARY_JSONCPP=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_LIBARCHIVE=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_LIBUV=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_LIBRHASH=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_EXPAT=0 \ + -DENABLE_ACL=0 -DHAVE_ACL_LIBACL_H=0 \ + -DHAVE_SYS_ACL_H=0 \ +" + +do_configure () { + ${S}/configure --verbose --prefix=${prefix} \ + ${@oe.utils.parallel_make_argument(d, '--parallel=%d')} \ + ${@bb.utils.contains('CCACHE', 'ccache ', '--enable-ccache', '', d)} \ + -- ${CMAKE_EXTRACONF} +} + +do_compile() { + oe_runmake +} + +do_install() { + oe_runmake 'DESTDIR=${D}' install + + # The following codes are here because eSDK needs to provide compatibilty + # for SDK. That is, eSDK could also be used like traditional SDK. 
+ mkdir -p ${D}${datadir}/cmake + install -m 644 ${WORKDIR}/OEToolchainConfig.cmake ${D}${datadir}/cmake/ + mkdir -p ${D}${base_prefix}/environment-setup.d + install -m 644 ${WORKDIR}/environment.d-cmake.sh ${D}${base_prefix}/environment-setup.d/cmake.sh +} + +do_compile[progress] = "percent" + +SYSROOT_DIRS_NATIVE += "${datadir}/cmake ${base_prefix}/environment-setup.d" diff --git a/poky/meta/recipes-devtools/cmake/cmake.inc b/poky/meta/recipes-devtools/cmake/cmake.inc index 7f0307c15..fa1b818ae 100644 --- a/poky/meta/recipes-devtools/cmake/cmake.inc +++ b/poky/meta/recipes-devtools/cmake/cmake.inc @@ -22,6 +22,10 @@ SRC_URI = "https://cmake.org/files/v${CMAKE_MAJOR_VERSION}/cmake-${PV}.tar.gz \ file://0004-Fail-silently-if-system-Qt-installation-is-broken.patch \ " -SRC_URI[sha256sum] = "0bd60d512275dc9f6ef2a2865426a184642ceb3761794e6b65bff233b91d8c40" +SRC_URI[sha256sum] = "5d4e40fc775d3d828c72e5c45906b4d9b59003c9433ff1b36a1cb552bbd51d7e" UPSTREAM_CHECK_REGEX = "cmake-(?P\d+(\.\d+)+)\.tar" + +# This is specific to the npm package that installs cmake, so isn't +# relevant to OpenEmbedded +CVE_CHECK_WHITELIST += "CVE-2016-10642" diff --git a/poky/meta/recipes-devtools/cmake/cmake/0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch b/poky/meta/recipes-devtools/cmake/cmake/0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch index f5c1a212f..c4f81b27b 100644 --- a/poky/meta/recipes-devtools/cmake/cmake/0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch +++ b/poky/meta/recipes-devtools/cmake/cmake/0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch @@ -1,4 +1,4 @@ -From d4aa2dbdc69774ad668756586d59331ad215a2c6 Mon Sep 17 00:00:00 2001 +From dd0fe8d54def4684d360b3e9b10e963ef0208202 Mon Sep 17 00:00:00 2001 From: Cody P Schafer Date: Thu, 27 Apr 2017 11:35:05 -0400 Subject: [PATCH] CMakeDetermineSystem: use oe environment vars to load default @@ -25,7 +25,7 @@ Signed-off-by: Otavio Salvador 1 file changed, 7 insertions(+) diff --git a/Modules/CMakeDetermineSystem.cmake b/Modules/CMakeDetermineSystem.cmake -index dc208c6..e0af4ca 100644 +index f3ec4da..bb05656 100644 --- a/Modules/CMakeDetermineSystem.cmake +++ b/Modules/CMakeDetermineSystem.cmake @@ -81,6 +81,13 @@ else() diff --git a/poky/meta/recipes-devtools/cmake/cmake/0002-cmake-Prevent-the-detection-of-Qt5.patch b/poky/meta/recipes-devtools/cmake/cmake/0002-cmake-Prevent-the-detection-of-Qt5.patch index cf1dda7c9..162bfe578 100644 --- a/poky/meta/recipes-devtools/cmake/cmake/0002-cmake-Prevent-the-detection-of-Qt5.patch +++ b/poky/meta/recipes-devtools/cmake/cmake/0002-cmake-Prevent-the-detection-of-Qt5.patch @@ -1,4 +1,4 @@ -From 126994379ea75c8826874656bb55f955182ae911 Mon Sep 17 00:00:00 2001 +From 106cf5134d22db889e4ddf2f98ec302d5f4b9ca7 Mon Sep 17 00:00:00 2001 From: Otavio Salvador Date: Wed, 17 Jan 2018 10:02:14 -0200 Subject: [PATCH] cmake: Prevent the detection of Qt5 @@ -38,7 +38,7 @@ index 98dd0e2..252302b 100644 include_directories(${Qt5Widgets_INCLUDE_DIRS}) add_definitions(${Qt5Widgets_DEFINITONS}) diff --git a/Tests/CMakeLists.txt b/Tests/CMakeLists.txt -index aff7383..e220503 100644 +index db6dbf3..5b26879 100644 --- a/Tests/CMakeLists.txt +++ b/Tests/CMakeLists.txt @@ -215,7 +215,7 @@ if(BUILD_TESTING) @@ -96,11 +96,11 @@ index c08efc4..87e25d9 100644 set(CMAKE_CXX_STANDARD 11) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/output/bin) diff --git a/Tests/RunCMake/CMakeLists.txt b/Tests/RunCMake/CMakeLists.txt -index e9f8bca..3a2da34 100644 +index 10e66c3..ecc4eeb 
100644 --- a/Tests/RunCMake/CMakeLists.txt +++ b/Tests/RunCMake/CMakeLists.txt -@@ -396,7 +396,7 @@ else() - endif() +@@ -440,7 +440,7 @@ if(NOT WIN32) + endif () find_package(Qt4 QUIET) -find_package(Qt5Core QUIET) diff --git a/poky/meta/recipes-devtools/cmake/cmake/0003-cmake-support-OpenEmbedded-Qt4-tool-binary-names.patch b/poky/meta/recipes-devtools/cmake/cmake/0003-cmake-support-OpenEmbedded-Qt4-tool-binary-names.patch index e30dc51e4..575a5cb7f 100644 --- a/poky/meta/recipes-devtools/cmake/cmake/0003-cmake-support-OpenEmbedded-Qt4-tool-binary-names.patch +++ b/poky/meta/recipes-devtools/cmake/cmake/0003-cmake-support-OpenEmbedded-Qt4-tool-binary-names.patch @@ -1,4 +1,4 @@ -From e528861023bf69df0ad061a59fd4e527c9dde1ce Mon Sep 17 00:00:00 2001 +From 2d02ac91d5a5d72eaddba4894eaa6db3ed8fee62 Mon Sep 17 00:00:00 2001 From: Otavio Salvador Date: Thu, 12 May 2011 15:36:03 +0000 Subject: [PATCH] cmake: support OpenEmbedded Qt4 tool binary names diff --git a/poky/meta/recipes-devtools/cmake/cmake/0004-Fail-silently-if-system-Qt-installation-is-broken.patch b/poky/meta/recipes-devtools/cmake/cmake/0004-Fail-silently-if-system-Qt-installation-is-broken.patch index 48f43be83..1b196db81 100644 --- a/poky/meta/recipes-devtools/cmake/cmake/0004-Fail-silently-if-system-Qt-installation-is-broken.patch +++ b/poky/meta/recipes-devtools/cmake/cmake/0004-Fail-silently-if-system-Qt-installation-is-broken.patch @@ -1,4 +1,4 @@ -From b0aeca35187983e22ec256a439cff4702dbde331 Mon Sep 17 00:00:00 2001 +From 60864efbe52cc12018efaafbc4e4c3c8b4af2b65 Mon Sep 17 00:00:00 2001 From: Otavio Salvador Date: Thu, 5 Jul 2018 10:26:48 -0300 Subject: [PATCH] Fail silently if system Qt installation is broken diff --git a/poky/meta/recipes-devtools/cmake/cmake/0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch b/poky/meta/recipes-devtools/cmake/cmake/0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch index ad42d409d..d6f7308fe 100644 --- a/poky/meta/recipes-devtools/cmake/cmake/0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch +++ b/poky/meta/recipes-devtools/cmake/cmake/0005-Disable-use-of-ext2fs-ext2_fs.h-by-cmake-s-internal-.patch @@ -1,7 +1,8 @@ -From 0df8d46d14c371f21c327966a553c4c187a8acfe Mon Sep 17 00:00:00 2001 +From fd9a04c1434e12f21c043385e306e0b52d38d749 Mon Sep 17 00:00:00 2001 From: Otavio Salvador Date: Thu, 5 Jul 2018 10:28:04 -0300 -Subject: [PATCH 5/5] Disable use of ext2fs/ext2_fs.h by cmake's internal +Subject: [PATCH] Disable use of ext2fs/ext2_fs.h by cmake's internal + libarchive copy Organization: O.S. Systems Software LTDA. 
@@ -12,15 +13,16 @@ Upstream-Status: Inappropriate [config] Signed-off-by: Paul Eggleton Signed-off-by: Otavio Salvador + --- Utilities/cmlibarchive/CMakeLists.txt | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/Utilities/cmlibarchive/CMakeLists.txt b/Utilities/cmlibarchive/CMakeLists.txt -index 206f3c6a5..642fb0dd9 100644 +index bfcaf30..2960683 100644 --- a/Utilities/cmlibarchive/CMakeLists.txt +++ b/Utilities/cmlibarchive/CMakeLists.txt -@@ -430,12 +430,8 @@ LA_CHECK_INCLUDE_FILE("copyfile.h" HAVE_COPYFILE_H) +@@ -682,12 +682,8 @@ LA_CHECK_INCLUDE_FILE("copyfile.h" HAVE_COPYFILE_H) LA_CHECK_INCLUDE_FILE("direct.h" HAVE_DIRECT_H) LA_CHECK_INCLUDE_FILE("dlfcn.h" HAVE_DLFCN_H) LA_CHECK_INCLUDE_FILE("errno.h" HAVE_ERRNO_H) @@ -34,7 +36,4 @@ index 206f3c6a5..642fb0dd9 100644 +SET(HAVE_WORKING_EXT2_IOC_GETFLAGS 0) LA_CHECK_INCLUDE_FILE("fcntl.h" HAVE_FCNTL_H) LA_CHECK_INCLUDE_FILE("grp.h" HAVE_GRP_H) - LA_CHECK_INCLUDE_FILE("inttypes.h" HAVE_INTTYPES_H) --- -2.18.0 - + LA_CHECK_INCLUDE_FILE("io.h" HAVE_IO_H) diff --git a/poky/meta/recipes-devtools/cmake/cmake_3.17.3.bb b/poky/meta/recipes-devtools/cmake/cmake_3.17.3.bb deleted file mode 100644 index e0457677e..000000000 --- a/poky/meta/recipes-devtools/cmake/cmake_3.17.3.bb +++ /dev/null @@ -1,53 +0,0 @@ -require cmake.inc - -inherit cmake - -DEPENDS += "curl expat zlib libarchive xz ncurses bzip2" - -SRC_URI_append_class-nativesdk = " \ - file://OEToolchainConfig.cmake \ - file://environment.d-cmake.sh \ - file://0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch \ -" - -# Strip ${prefix} from ${docdir}, set result into docdir_stripped -python () { - prefix=d.getVar("prefix") - docdir=d.getVar("docdir") - - if not docdir.startswith(prefix): - bb.fatal('docdir must contain prefix as its prefix') - - docdir_stripped = docdir[len(prefix):] - if len(docdir_stripped) > 0 and docdir_stripped[0] == '/': - docdir_stripped = docdir_stripped[1:] - - d.setVar("docdir_stripped", docdir_stripped) -} - -EXTRA_OECMAKE=" \ - -DCMAKE_DOC_DIR=${docdir_stripped}/cmake-${CMAKE_MAJOR_VERSION} \ - -DCMAKE_USE_SYSTEM_LIBRARIES=1 \ - -DCMAKE_USE_SYSTEM_LIBRARY_JSONCPP=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_LIBUV=0 \ - -DCMAKE_USE_SYSTEM_LIBRARY_LIBRHASH=0 \ - -DKWSYS_CHAR_IS_SIGNED=1 \ - -DBUILD_CursesDialog=0 \ - -DKWSYS_LFS_WORKS=1 \ -" - -do_install_append_class-nativesdk() { - mkdir -p ${D}${datadir}/cmake - install -m 644 ${WORKDIR}/OEToolchainConfig.cmake ${D}${datadir}/cmake/ - - mkdir -p ${D}${SDKPATHNATIVE}/environment-setup.d - install -m 644 ${WORKDIR}/environment.d-cmake.sh ${D}${SDKPATHNATIVE}/environment-setup.d/cmake.sh -} - -FILES_${PN}_append_class-nativesdk = " ${SDKPATHNATIVE}" - -FILES_${PN} += "${datadir}/cmake-${CMAKE_MAJOR_VERSION} ${datadir}/cmake ${datadir}/aclocal" -FILES_${PN}-doc += "${docdir}/cmake-${CMAKE_MAJOR_VERSION}" -FILES_${PN}-dev = "" - -BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-devtools/cmake/cmake_3.18.2.bb b/poky/meta/recipes-devtools/cmake/cmake_3.18.2.bb new file mode 100644 index 000000000..64c92b89f --- /dev/null +++ b/poky/meta/recipes-devtools/cmake/cmake_3.18.2.bb @@ -0,0 +1,53 @@ +require cmake.inc + +inherit cmake bash-completion + +DEPENDS += "curl expat zlib libarchive xz ncurses bzip2" + +SRC_URI_append_class-nativesdk = " \ + file://OEToolchainConfig.cmake \ + file://environment.d-cmake.sh \ + file://0001-CMakeDetermineSystem-use-oe-environment-vars-to-load.patch \ +" + +# Strip ${prefix} from ${docdir}, set result into docdir_stripped +python () { + 
prefix=d.getVar("prefix") + docdir=d.getVar("docdir") + + if not docdir.startswith(prefix): + bb.fatal('docdir must contain prefix as its prefix') + + docdir_stripped = docdir[len(prefix):] + if len(docdir_stripped) > 0 and docdir_stripped[0] == '/': + docdir_stripped = docdir_stripped[1:] + + d.setVar("docdir_stripped", docdir_stripped) +} + +EXTRA_OECMAKE=" \ + -DCMAKE_DOC_DIR=${docdir_stripped}/cmake-${CMAKE_MAJOR_VERSION} \ + -DCMAKE_USE_SYSTEM_LIBRARIES=1 \ + -DCMAKE_USE_SYSTEM_LIBRARY_JSONCPP=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_LIBUV=0 \ + -DCMAKE_USE_SYSTEM_LIBRARY_LIBRHASH=0 \ + -DKWSYS_CHAR_IS_SIGNED=1 \ + -DBUILD_CursesDialog=0 \ + -DKWSYS_LFS_WORKS=1 \ +" + +do_install_append_class-nativesdk() { + mkdir -p ${D}${datadir}/cmake + install -m 644 ${WORKDIR}/OEToolchainConfig.cmake ${D}${datadir}/cmake/ + + mkdir -p ${D}${SDKPATHNATIVE}/environment-setup.d + install -m 644 ${WORKDIR}/environment.d-cmake.sh ${D}${SDKPATHNATIVE}/environment-setup.d/cmake.sh +} + +FILES_${PN}_append_class-nativesdk = " ${SDKPATHNATIVE}" + +FILES_${PN} += "${datadir}/cmake-${CMAKE_MAJOR_VERSION} ${datadir}/cmake ${datadir}/aclocal ${datadir}/emacs ${datadir}/vim" +FILES_${PN}-doc += "${docdir}/cmake-${CMAKE_MAJOR_VERSION}" +FILES_${PN}-dev = "" + +BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb b/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb index 9f8bfc24f..61e46a4ed 100644 --- a/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb +++ b/poky/meta/recipes-devtools/elfutils/elfutils_0.180.bb @@ -114,6 +114,8 @@ FILES_libdw = "${libdir}/libdw-${PV}.so ${libdir}/libdw.so.* ${libdir}/elfutils # The package contains symlinks that trip up insane INSANE_SKIP_${MLPREFIX}libdw = "dev-so" +# The nlist binary in the tests uses explicitly minimal compiler flags +INSANE_SKIP_${PN}-ptest += "ldflags" # avoid stripping some generated binaries otherwise some of the tests such as test-nlist, # run-strip-reloc.sh, run-strip-strmerge.sh and so on will fail diff --git a/poky/meta/recipes-devtools/gcc/gcc-configure-common.inc b/poky/meta/recipes-devtools/gcc/gcc-configure-common.inc index 6cb40fcb7..a64c4caf0 100644 --- a/poky/meta/recipes-devtools/gcc/gcc-configure-common.inc +++ b/poky/meta/recipes-devtools/gcc/gcc-configure-common.inc @@ -34,7 +34,6 @@ EXTRA_OECONF = "\ --program-prefix=${TARGET_PREFIX} \ --without-local-prefix \ --disable-install-libiberty \ - --with-specs=%{!fno-common:%{!fcommon:-fcommon}} \ ${EXTRA_OECONF_BASE} \ ${EXTRA_OECONF_GCC_FLOAT} \ ${EXTRA_OECONF_PATHS} \ diff --git a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc index db17ae468..d3875c94d 100644 --- a/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc +++ b/poky/meta/recipes-devtools/gcc/gcc-cross-canadian.inc @@ -139,6 +139,29 @@ do_install () { chown -R root:root ${D} cross_canadian_bindirlinks + + for i in linux ${CANADIANEXTRAOS} + do + for v in ${CANADIANEXTRAVENDOR} + do + d=${D}${bindir}/../${TARGET_ARCH}$v-$i + install -d $d + for j in ${TARGET_PREFIX}gcc${EXEEXT} ${TARGET_PREFIX}g++${EXEEXT} + do + p=${TARGET_ARCH}$v-$i-`echo $j | sed -e s,${TARGET_PREFIX},,` + case $i in + *musl*) + rm -rf $d/$p + echo "#!/usr/bin/env sh" > $d/$p + echo "exec \`dirname \$0\`/../${TARGET_SYS}/$j -mmusl \$@" >> $d/$p + chmod 0755 $d/$p + ;; + *) + ;; + esac + done + done + done } ELFUTILS = "nativesdk-elfutils" diff --git a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb 
b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb index 200b1f185..ae03a99a6 100644 --- a/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb +++ b/poky/meta/recipes-devtools/gnu-config/gnu-config_git.bb @@ -8,8 +8,8 @@ DEPENDS_class-native = "hostperl-runtime-native" INHIBIT_DEFAULT_DEPS = "1" -SRCREV = "2593751ef276497e312d7c4ce7fd049614c7bf80" -PV = "20200721+git${SRCPV}" +SRCREV = "0b5188819ba6091770064adf26360b204113317e" +PV = "20200831+git${SRCPV}" SRC_URI = "git://git.savannah.gnu.org/config.git \ file://gnu-configize.in" diff --git a/poky/meta/recipes-devtools/go/go-1.14.inc b/poky/meta/recipes-devtools/go/go-1.14.inc deleted file mode 100644 index 8f8ed89de..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14.inc +++ /dev/null @@ -1,21 +0,0 @@ -require go-common.inc - -GO_BASEVERSION = "1.14" -GO_MINOR = ".7" -PV .= "${GO_MINOR}" -FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:" - -LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" - -SRC_URI += "\ - file://0001-allow-CC-and-CXX-to-have-multiple-words.patch \ - file://0002-cmd-go-make-content-based-hash-generation-less-pedan.patch \ - file://0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch \ - file://0004-ld-add-soname-to-shareable-objects.patch \ - file://0005-make.bash-override-CC-when-building-dist-and-go_boot.patch \ - file://0006-cmd-dist-separate-host-and-target-builds.patch \ - file://0007-cmd-go-make-GOROOT-precious-by-default.patch \ - file://0008-use-GOBUILDMODE-to-set-buildmode.patch \ -" -SRC_URI_append_libc-musl = " file://0009-ld-replace-glibc-dynamic-linker-with-musl.patch" -SRC_URI[main.sha256sum] = "064392433563660c73186991c0a315787688e7c38a561e26647686f89b6c30e3" diff --git a/poky/meta/recipes-devtools/go/go-1.14/0001-allow-CC-and-CXX-to-have-multiple-words.patch b/poky/meta/recipes-devtools/go/go-1.14/0001-allow-CC-and-CXX-to-have-multiple-words.patch deleted file mode 100644 index d47664d8e..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0001-allow-CC-and-CXX-to-have-multiple-words.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 9e3dc44cdfa58d96504d0a789dc82617dd5bef55 Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:01:13 +0430 -Subject: [PATCH 1/9] cmd/go: Allow CC and CXX to have multiple words - -Upstream-Status: Inappropriate [OE specific] - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . 
- -Signed-off-by: Alexander J Kube - ---- - src/cmd/go/internal/envcmd/env.go | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/src/cmd/go/internal/envcmd/env.go -+++ b/src/cmd/go/internal/envcmd/env.go -@@ -102,11 +102,11 @@ func MkEnv() []cfg.EnvVar { - - cc := cfg.DefaultCC(cfg.Goos, cfg.Goarch) - if env := strings.Fields(cfg.Getenv("CC")); len(env) > 0 { -- cc = env[0] -+ cc = strings.Join(env, " ") - } - cxx := cfg.DefaultCXX(cfg.Goos, cfg.Goarch) - if env := strings.Fields(cfg.Getenv("CXX")); len(env) > 0 { -- cxx = env[0] -+ cxx = strings.Join(env, " ") - } - env = append(env, cfg.EnvVar{Name: "AR", Value: envOr("AR", "ar")}) - env = append(env, cfg.EnvVar{Name: "CC", Value: cc}) diff --git a/poky/meta/recipes-devtools/go/go-1.14/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch b/poky/meta/recipes-devtools/go/go-1.14/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch deleted file mode 100644 index 9e88567eb..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch +++ /dev/null @@ -1,219 +0,0 @@ -From a13ae484e41139094505d2834437e9262a5315f7 Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:14:22 +0430 -Subject: [PATCH 2/9] cmd/go: make content-based hash generation less pedantic - -Upstream-Status: Inappropriate [OE specific] - -Go 1.10's build tool now uses content-based hashes to -determine when something should be built or re-built. -This same mechanism is used to maintain a built-artifact -cache for speeding up builds. - -However, the hashes it generates include information that -doesn't work well with OE, nor with using a shared runtime -library. - -First, it embeds path names to source files, unless -building within GOROOT. This prevents the building -of a package in GOPATH for later staging into GOROOT. - -This patch adds support for the environment variable -GOPATH_OMIT_IN_ACTIONID. If present, path name -embedding is disabled. - -Second, if cgo is enabled, the build ID for cgo-related -packages will include the current value of the environment -variables for invoking the compiler (CC, CXX, FC) and -any CGO_xxFLAGS variables. Only if the settings used -during a compilation exactly match, character for character, -the values used for compiling runtime/cgo or any other -cgo-enabled package being imported, will the tool -decide that the imported package is up-to-date. - -This is done to help ensure correctness, but is overly -simplistic and effectively prevents the reuse of built -artifacts that use cgo (or shared runtime, which includes -runtime/cgo). - -This patch filters out all compiler flags except those -beginning with '-m'. The default behavior can be restored -by setting the CGO_PEDANTIC environment variable. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . 
- -Signed-off-by: Alexander J Kube ---- - src/cmd/go/internal/envcmd/env.go | 2 +- - src/cmd/go/internal/work/exec.go | 66 ++++++++++++++++++++++--------- - 2 files changed, 49 insertions(+), 19 deletions(-) - ---- a/src/cmd/go/internal/envcmd/env.go -+++ b/src/cmd/go/internal/envcmd/env.go -@@ -156,7 +156,7 @@ func ExtraEnvVars() []cfg.EnvVar { - func ExtraEnvVarsCostly() []cfg.EnvVar { - var b work.Builder - b.Init() -- cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{}) -+ cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{}, false) - if err != nil { - // Should not happen - b.CFlags was given an empty package. - fmt.Fprintf(os.Stderr, "go: invalid cflags: %v\n", err) ---- a/src/cmd/go/internal/work/exec.go -+++ b/src/cmd/go/internal/work/exec.go -@@ -32,6 +32,8 @@ import ( - "time" - ) - -+var omitGopath = os.Getenv("GOPATH_OMIT_IN_ACTIONID") != "" -+ - // actionList returns the list of actions in the dag rooted at root - // as visited in a depth-first post-order traversal. - func actionList(root *Action) []*Action { -@@ -208,7 +210,7 @@ func (b *Builder) buildActionID(a *Actio - // Assume b.WorkDir is being trimmed properly. - // When -trimpath is used with a package built from the module cache, - // use the module path and version instead of the directory. -- if !p.Goroot && !cfg.BuildTrimpath && !strings.HasPrefix(p.Dir, b.WorkDir) { -+ if !p.Goroot && !omitGopath && !cfg.BuildTrimpath && !strings.HasPrefix(p.Dir, b.WorkDir) { - fmt.Fprintf(h, "dir %s\n", p.Dir) - } else if cfg.BuildTrimpath && p.Module != nil { - fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version) -@@ -224,13 +226,13 @@ func (b *Builder) buildActionID(a *Actio - } - if len(p.CgoFiles)+len(p.SwigFiles) > 0 { - fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo")) -- cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p) -- fmt.Fprintf(h, "CC=%q %q %q %q\n", b.ccExe(), cppflags, cflags, ldflags) -+ cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p, true) -+ fmt.Fprintf(h, "CC=%q %q %q %q\n", b.ccExe(true), cppflags, cflags, ldflags) - if len(p.CXXFiles)+len(p.SwigFiles) > 0 { -- fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(), cxxflags) -+ fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(true), cxxflags) - } - if len(p.FFiles) > 0 { -- fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(), fflags) -+ fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(true), fflags) - } - // TODO(rsc): Should we include the SWIG version or Fortran/GCC/G++/Objective-C compiler versions? - } -@@ -2228,33 +2230,48 @@ var ( - // gccCmd returns a gcc command line prefix - // defaultCC is defined in zdefaultcc.go, written by cmd/dist. - func (b *Builder) GccCmd(incdir, workdir string) []string { -- return b.compilerCmd(b.ccExe(), incdir, workdir) -+ return b.compilerCmd(b.ccExe(false), incdir, workdir) - } - - // gxxCmd returns a g++ command line prefix - // defaultCXX is defined in zdefaultcc.go, written by cmd/dist. - func (b *Builder) GxxCmd(incdir, workdir string) []string { -- return b.compilerCmd(b.cxxExe(), incdir, workdir) -+ return b.compilerCmd(b.cxxExe(false), incdir, workdir) - } - - // gfortranCmd returns a gfortran command line prefix. - func (b *Builder) gfortranCmd(incdir, workdir string) []string { -- return b.compilerCmd(b.fcExe(), incdir, workdir) -+ return b.compilerCmd(b.fcExe(false), incdir, workdir) - } - - // ccExe returns the CC compiler setting without all the extra flags we add implicitly. 
--func (b *Builder) ccExe() []string { -- return b.compilerExe(origCC, cfg.DefaultCC(cfg.Goos, cfg.Goarch)) -+func (b *Builder) ccExe(filtered bool) []string { -+ return b.compilerExe(origCC, cfg.DefaultCC(cfg.Goos, cfg.Goarch), filtered) - } - - // cxxExe returns the CXX compiler setting without all the extra flags we add implicitly. --func (b *Builder) cxxExe() []string { -- return b.compilerExe(origCXX, cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) -+func (b *Builder) cxxExe(filtered bool) []string { -+ return b.compilerExe(origCXX, cfg.DefaultCXX(cfg.Goos, cfg.Goarch), filtered) - } - - // fcExe returns the FC compiler setting without all the extra flags we add implicitly. --func (b *Builder) fcExe() []string { -- return b.compilerExe(cfg.Getenv("FC"), "gfortran") -+func (b *Builder) fcExe(filtered bool) []string { -+ return b.compilerExe(os.Getenv("FC"), "gfortran", filtered) -+} -+ -+var filterFlags = os.Getenv("CGO_PEDANTIC") == "" -+ -+func filterCompilerFlags(flags []string) []string { -+ var newflags []string -+ if !filterFlags { -+ return flags -+ } -+ for _, flag := range flags { -+ if strings.HasPrefix(flag, "-m") { -+ newflags = append(newflags, flag) -+ } -+ } -+ return newflags - } - - // compilerExe returns the compiler to use given an -@@ -2263,11 +2280,16 @@ func (b *Builder) fcExe() []string { - // of the compiler but can have additional arguments if they - // were present in the environment value. - // For example if CC="gcc -DGOPHER" then the result is ["gcc", "-DGOPHER"]. --func (b *Builder) compilerExe(envValue string, def string) []string { -+func (b *Builder) compilerExe(envValue string, def string, filtered bool) []string { - compiler := strings.Fields(envValue) - if len(compiler) == 0 { - compiler = []string{def} - } -+ -+ if filtered { -+ return append(compiler[0:1], filterCompilerFlags(compiler[1:])...) -+ } -+ - return compiler - } - -@@ -2428,7 +2450,7 @@ func envList(key, def string) []string { - } - - // CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo. --func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) { -+func (b *Builder) CFlags(p *load.Package, filtered bool) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) { - defaults := "-g -O2" - - if cppflags, err = buildFlags("CPPFLAGS", "", p.CgoCPPFLAGS, checkCompilerFlags); err != nil { -@@ -2447,6 +2469,14 @@ func (b *Builder) CFlags(p *load.Package - return - } - -+ if filtered { -+ cppflags = filterCompilerFlags(cppflags) -+ cflags = filterCompilerFlags(cflags) -+ cxxflags = filterCompilerFlags(cxxflags) -+ fflags = filterCompilerFlags(fflags) -+ ldflags = filterCompilerFlags(ldflags) -+ } -+ - return - } - -@@ -2461,7 +2491,7 @@ var cgoRe = lazyregexp.New(`[/\\:]`) - - func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) { - p := a.Package -- cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p) -+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p, false) - if err != nil { - return nil, nil, err - } -@@ -2820,7 +2850,7 @@ func (b *Builder) swigIntSize(objdir str - - // Run SWIG on one SWIG input file. 
- func (b *Builder) swigOne(a *Action, p *load.Package, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) { -- cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p) -+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p, false) - if err != nil { - return "", "", err - } diff --git a/poky/meta/recipes-devtools/go/go-1.14/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch b/poky/meta/recipes-devtools/go/go-1.14/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch deleted file mode 100644 index 662c70547..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 28ada8896b76d620240bafc22aa395071d601482 Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:15:37 +0430 -Subject: [PATCH 3/9] cmd/go: Allow GOTOOLDIR to be overridden in the environment - -to allow for split host/target build roots - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . - -Upstream-Status: Inappropriate [OE specific] - -Signed-off-by: Alexander J Kube ---- - src/cmd/dist/build.go | 4 +++- - src/cmd/go/internal/cfg/cfg.go | 6 +++++- - 2 files changed, 8 insertions(+), 2 deletions(-) - ---- a/src/cmd/dist/build.go -+++ b/src/cmd/dist/build.go -@@ -246,7 +246,9 @@ func xinit() { - workdir = xworkdir() - xatexit(rmworkdir) - -- tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) -+ if tooldir = os.Getenv("GOTOOLDIR"); tooldir == "" { -+ tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) -+ } - } - - // compilerEnv returns a map from "goos/goarch" to the ---- a/src/cmd/go/internal/cfg/cfg.go -+++ b/src/cmd/go/internal/cfg/cfg.go -@@ -64,7 +64,11 @@ func defaultContext() build.Context { - // variables. This matches the initialization of ToolDir in - // go/build, except for using ctxt.GOROOT rather than - // runtime.GOROOT. -- build.ToolDir = filepath.Join(ctxt.GOROOT, "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) -+ if s := os.Getenv("GOTOOLDIR"); s != "" { -+ build.ToolDir = filepath.Clean(s) -+ } else { -+ build.ToolDir = filepath.Join(ctxt.GOROOT, "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) -+ } - } - - ctxt.GOPATH = envOr("GOPATH", ctxt.GOPATH) diff --git a/poky/meta/recipes-devtools/go/go-1.14/0004-ld-add-soname-to-shareable-objects.patch b/poky/meta/recipes-devtools/go/go-1.14/0004-ld-add-soname-to-shareable-objects.patch deleted file mode 100644 index 75c9c7521..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0004-ld-add-soname-to-shareable-objects.patch +++ /dev/null @@ -1,45 +0,0 @@ -From bf5cf5301ae5914498454c87293d1df2e1d8489f Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:16:32 +0430 -Subject: [PATCH 4/9] ld: add soname to shareable objects - -so that OE's shared library dependency handling -can find them. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . 
- -Upstream-Status: Inappropriate [OE specific] - -Signed-off-by: Alexander J Kube ---- - src/cmd/link/internal/ld/lib.go | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/src/cmd/link/internal/ld/lib.go -+++ b/src/cmd/link/internal/ld/lib.go -@@ -1280,6 +1280,7 @@ func (ctxt *Link) hostlink() { - argv = append(argv, "-Wl,-z,relro") - } - argv = append(argv, "-shared") -+ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) - if ctxt.HeadType != objabi.Hwindows { - // Pass -z nodelete to mark the shared library as - // non-closeable: a dlclose will do nothing. -@@ -1291,6 +1292,7 @@ func (ctxt *Link) hostlink() { - argv = append(argv, "-Wl,-z,relro") - } - argv = append(argv, "-shared") -+ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) - case BuildModePlugin: - if ctxt.HeadType == objabi.Hdarwin { - argv = append(argv, "-dynamiclib") -@@ -1299,6 +1301,7 @@ func (ctxt *Link) hostlink() { - argv = append(argv, "-Wl,-z,relro") - } - argv = append(argv, "-shared") -+ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) - } - } - diff --git a/poky/meta/recipes-devtools/go/go-1.14/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch b/poky/meta/recipes-devtools/go/go-1.14/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch deleted file mode 100644 index 59c12d954..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch +++ /dev/null @@ -1,39 +0,0 @@ -From f05ef3ded52b98537c10efd0b15cd9612471524d Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:17:16 +0430 -Subject: [PATCH 5/9] make.bash: override CC when building dist and - go_bootstrap - -for handling OE cross-canadian builds. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . - -Upstream-Status: Inappropriate [OE specific] - -Signed-off-by: Alexander J Kube ---- - src/make.bash | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/src/make.bash -+++ b/src/make.bash -@@ -178,7 +178,7 @@ if [ "$GOROOT_BOOTSTRAP" = "$GOROOT" ]; - exit 1 - fi - rm -f cmd/dist/dist --GOROOT="$GOROOT_BOOTSTRAP" GOOS="" GOARCH="" GO111MODULE=off "$GOROOT_BOOTSTRAP/bin/go" build -o cmd/dist/dist ./cmd/dist -+CC="${BUILD_CC:-${CC}}" GOROOT="$GOROOT_BOOTSTRAP" GOOS="" GOARCH="" GO111MODULE=off "$GOROOT_BOOTSTRAP/bin/go" build -o cmd/dist/dist ./cmd/dist - - # -e doesn't propagate out of eval, so check success by hand. - eval $(./cmd/dist/dist env -p || echo FAIL=true) -@@ -209,7 +209,7 @@ fi - # Run dist bootstrap to complete make.bash. - # Bootstrap installs a proper cmd/dist, built with the new toolchain. - # Throw ours, built with Go 1.4, away after bootstrap. --./cmd/dist/dist bootstrap $buildall $vflag $GO_DISTFLAGS "$@" -+CC="${BUILD_CC:-${CC}}" ./cmd/dist/dist bootstrap $buildall $vflag $GO_DISTFLAGS "$@" - rm -f ./cmd/dist/dist - - # DO NOT ADD ANY NEW CODE HERE. 
diff --git a/poky/meta/recipes-devtools/go/go-1.14/0006-cmd-dist-separate-host-and-target-builds.patch b/poky/meta/recipes-devtools/go/go-1.14/0006-cmd-dist-separate-host-and-target-builds.patch deleted file mode 100644 index 7aee0bac4..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0006-cmd-dist-separate-host-and-target-builds.patch +++ /dev/null @@ -1,274 +0,0 @@ -From 10735bb84df17ba657f76835f483cd8543a879c1 Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:18:12 +0430 -Subject: [PATCH 6/9] cmd/dist: separate host and target builds - -Upstream-Status: Inappropriate [OE specific] - -Change the dist tool to allow for OE-style cross- -and cross-canadian builds: - - - command flags --host-only and --target only are added; - if one is present, the other changes mentioned below - take effect, and arguments may also be specified on - the command line to enumerate the package(s) to be - built. - - - for OE cross builds, go_bootstrap is always built for - the current build host, and is moved, along with the supporting - toolchain (asm, compile, etc.) to a separate 'native_native' - directory under GOROOT/pkg/tool. - - - go_bootstrap is not automatically removed after the build, - so it can be reused later (e.g., building both static and - shared runtime). - -Note that for --host-only builds, it would be nice to specify -just the "cmd" package to build only the go commands/tools, -the staleness checks in the dist tool will fail if the "std" -library has not also been built. So host-only builds have to -build everything anyway. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . - -Signed-off-by: Alexander J Kube ---- - src/cmd/dist/build.go | 155 ++++++++++++++++++++++++++++++------------ - 1 file changed, 112 insertions(+), 43 deletions(-) - ---- a/src/cmd/dist/build.go -+++ b/src/cmd/dist/build.go -@@ -41,6 +41,7 @@ var ( - goldflags string - workdir string - tooldir string -+ build_tooldir string - oldgoos string - oldgoarch string - exe string -@@ -53,6 +54,7 @@ var ( - - rebuildall bool - defaultclang bool -+ crossBuild bool - - vflag int // verbosity - ) -@@ -249,6 +251,8 @@ func xinit() { - if tooldir = os.Getenv("GOTOOLDIR"); tooldir == "" { - tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) - } -+ -+ build_tooldir = pathf("%s/pkg/tool/native_native", goroot) - } - - // compilerEnv returns a map from "goos/goarch" to the -@@ -480,8 +484,10 @@ func setup() { - p := pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch) - if rebuildall { - xremoveall(p) -+ xremoveall(build_tooldir) - } - xmkdirall(p) -+ xmkdirall(build_tooldir) - - if goos != gohostos || goarch != gohostarch { - p := pathf("%s/pkg/%s_%s", goroot, goos, goarch) -@@ -1244,12 +1250,29 @@ func cmdbootstrap() { - - var noBanner bool - var debug bool -+ var hostOnly bool -+ var targetOnly bool -+ var toBuild = []string{"std", "cmd"} -+ - flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all") - flag.BoolVar(&debug, "d", debug, "enable debugging of bootstrap process") - flag.BoolVar(&noBanner, "no-banner", noBanner, "do not print banner") -+ flag.BoolVar(&hostOnly, "host-only", hostOnly, "build only host binaries, not target") -+ flag.BoolVar(&targetOnly, "target-only", targetOnly, "build only target binaries, not host") - -- xflagparse(0) -+ xflagparse(-1) - -+ if hostOnly && targetOnly { -+ fatalf("specify only one of --host-only or --target-only\n") -+ } -+ crossBuild = hostOnly || targetOnly -+ if flag.NArg() > 0 { -+ if 
crossBuild { -+ toBuild = flag.Args() -+ } else { -+ fatalf("package names not permitted without --host-only or --target-only\n") -+ } -+ } - // Set GOPATH to an internal directory. We shouldn't actually - // need to store files here, since the toolchain won't - // depend on modules outside of vendor directories, but if -@@ -1303,8 +1326,13 @@ func cmdbootstrap() { - xprintf("\n") - } - -- gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now -- goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now -+ // For split host/target cross/cross-canadian builds, we don't -+ // want to be setting these flags until after we have compiled -+ // the toolchain that runs on the build host. -+ if !crossBuild { -+ gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now -+ goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now -+ } - goBootstrap := pathf("%s/go_bootstrap", tooldir) - cmdGo := pathf("%s/go", gobin) - if debug { -@@ -1333,7 +1361,11 @@ func cmdbootstrap() { - xprintf("\n") - } - xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n") -- os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) -+ if crossBuild { -+ os.Setenv("CC", defaultcc[""]) -+ } else { -+ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) -+ } - goInstall(goBootstrap, append([]string{"-i"}, toolchain...)...) - if debug { - run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") -@@ -1370,50 +1402,84 @@ func cmdbootstrap() { - } - checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) - -- if goos == oldgoos && goarch == oldgoarch { -- // Common case - not setting up for cross-compilation. -- timelog("build", "toolchain") -- if vflag > 0 { -- xprintf("\n") -+ if crossBuild { -+ gogcflags = os.Getenv("GO_GCFLAGS") -+ goldflags = os.Getenv("GO_LDFLAGS") -+ tool_files, _ := filepath.Glob(pathf("%s/*", tooldir)) -+ for _, f := range tool_files { -+ copyfile(pathf("%s/%s", build_tooldir, filepath.Base(f)), f, writeExec) -+ xremove(f) -+ } -+ os.Setenv("GOTOOLDIR", build_tooldir) -+ goBootstrap = pathf("%s/go_bootstrap", build_tooldir) -+ if hostOnly { -+ timelog("build", "host toolchain") -+ if vflag > 0 { -+ xprintf("\n") -+ } -+ xprintf("Building %s for host, %s/%s.\n", strings.Join(toBuild, ","), goos, goarch) -+ goInstall(goBootstrap, toBuild...) -+ checkNotStale(goBootstrap, toBuild...) -+ // Skip cmdGo staleness checks here, since we can't necessarily run the cmdGo binary -+ -+ timelog("build", "target toolchain") -+ if vflag > 0 { -+ xprintf("\n") -+ } -+ } else if targetOnly { -+ goos = oldgoos -+ goarch = oldgoarch -+ os.Setenv("GOOS", goos) -+ os.Setenv("GOARCH", goarch) -+ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) -+ xprintf("Building %s for target, %s/%s.\n", strings.Join(toBuild, ","), goos, goarch) -+ goInstall(goBootstrap, toBuild...) -+ checkNotStale(goBootstrap, toBuild...) -+ // Skip cmdGo staleness checks here, since we can't run the target's cmdGo binary - } -- xprintf("Building packages and commands for %s/%s.\n", goos, goarch) - } else { -- // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. -- // Finish GOHOSTOS/GOHOSTARCH installation and then -- // run GOOS/GOARCH installation. -- timelog("build", "host toolchain") -- if vflag > 0 { -- xprintf("\n") -+ -+ if goos == oldgoos && goarch == oldgoarch { -+ // Common case - not setting up for cross-compilation. 
-+ timelog("build", "toolchain") -+ if vflag > 0 { -+ xprintf("\n") -+ } -+ xprintf("Building packages and commands for %s/%s.\n", goos, goarch) -+ } else { -+ // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. -+ // Finish GOHOSTOS/GOHOSTARCH installation and then -+ // run GOOS/GOARCH installation. -+ timelog("build", "host toolchain") -+ if vflag > 0 { -+ xprintf("\n") -+ } -+ xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch) -+ goInstall(goBootstrap, "std", "cmd") -+ checkNotStale(goBootstrap, "std", "cmd") -+ checkNotStale(cmdGo, "std", "cmd") -+ -+ timelog("build", "target toolchain") -+ if vflag > 0 { -+ xprintf("\n") -+ } -+ goos = oldgoos -+ goarch = oldgoarch -+ os.Setenv("GOOS", goos) -+ os.Setenv("GOARCH", goarch) -+ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) -+ xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) - } -- xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch) - goInstall(goBootstrap, "std", "cmd") - checkNotStale(goBootstrap, "std", "cmd") - checkNotStale(cmdGo, "std", "cmd") - -- timelog("build", "target toolchain") -- if vflag > 0 { -- xprintf("\n") -- } -- goos = oldgoos -- goarch = oldgoarch -- os.Setenv("GOOS", goos) -- os.Setenv("GOARCH", goarch) -- os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) -- xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) -- } -- targets := []string{"std", "cmd"} -- if goos == "js" && goarch == "wasm" { -- // Skip the cmd tools for js/wasm. They're not usable. -- targets = targets[:1] -- } -- goInstall(goBootstrap, targets...) -- checkNotStale(goBootstrap, targets...) -- checkNotStale(cmdGo, targets...) -- if debug { -- run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") -- run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) -- checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) -- copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) -+ if debug { -+ run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") -+ run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) -+ checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) -+ copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) -+ } - } - - // Check that there are no new files in $GOROOT/bin other than -@@ -1430,8 +1496,11 @@ func cmdbootstrap() { - } - } - -- // Remove go_bootstrap now that we're done. -- xremove(pathf("%s/go_bootstrap", tooldir)) -+ // Except that for split host/target cross-builds, we need to -+ // keep it. -+ if !crossBuild { -+ xremove(pathf("%s/go_bootstrap", tooldir)) -+ } - - if goos == "android" { - // Make sure the exec wrapper will sync a fresh $GOROOT to the device. 
diff --git a/poky/meta/recipes-devtools/go/go-1.14/0007-cmd-go-make-GOROOT-precious-by-default.patch b/poky/meta/recipes-devtools/go/go-1.14/0007-cmd-go-make-GOROOT-precious-by-default.patch deleted file mode 100644 index b93f83de6..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0007-cmd-go-make-GOROOT-precious-by-default.patch +++ /dev/null @@ -1,104 +0,0 @@ -From 9ba507e076c744f4d394418e4a849e68cd426a4a Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:18:56 +0430 -Subject: [PATCH 7/9] cmd/go: make GOROOT precious by default - -Upstream-Status: Inappropriate [OE specific] - -The go build tool normally rebuilds whatever it detects is -stale. This can be a problem when GOROOT is intended to -be read-only and the go runtime has been built as a shared -library, since we don't want every application to be rebuilding -the shared runtime - particularly in cross-build/packaging -setups, since that would lead to 'abi mismatch' runtime errors. - -This patch prevents the install and linkshared actions from -installing to GOROOT unless overridden with the GOROOT_OVERRIDE -environment variable. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . - -Signed-off-by: Alexander J Kube ---- - src/cmd/go/internal/work/action.go | 3 +++ - src/cmd/go/internal/work/build.go | 6 ++++++ - src/cmd/go/internal/work/exec.go | 25 +++++++++++++++++++++++++ - 3 files changed, 34 insertions(+) - ---- a/src/cmd/go/internal/work/action.go -+++ b/src/cmd/go/internal/work/action.go -@@ -670,6 +670,9 @@ func (b *Builder) addTransitiveLinkDeps( - if p1 == nil || p1.Shlib == "" || haveShlib[filepath.Base(p1.Shlib)] { - continue - } -+ if goRootPrecious && (p1.Standard || p1.Goroot) { -+ continue -+ } - haveShlib[filepath.Base(p1.Shlib)] = true - // TODO(rsc): The use of ModeInstall here is suspect, but if we only do ModeBuild, - // we'll end up building an overall library or executable that depends at runtime ---- a/src/cmd/go/internal/work/build.go -+++ b/src/cmd/go/internal/work/build.go -@@ -167,6 +167,8 @@ See also: go install, go get, go clean. 
- - const concurrentGCBackendCompilationEnabledByDefault = true - -+var goRootPrecious bool = true -+ - func init() { - // break init cycle - CmdBuild.Run = runBuild -@@ -179,6 +181,10 @@ func init() { - - AddBuildFlags(CmdBuild, DefaultBuildFlags) - AddBuildFlags(CmdInstall, DefaultBuildFlags) -+ -+ if x := os.Getenv("GOROOT_OVERRIDE"); x != "" { -+ goRootPrecious = false -+ } - } - - // Note that flags consulted by other parts of the code ---- a/src/cmd/go/internal/work/exec.go -+++ b/src/cmd/go/internal/work/exec.go -@@ -464,6 +464,23 @@ func (b *Builder) build(a *Action) (err - return errors.New("binary-only packages are no longer supported") - } - -+ if goRootPrecious && (a.Package.Standard || a.Package.Goroot) { -+ _, err := os.Stat(a.Package.Target) -+ if err == nil { -+ a.built = a.Package.Target -+ a.Target = a.Package.Target -+ a.buildID = b.fileHash(a.Package.Target) -+ a.Package.Stale = false -+ a.Package.StaleReason = "GOROOT-resident package" -+ return nil -+ } -+ a.Package.Stale = true -+ a.Package.StaleReason = "missing or invalid GOROOT-resident package" -+ if b.IsCmdList { -+ return nil -+ } -+ } -+ - if err := b.Mkdir(a.Objdir); err != nil { - return err - } -@@ -1493,6 +1510,14 @@ func BuildInstallFunc(b *Builder, a *Act - return nil - } - -+ if goRootPrecious && a.Package != nil { -+ p := a.Package -+ if p.Standard || p.Goroot { -+ err := fmt.Errorf("attempting to install package %s into read-only GOROOT", p.ImportPath) -+ return err -+ } -+ } -+ - if err := b.Mkdir(a.Objdir); err != nil { - return err - } diff --git a/poky/meta/recipes-devtools/go/go-1.14/0008-use-GOBUILDMODE-to-set-buildmode.patch b/poky/meta/recipes-devtools/go/go-1.14/0008-use-GOBUILDMODE-to-set-buildmode.patch deleted file mode 100644 index b15d9812a..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0008-use-GOBUILDMODE-to-set-buildmode.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 971b5626339ce0c4d57f9721c9a81af566c5a044 Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:19:26 +0430 -Subject: [PATCH 8/9] cmd/go: Use GOBUILDMODE to set buildmode - -Upstream-Status: Denied [upstream choose antoher solution: `17a256b -cmd/go: -buildmode=pie for android/arm'] - -While building go itself, the go build system does not support -to set `-buildmode=pie' from environment. 
- -Add GOBUILDMODE to support it which make PIE executables the default -build mode, as PIE executables are required as of Yocto - -Refers: https://groups.google.com/forum/#!topic/golang-dev/gRCe5URKewI - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Hongxu Jia - -Signed-off-by: Alexander J Kube ---- - src/cmd/go/internal/work/build.go | 8 +++++++- - 1 file changed, 7 insertions(+), 1 deletion(-) - ---- a/src/cmd/go/internal/work/build.go -+++ b/src/cmd/go/internal/work/build.go -@@ -251,7 +251,13 @@ func AddBuildFlags(cmd *base.Command, ma - - cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "") - cmd.Flag.Var(buildCompiler{}, "compiler", "") -- cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "") -+ -+ if bm := os.Getenv("GOBUILDMODE"); bm != "" { -+ cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", bm, "") -+ } else { -+ cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "") -+ } -+ - cmd.Flag.Var(&load.BuildGcflags, "gcflags", "") - cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "") - if mask&OmitModFlag == 0 { diff --git a/poky/meta/recipes-devtools/go/go-1.14/0009-ld-replace-glibc-dynamic-linker-with-musl.patch b/poky/meta/recipes-devtools/go/go-1.14/0009-ld-replace-glibc-dynamic-linker-with-musl.patch deleted file mode 100644 index 427cfb0dd..000000000 --- a/poky/meta/recipes-devtools/go/go-1.14/0009-ld-replace-glibc-dynamic-linker-with-musl.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 973251ae0c69a35721f6115345d3f57b2847979f Mon Sep 17 00:00:00 2001 -From: Alex Kube -Date: Wed, 23 Oct 2019 21:20:13 +0430 -Subject: [PATCH 9/9] ld: replace glibc dynamic linker with musl - -Rework of patch by Khem Raj -for go 1.10. Should be applied conditionally on -musl being the system C library. - -Adapted to Go 1.13 from patches originally submitted to -the meta/recipes-devtools/go tree by -Matt Madison . 
- -Upstream-Status: Inappropriate [Real fix should be portable across libcs] - -Signed-off-by: Alexander J Kube ---- - src/cmd/link/internal/amd64/obj.go | 2 +- - src/cmd/link/internal/arm/obj.go | 2 +- - src/cmd/link/internal/arm64/obj.go | 2 +- - src/cmd/link/internal/mips/obj.go | 2 +- - src/cmd/link/internal/mips64/obj.go | 2 +- - src/cmd/link/internal/ppc64/obj.go | 2 +- - src/cmd/link/internal/s390x/obj.go | 2 +- - src/cmd/link/internal/x86/obj.go | 2 +- - 8 files changed, 8 insertions(+), 8 deletions(-) - ---- a/src/cmd/link/internal/amd64/obj.go -+++ b/src/cmd/link/internal/amd64/obj.go -@@ -59,7 +59,7 @@ func Init() (*sys.Arch, ld.Arch) { - PEreloc1: pereloc1, - TLSIEtoLE: tlsIEtoLE, - -- Linuxdynld: "/lib64/ld-linux-x86-64.so.2", -+ Linuxdynld: "/lib64/ld-musl-x86-64.so.1", - Freebsddynld: "/libexec/ld-elf.so.1", - Openbsddynld: "/usr/libexec/ld.so", - Netbsddynld: "/libexec/ld.elf_so", ---- a/src/cmd/link/internal/arm/obj.go -+++ b/src/cmd/link/internal/arm/obj.go -@@ -59,7 +59,7 @@ func Init() (*sys.Arch, ld.Arch) { - Machoreloc1: machoreloc1, - PEreloc1: pereloc1, - -- Linuxdynld: "/lib/ld-linux.so.3", // 2 for OABI, 3 for EABI -+ Linuxdynld: "/lib/ld-musl-armhf.so.1", - Freebsddynld: "/usr/libexec/ld-elf.so.1", - Openbsddynld: "/usr/libexec/ld.so", - Netbsddynld: "/libexec/ld.elf_so", ---- a/src/cmd/link/internal/arm64/obj.go -+++ b/src/cmd/link/internal/arm64/obj.go -@@ -57,7 +57,7 @@ func Init() (*sys.Arch, ld.Arch) { - Gentext: gentext, - Machoreloc1: machoreloc1, - -- Linuxdynld: "/lib/ld-linux-aarch64.so.1", -+ Linuxdynld: "/lib/ld-musl-aarch64.so.1", - - Freebsddynld: "/usr/libexec/ld-elf.so.1", - Openbsddynld: "/usr/libexec/ld.so", ---- a/src/cmd/link/internal/mips/obj.go -+++ b/src/cmd/link/internal/mips/obj.go -@@ -60,7 +60,7 @@ func Init() (*sys.Arch, ld.Arch) { - Gentext: gentext, - Machoreloc1: machoreloc1, - -- Linuxdynld: "/lib/ld.so.1", -+ Linuxdynld: "/lib/ld-musl-mipsle.so.1", - - Freebsddynld: "XXX", - Openbsddynld: "XXX", ---- a/src/cmd/link/internal/mips64/obj.go -+++ b/src/cmd/link/internal/mips64/obj.go -@@ -59,7 +59,7 @@ func Init() (*sys.Arch, ld.Arch) { - Gentext: gentext, - Machoreloc1: machoreloc1, - -- Linuxdynld: "/lib64/ld64.so.1", -+ Linuxdynld: "/lib64/ld-musl-mips64le.so.1", - Freebsddynld: "XXX", - Openbsddynld: "XXX", - Netbsddynld: "XXX", ---- a/src/cmd/link/internal/ppc64/obj.go -+++ b/src/cmd/link/internal/ppc64/obj.go -@@ -63,7 +63,7 @@ func Init() (*sys.Arch, ld.Arch) { - Xcoffreloc1: xcoffreloc1, - - // TODO(austin): ABI v1 uses /usr/lib/ld.so.1, -- Linuxdynld: "/lib64/ld64.so.1", -+ Linuxdynld: "/lib64/ld-musl-powerpc64le.so.1", - - Freebsddynld: "XXX", - Openbsddynld: "XXX", ---- a/src/cmd/link/internal/s390x/obj.go -+++ b/src/cmd/link/internal/s390x/obj.go -@@ -57,7 +57,7 @@ func Init() (*sys.Arch, ld.Arch) { - Gentext: gentext, - Machoreloc1: machoreloc1, - -- Linuxdynld: "/lib64/ld64.so.1", -+ Linuxdynld: "/lib64/ld-musl-s390x.so.1", - - // not relevant for s390x - Freebsddynld: "XXX", ---- a/src/cmd/link/internal/x86/obj.go -+++ b/src/cmd/link/internal/x86/obj.go -@@ -58,7 +58,7 @@ func Init() (*sys.Arch, ld.Arch) { - Machoreloc1: machoreloc1, - PEreloc1: pereloc1, - -- Linuxdynld: "/lib/ld-linux.so.2", -+ Linuxdynld: "/lib/ld-musl-i386.so.1", - Freebsddynld: "/usr/libexec/ld-elf.so.1", - Openbsddynld: "/usr/libexec/ld.so", - Netbsddynld: "/usr/libexec/ld.elf_so", diff --git a/poky/meta/recipes-devtools/go/go-1.15.inc b/poky/meta/recipes-devtools/go/go-1.15.inc new file mode 100644 index 000000000..97d748b92 --- /dev/null +++ 
b/poky/meta/recipes-devtools/go/go-1.15.inc @@ -0,0 +1,20 @@ +require go-common.inc + +GO_BASEVERSION = "1.15" +GO_MINOR = ".2" +PV .= "${GO_MINOR}" +FILESEXTRAPATHS_prepend := "${FILE_DIRNAME}/go-${GO_BASEVERSION}:" + +LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" + +SRC_URI += "\ + file://0001-allow-CC-and-CXX-to-have-multiple-words.patch \ + file://0002-cmd-go-make-content-based-hash-generation-less-pedan.patch \ + file://0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch \ + file://0004-ld-add-soname-to-shareable-objects.patch \ + file://0005-make.bash-override-CC-when-building-dist-and-go_boot.patch \ + file://0006-cmd-dist-separate-host-and-target-builds.patch \ + file://0007-cmd-go-make-GOROOT-precious-by-default.patch \ + file://0008-use-GOBUILDMODE-to-set-buildmode.patch \ +" +SRC_URI[main.sha256sum] = "28bf9d0bcde251011caae230a4a05d917b172ea203f2a62f2c2f9533589d4b4d" diff --git a/poky/meta/recipes-devtools/go/go-1.15/0001-allow-CC-and-CXX-to-have-multiple-words.patch b/poky/meta/recipes-devtools/go/go-1.15/0001-allow-CC-and-CXX-to-have-multiple-words.patch new file mode 100644 index 000000000..5f4823be2 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0001-allow-CC-and-CXX-to-have-multiple-words.patch @@ -0,0 +1,33 @@ +From 9e3dc44cdfa58d96504d0a789dc82617dd5bef55 Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:01:13 +0430 +Subject: [PATCH 1/9] cmd/go: Allow CC and CXX to have multiple words + +Upstream-Status: Inappropriate [OE specific] + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Signed-off-by: Alexander J Kube + +--- + src/cmd/go/internal/envcmd/env.go | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/src/cmd/go/internal/envcmd/env.go ++++ b/src/cmd/go/internal/envcmd/env.go +@@ -103,11 +103,11 @@ func MkEnv() []cfg.EnvVar { + + cc := cfg.DefaultCC(cfg.Goos, cfg.Goarch) + if env := strings.Fields(cfg.Getenv("CC")); len(env) > 0 { +- cc = env[0] ++ cc = strings.Join(env, " ") + } + cxx := cfg.DefaultCXX(cfg.Goos, cfg.Goarch) + if env := strings.Fields(cfg.Getenv("CXX")); len(env) > 0 { +- cxx = env[0] ++ cxx = strings.Join(env, " ") + } + env = append(env, cfg.EnvVar{Name: "AR", Value: envOr("AR", "ar")}) + env = append(env, cfg.EnvVar{Name: "CC", Value: cc}) diff --git a/poky/meta/recipes-devtools/go/go-1.15/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch b/poky/meta/recipes-devtools/go/go-1.15/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch new file mode 100644 index 000000000..d0511c0c4 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0002-cmd-go-make-content-based-hash-generation-less-pedan.patch @@ -0,0 +1,219 @@ +From a13ae484e41139094505d2834437e9262a5315f7 Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:14:22 +0430 +Subject: [PATCH 2/9] cmd/go: make content-based hash generation less pedantic + +Upstream-Status: Inappropriate [OE specific] + +Go 1.10's build tool now uses content-based hashes to +determine when something should be built or re-built. +This same mechanism is used to maintain a built-artifact +cache for speeding up builds. + +However, the hashes it generates include information that +doesn't work well with OE, nor with using a shared runtime +library. + +First, it embeds path names to source files, unless +building within GOROOT. This prevents the building +of a package in GOPATH for later staging into GOROOT. 
+ +This patch adds support for the environment variable +GOPATH_OMIT_IN_ACTIONID. If present, path name +embedding is disabled. + +Second, if cgo is enabled, the build ID for cgo-related +packages will include the current value of the environment +variables for invoking the compiler (CC, CXX, FC) and +any CGO_xxFLAGS variables. Only if the settings used +during a compilation exactly match, character for character, +the values used for compiling runtime/cgo or any other +cgo-enabled package being imported, will the tool +decide that the imported package is up-to-date. + +This is done to help ensure correctness, but is overly +simplistic and effectively prevents the reuse of built +artifacts that use cgo (or shared runtime, which includes +runtime/cgo). + +This patch filters out all compiler flags except those +beginning with '-m'. The default behavior can be restored +by setting the CGO_PEDANTIC environment variable. + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Signed-off-by: Alexander J Kube +--- + src/cmd/go/internal/envcmd/env.go | 2 +- + src/cmd/go/internal/work/exec.go | 66 ++++++++++++++++++++++--------- + 2 files changed, 49 insertions(+), 19 deletions(-) + +--- a/src/cmd/go/internal/envcmd/env.go ++++ b/src/cmd/go/internal/envcmd/env.go +@@ -157,7 +157,7 @@ func ExtraEnvVars() []cfg.EnvVar { + func ExtraEnvVarsCostly() []cfg.EnvVar { + var b work.Builder + b.Init() +- cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{}) ++ cppflags, cflags, cxxflags, fflags, ldflags, err := b.CFlags(&load.Package{}, false) + if err != nil { + // Should not happen - b.CFlags was given an empty package. + fmt.Fprintf(os.Stderr, "go: invalid cflags: %v\n", err) +--- a/src/cmd/go/internal/work/exec.go ++++ b/src/cmd/go/internal/work/exec.go +@@ -33,6 +33,8 @@ import ( + "cmd/go/internal/str" + ) + ++var omitGopath = os.Getenv("GOPATH_OMIT_IN_ACTIONID") != "" ++ + // actionList returns the list of actions in the dag rooted at root + // as visited in a depth-first post-order traversal. + func actionList(root *Action) []*Action { +@@ -209,7 +211,7 @@ func (b *Builder) buildActionID(a *Actio + // Assume b.WorkDir is being trimmed properly. + // When -trimpath is used with a package built from the module cache, + // use the module path and version instead of the directory. 
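
// Standalone sketch (not part of this patch): the CGO_PEDANTIC filtering rule
// described above, distilled from the exec.go hunks in this file. Unless
// pedantic mode is requested, only machine-specific -m* flags contribute, so
// incidental CFLAGS differences do not invalidate cached cgo artifacts.
// Standard library only; the flag values below are made-up examples.
package main

import (
	"fmt"
	"os"
	"strings"
)

func filterCompilerFlags(flags []string, pedantic bool) []string {
	if pedantic {
		return flags // keep every flag, character for character
	}
	var kept []string
	for _, f := range flags {
		if strings.HasPrefix(f, "-m") {
			kept = append(kept, f)
		}
	}
	return kept
}

func main() {
	pedantic := os.Getenv("CGO_PEDANTIC") != ""
	flags := []string{"-O2", "-g", "-march=armv7-a", "-mfpu=neon", "--sysroot=/work/sysroot"}
	// With CGO_PEDANTIC unset this prints [-march=armv7-a -mfpu=neon].
	fmt.Println(filterCompilerFlags(flags, pedantic))
}
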
+- if !p.Goroot && !cfg.BuildTrimpath && !strings.HasPrefix(p.Dir, b.WorkDir) { ++ if !p.Goroot && !omitGopath && !cfg.BuildTrimpath && !strings.HasPrefix(p.Dir, b.WorkDir) { + fmt.Fprintf(h, "dir %s\n", p.Dir) + } else if cfg.BuildTrimpath && p.Module != nil { + fmt.Fprintf(h, "module %s@%s\n", p.Module.Path, p.Module.Version) +@@ -228,13 +230,13 @@ func (b *Builder) buildActionID(a *Actio + } + if len(p.CgoFiles)+len(p.SwigFiles) > 0 { + fmt.Fprintf(h, "cgo %q\n", b.toolID("cgo")) +- cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p) +- fmt.Fprintf(h, "CC=%q %q %q %q\n", b.ccExe(), cppflags, cflags, ldflags) ++ cppflags, cflags, cxxflags, fflags, ldflags, _ := b.CFlags(p, true) ++ fmt.Fprintf(h, "CC=%q %q %q %q\n", b.ccExe(true), cppflags, cflags, ldflags) + if len(p.CXXFiles)+len(p.SwigFiles) > 0 { +- fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(), cxxflags) ++ fmt.Fprintf(h, "CXX=%q %q\n", b.cxxExe(true), cxxflags) + } + if len(p.FFiles) > 0 { +- fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(), fflags) ++ fmt.Fprintf(h, "FC=%q %q\n", b.fcExe(true), fflags) + } + // TODO(rsc): Should we include the SWIG version or Fortran/GCC/G++/Objective-C compiler versions? + } +@@ -2298,33 +2300,48 @@ var ( + // gccCmd returns a gcc command line prefix + // defaultCC is defined in zdefaultcc.go, written by cmd/dist. + func (b *Builder) GccCmd(incdir, workdir string) []string { +- return b.compilerCmd(b.ccExe(), incdir, workdir) ++ return b.compilerCmd(b.ccExe(false), incdir, workdir) + } + + // gxxCmd returns a g++ command line prefix + // defaultCXX is defined in zdefaultcc.go, written by cmd/dist. + func (b *Builder) GxxCmd(incdir, workdir string) []string { +- return b.compilerCmd(b.cxxExe(), incdir, workdir) ++ return b.compilerCmd(b.cxxExe(false), incdir, workdir) + } + + // gfortranCmd returns a gfortran command line prefix. + func (b *Builder) gfortranCmd(incdir, workdir string) []string { +- return b.compilerCmd(b.fcExe(), incdir, workdir) ++ return b.compilerCmd(b.fcExe(false), incdir, workdir) + } + + // ccExe returns the CC compiler setting without all the extra flags we add implicitly. +-func (b *Builder) ccExe() []string { +- return b.compilerExe(origCC, cfg.DefaultCC(cfg.Goos, cfg.Goarch)) ++func (b *Builder) ccExe(filtered bool) []string { ++ return b.compilerExe(origCC, cfg.DefaultCC(cfg.Goos, cfg.Goarch), filtered) + } + + // cxxExe returns the CXX compiler setting without all the extra flags we add implicitly. +-func (b *Builder) cxxExe() []string { +- return b.compilerExe(origCXX, cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) ++func (b *Builder) cxxExe(filtered bool) []string { ++ return b.compilerExe(origCXX, cfg.DefaultCXX(cfg.Goos, cfg.Goarch), filtered) + } + + // fcExe returns the FC compiler setting without all the extra flags we add implicitly. +-func (b *Builder) fcExe() []string { +- return b.compilerExe(cfg.Getenv("FC"), "gfortran") ++func (b *Builder) fcExe(filtered bool) []string { ++ return b.compilerExe(os.Getenv("FC"), "gfortran", filtered) ++} ++ ++var filterFlags = os.Getenv("CGO_PEDANTIC") == "" ++ ++func filterCompilerFlags(flags []string) []string { ++ var newflags []string ++ if !filterFlags { ++ return flags ++ } ++ for _, flag := range flags { ++ if strings.HasPrefix(flag, "-m") { ++ newflags = append(newflags, flag) ++ } ++ } ++ return newflags + } + + // compilerExe returns the compiler to use given an +@@ -2333,11 +2350,16 @@ func (b *Builder) fcExe() []string { + // of the compiler but can have additional arguments if they + // were present in the environment value. 
+ // For example if CC="gcc -DGOPHER" then the result is ["gcc", "-DGOPHER"]. +-func (b *Builder) compilerExe(envValue string, def string) []string { ++func (b *Builder) compilerExe(envValue string, def string, filtered bool) []string { + compiler := strings.Fields(envValue) + if len(compiler) == 0 { + compiler = []string{def} + } ++ ++ if filtered { ++ return append(compiler[0:1], filterCompilerFlags(compiler[1:])...) ++ } ++ + return compiler + } + +@@ -2510,7 +2532,7 @@ func envList(key, def string) []string { + } + + // CFlags returns the flags to use when invoking the C, C++ or Fortran compilers, or cgo. +-func (b *Builder) CFlags(p *load.Package) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) { ++func (b *Builder) CFlags(p *load.Package, filtered bool) (cppflags, cflags, cxxflags, fflags, ldflags []string, err error) { + defaults := "-g -O2" + + if cppflags, err = buildFlags("CPPFLAGS", "", p.CgoCPPFLAGS, checkCompilerFlags); err != nil { +@@ -2529,6 +2551,14 @@ func (b *Builder) CFlags(p *load.Package + return + } + ++ if filtered { ++ cppflags = filterCompilerFlags(cppflags) ++ cflags = filterCompilerFlags(cflags) ++ cxxflags = filterCompilerFlags(cxxflags) ++ fflags = filterCompilerFlags(fflags) ++ ldflags = filterCompilerFlags(ldflags) ++ } ++ + return + } + +@@ -2543,7 +2573,7 @@ var cgoRe = lazyregexp.New(`[/\\:]`) + + func (b *Builder) cgo(a *Action, cgoExe, objdir string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles, ffiles []string) (outGo, outObj []string, err error) { + p := a.Package +- cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p) ++ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoFFLAGS, cgoLDFLAGS, err := b.CFlags(p, false) + if err != nil { + return nil, nil, err + } +@@ -2902,7 +2932,7 @@ func (b *Builder) swigIntSize(objdir str + + // Run SWIG on one SWIG input file. + func (b *Builder) swigOne(a *Action, p *load.Package, file, objdir string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) { +- cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p) ++ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _, _, err := b.CFlags(p, false) + if err != nil { + return "", "", err + } diff --git a/poky/meta/recipes-devtools/go/go-1.15/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch b/poky/meta/recipes-devtools/go/go-1.15/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch new file mode 100644 index 000000000..662c70547 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0003-allow-GOTOOLDIR-to-be-overridden-in-the-environment.patch @@ -0,0 +1,47 @@ +From 28ada8896b76d620240bafc22aa395071d601482 Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:15:37 +0430 +Subject: [PATCH 3/9] cmd/go: Allow GOTOOLDIR to be overridden in the environment + +to allow for split host/target build roots + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . 
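
In effect the lookup becomes: honour GOTOOLDIR from the environment when it is set, otherwise fall back to the stock <GOROOT>/pkg/tool/<goos>_<goarch> location. A standalone sketch of that rule (not part of this patch; standard library only):

// tooldir_lookup.go: the GOTOOLDIR override rule the hunks below add.
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

func toolDir(goroot string) string {
	if s := os.Getenv("GOTOOLDIR"); s != "" {
		return filepath.Clean(s)
	}
	return filepath.Join(goroot, "pkg", "tool", runtime.GOOS+"_"+runtime.GOARCH)
}

func main() {
	// e.g. GOTOOLDIR=/work/build/go/pkg/tool/native_native ./tooldir_lookup
	fmt.Println(toolDir(runtime.GOROOT()))
}

This is what lets the split host/target build roots point the go tool at a relocated tool directory without patching paths into the binaries.
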
+ +Upstream-Status: Inappropriate [OE specific] + +Signed-off-by: Alexander J Kube +--- + src/cmd/dist/build.go | 4 +++- + src/cmd/go/internal/cfg/cfg.go | 6 +++++- + 2 files changed, 8 insertions(+), 2 deletions(-) + +--- a/src/cmd/dist/build.go ++++ b/src/cmd/dist/build.go +@@ -246,7 +246,9 @@ func xinit() { + workdir = xworkdir() + xatexit(rmworkdir) + +- tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) ++ if tooldir = os.Getenv("GOTOOLDIR"); tooldir == "" { ++ tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) ++ } + } + + // compilerEnv returns a map from "goos/goarch" to the +--- a/src/cmd/go/internal/cfg/cfg.go ++++ b/src/cmd/go/internal/cfg/cfg.go +@@ -64,7 +64,11 @@ func defaultContext() build.Context { + // variables. This matches the initialization of ToolDir in + // go/build, except for using ctxt.GOROOT rather than + // runtime.GOROOT. +- build.ToolDir = filepath.Join(ctxt.GOROOT, "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) ++ if s := os.Getenv("GOTOOLDIR"); s != "" { ++ build.ToolDir = filepath.Clean(s) ++ } else { ++ build.ToolDir = filepath.Join(ctxt.GOROOT, "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) ++ } + } + + ctxt.GOPATH = envOr("GOPATH", ctxt.GOPATH) diff --git a/poky/meta/recipes-devtools/go/go-1.15/0004-ld-add-soname-to-shareable-objects.patch b/poky/meta/recipes-devtools/go/go-1.15/0004-ld-add-soname-to-shareable-objects.patch new file mode 100644 index 000000000..da2992392 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0004-ld-add-soname-to-shareable-objects.patch @@ -0,0 +1,45 @@ +From bf5cf5301ae5914498454c87293d1df2e1d8489f Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:16:32 +0430 +Subject: [PATCH 4/9] ld: add soname to shareable objects + +so that OE's shared library dependency handling +can find them. + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Upstream-Status: Inappropriate [OE specific] + +Signed-off-by: Alexander J Kube +--- + src/cmd/link/internal/ld/lib.go | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/src/cmd/link/internal/ld/lib.go ++++ b/src/cmd/link/internal/ld/lib.go +@@ -1446,6 +1446,7 @@ func (ctxt *Link) hostlink() { + argv = append(argv, "-Wl,-z,relro") + } + argv = append(argv, "-shared") ++ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) + if ctxt.HeadType != objabi.Hwindows { + // Pass -z nodelete to mark the shared library as + // non-closeable: a dlclose will do nothing. 
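
// Standalone sketch (not part of this patch): the -Wl,-soname argument added
// around this point for shared-object build modes, plus a DT_SONAME read-back
// on a finished .so, which is the field OE's shared-library dependency
// handling keys on. Standard library only; file names are illustrative.
package main

import (
	"debug/elf"
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// sonameArg mirrors the argument appended to the external-linker command line.
func sonameArg(outfile string) string {
	return fmt.Sprintf("-Wl,-soname,%s", filepath.Base(outfile))
}

func main() {
	if len(os.Args) != 2 {
		fmt.Println(sonameArg("libstd.so")) // -Wl,-soname,libstd.so
		return
	}
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	names, err := f.DynString(elf.DT_SONAME)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("DT_SONAME:", names)
}
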
+@@ -1457,6 +1458,7 @@ func (ctxt *Link) hostlink() { + argv = append(argv, "-Wl,-z,relro") + } + argv = append(argv, "-shared") ++ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) + case BuildModePlugin: + if ctxt.HeadType == objabi.Hdarwin { + argv = append(argv, "-dynamiclib") +@@ -1465,6 +1467,7 @@ func (ctxt *Link) hostlink() { + argv = append(argv, "-Wl,-z,relro") + } + argv = append(argv, "-shared") ++ argv = append(argv, fmt.Sprintf("-Wl,-soname,%s", filepath.Base(*flagOutfile))) + } + } + diff --git a/poky/meta/recipes-devtools/go/go-1.15/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch b/poky/meta/recipes-devtools/go/go-1.15/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch new file mode 100644 index 000000000..59c12d954 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0005-make.bash-override-CC-when-building-dist-and-go_boot.patch @@ -0,0 +1,39 @@ +From f05ef3ded52b98537c10efd0b15cd9612471524d Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:17:16 +0430 +Subject: [PATCH 5/9] make.bash: override CC when building dist and + go_bootstrap + +for handling OE cross-canadian builds. + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Upstream-Status: Inappropriate [OE specific] + +Signed-off-by: Alexander J Kube +--- + src/make.bash | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/src/make.bash ++++ b/src/make.bash +@@ -178,7 +178,7 @@ if [ "$GOROOT_BOOTSTRAP" = "$GOROOT" ]; + exit 1 + fi + rm -f cmd/dist/dist +-GOROOT="$GOROOT_BOOTSTRAP" GOOS="" GOARCH="" GO111MODULE=off "$GOROOT_BOOTSTRAP/bin/go" build -o cmd/dist/dist ./cmd/dist ++CC="${BUILD_CC:-${CC}}" GOROOT="$GOROOT_BOOTSTRAP" GOOS="" GOARCH="" GO111MODULE=off "$GOROOT_BOOTSTRAP/bin/go" build -o cmd/dist/dist ./cmd/dist + + # -e doesn't propagate out of eval, so check success by hand. + eval $(./cmd/dist/dist env -p || echo FAIL=true) +@@ -209,7 +209,7 @@ fi + # Run dist bootstrap to complete make.bash. + # Bootstrap installs a proper cmd/dist, built with the new toolchain. + # Throw ours, built with Go 1.4, away after bootstrap. +-./cmd/dist/dist bootstrap $buildall $vflag $GO_DISTFLAGS "$@" ++CC="${BUILD_CC:-${CC}}" ./cmd/dist/dist bootstrap $buildall $vflag $GO_DISTFLAGS "$@" + rm -f ./cmd/dist/dist + + # DO NOT ADD ANY NEW CODE HERE. diff --git a/poky/meta/recipes-devtools/go/go-1.15/0006-cmd-dist-separate-host-and-target-builds.patch b/poky/meta/recipes-devtools/go/go-1.15/0006-cmd-dist-separate-host-and-target-builds.patch new file mode 100644 index 000000000..7aee0bac4 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0006-cmd-dist-separate-host-and-target-builds.patch @@ -0,0 +1,274 @@ +From 10735bb84df17ba657f76835f483cd8543a879c1 Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:18:12 +0430 +Subject: [PATCH 6/9] cmd/dist: separate host and target builds + +Upstream-Status: Inappropriate [OE specific] + +Change the dist tool to allow for OE-style cross- +and cross-canadian builds: + + - command flags --host-only and --target only are added; + if one is present, the other changes mentioned below + take effect, and arguments may also be specified on + the command line to enumerate the package(s) to be + built. + + - for OE cross builds, go_bootstrap is always built for + the current build host, and is moved, along with the supporting + toolchain (asm, compile, etc.) 
to a separate 'native_native' + directory under GOROOT/pkg/tool. + + - go_bootstrap is not automatically removed after the build, + so it can be reused later (e.g., building both static and + shared runtime). + +Note that for --host-only builds, it would be nice to specify +just the "cmd" package to build only the go commands/tools, +the staleness checks in the dist tool will fail if the "std" +library has not also been built. So host-only builds have to +build everything anyway. + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Signed-off-by: Alexander J Kube +--- + src/cmd/dist/build.go | 155 ++++++++++++++++++++++++++++++------------ + 1 file changed, 112 insertions(+), 43 deletions(-) + +--- a/src/cmd/dist/build.go ++++ b/src/cmd/dist/build.go +@@ -41,6 +41,7 @@ var ( + goldflags string + workdir string + tooldir string ++ build_tooldir string + oldgoos string + oldgoarch string + exe string +@@ -53,6 +54,7 @@ var ( + + rebuildall bool + defaultclang bool ++ crossBuild bool + + vflag int // verbosity + ) +@@ -249,6 +251,8 @@ func xinit() { + if tooldir = os.Getenv("GOTOOLDIR"); tooldir == "" { + tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch) + } ++ ++ build_tooldir = pathf("%s/pkg/tool/native_native", goroot) + } + + // compilerEnv returns a map from "goos/goarch" to the +@@ -480,8 +484,10 @@ func setup() { + p := pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch) + if rebuildall { + xremoveall(p) ++ xremoveall(build_tooldir) + } + xmkdirall(p) ++ xmkdirall(build_tooldir) + + if goos != gohostos || goarch != gohostarch { + p := pathf("%s/pkg/%s_%s", goroot, goos, goarch) +@@ -1244,12 +1250,29 @@ func cmdbootstrap() { + + var noBanner bool + var debug bool ++ var hostOnly bool ++ var targetOnly bool ++ var toBuild = []string{"std", "cmd"} ++ + flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all") + flag.BoolVar(&debug, "d", debug, "enable debugging of bootstrap process") + flag.BoolVar(&noBanner, "no-banner", noBanner, "do not print banner") ++ flag.BoolVar(&hostOnly, "host-only", hostOnly, "build only host binaries, not target") ++ flag.BoolVar(&targetOnly, "target-only", targetOnly, "build only target binaries, not host") + +- xflagparse(0) ++ xflagparse(-1) + ++ if hostOnly && targetOnly { ++ fatalf("specify only one of --host-only or --target-only\n") ++ } ++ crossBuild = hostOnly || targetOnly ++ if flag.NArg() > 0 { ++ if crossBuild { ++ toBuild = flag.Args() ++ } else { ++ fatalf("package names not permitted without --host-only or --target-only\n") ++ } ++ } + // Set GOPATH to an internal directory. We shouldn't actually + // need to store files here, since the toolchain won't + // depend on modules outside of vendor directories, but if +@@ -1303,8 +1326,13 @@ func cmdbootstrap() { + xprintf("\n") + } + +- gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now +- goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now ++ // For split host/target cross/cross-canadian builds, we don't ++ // want to be setting these flags until after we have compiled ++ // the toolchain that runs on the build host. 
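
// Standalone sketch (not part of this patch): the command-line contract the
// dist changes introduce. --host-only and --target-only are mutually
// exclusive, and explicit package arguments are only accepted in one of those
// split-build modes; otherwise the default std,cmd set is built. Flag wiring
// here is illustrative, not the real cmd/dist code.
package main

import (
	"flag"
	"fmt"
	"log"
)

func main() {
	hostOnly := flag.Bool("host-only", false, "build only host binaries, not target")
	targetOnly := flag.Bool("target-only", false, "build only target binaries, not host")
	flag.Parse()

	if *hostOnly && *targetOnly {
		log.Fatal("specify only one of --host-only or --target-only")
	}
	crossBuild := *hostOnly || *targetOnly

	toBuild := []string{"std", "cmd"} // default set, as in cmdbootstrap
	if args := flag.Args(); len(args) > 0 {
		if !crossBuild {
			log.Fatal("package names not permitted without --host-only or --target-only")
		}
		toBuild = args
	}
	fmt.Println("crossBuild:", crossBuild, "packages:", toBuild)
}
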
++ if !crossBuild { ++ gogcflags = os.Getenv("GO_GCFLAGS") // we were using $BOOT_GO_GCFLAGS until now ++ goldflags = os.Getenv("GO_LDFLAGS") // we were using $BOOT_GO_LDFLAGS until now ++ } + goBootstrap := pathf("%s/go_bootstrap", tooldir) + cmdGo := pathf("%s/go", gobin) + if debug { +@@ -1333,7 +1361,11 @@ func cmdbootstrap() { + xprintf("\n") + } + xprintf("Building Go toolchain2 using go_bootstrap and Go toolchain1.\n") +- os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) ++ if crossBuild { ++ os.Setenv("CC", defaultcc[""]) ++ } else { ++ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) ++ } + goInstall(goBootstrap, append([]string{"-i"}, toolchain...)...) + if debug { + run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") +@@ -1370,50 +1402,84 @@ func cmdbootstrap() { + } + checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) + +- if goos == oldgoos && goarch == oldgoarch { +- // Common case - not setting up for cross-compilation. +- timelog("build", "toolchain") +- if vflag > 0 { +- xprintf("\n") ++ if crossBuild { ++ gogcflags = os.Getenv("GO_GCFLAGS") ++ goldflags = os.Getenv("GO_LDFLAGS") ++ tool_files, _ := filepath.Glob(pathf("%s/*", tooldir)) ++ for _, f := range tool_files { ++ copyfile(pathf("%s/%s", build_tooldir, filepath.Base(f)), f, writeExec) ++ xremove(f) ++ } ++ os.Setenv("GOTOOLDIR", build_tooldir) ++ goBootstrap = pathf("%s/go_bootstrap", build_tooldir) ++ if hostOnly { ++ timelog("build", "host toolchain") ++ if vflag > 0 { ++ xprintf("\n") ++ } ++ xprintf("Building %s for host, %s/%s.\n", strings.Join(toBuild, ","), goos, goarch) ++ goInstall(goBootstrap, toBuild...) ++ checkNotStale(goBootstrap, toBuild...) ++ // Skip cmdGo staleness checks here, since we can't necessarily run the cmdGo binary ++ ++ timelog("build", "target toolchain") ++ if vflag > 0 { ++ xprintf("\n") ++ } ++ } else if targetOnly { ++ goos = oldgoos ++ goarch = oldgoarch ++ os.Setenv("GOOS", goos) ++ os.Setenv("GOARCH", goarch) ++ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) ++ xprintf("Building %s for target, %s/%s.\n", strings.Join(toBuild, ","), goos, goarch) ++ goInstall(goBootstrap, toBuild...) ++ checkNotStale(goBootstrap, toBuild...) ++ // Skip cmdGo staleness checks here, since we can't run the target's cmdGo binary + } +- xprintf("Building packages and commands for %s/%s.\n", goos, goarch) + } else { +- // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. +- // Finish GOHOSTOS/GOHOSTARCH installation and then +- // run GOOS/GOARCH installation. +- timelog("build", "host toolchain") +- if vflag > 0 { +- xprintf("\n") ++ ++ if goos == oldgoos && goarch == oldgoarch { ++ // Common case - not setting up for cross-compilation. ++ timelog("build", "toolchain") ++ if vflag > 0 { ++ xprintf("\n") ++ } ++ xprintf("Building packages and commands for %s/%s.\n", goos, goarch) ++ } else { ++ // GOOS/GOARCH does not match GOHOSTOS/GOHOSTARCH. ++ // Finish GOHOSTOS/GOHOSTARCH installation and then ++ // run GOOS/GOARCH installation. 
++ timelog("build", "host toolchain") ++ if vflag > 0 { ++ xprintf("\n") ++ } ++ xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch) ++ goInstall(goBootstrap, "std", "cmd") ++ checkNotStale(goBootstrap, "std", "cmd") ++ checkNotStale(cmdGo, "std", "cmd") ++ ++ timelog("build", "target toolchain") ++ if vflag > 0 { ++ xprintf("\n") ++ } ++ goos = oldgoos ++ goarch = oldgoarch ++ os.Setenv("GOOS", goos) ++ os.Setenv("GOARCH", goarch) ++ os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) ++ xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) + } +- xprintf("Building packages and commands for host, %s/%s.\n", goos, goarch) + goInstall(goBootstrap, "std", "cmd") + checkNotStale(goBootstrap, "std", "cmd") + checkNotStale(cmdGo, "std", "cmd") + +- timelog("build", "target toolchain") +- if vflag > 0 { +- xprintf("\n") +- } +- goos = oldgoos +- goarch = oldgoarch +- os.Setenv("GOOS", goos) +- os.Setenv("GOARCH", goarch) +- os.Setenv("CC", compilerEnvLookup(defaultcc, goos, goarch)) +- xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch) +- } +- targets := []string{"std", "cmd"} +- if goos == "js" && goarch == "wasm" { +- // Skip the cmd tools for js/wasm. They're not usable. +- targets = targets[:1] +- } +- goInstall(goBootstrap, targets...) +- checkNotStale(goBootstrap, targets...) +- checkNotStale(cmdGo, targets...) +- if debug { +- run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") +- run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) +- checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) +- copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) ++ if debug { ++ run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full") ++ run("", ShowOutput|CheckExit, pathf("%s/buildid", tooldir), pathf("%s/pkg/%s_%s/runtime/internal/sys.a", goroot, goos, goarch)) ++ checkNotStale(goBootstrap, append(toolchain, "runtime/internal/sys")...) ++ copyfile(pathf("%s/compile4", tooldir), pathf("%s/compile", tooldir), writeExec) ++ } + } + + // Check that there are no new files in $GOROOT/bin other than +@@ -1430,8 +1496,11 @@ func cmdbootstrap() { + } + } + +- // Remove go_bootstrap now that we're done. +- xremove(pathf("%s/go_bootstrap", tooldir)) ++ // Except that for split host/target cross-builds, we need to ++ // keep it. ++ if !crossBuild { ++ xremove(pathf("%s/go_bootstrap", tooldir)) ++ } + + if goos == "android" { + // Make sure the exec wrapper will sync a fresh $GOROOT to the device. diff --git a/poky/meta/recipes-devtools/go/go-1.15/0007-cmd-go-make-GOROOT-precious-by-default.patch b/poky/meta/recipes-devtools/go/go-1.15/0007-cmd-go-make-GOROOT-precious-by-default.patch new file mode 100644 index 000000000..4b4d0d4f3 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0007-cmd-go-make-GOROOT-precious-by-default.patch @@ -0,0 +1,104 @@ +From 9ba507e076c744f4d394418e4a849e68cd426a4a Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:18:56 +0430 +Subject: [PATCH 7/9] cmd/go: make GOROOT precious by default + +Upstream-Status: Inappropriate [OE specific] + +The go build tool normally rebuilds whatever it detects is +stale. 
This can be a problem when GOROOT is intended to +be read-only and the go runtime has been built as a shared +library, since we don't want every application to be rebuilding +the shared runtime - particularly in cross-build/packaging +setups, since that would lead to 'abi mismatch' runtime errors. + +This patch prevents the install and linkshared actions from +installing to GOROOT unless overridden with the GOROOT_OVERRIDE +environment variable. + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Matt Madison . + +Signed-off-by: Alexander J Kube +--- + src/cmd/go/internal/work/action.go | 3 +++ + src/cmd/go/internal/work/build.go | 6 ++++++ + src/cmd/go/internal/work/exec.go | 25 +++++++++++++++++++++++++ + 3 files changed, 34 insertions(+) + +--- a/src/cmd/go/internal/work/action.go ++++ b/src/cmd/go/internal/work/action.go +@@ -670,6 +670,9 @@ func (b *Builder) addTransitiveLinkDeps( + if p1 == nil || p1.Shlib == "" || haveShlib[filepath.Base(p1.Shlib)] { + continue + } ++ if goRootPrecious && (p1.Standard || p1.Goroot) { ++ continue ++ } + haveShlib[filepath.Base(p1.Shlib)] = true + // TODO(rsc): The use of ModeInstall here is suspect, but if we only do ModeBuild, + // we'll end up building an overall library or executable that depends at runtime +--- a/src/cmd/go/internal/work/build.go ++++ b/src/cmd/go/internal/work/build.go +@@ -167,6 +167,8 @@ See also: go install, go get, go clean. + + const concurrentGCBackendCompilationEnabledByDefault = true + ++var goRootPrecious bool = true ++ + func init() { + // break init cycle + CmdBuild.Run = runBuild +@@ -179,6 +181,10 @@ func init() { + + AddBuildFlags(CmdBuild, DefaultBuildFlags) + AddBuildFlags(CmdInstall, DefaultBuildFlags) ++ ++ if x := os.Getenv("GOROOT_OVERRIDE"); x != "" { ++ goRootPrecious = false ++ } + } + + // Note that flags consulted by other parts of the code +--- a/src/cmd/go/internal/work/exec.go ++++ b/src/cmd/go/internal/work/exec.go +@@ -468,6 +468,23 @@ func (b *Builder) build(a *Action) (err + return errors.New("binary-only packages are no longer supported") + } + ++ if goRootPrecious && (a.Package.Standard || a.Package.Goroot) { ++ _, err := os.Stat(a.Package.Target) ++ if err == nil { ++ a.built = a.Package.Target ++ a.Target = a.Package.Target ++ a.buildID = b.fileHash(a.Package.Target) ++ a.Package.Stale = false ++ a.Package.StaleReason = "GOROOT-resident package" ++ return nil ++ } ++ a.Package.Stale = true ++ a.Package.StaleReason = "missing or invalid GOROOT-resident package" ++ if b.IsCmdList { ++ return nil ++ } ++ } ++ + if err := b.Mkdir(a.Objdir); err != nil { + return err + } +@@ -1520,6 +1537,14 @@ func BuildInstallFunc(b *Builder, a *Act + return err + } + ++ if goRootPrecious && a.Package != nil { ++ p := a.Package ++ if p.Standard || p.Goroot { ++ err := fmt.Errorf("attempting to install package %s into read-only GOROOT", p.ImportPath) ++ return err ++ } ++ } ++ + if err := b.Mkdir(a.Objdir); err != nil { + return err + } diff --git a/poky/meta/recipes-devtools/go/go-1.15/0008-use-GOBUILDMODE-to-set-buildmode.patch b/poky/meta/recipes-devtools/go/go-1.15/0008-use-GOBUILDMODE-to-set-buildmode.patch new file mode 100644 index 000000000..4e5d5021d --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-1.15/0008-use-GOBUILDMODE-to-set-buildmode.patch @@ -0,0 +1,42 @@ +From 971b5626339ce0c4d57f9721c9a81af566c5a044 Mon Sep 17 00:00:00 2001 +From: Alex Kube +Date: Wed, 23 Oct 2019 21:19:26 +0430 +Subject: [PATCH 8/9] cmd/go: Use GOBUILDMODE to set buildmode + 
+Upstream-Status: Denied [upstream choose antoher solution: `17a256b +cmd/go: -buildmode=pie for android/arm'] + +While building go itself, the go build system does not support +to set `-buildmode=pie' from environment. + +Add GOBUILDMODE to support it which make PIE executables the default +build mode, as PIE executables are required as of Yocto + +Refers: https://groups.google.com/forum/#!topic/golang-dev/gRCe5URKewI + +Adapted to Go 1.13 from patches originally submitted to +the meta/recipes-devtools/go tree by +Hongxu Jia + +Signed-off-by: Alexander J Kube +--- + src/cmd/go/internal/work/build.go | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +--- a/src/cmd/go/internal/work/build.go ++++ b/src/cmd/go/internal/work/build.go +@@ -254,7 +254,13 @@ func AddBuildFlags(cmd *base.Command, ma + + cmd.Flag.Var(&load.BuildAsmflags, "asmflags", "") + cmd.Flag.Var(buildCompiler{}, "compiler", "") +- cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "") ++ ++ if bm := os.Getenv("GOBUILDMODE"); bm != "" { ++ cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", bm, "") ++ } else { ++ cmd.Flag.StringVar(&cfg.BuildBuildmode, "buildmode", "default", "") ++ } ++ + cmd.Flag.Var(&load.BuildGcflags, "gcflags", "") + cmd.Flag.Var(&load.BuildGccgoflags, "gccgoflags", "") + if mask&OmitModFlag == 0 { diff --git a/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb b/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb deleted file mode 100644 index 3452ca46d..000000000 --- a/poky/meta/recipes-devtools/go/go-binary-native_1.14.7.bb +++ /dev/null @@ -1,46 +0,0 @@ -# This recipe is for bootstrapping our go-cross from a prebuilt binary of Go from golang.org. - -SUMMARY = "Go programming language compiler (upstream binary for bootstrap)" -HOMEPAGE = " http://golang.org/" -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" - -PROVIDES = "go-native" - -SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz;name=go_${BUILD_GOTUPLE}" -SRC_URI[go_linux_amd64.sha256sum] = "4a7fa60f323ee1416a4b1425aefc37ea359e9d64df19c326a58953a97ad41ea5" -SRC_URI[go_linux_arm64.sha256sum] = "fe5b6f6e441f3cb7b53ebf1a010bbebcb720ac98124984cfe2e51d72b8a58c71" - -UPSTREAM_CHECK_URI = "https://golang.org/dl/" -UPSTREAM_CHECK_REGEX = "go(?P\d+(\.\d+)+)\.linux" - -S = "${WORKDIR}/go" - -inherit goarch native - -do_compile() { - : -} - -make_wrapper() { - rm -f ${D}${bindir}/$1 - cat <${D}${bindir}/$1 -#!/bin/bash -here=\`dirname \$0\` -export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" -\$here/../lib/go/bin/$1 "\$@" -END - chmod +x ${D}${bindir}/$1 -} - -do_install() { - find ${S} -depth -type d -name testdata -exec rm -rf {} + - - install -d ${D}${bindir} ${D}${libdir}/go - cp --preserve=mode,timestamps -R ${S}/ ${D}${libdir}/ - - for f in ${S}/bin/* - do - make_wrapper `basename $f` - done -} diff --git a/poky/meta/recipes-devtools/go/go-binary-native_1.15.2.bb b/poky/meta/recipes-devtools/go/go-binary-native_1.15.2.bb new file mode 100644 index 000000000..ccd2d5eba --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-binary-native_1.15.2.bb @@ -0,0 +1,46 @@ +# This recipe is for bootstrapping our go-cross from a prebuilt binary of Go from golang.org. 
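
For reference, the GOBUILDMODE mechanism above boils down to letting an environment variable supply the default for -buildmode, so PIE can be made the default without touching every invocation. A minimal standalone sketch of that pattern (illustrative only, not the real cmd/go flag wiring):

// buildmode_default.go: env-supplied default for a -buildmode style flag.
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	defaultMode := "default"
	if bm := os.Getenv("GOBUILDMODE"); bm != "" {
		defaultMode = bm // e.g. GOBUILDMODE=pie
	}
	buildmode := flag.String("buildmode", defaultMode, "build mode to use")
	flag.Parse()
	// GOBUILDMODE=pie ./buildmode_default  ->  buildmode: pie
	fmt.Println("buildmode:", *buildmode)
}
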
+ +SUMMARY = "Go programming language compiler (upstream binary for bootstrap)" +HOMEPAGE = " http://golang.org/" +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=5d4950ecb7b26d2c5e4e7b4e0dd74707" + +PROVIDES = "go-native" + +SRC_URI = "https://dl.google.com/go/go${PV}.${BUILD_GOOS}-${BUILD_GOARCH}.tar.gz;name=go_${BUILD_GOTUPLE}" +SRC_URI[go_linux_amd64.sha256sum] = "b49fda1ca29a1946d6bb2a5a6982cf07ccd2aba849289508ee0f9918f6bb4552" +SRC_URI[go_linux_arm64.sha256sum] = "c8ec460cc82d61604b048f9439c06bd591722efce5cd48f49e19b5f6226bd36d" + +UPSTREAM_CHECK_URI = "https://golang.org/dl/" +UPSTREAM_CHECK_REGEX = "go(?P\d+(\.\d+)+)\.linux" + +S = "${WORKDIR}/go" + +inherit goarch native + +do_compile() { + : +} + +make_wrapper() { + rm -f ${D}${bindir}/$1 + cat <${D}${bindir}/$1 +#!/bin/bash +here=\`dirname \$0\` +export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" +\$here/../lib/go/bin/$1 "\$@" +END + chmod +x ${D}${bindir}/$1 +} + +do_install() { + find ${S} -depth -type d -name testdata -exec rm -rf {} + + + install -d ${D}${bindir} ${D}${libdir}/go + cp --preserve=mode,timestamps -R ${S}/ ${D}${libdir}/ + + for f in ${S}/bin/* + do + make_wrapper `basename $f` + done +} diff --git a/poky/meta/recipes-devtools/go/go-cross-canadian_1.14.bb b/poky/meta/recipes-devtools/go/go-cross-canadian_1.14.bb deleted file mode 100644 index 7ac9449e4..000000000 --- a/poky/meta/recipes-devtools/go/go-cross-canadian_1.14.bb +++ /dev/null @@ -1,2 +0,0 @@ -require go-cross-canadian.inc -require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-cross-canadian_1.15.bb b/poky/meta/recipes-devtools/go/go-cross-canadian_1.15.bb new file mode 100644 index 000000000..7ac9449e4 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-cross-canadian_1.15.bb @@ -0,0 +1,2 @@ +require go-cross-canadian.inc +require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-cross_1.14.bb b/poky/meta/recipes-devtools/go/go-cross_1.14.bb deleted file mode 100644 index 80b5a03f6..000000000 --- a/poky/meta/recipes-devtools/go/go-cross_1.14.bb +++ /dev/null @@ -1,2 +0,0 @@ -require go-cross.inc -require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-cross_1.15.bb b/poky/meta/recipes-devtools/go/go-cross_1.15.bb new file mode 100644 index 000000000..80b5a03f6 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-cross_1.15.bb @@ -0,0 +1,2 @@ +require go-cross.inc +require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-crosssdk_1.14.bb b/poky/meta/recipes-devtools/go/go-crosssdk_1.14.bb deleted file mode 100644 index 1857c8a57..000000000 --- a/poky/meta/recipes-devtools/go/go-crosssdk_1.14.bb +++ /dev/null @@ -1,2 +0,0 @@ -require go-crosssdk.inc -require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-crosssdk_1.15.bb b/poky/meta/recipes-devtools/go/go-crosssdk_1.15.bb new file mode 100644 index 000000000..1857c8a57 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-crosssdk_1.15.bb @@ -0,0 +1,2 @@ +require go-crosssdk.inc +require go-${PV}.inc diff --git a/poky/meta/recipes-devtools/go/go-native_1.14.bb b/poky/meta/recipes-devtools/go/go-native_1.14.bb deleted file mode 100644 index c5cb97e73..000000000 --- a/poky/meta/recipes-devtools/go/go-native_1.14.bb +++ /dev/null @@ -1,60 +0,0 @@ -# This recipe builds a native Go (written in Go) by first building an old Go 1.4 -# (written in C). However this old Go does not support all hosts platforms. 
- -require go-${PV}.inc - -inherit native - -SRC_URI_append = " https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz;name=bootstrap;subdir=go1.4" -SRC_URI[bootstrap.md5sum] = "dbf727a4b0e365bf88d97cbfde590016" -SRC_URI[bootstrap.sha256sum] = "f4ff5b5eb3a3cae1c993723f3eab519c5bae18866b5e5f96fe1102f0cb5c3e52" - -export GOOS = "${BUILD_GOOS}" -export GOARCH = "${BUILD_GOARCH}" -CC = "${@d.getVar('BUILD_CC').strip()}" - -GOMAKEARGS ?= "--no-banner" - -do_configure() { - cd ${WORKDIR}/go1.4/go/src - CGO_ENABLED=0 GOROOT=${WORKDIR}/go1.4/go ./make.bash -} - -do_compile() { - export GOROOT_FINAL="${libdir_native}/go" - export GOROOT_BOOTSTRAP="${WORKDIR}/go1.4/go" - - cd src - ./make.bash ${GOMAKEARGS} - cd ${B} -} -do_compile[dirs] =+ "${GOTMPDIR} ${B}/bin" -do_compile[cleandirs] += "${GOTMPDIR} ${B}/bin" - -make_wrapper() { - rm -f ${D}${bindir}/$2$3 - cat <${D}${bindir}/$2$3 -#!/bin/bash -here=\`dirname \$0\` -export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" -\$here/../lib/go/bin/$1 "\$@" -END - chmod +x ${D}${bindir}/$2 -} - -do_install() { - install -d ${D}${libdir}/go - cp --preserve=mode,timestamps -R ${B}/pkg ${D}${libdir}/go/ - install -d ${D}${libdir}/go/src - (cd ${S}/src; for d in *; do \ - [ -d $d ] && cp -a ${S}/src/$d ${D}${libdir}/go/src/; \ - done) - find ${D}${libdir}/go/src -depth -type d -name testdata -exec rm -rf {} \; - install -d ${D}${bindir} ${D}${libdir}/go/bin - for f in ${B}/bin/* - do - base=`basename $f` - install -m755 $f ${D}${libdir}/go/bin - make_wrapper $base $base - done -} diff --git a/poky/meta/recipes-devtools/go/go-native_1.15.bb b/poky/meta/recipes-devtools/go/go-native_1.15.bb new file mode 100644 index 000000000..f14892cdb --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-native_1.15.bb @@ -0,0 +1,59 @@ +# This recipe builds a native Go (written in Go) by first building an old Go 1.4 +# (written in C). However this old Go does not support all hosts platforms. 
+ +require go-${PV}.inc + +inherit native + +SRC_URI_append = " https://dl.google.com/go/go1.4-bootstrap-20171003.tar.gz;name=bootstrap;subdir=go1.4" +SRC_URI[bootstrap.sha256sum] = "f4ff5b5eb3a3cae1c993723f3eab519c5bae18866b5e5f96fe1102f0cb5c3e52" + +export GOOS = "${BUILD_GOOS}" +export GOARCH = "${BUILD_GOARCH}" +CC = "${@d.getVar('BUILD_CC').strip()}" + +GOMAKEARGS ?= "--no-banner" + +do_configure() { + cd ${WORKDIR}/go1.4/go/src + CGO_ENABLED=0 GOROOT=${WORKDIR}/go1.4/go ./make.bash +} + +do_compile() { + export GOROOT_FINAL="${libdir_native}/go" + export GOROOT_BOOTSTRAP="${WORKDIR}/go1.4/go" + + cd src + ./make.bash ${GOMAKEARGS} + cd ${B} +} +do_compile[dirs] =+ "${GOTMPDIR} ${B}/bin" +do_compile[cleandirs] += "${GOTMPDIR} ${B}/bin" + +make_wrapper() { + rm -f ${D}${bindir}/$2$3 + cat <${D}${bindir}/$2$3 +#!/bin/bash +here=\`dirname \$0\` +export GOROOT="${GOROOT:-\`readlink -f \$here/../lib/go\`}" +\$here/../lib/go/bin/$1 "\$@" +END + chmod +x ${D}${bindir}/$2 +} + +do_install() { + install -d ${D}${libdir}/go + cp --preserve=mode,timestamps -R ${B}/pkg ${D}${libdir}/go/ + install -d ${D}${libdir}/go/src + (cd ${S}/src; for d in *; do \ + [ -d $d ] && cp -a ${S}/src/$d ${D}${libdir}/go/src/; \ + done) + find ${D}${libdir}/go/src -depth -type d -name testdata -exec rm -rf {} \; + install -d ${D}${bindir} ${D}${libdir}/go/bin + for f in ${B}/bin/* + do + base=`basename $f` + install -m755 $f ${D}${libdir}/go/bin + make_wrapper $base $base + done +} diff --git a/poky/meta/recipes-devtools/go/go-runtime_1.14.bb b/poky/meta/recipes-devtools/go/go-runtime_1.14.bb deleted file mode 100644 index 4eeee65e0..000000000 --- a/poky/meta/recipes-devtools/go/go-runtime_1.14.bb +++ /dev/null @@ -1,3 +0,0 @@ -export CGO_ENABLED_riscv64 = "" -require go-${PV}.inc -require go-runtime.inc diff --git a/poky/meta/recipes-devtools/go/go-runtime_1.15.bb b/poky/meta/recipes-devtools/go/go-runtime_1.15.bb new file mode 100644 index 000000000..4eeee65e0 --- /dev/null +++ b/poky/meta/recipes-devtools/go/go-runtime_1.15.bb @@ -0,0 +1,3 @@ +export CGO_ENABLED_riscv64 = "" +require go-${PV}.inc +require go-runtime.inc diff --git a/poky/meta/recipes-devtools/go/go_1.14.bb b/poky/meta/recipes-devtools/go/go_1.14.bb deleted file mode 100644 index bc90a1329..000000000 --- a/poky/meta/recipes-devtools/go/go_1.14.bb +++ /dev/null @@ -1,14 +0,0 @@ -require go-${PV}.inc -require go-target.inc - -export GOBUILDMODE="" -export CGO_ENABLED_riscv64 = "" -# Add pie to GOBUILDMODE to satisfy "textrel" QA checking, but mips/riscv -# doesn't support -buildmode=pie, so skip the QA checking for mips/riscv and its -# variants. -python() { - if 'mips' in d.getVar('TARGET_ARCH',True) or 'riscv' in d.getVar('TARGET_ARCH',True): - d.appendVar('INSANE_SKIP_%s' % d.getVar('PN',True), " textrel") - else: - d.setVar('GOBUILDMODE', 'pie') -} diff --git a/poky/meta/recipes-devtools/go/go_1.15.bb b/poky/meta/recipes-devtools/go/go_1.15.bb new file mode 100644 index 000000000..4bf9dd50b --- /dev/null +++ b/poky/meta/recipes-devtools/go/go_1.15.bb @@ -0,0 +1,15 @@ +require go-${PV}.inc +require go-target.inc + +inherit linuxloader + +export GOBUILDMODE="" +export CGO_ENABLED_riscv64 = "" +export GO_LDSO = "${@get_linuxloader(d)}" + +# mips/rv64 doesn't support -buildmode=pie, so skip the QA checking for mips/riscv and its +# variants. 
+python() { + if 'mips' in d.getVar('TARGET_ARCH',True) or 'riscv' in d.getVar('TARGET_ARCH',True): + d.appendVar('INSANE_SKIP_%s' % d.getVar('PN',True), " textrel") +} diff --git a/poky/meta/recipes-devtools/help2man/help2man-native_1.47.15.bb b/poky/meta/recipes-devtools/help2man/help2man-native_1.47.15.bb deleted file mode 100644 index 4f80a2bdb..000000000 --- a/poky/meta/recipes-devtools/help2man/help2man-native_1.47.15.bb +++ /dev/null @@ -1,23 +0,0 @@ -SUMMARY = "Program for creating simple man pages" -SECTION = "devel" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464" -DEPENDS = "autoconf-native automake-native" - -SRC_URI = "${GNU_MIRROR}/${BPN}/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "c25a35b30eceb315361484b0ff1f81c924e8ee5c8881576f1ee762f001dbcd1c" - -inherit autotools native - -EXTRA_OECONF = "--disable-nls" - -# We don't want to reconfigure things as it would require 'perlnative' to be -# used. -do_configure() { - oe_runconf -} - -do_install_append () { - # Make sure we use /usr/bin/env perl - sed -i -e "1s:#!.*:#! /usr/bin/env perl:" ${D}${bindir}/help2man -} diff --git a/poky/meta/recipes-devtools/help2man/help2man_1.47.15.bb b/poky/meta/recipes-devtools/help2man/help2man_1.47.15.bb new file mode 100644 index 000000000..6ab9f6cbe --- /dev/null +++ b/poky/meta/recipes-devtools/help2man/help2man_1.47.15.bb @@ -0,0 +1,22 @@ +SUMMARY = "Program for creating simple man pages" +SECTION = "devel" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=1ebbd3e34237af26da5dc08a4e440464" + +SRC_URI = "${GNU_MIRROR}/${BPN}/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "c25a35b30eceb315361484b0ff1f81c924e8ee5c8881576f1ee762f001dbcd1c" + +inherit autotools + +# This is a hand-maintained aclocal.m4 but our autotools class currently deletes +# aclocal.m4. +EXTRA_AUTORECONF += "--exclude=aclocal" + +EXTRA_OECONF = "--disable-nls" + +do_install_append () { + # Make sure we use /usr/bin/env perl + sed -i -e "1s:#!.*:#! /usr/bin/env perl:" ${D}${bindir}/help2man +} + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/librepo/librepo_1.12.0.bb b/poky/meta/recipes-devtools/librepo/librepo_1.12.0.bb deleted file mode 100644 index 2d9cac830..000000000 --- a/poky/meta/recipes-devtools/librepo/librepo_1.12.0.bb +++ /dev/null @@ -1,27 +0,0 @@ -SUMMARY = "A library providing C and Python (libcURL like) API \ - for downloading linux repository metadata and packages." -LICENSE = "LGPLv2.1" -LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" - -SRC_URI = "git://github.com/rpm-software-management/librepo.git \ - file://0002-Do-not-try-to-obtain-PYTHON_INSTALL_DIR-by-running-p.patch \ - file://0004-Set-gpgme-variables-with-pkg-config-not-with-cmake-m.patch \ - " - -SRCREV = "9c173f1110bb30f4ae842a8e3532b275966c2d83" - -S = "${WORKDIR}/git" - -DEPENDS = "curl glib-2.0 openssl attr gpgme libxml2" - -inherit cmake distutils3-base pkgconfig - -EXTRA_OECMAKE = " \ - -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} \ - -DPYTHON_DESIRED=3 \ - -DENABLE_TESTS=OFF \ - -DENABLE_DOCS=OFF \ - -DWITH_ZCHUNK=OFF \ -" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/librepo/librepo_1.12.1.bb b/poky/meta/recipes-devtools/librepo/librepo_1.12.1.bb new file mode 100644 index 000000000..059735105 --- /dev/null +++ b/poky/meta/recipes-devtools/librepo/librepo_1.12.1.bb @@ -0,0 +1,27 @@ +SUMMARY = "A library providing C and Python (libcURL like) API \ + for downloading linux repository metadata and packages." 
+LICENSE = "LGPLv2.1" +LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" + +SRC_URI = "git://github.com/rpm-software-management/librepo.git \ + file://0002-Do-not-try-to-obtain-PYTHON_INSTALL_DIR-by-running-p.patch \ + file://0004-Set-gpgme-variables-with-pkg-config-not-with-cmake-m.patch \ + " + +SRCREV = "d4ad350291f2937c0b6a3eea9e1d0c8e1051fc32" + +S = "${WORKDIR}/git" + +DEPENDS = "curl glib-2.0 openssl attr gpgme libxml2" + +inherit cmake distutils3-base pkgconfig + +EXTRA_OECMAKE = " \ + -DPYTHON_INSTALL_DIR=${PYTHON_SITEPACKAGES_DIR} \ + -DPYTHON_DESIRED=3 \ + -DENABLE_TESTS=OFF \ + -DENABLE_DOCS=OFF \ + -DWITH_ZCHUNK=OFF \ +" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/log4cplus/log4cplus_2.0.5.bb b/poky/meta/recipes-devtools/log4cplus/log4cplus_2.0.5.bb new file mode 100644 index 000000000..967ac7623 --- /dev/null +++ b/poky/meta/recipes-devtools/log4cplus/log4cplus_2.0.5.bb @@ -0,0 +1,19 @@ +SUMMARY = "log4cplus provides a simple C++ logging API for log management" +SECTION = "libs" +HOMEPAGE = "http://sourceforge.net/projects/log4cplus/" +BUGTRACKER = "http://sourceforge.net/p/log4cplus/bugs/" + +LICENSE = "Apache-2.0 & BSD-2-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=41e8e060c26822886b592ab4765c756b" + +SRC_URI = "${SOURCEFORGE_MIRROR}/project/${BPN}/${BPN}-stable/${PV}/${BP}.tar.gz \ + " +SRC_URI[md5sum] = "71dd956bf686195127559671f1426cff" +SRC_URI[sha256sum] = "c07115c23219390633798def30b7b51a0f79fdeb857e4b49632f17746d0ceb97" + +UPSTREAM_CHECK_URI = "https://sourceforge.net/projects/log4cplus/files/log4cplus-stable/" +UPSTREAM_CHECK_REGEX = "log4cplus-stable/(?P\d+(\.\d+)+)/" + +inherit autotools pkgconfig + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/meson/meson.inc b/poky/meta/recipes-devtools/meson/meson.inc index 607093a15..004189e36 100644 --- a/poky/meta/recipes-devtools/meson/meson.inc +++ b/poky/meta/recipes-devtools/meson/meson.inc @@ -16,7 +16,7 @@ SRC_URI = "https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${P file://0001-modules-python.py-do-not-substitute-python-s-install.patch \ file://0001-gnome.py-prefix-g-i-paths-with-PKG_CONFIG_SYSROOT_DI.patch \ " -SRC_URI[sha256sum] = "0a1ae2bfe2ae14ac47593537f93290fb79e9b775c55b4c53c282bc3ca3745b35" +SRC_URI[sha256sum] = "3b5741f884e04928bdfa1947467ff06afa6c98e623c25cef75adf71ca39ce080" SRC_URI_append_class-native = " \ file://0001-Make-CPU-family-warnings-fatal.patch \ diff --git a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch index 623e32957..199d4254d 100644 --- a/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch +++ b/poky/meta/recipes-devtools/meson/meson/0001-Make-CPU-family-warnings-fatal.patch @@ -1,7 +1,7 @@ -From 5624b5835af747b601780ad14646f9c1fb854931 Mon Sep 17 00:00:00 2001 +From 9311844b6c422479556e83b89a8e675ebcb2056c Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Tue, 3 Jul 2018 13:59:09 +0100 -Subject: [PATCH 1/2] Make CPU family warnings fatal +Subject: [PATCH] Make CPU family warnings fatal Upstream-Status: Inappropriate [OE specific] Signed-off-by: Ross Burton @@ -39,6 +39,3 @@ index bf09a88..8eabe78 100644 return trial --- -2.24.0 - diff --git a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch index dce463e5b..5c16cf501 
100644 --- a/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch +++ b/poky/meta/recipes-devtools/meson/meson/0002-Support-building-allarch-recipes-again.patch @@ -1,4 +1,4 @@ -From 4b283d545152092fec12b9d80219161d11002c72 Mon Sep 17 00:00:00 2001 +From 38f59e256f760aa959c13f4c5713f87ff7addee5 Mon Sep 17 00:00:00 2001 From: Peter Kjellerstedt Date: Thu, 26 Jul 2018 16:32:49 +0200 Subject: [PATCH] Support building allarch recipes again @@ -13,7 +13,7 @@ Signed-off-by: Peter Kjellerstedt 1 file changed, 1 insertion(+) diff --git a/mesonbuild/envconfig.py b/mesonbuild/envconfig.py -index dc20616..f54adcd 100644 +index d1be65b..90f3573 100644 --- a/mesonbuild/envconfig.py +++ b/mesonbuild/envconfig.py @@ -36,6 +36,7 @@ _T = T.TypeVar('_T') diff --git a/poky/meta/recipes-devtools/meson/meson/0003-native_bindir.patch b/poky/meta/recipes-devtools/meson/meson/0003-native_bindir.patch index 64e9fae9e..81e9acd36 100644 --- a/poky/meta/recipes-devtools/meson/meson/0003-native_bindir.patch +++ b/poky/meta/recipes-devtools/meson/meson/0003-native_bindir.patch @@ -1,4 +1,4 @@ -From da2091f6dfe978fc6140fc2d01bcafbbfae8d8db Mon Sep 17 00:00:00 2001 +From f06c89939d0d006090a8a8728b2a13d532b83047 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Wed, 15 Nov 2017 15:05:01 +0100 Subject: [PATCH] native_bindir @@ -22,10 +22,10 @@ Signed-off-by: Ricardo Ribalda Delgado 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/mesonbuild/dependencies/base.py b/mesonbuild/dependencies/base.py -index 50f4179..e1529d8 100644 +index 368a4bc..9fc398e 100644 --- a/mesonbuild/dependencies/base.py +++ b/mesonbuild/dependencies/base.py -@@ -157,7 +157,7 @@ class Dependency: +@@ -183,7 +183,7 @@ class Dependency: def get_exe_args(self, compiler): return [] @@ -34,16 +34,16 @@ index 50f4179..e1529d8 100644 raise DependencyException('{!r} is not a pkgconfig dependency'.format(self.name)) def get_configtool_variable(self, variable_name): -@@ -225,7 +225,7 @@ class InternalDependency(Dependency): - self.ext_deps = ext_deps - self.variables = variables +@@ -261,7 +261,7 @@ class InternalDependency(Dependency): + setattr(result, k, copy.deepcopy(v, memo)) + return result - def get_pkgconfig_variable(self, variable_name, kwargs): + def get_pkgconfig_variable(self, variable_name, kwargs, use_native=False): raise DependencyException('Method "get_pkgconfig_variable()" is ' 'invalid for an internal dependency') -@@ -633,15 +633,18 @@ class PkgConfigDependency(ExternalDependency): +@@ -634,15 +634,18 @@ class PkgConfigDependency(ExternalDependency): return s.format(self.__class__.__name__, self.name, self.is_found, self.version_reqs) @@ -65,7 +65,7 @@ index 50f4179..e1529d8 100644 # Always copy the environment since we're going to modify it # with pkg-config variables if env is None: -@@ -667,7 +670,7 @@ class PkgConfigDependency(ExternalDependency): +@@ -668,7 +671,7 @@ class PkgConfigDependency(ExternalDependency): targs = tuple(args) cache = PkgConfigDependency.pkgbin_cache if (self.pkgbin, targs, fenv) not in cache: @@ -74,7 +74,7 @@ index 50f4179..e1529d8 100644 return cache[(self.pkgbin, targs, fenv)] def _convert_mingw_paths(self, args: T.List[str]) -> T.List[str]: -@@ -876,7 +879,7 @@ class PkgConfigDependency(ExternalDependency): +@@ -877,7 +880,7 @@ class PkgConfigDependency(ExternalDependency): (self.name, out_raw)) self.link_args, self.raw_link_args = self._search_libs(out, out_raw) @@ -83,7 +83,7 @@ index 50f4179..e1529d8 100644 options = ['--variable=' + 
variable_name, self.name] if 'define_variable' in kwargs: -@@ -889,7 +892,7 @@ class PkgConfigDependency(ExternalDependency): +@@ -890,7 +893,7 @@ class PkgConfigDependency(ExternalDependency): options = ['--define-variable=' + '='.join(definition)] + options @@ -93,10 +93,10 @@ index 50f4179..e1529d8 100644 if ret != 0: if self.required: diff --git a/mesonbuild/dependencies/ui.py b/mesonbuild/dependencies/ui.py -index 741f0b8..134dd54 100644 +index 95dfe2b..5f82890 100644 --- a/mesonbuild/dependencies/ui.py +++ b/mesonbuild/dependencies/ui.py -@@ -320,7 +320,7 @@ class QtBaseDependency(ExternalDependency): +@@ -325,7 +325,7 @@ class QtBaseDependency(ExternalDependency): self.bindir = self.get_pkgconfig_host_bins(core) if not self.bindir: # If exec_prefix is not defined, the pkg-config file is broken @@ -105,7 +105,7 @@ index 741f0b8..134dd54 100644 if prefix: self.bindir = os.path.join(prefix, 'bin') -@@ -524,7 +524,7 @@ class Qt4Dependency(QtBaseDependency): +@@ -528,7 +528,7 @@ class Qt4Dependency(QtBaseDependency): applications = ['moc', 'uic', 'rcc', 'lupdate', 'lrelease'] for application in applications: try: @@ -114,7 +114,7 @@ index 741f0b8..134dd54 100644 except MesonException: pass -@@ -534,7 +534,7 @@ class Qt5Dependency(QtBaseDependency): +@@ -538,7 +538,7 @@ class Qt5Dependency(QtBaseDependency): QtBaseDependency.__init__(self, 'qt5', env, kwargs) def get_pkgconfig_host_bins(self, core): diff --git a/poky/meta/recipes-devtools/meson/meson_0.55.0.bb b/poky/meta/recipes-devtools/meson/meson_0.55.0.bb deleted file mode 100644 index de9b905c1..000000000 --- a/poky/meta/recipes-devtools/meson/meson_0.55.0.bb +++ /dev/null @@ -1,4 +0,0 @@ -include meson.inc - -BBCLASSEXTEND = "native" - diff --git a/poky/meta/recipes-devtools/meson/meson_0.55.1.bb b/poky/meta/recipes-devtools/meson/meson_0.55.1.bb new file mode 100644 index 000000000..de9b905c1 --- /dev/null +++ b/poky/meta/recipes-devtools/meson/meson_0.55.1.bb @@ -0,0 +1,4 @@ +include meson.inc + +BBCLASSEXTEND = "native" + diff --git a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.0.bb b/poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.0.bb deleted file mode 100644 index 67add2c25..000000000 --- a/poky/meta/recipes-devtools/meson/nativesdk-meson_0.55.0.bb +++ /dev/null @@ -1,65 +0,0 @@ -include meson.inc - -inherit nativesdk -inherit siteinfo - -SRC_URI += "file://meson-setup.py \ - file://meson-wrapper" - -def meson_endian(prefix, d): - arch, os = d.getVar(prefix + "_ARCH"), d.getVar(prefix + "_OS") - sitedata = siteinfo_data_for_machine(arch, os, d) - if "endian-little" in sitedata: - return "little" - elif "endian-big" in sitedata: - return "big" - else: - bb.fatal("Cannot determine endianism for %s-%s" % (arch, os)) - -# The cross file logic is similar but not identical to that in meson.bbclass, -# since it's generating for an SDK rather than a cross-compile. Important -# differences are: -# - We can't set vars like CC, CXX, etc. yet because they will be filled in with -# real paths by meson-setup.sh when the SDK is extracted. -# - Some overrides aren't needed, since the SDK injects paths that take care of -# them. 
-do_install_append() { - install -d ${D}${datadir}/meson - cat >${D}${datadir}/meson/meson.cross.template <${D}${datadir}/meson/meson.cross.template < Date: Tue, 19 Nov 2019 13:12:17 -0600 Subject: [PATCH] Add --debug-prefix-map option @@ -11,7 +11,7 @@ Upstream-Status: Submitted [https://bugzilla.nasm.us/show_bug.cgi?id=3392635] Signed-off-by: Joshua Watt --- - asm/nasm.c | 26 +++++++++++++++++++++++++- + asm/nasm.c | 24 ++++++++++++++++++++++++ include/nasmlib.h | 9 +++++++++ nasm.txt | 4 ++++ nasmlib/filename.c | 20 ++++++++++++++++++++ @@ -23,34 +23,32 @@ Signed-off-by: Joshua Watt stdlib/strlcat.c | 2 +- test/elfdebugprefix.asm | 6 ++++++ test/performtest.pl | 12 ++++++++++-- - 12 files changed, 83 insertions(+), 10 deletions(-) + 12 files changed, 82 insertions(+), 9 deletions(-) create mode 100644 test/elfdebugprefix.asm diff --git a/asm/nasm.c b/asm/nasm.c -index a0e1719..fc6c62e 100644 +index e5ae89a..7a7f8b4 100644 --- a/asm/nasm.c +++ b/asm/nasm.c -@@ -938,7 +938,8 @@ enum text_options { - OPT_LIMIT, +@@ -939,6 +939,7 @@ enum text_options { OPT_KEEP_ALL, OPT_NO_LINE, -- OPT_DEBUG -+ OPT_DEBUG, -+ OPT_DEBUG_PREFIX_MAP + OPT_DEBUG, ++ OPT_DEBUG_PREFIX_MAP, + OPT_REPRODUCIBLE }; enum need_arg { - ARG_NO, -@@ -970,6 +971,7 @@ static const struct textargs textopts[] = { +@@ -971,6 +972,7 @@ static const struct textargs textopts[] = { {"keep-all", OPT_KEEP_ALL, ARG_NO, 0}, {"no-line", OPT_NO_LINE, ARG_NO, 0}, {"debug", OPT_DEBUG, ARG_MAYBE, 0}, + {"debug-prefix-map", OPT_DEBUG_PREFIX_MAP, true, 0}, + {"reproducible", OPT_REPRODUCIBLE, ARG_NO, 0}, {NULL, OPT_BOGUS, ARG_NO, 0} }; - -@@ -1332,6 +1334,26 @@ static bool process_arg(char *p, char *q, int pass) - case OPT_DEBUG: - debug_nasm = param ? strtoul(param, NULL, 10) : debug_nasm+1; +@@ -1337,6 +1339,26 @@ static bool process_arg(char *p, char *q, int pass) + case OPT_REPRODUCIBLE: + reproducible = true; break; + case OPT_DEBUG_PREFIX_MAP: { + struct debug_prefix_list *d; @@ -75,7 +73,7 @@ index a0e1719..fc6c62e 100644 case OPT_HELP: help(stdout); exit(0); -@@ -2297,6 +2319,8 @@ static void help(FILE *out) +@@ -2304,6 +2326,8 @@ static void help(FILE *out) " -w-x disable warning x (also -Wno-x)\n" " -w[+-]error promote all warnings to errors (also -Werror)\n" " -w[+-]error=x promote warning x to errors (also -Werror=x)\n" @@ -85,7 +83,7 @@ index a0e1719..fc6c62e 100644 fprintf(out, " %-20s %s\n", diff --git a/include/nasmlib.h b/include/nasmlib.h -index e9bfbcc..98fc653 100644 +index 438178d..4c3e90d 100644 --- a/include/nasmlib.h +++ b/include/nasmlib.h @@ -250,10 +250,19 @@ int64_t readstrnum(char *str, int length, bool *warn); @@ -181,10 +179,10 @@ index 54b22f8..c4a412c 100644 static void as86_cleanup(void) diff --git a/output/outcoff.c b/output/outcoff.c -index bcd9ff3..15bfcf3 100644 +index 58fa024..14baf7b 100644 --- a/output/outcoff.c +++ b/output/outcoff.c -@@ -1095,14 +1095,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value, +@@ -1072,14 +1072,14 @@ static void coff_symbol(char *name, int32_t strpos, int32_t value, static void coff_write_symbols(void) { @@ -215,7 +213,7 @@ index 61af020..1292958 100644 nsects = sectlen = 0; syms = saa_init((int32_t)sizeof(struct elf_symbol)); diff --git a/output/outieee.c b/output/outieee.c -index 4cc0f0f..2468724 100644 +index 6d6d4b2..cdb8333 100644 --- a/output/outieee.c +++ b/output/outieee.c @@ -207,7 +207,7 @@ static void ieee_unqualified_name(char *, char *); @@ -228,10 +226,10 @@ index 4cc0f0f..2468724 100644 fpubhead = NULL; fpubtail = &fpubhead; diff --git 
a/output/outobj.c b/output/outobj.c -index 0d4d311..d8dd6a0 100644 +index 56b43f9..fefea94 100644 --- a/output/outobj.c +++ b/output/outobj.c -@@ -638,7 +638,7 @@ static enum directive_result obj_directive(enum directive, char *); +@@ -644,7 +644,7 @@ static enum directive_result obj_directive(enum directive, char *); static void obj_init(void) { diff --git a/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb b/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb deleted file mode 100644 index 5c4e28de0..000000000 --- a/poky/meta/recipes-devtools/nasm/nasm_2.15.03.bb +++ /dev/null @@ -1,21 +0,0 @@ -SUMMARY = "General-purpose x86 assembler" -SECTION = "devel" -LICENSE = "BSD-2-Clause" -LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe" - -SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \ - file://0001-stdlib-Add-strlcat.patch \ - file://0002-Add-debug-prefix-map-option.patch \ - " - -SRC_URI[sha256sum] = "04e7343d9bf112bffa9fda86f6c7c8b120c2ccd700b882e2db9f57484b1bd778" - -EXTRA_AUTORECONF_append = " -I autoconf/m4" - -inherit autotools - -BBCLASSEXTEND = "native" - -DEPENDS = "groff-native" - -CVE_PRODUCT = "netwide_assembler" diff --git a/poky/meta/recipes-devtools/nasm/nasm_2.15.05.bb b/poky/meta/recipes-devtools/nasm/nasm_2.15.05.bb new file mode 100644 index 000000000..6a1294c34 --- /dev/null +++ b/poky/meta/recipes-devtools/nasm/nasm_2.15.05.bb @@ -0,0 +1,21 @@ +SUMMARY = "General-purpose x86 assembler" +SECTION = "devel" +LICENSE = "BSD-2-Clause" +LIC_FILES_CHKSUM = "file://LICENSE;md5=90904486f8fbf1861cf42752e1a39efe" + +SRC_URI = "http://www.nasm.us/pub/nasm/releasebuilds/${PV}/nasm-${PV}.tar.bz2 \ + file://0001-stdlib-Add-strlcat.patch \ + file://0002-Add-debug-prefix-map-option.patch \ + " + +SRC_URI[sha256sum] = "3c4b8339e5ab54b1bcb2316101f8985a5da50a3f9e504d43fa6f35668bee2fd0" + +EXTRA_AUTORECONF_append = " -I autoconf/m4" + +inherit autotools-brokensep + +BBCLASSEXTEND = "native" + +DEPENDS = "groff-native" + +CVE_PRODUCT = "netwide_assembler" diff --git a/poky/meta/recipes-devtools/ninja/ninja_1.10.0.bb b/poky/meta/recipes-devtools/ninja/ninja_1.10.0.bb deleted file mode 100644 index bdc636505..000000000 --- a/poky/meta/recipes-devtools/ninja/ninja_1.10.0.bb +++ /dev/null @@ -1,30 +0,0 @@ -SUMMARY = "Ninja is a small build system with a focus on speed." -HOMEPAGE = "http://martine.github.com/ninja/" -LICENSE = "Apache-2.0" -LIC_FILES_CHKSUM = "file://COPYING;md5=a81586a64ad4e476c791cda7e2f2c52e" - -DEPENDS = "re2c-native ninja-native" - -SRCREV = "ed7f67040b370189d989adbd60ff8ea29957231f" - -SRC_URI = "git://github.com/ninja-build/ninja.git;branch=release" -UPSTREAM_CHECK_GITTAGREGEX = "v(?P.*)" - -S = "${WORKDIR}/git" - -do_configure[noexec] = "1" - -do_compile_class-native() { - python3 ./configure.py --bootstrap -} - -do_compile() { - python3 ./configure.py - ninja -} - -do_install() { - install -D -m 0755 ${S}/ninja ${D}${bindir}/ninja -} - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/ninja/ninja_1.10.1.bb b/poky/meta/recipes-devtools/ninja/ninja_1.10.1.bb new file mode 100644 index 000000000..eefac4166 --- /dev/null +++ b/poky/meta/recipes-devtools/ninja/ninja_1.10.1.bb @@ -0,0 +1,30 @@ +SUMMARY = "Ninja is a small build system with a focus on speed." 
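
[Illustrative aside] The nasm patch carried above adds a --debug-prefix-map=OLD=NEW option so build-directory paths do not leak into debug sections, mirroring GCC's -fdebug-prefix-map. A minimal Python sketch of the remapping it performs on file names; this illustrates the behaviour only and is not nasm's C implementation, and the example paths are made up.

def apply_debug_prefix_map(path, prefix_map):
    # prefix_map is an ordered list of (old_prefix, new_prefix) pairs,
    # as given by repeated --debug-prefix-map=OLD=NEW options.
    for old, new in prefix_map:
        if path.startswith(old):
            return new + path[len(old):]
    return path

# Usage: strip the build directory so the recorded path is reproducible
# no matter where the source tree was unpacked.
assert apply_debug_prefix_map(
    "/build/tmp/nasm-2.15.05/asm/nasm.c",
    [("/build/tmp/nasm-2.15.05", "/usr/src/debug/nasm")],
) == "/usr/src/debug/nasm/asm/nasm.c"
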
+HOMEPAGE = "http://martine.github.com/ninja/" +LICENSE = "Apache-2.0" +LIC_FILES_CHKSUM = "file://COPYING;md5=a81586a64ad4e476c791cda7e2f2c52e" + +DEPENDS = "re2c-native ninja-native" + +SRCREV = "a1f879b29c9aafe6a2bc0ba885701f8f4f19f772" + +SRC_URI = "git://github.com/ninja-build/ninja.git;branch=release" +UPSTREAM_CHECK_GITTAGREGEX = "v(?P.*)" + +S = "${WORKDIR}/git" + +do_configure[noexec] = "1" + +do_compile_class-native() { + python3 ./configure.py --bootstrap +} + +do_compile() { + python3 ./configure.py + ninja +} + +do_install() { + install -D -m 0755 ${S}/ninja ${D}${bindir}/ninja +} + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/packagegroups/packagegroup-core-device-devel.bb b/poky/meta/recipes-devtools/packagegroups/packagegroup-core-device-devel.bb deleted file mode 100644 index edee474ec..000000000 --- a/poky/meta/recipes-devtools/packagegroups/packagegroup-core-device-devel.bb +++ /dev/null @@ -1,16 +0,0 @@ -SUMMARY = "Provides a small set of tools for development on the device" - -PR = "r1" - -inherit packagegroup - -RPROVIDES_${PN} = "qemu-config" -RREPLACES_${PN} = "qemu-config" -RCONFLICTS_${PN} = "qemu-config" - -RDEPENDS_${PN} = "\ - distcc-config \ - nfs-export-root \ - bash \ - binutils-symlinks \ - " diff --git a/poky/meta/recipes-devtools/patchelf/patchelf_0.11.bb b/poky/meta/recipes-devtools/patchelf/patchelf_0.11.bb deleted file mode 100644 index ba7ad404e..000000000 --- a/poky/meta/recipes-devtools/patchelf/patchelf_0.11.bb +++ /dev/null @@ -1,16 +0,0 @@ -SRC_URI = "git://github.com/NixOS/patchelf;protocol=https \ - file://handle-read-only-files.patch \ - " - -LICENSE = "GPLv3" -SUMMARY = "Tool to allow editing of RPATH and interpreter fields in ELF binaries" - -SRCREV = "d6b2a72d9ec3bdfde4b1aacdada823ce388968bb" - -S = "${WORKDIR}/git" - -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -inherit autotools - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/patchelf/patchelf_0.12.bb b/poky/meta/recipes-devtools/patchelf/patchelf_0.12.bb new file mode 100644 index 000000000..43de90877 --- /dev/null +++ b/poky/meta/recipes-devtools/patchelf/patchelf_0.12.bb @@ -0,0 +1,16 @@ +SRC_URI = "git://github.com/NixOS/patchelf;protocol=https \ + file://handle-read-only-files.patch \ + " + +LICENSE = "GPLv3" +SUMMARY = "Tool to allow editing of RPATH and interpreter fields in ELF binaries" + +SRCREV = "8d3a16e97294e3c5521c61b4c8835499c9918264" + +S = "${WORKDIR}/git" + +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +inherit autotools + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-devtools/pseudo/pseudo_git.bb b/poky/meta/recipes-devtools/pseudo/pseudo_git.bb index 9a22304bb..3b623d8bd 100644 --- a/poky/meta/recipes-devtools/pseudo/pseudo_git.bb +++ b/poky/meta/recipes-devtools/pseudo/pseudo_git.bb @@ -6,7 +6,7 @@ SRC_URI = "git://git.yoctoproject.org/pseudo;branch=oe-core \ file://fallback-group \ " -SRCREV = "8efb082863ff0ceec7b7e46f9a44750e12f48039" +SRCREV = "d6b1b13c268d7246f0288d32d6b5eccc658cff4e" S = "${WORKDIR}/git" PV = "1.9.0+git${SRCPV}" diff --git a/poky/meta/recipes-devtools/python-numpy/python-numpy.inc b/poky/meta/recipes-devtools/python-numpy/python-numpy.inc index e37ab399e..0113f4098 100644 --- a/poky/meta/recipes-devtools/python-numpy/python-numpy.inc +++ b/poky/meta/recipes-devtools/python-numpy/python-numpy.inc @@ -8,7 +8,7 @@ SRCNAME = "numpy" SRC_URI = 
"https://github.com/${SRCNAME}/${SRCNAME}/releases/download/v${PV}/${SRCNAME}-${PV}.tar.gz \ file://0001-Don-t-search-usr-and-so-on-for-libraries-by-default-.patch \ " -SRC_URI[sha256sum] = "153cf8b0176e57a611931981acfe093d2f7fef623b48f91176efa199798a6b90" +SRC_URI[sha256sum] = "1396e6c3d20cbfc119195303b0272e749610b7042cc498be4134f013e9a3215c" UPSTREAM_CHECK_URI = "https://github.com/numpy/numpy/releases" UPSTREAM_CHECK_REGEX = "(?P\d+(\.\d+)+)\.tar" diff --git a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb deleted file mode 100644 index d388e88d2..000000000 --- a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.0.bb +++ /dev/null @@ -1,3 +0,0 @@ -inherit setuptools3 -require python-numpy.inc - diff --git a/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.1.bb b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.1.bb new file mode 100644 index 000000000..d388e88d2 --- /dev/null +++ b/poky/meta/recipes-devtools/python-numpy/python3-numpy_1.19.1.bb @@ -0,0 +1,3 @@ +inherit setuptools3 +require python-numpy.inc + diff --git a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch index 86c9363d6..ee0a9dbb1 100644 --- a/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch +++ b/poky/meta/recipes-devtools/python/files/0001-conditionally-do-not-fetch-code-by-easy_install.patch @@ -1,4 +1,4 @@ -From 768e1f2f14c9f1b3f9bd0e017c3f6183b45616e8 Mon Sep 17 00:00:00 2001 +From 6a5086619ee1c4bcebc7df622face11de6679255 Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Tue, 17 Jul 2018 10:13:38 +0800 Subject: [PATCH] conditionally do not fetch code by easy_install @@ -9,12 +9,13 @@ internet by easy_install. 
Upstream-Status: Inappropriate [oe specific] Signed-off-by: Hongxu Jia + --- setuptools/command/easy_install.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/setuptools/command/easy_install.py b/setuptools/command/easy_install.py -index bcbd4f58..6455afda 100644 +index bcbd4f5..6455afd 100644 --- a/setuptools/command/easy_install.py +++ b/setuptools/command/easy_install.py @@ -653,6 +653,11 @@ class easy_install(Command): @@ -29,6 +30,3 @@ index bcbd4f58..6455afda 100644 with self._tmpdir() as tmpdir: if not isinstance(spec, Requirement): if URL_SCHEME(spec): --- -2.25.1 - diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb b/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb deleted file mode 100644 index 1c500e468..000000000 --- a/poky/meta/recipes-devtools/python/python3-setuptools_49.3.1.bb +++ /dev/null @@ -1,65 +0,0 @@ -SUMMARY = "Download, build, install, upgrade, and uninstall Python packages" -HOMEPAGE = "https://pypi.org/project/setuptools" -SECTION = "devel/python" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=19;md5=9a33897f1bca1160d7aad3835152e158" - -PYPI_PACKAGE_EXT = "zip" - -inherit pypi setuptools3 - -SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch" - -SRC_URI += "file://0001-change-shebang-to-python3.patch" - -SRC_URI[sha256sum] = "1c7b51fba5d83160d540d18b2bf08fd546357488adf9ddbca08cc1e997bd5c18" - -DEPENDS += "${PYTHON_PN}" - -RDEPENDS_${PN} = "\ - ${PYTHON_PN}-2to3 \ - ${PYTHON_PN}-compile \ - ${PYTHON_PN}-compression \ - ${PYTHON_PN}-ctypes \ - ${PYTHON_PN}-distutils \ - ${PYTHON_PN}-email \ - ${PYTHON_PN}-html \ - ${PYTHON_PN}-json \ - ${PYTHON_PN}-netserver \ - ${PYTHON_PN}-numbers \ - ${PYTHON_PN}-pickle \ - ${PYTHON_PN}-pkgutil \ - ${PYTHON_PN}-plistlib \ - ${PYTHON_PN}-shell \ - ${PYTHON_PN}-stringold \ - ${PYTHON_PN}-threading \ - ${PYTHON_PN}-unittest \ - ${PYTHON_PN}-xml \ -" - -do_install_prepend() { - install -d ${D}${PYTHON_SITEPACKAGES_DIR} -} - -do_install_append() { - mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install -} - -BBCLASSEXTEND = "native nativesdk" - -# The pkg-resources module can be used by itself, without the package downloader -# and easy_install. Ship it in a separate package so that it can be used by -# minimal distributions. -PACKAGES =+ "${PYTHON_PN}-pkg-resources " -FILES_${PYTHON_PN}-pkg-resources = "${PYTHON_SITEPACKAGES_DIR}/pkg_resources/*" -RDEPENDS_${PYTHON_PN}-pkg-resources = "\ - ${PYTHON_PN}-compression \ - ${PYTHON_PN}-email \ - ${PYTHON_PN}-plistlib \ - ${PYTHON_PN}-pprint \ -" -# Due to the way OE-Core implemented native recipes, the native class cannot -# have a dependency on something that is not a recipe name. Work around that by -# manually setting RPROVIDES. 
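
[Illustrative aside] The refreshed setuptools patch above keeps easy_install from silently downloading sources from the network during builds. A minimal Python sketch of that kind of guard — the environment-variable name and the surrounding helper are illustrative assumptions, not the patch's exact code.

import os

class FetchBlocked(RuntimeError):
    pass

def maybe_fetch(spec, fetcher):
    # When the build environment forbids network access, refuse to fall back
    # to fetching the requirement and fail loudly instead of downloading.
    if os.environ.get("NO_FETCH_BY_EASY_INSTALL"):  # assumed variable name
        raise FetchBlocked("easy_install tried to fetch %r from the network" % (spec,))
    return fetcher(spec)
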
-RDEPENDS_${PN}_append = " ${PYTHON_PN}-pkg-resources" -RPROVIDES_append_class-native = " ${PYTHON_PN}-pkg-resources-native" diff --git a/poky/meta/recipes-devtools/python/python3-setuptools_49.6.0.bb b/poky/meta/recipes-devtools/python/python3-setuptools_49.6.0.bb new file mode 100644 index 000000000..360128b11 --- /dev/null +++ b/poky/meta/recipes-devtools/python/python3-setuptools_49.6.0.bb @@ -0,0 +1,65 @@ +SUMMARY = "Download, build, install, upgrade, and uninstall Python packages" +HOMEPAGE = "https://pypi.org/project/setuptools" +SECTION = "devel/python" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=19;md5=9a33897f1bca1160d7aad3835152e158" + +PYPI_PACKAGE_EXT = "zip" + +inherit pypi setuptools3 + +SRC_URI_append_class-native = " file://0001-conditionally-do-not-fetch-code-by-easy_install.patch" + +SRC_URI += "file://0001-change-shebang-to-python3.patch" + +SRC_URI[sha256sum] = "46bd862894ed22c2edff033c758c2dc026324788d758e96788e8f7c11f4e9707" + +DEPENDS += "${PYTHON_PN}" + +RDEPENDS_${PN} = "\ + ${PYTHON_PN}-2to3 \ + ${PYTHON_PN}-compile \ + ${PYTHON_PN}-compression \ + ${PYTHON_PN}-ctypes \ + ${PYTHON_PN}-distutils \ + ${PYTHON_PN}-email \ + ${PYTHON_PN}-html \ + ${PYTHON_PN}-json \ + ${PYTHON_PN}-netserver \ + ${PYTHON_PN}-numbers \ + ${PYTHON_PN}-pickle \ + ${PYTHON_PN}-pkgutil \ + ${PYTHON_PN}-plistlib \ + ${PYTHON_PN}-shell \ + ${PYTHON_PN}-stringold \ + ${PYTHON_PN}-threading \ + ${PYTHON_PN}-unittest \ + ${PYTHON_PN}-xml \ +" + +do_install_prepend() { + install -d ${D}${PYTHON_SITEPACKAGES_DIR} +} + +do_install_append() { + mv ${D}${bindir}/easy_install ${D}${bindir}/easy3_install +} + +BBCLASSEXTEND = "native nativesdk" + +# The pkg-resources module can be used by itself, without the package downloader +# and easy_install. Ship it in a separate package so that it can be used by +# minimal distributions. +PACKAGES =+ "${PYTHON_PN}-pkg-resources " +FILES_${PYTHON_PN}-pkg-resources = "${PYTHON_SITEPACKAGES_DIR}/pkg_resources/*" +RDEPENDS_${PYTHON_PN}-pkg-resources = "\ + ${PYTHON_PN}-compression \ + ${PYTHON_PN}-email \ + ${PYTHON_PN}-plistlib \ + ${PYTHON_PN}-pprint \ +" +# Due to the way OE-Core implemented native recipes, the native class cannot +# have a dependency on something that is not a recipe name. Work around that by +# manually setting RPROVIDES. 
+RDEPENDS_${PN}_append = " ${PYTHON_PN}-pkg-resources" +RPROVIDES_append_class-native = " ${PYTHON_PN}-pkg-resources-native" diff --git a/poky/meta/recipes-devtools/qemu/qemu.inc b/poky/meta/recipes-devtools/qemu/qemu.inc index 5599382a9..bbb903896 100644 --- a/poky/meta/recipes-devtools/qemu/qemu.inc +++ b/poky/meta/recipes-devtools/qemu/qemu.inc @@ -28,9 +28,10 @@ SRC_URI = "https://download.qemu.org/${BPN}-${PV}.tar.xz \ file://0009-Fix-webkitgtk-builds.patch \ file://0010-configure-Add-pkg-config-handling-for-libgcrypt.patch \ file://0001-Add-enable-disable-udev.patch \ - file://0001-qemu-Do-not-include-file-if-not-exists.patch \ - file://find_datadir.patch \ - " + file://0001-qemu-Do-not-include-file-if-not-exists.patch \ + file://find_datadir.patch \ + file://usb-fix-setup_len-init.patch \ + " UPSTREAM_CHECK_REGEX = "qemu-(?P\d+(\.\d+)+)\.tar" SRC_URI[sha256sum] = "c9174eb5933d9eb5e61f541cd6d1184cd3118dfe4c5c4955bc1bdc4d390fa4e5" @@ -38,6 +39,10 @@ SRC_URI[sha256sum] = "c9174eb5933d9eb5e61f541cd6d1184cd3118dfe4c5c4955bc1bdc4d39 COMPATIBLE_HOST_mipsarchn32 = "null" COMPATIBLE_HOST_mipsarchn64 = "null" +# Per https://lists.nongnu.org/archive/html/qemu-devel/2020-09/msg03873.html +# upstream states qemu doesn't work without optimization +DEBUG_BUILD = "0" + do_install_append() { # Prevent QA warnings about installed ${localstatedir}/run if [ -d ${D}${localstatedir}/run ]; then rmdir ${D}${localstatedir}/run; fi diff --git a/poky/meta/recipes-devtools/qemu/qemu/usb-fix-setup_len-init.patch b/poky/meta/recipes-devtools/qemu/qemu/usb-fix-setup_len-init.patch new file mode 100644 index 000000000..92801da46 --- /dev/null +++ b/poky/meta/recipes-devtools/qemu/qemu/usb-fix-setup_len-init.patch @@ -0,0 +1,89 @@ +CVE: CVE-2020-14364 +Upstream-Status: Backport +Signed-off-by: Ross Burton + +From b946434f2659a182afc17e155be6791ebfb302eb Mon Sep 17 00:00:00 2001 +From: Gerd Hoffmann +Date: Tue, 25 Aug 2020 07:36:36 +0200 +Subject: [PATCH] usb: fix setup_len init (CVE-2020-14364) + +Store calculated setup_len in a local variable, verify it, and only +write it to the struct (USBDevice->setup_len) in case it passed the +sanity checks. + +This prevents other code (do_token_{in,out} functions specifically) +from working with invalid USBDevice->setup_len values and overrunning +the USBDevice->setup_buf[] buffer. 
+ +Fixes: CVE-2020-14364 +Signed-off-by: Gerd Hoffmann +Tested-by: Gonglei +Reviewed-by: Li Qiang +Message-id: 20200825053636.29648-1-kraxel@redhat.com +--- + hw/usb/core.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/hw/usb/core.c b/hw/usb/core.c +index 5abd128b6bc..5234dcc73fe 100644 +--- a/hw/usb/core.c ++++ b/hw/usb/core.c +@@ -129,6 +129,7 @@ void usb_wakeup(USBEndpoint *ep, unsigned int stream) + static void do_token_setup(USBDevice *s, USBPacket *p) + { + int request, value, index; ++ unsigned int setup_len; + + if (p->iov.size != 8) { + p->status = USB_RET_STALL; +@@ -138,14 +139,15 @@ static void do_token_setup(USBDevice *s, USBPacket *p) + usb_packet_copy(p, s->setup_buf, p->iov.size); + s->setup_index = 0; + p->actual_length = 0; +- s->setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; +- if (s->setup_len > sizeof(s->data_buf)) { ++ setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; ++ if (setup_len > sizeof(s->data_buf)) { + fprintf(stderr, + "usb_generic_handle_packet: ctrl buffer too small (%d > %zu)\n", +- s->setup_len, sizeof(s->data_buf)); ++ setup_len, sizeof(s->data_buf)); + p->status = USB_RET_STALL; + return; + } ++ s->setup_len = setup_len; + + request = (s->setup_buf[0] << 8) | s->setup_buf[1]; + value = (s->setup_buf[3] << 8) | s->setup_buf[2]; +@@ -259,26 +261,28 @@ static void do_token_out(USBDevice *s, USBPacket *p) + static void do_parameter(USBDevice *s, USBPacket *p) + { + int i, request, value, index; ++ unsigned int setup_len; + + for (i = 0; i < 8; i++) { + s->setup_buf[i] = p->parameter >> (i*8); + } + + s->setup_state = SETUP_STATE_PARAM; +- s->setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; + s->setup_index = 0; + + request = (s->setup_buf[0] << 8) | s->setup_buf[1]; + value = (s->setup_buf[3] << 8) | s->setup_buf[2]; + index = (s->setup_buf[5] << 8) | s->setup_buf[4]; + +- if (s->setup_len > sizeof(s->data_buf)) { ++ setup_len = (s->setup_buf[7] << 8) | s->setup_buf[6]; ++ if (setup_len > sizeof(s->data_buf)) { + fprintf(stderr, + "usb_generic_handle_packet: ctrl buffer too small (%d > %zu)\n", +- s->setup_len, sizeof(s->data_buf)); ++ setup_len, sizeof(s->data_buf)); + p->status = USB_RET_STALL; + return; + } ++ s->setup_len = setup_len; + + if (p->pid == USB_TOKEN_OUT) { + usb_packet_copy(p, s->data_buf, s->setup_len); diff --git a/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb b/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb index 9b0949026..a4018cc44 100644 --- a/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb +++ b/poky/meta/recipes-devtools/qemu/qemu_5.1.0.bb @@ -10,11 +10,6 @@ DEPENDS = "glib-2.0 zlib pixman bison-native" RDEPENDS_${PN}_class-target += "bash" -# Does not compile for -Og because that level does not clean up dead-code. -# See lockable.h. 
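
[Illustrative aside] The usb-fix-setup_len-init.patch added above follows a validate-then-commit pattern: the control-transfer length is computed into a local variable, bounds-checked against the data buffer, and only then stored in the device state. A minimal Python sketch of that pattern — class and field names are illustrative, not QEMU's.

class SetupError(Exception):
    pass

class UsbDeviceState:
    DATA_BUF_SIZE = 4096          # stand-in for sizeof(s->data_buf)

    def __init__(self):
        self.setup_len = 0        # persistent state used by later stages

    def handle_setup(self, setup_buf):
        # wLength is little-endian in bytes 6..7 of the 8-byte setup packet
        setup_len = setup_buf[6] | (setup_buf[7] << 8)
        if setup_len > self.DATA_BUF_SIZE:
            # reject before touching persistent state, so do_token_in/out
            # style consumers never see an out-of-range length
            raise SetupError("ctrl buffer too small (%d > %d)"
                             % (setup_len, self.DATA_BUF_SIZE))
        self.setup_len = setup_len  # commit only after the check passes
        return self.setup_len
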
-# -DEBUG_BUILD = "0" - EXTRA_OECONF_append_class-target = " --target-list=${@get_qemu_target_list(d)}" EXTRA_OECONF_append_class-target_mipsarcho32 = "${@bb.utils.contains('BBEXTENDCURR', 'multilib', ' --disable-capstone', '', d)}" EXTRA_OECONF_append_class-nativesdk = " --target-list=${@get_qemu_target_list(d)}" diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb b/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb deleted file mode 100644 index 182628f4b..000000000 --- a/poky/meta/recipes-devtools/rsync/rsync_3.2.2.bb +++ /dev/null @@ -1,58 +0,0 @@ -SUMMARY = "File synchronization tool" -HOMEPAGE = "http://rsync.samba.org/" -BUGTRACKER = "http://rsync.samba.org/bugzilla.html" -SECTION = "console/network" -# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) -# Includes opennsh and xxhash dynamic link exception -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26" - -DEPENDS = "popt" - -SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ - file://rsyncd.conf \ - file://makefile-no-rebuild.patch \ - " - -SRC_URI[sha256sum] = "644bd3841779507665211fd7db8359c8a10670c57e305b4aab61b4e40037afa8" - -# -16548 required for v3.1.3pre1. Already in v3.1.3. -CVE_CHECK_WHITELIST += " CVE-2017-16548 " - -inherit autotools-brokensep - -PACKAGECONFIG ??= "acl attr \ - ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ -" - -PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," -PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," -PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," -PACKAGECONFIG[lz4] = "--enable-lz4,--disable-lz4,lz4" -PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" -PACKAGECONFIG[xxhash] = "--enable-xxhash,--disable-xxhash,xxhash" -PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd" - -# By default, if crosscompiling, rsync disables a number of -# capabilities, hardlinking symlinks and special files (i.e. devices) -CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" - -EXTRA_OEMAKE = 'STRIP=""' -EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm" - -# rsync 3.0 uses configure.sh instead of configure, and -# makefile checks the existence of configure.sh -do_configure_prepend () { - rm -f ${S}/configure ${S}/configure.sh -} - -do_configure_append () { - cp -f ${S}/configure ${S}/configure.sh -} - -do_install_append() { - install -d ${D}${sysconfdir} - install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} -} - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/rsync/rsync_3.2.3.bb b/poky/meta/recipes-devtools/rsync/rsync_3.2.3.bb new file mode 100644 index 000000000..375efa0de --- /dev/null +++ b/poky/meta/recipes-devtools/rsync/rsync_3.2.3.bb @@ -0,0 +1,58 @@ +SUMMARY = "File synchronization tool" +HOMEPAGE = "http://rsync.samba.org/" +BUGTRACKER = "http://rsync.samba.org/bugzilla.html" +SECTION = "console/network" +# GPLv2+ (<< 3.0.0), GPLv3+ (>= 3.0.0) +# Includes opennsh and xxhash dynamic link exception +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://COPYING;md5=9e5a4f9b3a253d51520617aa54f8eb26" + +DEPENDS = "popt" + +SRC_URI = "https://download.samba.org/pub/${BPN}/src/${BP}.tar.gz \ + file://rsyncd.conf \ + file://makefile-no-rebuild.patch \ + " + +SRC_URI[sha256sum] = "becc3c504ceea499f4167a260040ccf4d9f2ef9499ad5683c179a697146ce50e" + +# -16548 required for v3.1.3pre1. Already in v3.1.3. 
+CVE_CHECK_WHITELIST += " CVE-2017-16548 " + +inherit autotools-brokensep + +PACKAGECONFIG ??= "acl attr \ + ${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} \ +" + +PACKAGECONFIG[acl] = "--enable-acl-support,--disable-acl-support,acl," +PACKAGECONFIG[attr] = "--enable-xattr-support,--disable-xattr-support,attr," +PACKAGECONFIG[ipv6] = "--enable-ipv6,--disable-ipv6," +PACKAGECONFIG[lz4] = "--enable-lz4,--disable-lz4,lz4" +PACKAGECONFIG[openssl] = "--enable-openssl,--disable-openssl,openssl" +PACKAGECONFIG[xxhash] = "--enable-xxhash,--disable-xxhash,xxhash" +PACKAGECONFIG[zstd] = "--enable-zstd,--disable-zstd,zstd" + +# By default, if crosscompiling, rsync disables a number of +# capabilities, hardlinking symlinks and special files (i.e. devices) +CACHED_CONFIGUREVARS += "rsync_cv_can_hardlink_special=yes rsync_cv_can_hardlink_symlink=yes" + +EXTRA_OEMAKE = 'STRIP=""' +EXTRA_OECONF = "--disable-simd --disable-md2man --disable-asm" + +# rsync 3.0 uses configure.sh instead of configure, and +# makefile checks the existence of configure.sh +do_configure_prepend () { + rm -f ${S}/configure ${S}/configure.sh +} + +do_configure_append () { + cp -f ${S}/configure ${S}/configure.sh +} + +do_install_append() { + install -d ${D}${sysconfdir} + install -m 0644 ${WORKDIR}/rsyncd.conf ${D}${sysconfdir} +} + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-devtools/vala/vala_0.48.7.bb b/poky/meta/recipes-devtools/vala/vala_0.48.7.bb deleted file mode 100644 index 2e61db105..000000000 --- a/poky/meta/recipes-devtools/vala/vala_0.48.7.bb +++ /dev/null @@ -1,5 +0,0 @@ -require ${BPN}.inc - -SRC_URI += " file://0001-vapigen.m4-use-PKG_CONFIG_SYSROOT_DIR.patch" - -SRC_URI[sha256sum] = "28de33e28da24500cc1675c3a6ced1301c9a6a5e6dd06193569001f9ce9a5c53" diff --git a/poky/meta/recipes-devtools/vala/vala_0.48.9.bb b/poky/meta/recipes-devtools/vala/vala_0.48.9.bb new file mode 100644 index 000000000..09bfcd647 --- /dev/null +++ b/poky/meta/recipes-devtools/vala/vala_0.48.9.bb @@ -0,0 +1,5 @@ +require ${BPN}.inc + +SRC_URI += " file://0001-vapigen.m4-use-PKG_CONFIG_SYSROOT_DIR.patch" + +SRC_URI[sha256sum] = "9cea16d3bb3daddbfe0556b99fbfa08146230db7651e1e674cd08b4df5cefea9" diff --git a/poky/meta/recipes-devtools/valgrind/valgrind/0001-adjust-path-filter-for-2-memcheck-tests.patch b/poky/meta/recipes-devtools/valgrind/valgrind/0001-adjust-path-filter-for-2-memcheck-tests.patch deleted file mode 100644 index 4bc4bb086..000000000 --- a/poky/meta/recipes-devtools/valgrind/valgrind/0001-adjust-path-filter-for-2-memcheck-tests.patch +++ /dev/null @@ -1,40 +0,0 @@ -From bf63e35c3036e6040c8cfecabc7160b1f36b0591 Mon Sep 17 00:00:00 2001 -From: Randy MacLeod -Date: Wed, 28 Aug 2019 12:31:15 -0400 -Subject: [PATCH] adjust path filter for 2 memcheck tests - -Test executables produced when cross-compiling can contain -relative paths such as: - coregrind/tests/../../../valgrind-3.15.0/coregrind/ -Use the --fullpath-after option to match and therefore -suppress more of the prefix to enable test to pass. 
- -Upstream-Status: Inappropriate [embedded specific] - -Signed-off-by: Randy MacLeod ---- - memcheck/tests/badfree3.vgtest | 2 +- - memcheck/tests/varinfo5.vgtest | 2 +- - 2 files changed, 2 insertions(+), 2 deletions(-) - -diff --git a/memcheck/tests/badfree3.vgtest b/memcheck/tests/badfree3.vgtest -index 3dfc5fd8a..57eec21f3 100644 ---- a/memcheck/tests/badfree3.vgtest -+++ b/memcheck/tests/badfree3.vgtest -@@ -1,3 +1,3 @@ - prog: badfree --vgopts: -q --fullpath-after=memcheck/ --fullpath-after=coregrind/ -+vgopts: -q --fullpath-after=/valgrind-3.15.0/memcheck/ --fullpath-after=/valgrind-3.15.0/coregrind/ - stderr_filter_args: badfree.c -diff --git a/memcheck/tests/varinfo5.vgtest b/memcheck/tests/varinfo5.vgtest -index 063d00dce..6907bb2f6 100644 ---- a/memcheck/tests/varinfo5.vgtest -+++ b/memcheck/tests/varinfo5.vgtest -@@ -1,3 +1,3 @@ - prog: varinfo5 --vgopts: --fullpath-after=memcheck/ --fullpath-after=coregrind/ --read-var-info=yes --read-inline-info=yes -q -+vgopts: --fullpath-after=/valgrind-3.15.0/memcheck/ --fullpath-after=/valgrind-3.15.0/coregrind/ --read-var-info=yes --read-inline-info=yes -q - stderr_filter: filter_varinfo3 --- -2.22.0 - diff --git a/poky/meta/recipes-devtools/valgrind/valgrind/0001-memcheck-vgtests-remove-fullpath-after-flags.patch b/poky/meta/recipes-devtools/valgrind/valgrind/0001-memcheck-vgtests-remove-fullpath-after-flags.patch new file mode 100644 index 000000000..dce8b52ba --- /dev/null +++ b/poky/meta/recipes-devtools/valgrind/valgrind/0001-memcheck-vgtests-remove-fullpath-after-flags.patch @@ -0,0 +1,42 @@ +From 3ff82dcb844f98dbf67c69f11f6516bc234725a9 Mon Sep 17 00:00:00 2001 +From: Stacy Gaikovaia +Date: Wed, 16 Sep 2020 13:45:07 -0400 +Subject: [PATCH] memcheck vgtests remove fullpath-after flags + +Test executables produced when cross-compiling can contain +relative paths containing version number, such as: + coregrind/tests/../../../valgrind-3.16.1/coregrind + +Remove the --fullpath-after option so yocto project doesn't +have to upgrade patch every valgrind uprev. Upgrade test stderr +paths in corresponding tests .bb script. 
+ +Upstream-Status: Inappropriate [embedded specific] + +Signed-off-by: Stacy Gaikovaia +--- + memcheck/tests/badfree3.vgtest | 2 +- + memcheck/tests/varinfo5.vgtest | 2 +- + 2 files changed, 2 insertions(+), 2 deletions(-) + +diff --git a/memcheck/tests/badfree3.vgtest b/memcheck/tests/badfree3.vgtest +index 3dfc5fd8a..4ee03f00a 100644 +--- a/memcheck/tests/badfree3.vgtest ++++ b/memcheck/tests/badfree3.vgtest +@@ -1,3 +1,3 @@ + prog: badfree +-vgopts: -q --fullpath-after=memcheck/ --fullpath-after=coregrind/ ++vgopts: -q + stderr_filter_args: badfree.c +diff --git a/memcheck/tests/varinfo5.vgtest b/memcheck/tests/varinfo5.vgtest +index 063d00dce..79c4a72a4 100644 +--- a/memcheck/tests/varinfo5.vgtest ++++ b/memcheck/tests/varinfo5.vgtest +@@ -1,3 +1,3 @@ + prog: varinfo5 +-vgopts: --fullpath-after=memcheck/ --fullpath-after=coregrind/ --read-var-info=yes --read-inline-info=yes -q ++vgopts: --read-var-info=yes --read-inline-info=yes -q + stderr_filter: filter_varinfo3 +-- +2.25.1 + diff --git a/poky/meta/recipes-devtools/valgrind/valgrind_3.16.1.bb b/poky/meta/recipes-devtools/valgrind/valgrind_3.16.1.bb index 484a229a1..d4ca1a775 100644 --- a/poky/meta/recipes-devtools/valgrind/valgrind_3.16.1.bb +++ b/poky/meta/recipes-devtools/valgrind/valgrind_3.16.1.bb @@ -36,7 +36,7 @@ SRC_URI = "https://sourceware.org/pub/valgrind/valgrind-${PV}.tar.bz2 \ file://0001-Make-local-functions-static-to-avoid-assembler-error.patch \ file://0001-Return-a-valid-exit_code-from-vg_regtest.patch \ file://0001-valgrind-filter_xml_frames-do-not-filter-usr.patch \ - file://0001-adjust-path-filter-for-2-memcheck-tests.patch \ + file://0001-memcheck-vgtests-remove-fullpath-after-flags.patch \ file://s390x_vec_op_t.patch \ file://0001-none-tests-fdleak_cmsg.stderr.exp-adjust-tmp-paths.patch \ file://0001-memcheck-tests-Fix-timerfd-syscall-test.patch \ @@ -47,6 +47,17 @@ UPSTREAM_CHECK_REGEX = "valgrind-(?P\d+(\.\d+)+)\.tar" COMPATIBLE_HOST = '(i.86|x86_64|arm|aarch64|mips|powerpc|powerpc64).*-linux' +# patch 0001-memcheck-vgtests-remove-fullpath-after-flags.patch removes relative path +# argument. Change expected stderr files accordingly. +do_patch_append() { + bb.build.exec_func('do_sed_paths', d) +} + +do_sed_paths() { + sed -i -e 's|tests/||' ${S}/memcheck/tests/badfree3.stderr.exp + sed -i -e 's|tests/||' ${S}/memcheck/tests/varinfo5.stderr.exp +} + # valgrind supports armv7 and above COMPATIBLE_HOST_armv4 = 'null' COMPATIBLE_HOST_armv5 = 'null' @@ -118,7 +129,8 @@ RRECOMMENDS_${PN} += "${TCLIBC}-dbg" RDEPENDS_${PN}-ptest += " bash coreutils file \ gdb libgomp \ perl \ - perl-module-getopt-long perl-module-file-basename perl-module-file-glob \ + perl-module-file-basename perl-module-file-glob perl-module-getopt-long \ + perl-module-overloading \ procps sed ${PN}-dbg ${PN}-src" RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" diff --git a/poky/meta/recipes-extended/acpica/acpica_20200528.bb b/poky/meta/recipes-extended/acpica/acpica_20200528.bb deleted file mode 100644 index d68afd815..000000000 --- a/poky/meta/recipes-extended/acpica/acpica_20200528.bb +++ /dev/null @@ -1,49 +0,0 @@ -SUMMARY = "ACPICA tools for the development and debug of ACPI tables" -DESCRIPTION = "The ACPI Component Architecture (ACPICA) project provides an \ -OS-independent reference implementation of the Advanced Configuration and \ -Power Interface Specification (ACPI). 
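
[Illustrative aside] The valgrind patch above drops the --fullpath-after flags from two memcheck vgtests, and the do_sed_paths() task added to valgrind_3.16.1.bb just above rewrites the matching stderr.exp files so the expected output no longer carries the tests/ prefix. A minimal Python equivalent of those two sed calls, for illustration only (the helper name is made up; the file locations are the ones named in the recipe).

from pathlib import Path

def strip_tests_prefix(valgrind_src):
    for name in ("badfree3.stderr.exp", "varinfo5.stderr.exp"):
        exp = Path(valgrind_src, "memcheck", "tests", name)
        # same effect as: sed -i -e 's|tests/||' <file>
        exp.write_text(exp.read_text().replace("tests/", ""))
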
ACPICA code contains those portions of \ -ACPI meant to be directly integrated into the host OS as a kernel-resident \ -subsystem, and a small set of tools to assist in developing and debugging \ -ACPI tables." - -HOMEPAGE = "http://www.acpica.org/" -SECTION = "console/tools" - -LICENSE = "Intel | BSD | GPLv2" -LIC_FILES_CHKSUM = "file://source/compiler/aslcompile.c;beginline=7;endline=150;md5=6adbcb81e9ee6ae50c569b94fe12f7c5" - -COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux" - -DEPENDS = "m4-native flex-native bison-native" - -SRC_URI = "https://acpica.org/sites/acpica/files/acpica-unix-${PV}.tar.gz" -SRC_URI[sha256sum] = "e69f81c6924c8d30f9b9005bb002307f07b5a1538e13c909bea2f8a44e0d8610" - -UPSTREAM_CHECK_URI = "https://acpica.org/downloads" - -S = "${WORKDIR}/acpica-unix-${PV}" - -inherit update-alternatives - -ALTERNATIVE_PRIORITY = "100" -ALTERNATIVE_${PN} = "acpixtract acpidump" - -EXTRA_OEMAKE = "CC='${CC}' \ - OPT_CFLAGS=-Wall \ - DESTDIR=${D} \ - PREFIX=${prefix} \ - INSTALLDIR=${bindir} \ - INSTALLFLAGS= \ - " - -do_install() { - oe_runmake install -} - -# iasl*.bb is a subset of this recipe, so RREPLACE it -PROVIDES = "iasl" -RPROVIDES_${PN} += "iasl" -RREPLACES_${PN} += "iasl" -RCONFLICTS_${PN} += "iasl" - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-extended/acpica/acpica_20200717.bb b/poky/meta/recipes-extended/acpica/acpica_20200717.bb new file mode 100644 index 000000000..d1d06c0c2 --- /dev/null +++ b/poky/meta/recipes-extended/acpica/acpica_20200717.bb @@ -0,0 +1,49 @@ +SUMMARY = "ACPICA tools for the development and debug of ACPI tables" +DESCRIPTION = "The ACPI Component Architecture (ACPICA) project provides an \ +OS-independent reference implementation of the Advanced Configuration and \ +Power Interface Specification (ACPI). ACPICA code contains those portions of \ +ACPI meant to be directly integrated into the host OS as a kernel-resident \ +subsystem, and a small set of tools to assist in developing and debugging \ +ACPI tables." + +HOMEPAGE = "http://www.acpica.org/" +SECTION = "console/tools" + +LICENSE = "Intel | BSD | GPLv2" +LIC_FILES_CHKSUM = "file://source/compiler/aslcompile.c;beginline=7;endline=150;md5=6adbcb81e9ee6ae50c569b94fe12f7c5" + +COMPATIBLE_HOST = "(i.86|x86_64|arm|aarch64).*-linux" + +DEPENDS = "m4-native flex-native bison-native" + +SRC_URI = "https://acpica.org/sites/acpica/files/acpica-unix-${PV}.tar.gz" +SRC_URI[sha256sum] = "cb99903ef240732f395af40c23b9b19c7899033f48840743544eebb6da72a828" + +UPSTREAM_CHECK_URI = "https://acpica.org/downloads" + +S = "${WORKDIR}/acpica-unix-${PV}" + +inherit update-alternatives + +ALTERNATIVE_PRIORITY = "100" +ALTERNATIVE_${PN} = "acpixtract acpidump" + +EXTRA_OEMAKE = "CC='${CC}' \ + OPT_CFLAGS=-Wall \ + DESTDIR=${D} \ + PREFIX=${prefix} \ + INSTALLDIR=${bindir} \ + INSTALLFLAGS= \ + " + +do_install() { + oe_runmake install +} + +# iasl*.bb is a subset of this recipe, so RREPLACE it +PROVIDES = "iasl" +RPROVIDES_${PN} += "iasl" +RREPLACES_${PN} += "iasl" +RCONFLICTS_${PN} += "iasl" + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.1.bb b/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.1.bb deleted file mode 100644 index 2f841de06..000000000 --- a/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.1.bb +++ /dev/null @@ -1,31 +0,0 @@ -SUMMARY = "Tool for creating HTML, PDF, EPUB, man pages" -DESCRIPTION = "AsciiDoc is a text document format for writing short documents, \ -articles, books and UNIX man pages." 
- -HOMEPAGE = "http://asciidoc.org/" - -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=4e5d1baf6f20559e3bec172226a47e4e \ - file://LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263 " - -SRC_URI = "git://github.com/asciidoc/asciidoc-py3;protocol=https \ - file://auto-catalogs.patch" -SRCREV = "fce6bd87fbdef5e510310464b02a75fb32f72f74" - -DEPENDS = "libxml2-native libxslt-native docbook-xml-dtd4-native docbook-xsl-stylesheets-native" - -S = "${WORKDIR}/git" - -# Tell xmllint where to find the DocBook XML catalogue, because right now it -# opens /etc/xml/catalog on the host. Depends on auto-catalogs.patch -export SGML_CATALOG_FILES="file://${STAGING_ETCDIR_NATIVE}/xml/catalog" - -# Not using automake -inherit autotools-brokensep -CLEANBROKEN = "1" - -# target and nativesdk needs python3, but for native we can use the host. -RDEPENDS_${PN} += "python3" -RDEPENDS_remove_class-native = "python3" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.2.bb b/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.2.bb new file mode 100644 index 000000000..711bfbfb9 --- /dev/null +++ b/poky/meta/recipes-extended/asciidoc/asciidoc_9.0.2.bb @@ -0,0 +1,31 @@ +SUMMARY = "Tool for creating HTML, PDF, EPUB, man pages" +DESCRIPTION = "AsciiDoc is a text document format for writing short documents, \ +articles, books and UNIX man pages." + +HOMEPAGE = "http://asciidoc.org/" + +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYRIGHT;md5=4e5d1baf6f20559e3bec172226a47e4e \ + file://LICENSE;md5=b234ee4d69f5fce4486a80fdaf4a4263 " + +SRC_URI = "git://github.com/asciidoc/asciidoc-py3;protocol=https \ + file://auto-catalogs.patch" +SRCREV = "9a407dc9a497364c91421fd961954eddb565baf1" + +DEPENDS = "libxml2-native libxslt-native docbook-xml-dtd4-native docbook-xsl-stylesheets-native" + +S = "${WORKDIR}/git" + +# Tell xmllint where to find the DocBook XML catalogue, because right now it +# opens /etc/xml/catalog on the host. Depends on auto-catalogs.patch +export SGML_CATALOG_FILES="file://${STAGING_ETCDIR_NATIVE}/xml/catalog" + +# Not using automake +inherit autotools-brokensep +CLEANBROKEN = "1" + +# target and nativesdk needs python3, but for native we can use the host. 
+RDEPENDS_${PN} += "python3" +RDEPENDS_remove_class-native = "python3" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-extended/images/core-image-testmaster-initramfs.bb b/poky/meta/recipes-extended/images/core-image-testmaster-initramfs.bb index 09a6d1604..1a2e0af27 100644 --- a/poky/meta/recipes-extended/images/core-image-testmaster-initramfs.bb +++ b/poky/meta/recipes-extended/images/core-image-testmaster-initramfs.bb @@ -8,6 +8,7 @@ PACKAGE_INSTALL = "initramfs-live-boot initramfs-live-install-testfs initramfs-l IMAGE_FEATURES = "" export IMAGE_BASENAME = "core-image-testmaster-initramfs" +IMAGE_NAME_SUFFIX ?= "" IMAGE_LINGUAS = "" LICENSE = "MIT" diff --git a/poky/meta/recipes-extended/iputils/iputils/0001-iputils-Initialize-libgcrypt.patch b/poky/meta/recipes-extended/iputils/iputils/0001-iputils-Initialize-libgcrypt.patch deleted file mode 100644 index b56804ceb..000000000 --- a/poky/meta/recipes-extended/iputils/iputils/0001-iputils-Initialize-libgcrypt.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 8576e0c218634e6f7ed1b6ff02fa164fb0c75f86 Mon Sep 17 00:00:00 2001 -From: Mingli Yu -Date: Wed, 29 Apr 2020 03:50:32 +0000 -Subject: [PATCH] iputils_md5dig.h: Initialize libgcrypt - -Initialize libgcrypt on first use otherwise -there comes below warning when check the status -of the ninfod.service. - # systemctl status ninfod.service - * ninfod.service - Respond to IPv6 Node Information Queries - Loaded: loaded (/lib/systemd/system/ninfod.service; enabled; vendor preset: enabled) - Active: active (running) since Wed 2020-04-29 05:18:21 UTC; 36s ago - Docs: man:ninfod(8) - Main PID: 347 (ninfod) - Tasks: 1 (limit: 9382) - Memory: 1.2M - CGroup: /system.slice/ninfod.service - `-347 /sbin/ninfod -d - - Apr 29 05:18:21 intel-x86-64 systemd[1]: Started Respond to IPv6 Node Information Queries. - Apr 29 05:18:24 intel-x86-64 ninfod[347]: Libgcrypt warning: missing initialization - please fix the application - -Upstream-Status: Inappropriate [the upstream avoids linking to crypto libraries in - commit 214ed83 common: copy md5 implementation to iputils project] - -Signed-off-by: Mingli Yu ---- - iputils_md5dig.h | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/iputils_md5dig.h b/iputils_md5dig.h -index bfa7f02..3cc3fbf 100644 ---- a/iputils_md5dig.h -+++ b/iputils_md5dig.h -@@ -24,8 +24,17 @@ typedef struct { - gcry_md_hd_t dig; - } iputils_md5dig_ctx; - -+void maybeInit() -+{ -+ if (!gcry_control(GCRYCTL_INITIALIZATION_FINISHED_P)) -+ { -+ gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0); -+ } -+} -+ - static void iputils_md5dig_init(iputils_md5dig_ctx *ctx) - { -+ maybeInit(); - if (gcry_md_open(&ctx->dig, GCRY_MD_MD5, 0) != GPG_ERR_NO_ERROR) - abort(); - return; --- -2.24.1 - diff --git a/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch b/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch deleted file mode 100644 index e106a0cf7..000000000 --- a/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch +++ /dev/null @@ -1,51 +0,0 @@ -From ab1aa2eb0097a7ef05ffccac058b06812deb2695 Mon Sep 17 00:00:00 2001 -From: Sami Kerola -Date: Sat, 28 Dec 2019 17:16:27 +0000 -Subject: [PATCH] ninfod: change variable name to avoid colliding with function - name - -The sys/capability.h header has 'extern int cap_setuid(uid_t uid);' -function prototype. 
- -Addresses: https://github.com/iputils/iputils/issues/246 - -Upstream-Status: Backport [https://github.com/iputils/iputils/commit/18f9a84e0e702841d6cc4d5f593de4fbd1348e83] -Signed-off-by: Sami Kerola -Signed-off-by: Alexander Kanavin ---- - ninfod/ninfod.c | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - -diff --git a/ninfod/ninfod.c b/ninfod/ninfod.c -index badbf80..28f03af 100644 ---- a/ninfod/ninfod.c -+++ b/ninfod/ninfod.c -@@ -454,7 +454,7 @@ static void do_daemonize(void) - /* --------- */ - #ifdef HAVE_LIBCAP - static const cap_value_t cap_net_raw = CAP_NET_RAW; --static const cap_value_t cap_setuid = CAP_SETUID; -+static const cap_value_t cap_setuserid = CAP_SETUID; - static cap_flag_value_t cap_ok; - #else - static uid_t euid; -@@ -486,7 +486,7 @@ static void limit_capabilities(void) - - cap_get_flag(cap_cur_p, CAP_SETUID, CAP_PERMITTED, &cap_ok); - if (cap_ok != CAP_CLEAR) -- cap_set_flag(cap_p, CAP_PERMITTED, 1, &cap_setuid, CAP_SET); -+ cap_set_flag(cap_p, CAP_PERMITTED, 1, &cap_setuserid, CAP_SET); - - if (cap_set_proc(cap_p) < 0) { - DEBUG(LOG_ERR, "cap_set_proc: %s\n", strerror(errno)); -@@ -519,8 +519,8 @@ static void drop_capabilities(void) - - /* setuid / setuid */ - if (cap_ok != CAP_CLEAR) { -- cap_set_flag(cap_p, CAP_PERMITTED, 1, &cap_setuid, CAP_SET); -- cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &cap_setuid, CAP_SET); -+ cap_set_flag(cap_p, CAP_PERMITTED, 1, &cap_setuserid, CAP_SET); -+ cap_set_flag(cap_p, CAP_EFFECTIVE, 1, &cap_setuserid, CAP_SET); - - if (cap_set_proc(cap_p) < 0) { - DEBUG(LOG_ERR, "cap_set_proc: %s\n", strerror(errno)); diff --git a/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-fix-systemd-Documentation-url-error.patch b/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-fix-systemd-Documentation-url-error.patch deleted file mode 100644 index 03a3f5602..000000000 --- a/poky/meta/recipes-extended/iputils/iputils/0001-ninfod-fix-systemd-Documentation-url-error.patch +++ /dev/null @@ -1,28 +0,0 @@ -From c1f1527eb30d4a5feebf9a0757582bbf7fe3eae9 Mon Sep 17 00:00:00 2001 -From: Andrea Stevanato -Date: Tue, 5 Nov 2019 19:08:30 +0000 -Subject: [PATCH] ninfod: fix systemd Documentation url error - -systemd[1]: /usr/lib/systemd/system/ninfod.service:3: Invalid URL, ignoring: ninfod(8) - -Upstream-Status: Backport [https://github.com/iputils/iputils/commit/c1f1527eb30d4a5feebf9a0757582bbf7fe3eae9] -Signed-off-by: Alex Kiernan ---- - systemd/ninfod.service.in | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/systemd/ninfod.service.in b/systemd/ninfod.service.in -index 5ab69ca00e96..8e79fcd9238e 100644 ---- a/systemd/ninfod.service.in -+++ b/systemd/ninfod.service.in -@@ -1,6 +1,6 @@ - [Unit] - Description=Respond to IPv6 Node Information Queries --Documentation=ninfod(8) -+Documentation=man:ninfod(8) - Requires=network.target - After=network.target - --- -2.17.1 - diff --git a/poky/meta/recipes-extended/iputils/iputils_s20190709.bb b/poky/meta/recipes-extended/iputils/iputils_s20190709.bb deleted file mode 100644 index 545f3d5e8..000000000 --- a/poky/meta/recipes-extended/iputils/iputils_s20190709.bb +++ /dev/null @@ -1,74 +0,0 @@ -SUMMARY = "Network monitoring tools" -DESCRIPTION = "Utilities for the IP protocol, including traceroute6, \ -tracepath, tracepath6, ping, ping6 and arping." 
-HOMEPAGE = "https://github.com/iputils/iputils" -SECTION = "console/network" - -LICENSE = "BSD & GPLv2+" - -LIC_FILES_CHKSUM = "file://LICENSE;md5=55aa8c9fcad0691cef0ecd420361e390" - -DEPENDS = "gnutls" - -SRC_URI = "git://github.com/iputils/iputils \ - file://0001-ninfod-change-variable-name-to-avoid-colliding-with-.patch \ - file://0001-ninfod-fix-systemd-Documentation-url-error.patch \ - file://0001-rarpd-rdisc-Drop-PrivateUsers.patch \ - file://0001-iputils-Initialize-libgcrypt.patch \ - " -SRCREV = "13e00847176aa23683d68fce1d17ffb523510946" - -S = "${WORKDIR}/git" - -UPSTREAM_CHECK_GITTAGREGEX = "(?Ps\d+)" - -# Fixed in 2000-10-10, but the versioning of iputils -# breaks the version order. -CVE_CHECK_WHITELIST += "CVE-2000-1213 CVE-2000-1214" - -PACKAGECONFIG ??= "libcap libgcrypt rarpd \ - ${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', 'ninfod traceroute6', '', d)} \ - ${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)}" -PACKAGECONFIG[libcap] = "-DUSE_CAP=true, -DUSE_CAP=false, libcap" -PACKAGECONFIG[libgcrypt] = "-DUSE_CRYPTO=gcrypt, -DUSE_CRYPTO=none, libgcrypt" -PACKAGECONFIG[libidn] = "-DUSE_IDN=true, -DUSE_IDN=false, libidn2" -PACKAGECONFIG[gettext] = "-DUSE_GETTEXT=true, -DUSE_GETTEXT=false, gettext" -PACKAGECONFIG[ninfod] = "-DBUILD_NINFOD=true,-DBUILD_NINFOD=false," -PACKAGECONFIG[rarpd] = "-DBUILD_RARPD=true,-DBUILD_RARPD=false," -PACKAGECONFIG[systemd] = "-Dsystemdunitdir=${systemd_unitdir}/system,,systemd" -PACKAGECONFIG[traceroute6] = "-DBUILD_TRACEROUTE6=true,-DBUILD_TRACEROUTE6=false," -PACKAGECONFIG[docs] = "-DBUILD_HTML_MANS=true -DBUILD_MANS=true,-DBUILD_HTML_MANS=false -DBUILD_MANS=false, libxslt" - -inherit meson systemd update-alternatives - -# Have to disable setcap/suid as its not deterministic -EXTRA_OEMESON += "--prefix=${root_prefix}/ -DNO_SETCAP_OR_SUID=true" - -ALTERNATIVE_PRIORITY = "100" - -ALTERNATIVE_${PN}-ping = "ping" -ALTERNATIVE_LINK_NAME[ping] = "${base_bindir}/ping" - -SPLITPKGS = "${PN}-ping ${PN}-arping ${PN}-tracepath ${PN}-clockdiff ${PN}-tftpd ${PN}-rdisc \ - ${@bb.utils.contains('PACKAGECONFIG', 'rarpd', '${PN}-rarpd', '', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', '${PN}-traceroute6 ${PN}-ninfod', '', d)}" -PACKAGES += "${SPLITPKGS}" - -ALLOW_EMPTY_${PN} = "1" -RDEPENDS_${PN} += "${SPLITPKGS}" - -FILES_${PN} = "" -FILES_${PN}-ping = "${base_bindir}/ping.${BPN}" -FILES_${PN}-arping = "${base_bindir}/arping" -FILES_${PN}-tracepath = "${base_bindir}/tracepath" -FILES_${PN}-traceroute6 = "${base_bindir}/traceroute6" -FILES_${PN}-clockdiff = "${base_bindir}/clockdiff" -FILES_${PN}-tftpd = "${base_bindir}/tftpd" -FILES_${PN}-rarpd = "${base_sbindir}/rarpd ${systemd_unitdir}/system/rarpd@.service" -FILES_${PN}-rdisc = "${base_sbindir}/rdisc" -FILES_${PN}-ninfod = "${base_sbindir}/ninfod ${sysconfdir}/init.d/ninfod.sh" - -SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', '${PN}-ninfod', '', d)} \ - ${PN}-rdisc" -SYSTEMD_SERVICE_${PN}-ninfod = "ninfod.service" -SYSTEMD_SERVICE_${PN}-rdisc = "rdisc.service" diff --git a/poky/meta/recipes-extended/iputils/iputils_s20200821.bb b/poky/meta/recipes-extended/iputils/iputils_s20200821.bb new file mode 100644 index 000000000..28dd194a1 --- /dev/null +++ b/poky/meta/recipes-extended/iputils/iputils_s20200821.bb @@ -0,0 +1,72 @@ +SUMMARY = "Network monitoring tools" +DESCRIPTION = "Utilities for the IP protocol, including traceroute6, \ +tracepath, tracepath6, ping, ping6 and arping." 
+HOMEPAGE = "https://github.com/iputils/iputils" +SECTION = "console/network" + +LICENSE = "BSD & GPLv2+" + +LIC_FILES_CHKSUM = "file://LICENSE;md5=55aa8c9fcad0691cef0ecd420361e390" + +DEPENDS = "gnutls" + +SRC_URI = "git://github.com/iputils/iputils \ + file://0001-rarpd-rdisc-Drop-PrivateUsers.patch \ + " +SRCREV = "23c3782ae0c7f9c6ae59dbed8ad9204f8758542b" + +S = "${WORKDIR}/git" + +UPSTREAM_CHECK_GITTAGREGEX = "(?Ps\d+)" + +# Fixed in 2000-10-10, but the versioning of iputils +# breaks the version order. +CVE_CHECK_WHITELIST += "CVE-2000-1213 CVE-2000-1214" + +PACKAGECONFIG ??= "libcap rarpd \ + ${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', 'ninfod traceroute6', '', d)} \ + ${@bb.utils.filter('DISTRO_FEATURES', 'systemd', d)}" +PACKAGECONFIG[libcap] = "-DUSE_CAP=true, -DUSE_CAP=false, libcap" +PACKAGECONFIG[libidn] = "-DUSE_IDN=true, -DUSE_IDN=false, libidn2" +PACKAGECONFIG[gettext] = "-DUSE_GETTEXT=true, -DUSE_GETTEXT=false, gettext" +PACKAGECONFIG[ninfod] = "-DBUILD_NINFOD=true,-DBUILD_NINFOD=false," +PACKAGECONFIG[rarpd] = "-DBUILD_RARPD=true,-DBUILD_RARPD=false," +PACKAGECONFIG[systemd] = "-Dsystemdunitdir=${systemd_unitdir}/system,,systemd" +PACKAGECONFIG[tftpd] = "-DBUILD_TFTPD=true, -DBUILD_TFTPD=false," +PACKAGECONFIG[traceroute6] = "-DBUILD_TRACEROUTE6=true,-DBUILD_TRACEROUTE6=false," +PACKAGECONFIG[docs] = "-DBUILD_HTML_MANS=true -DBUILD_MANS=true,-DBUILD_HTML_MANS=false -DBUILD_MANS=false, libxslt" + +inherit meson systemd update-alternatives + +# Have to disable setcap/suid as its not deterministic +EXTRA_OEMESON += "--prefix=${root_prefix}/ -DNO_SETCAP_OR_SUID=true" + +ALTERNATIVE_PRIORITY = "100" + +ALTERNATIVE_${PN}-ping = "ping" +ALTERNATIVE_LINK_NAME[ping] = "${base_bindir}/ping" + +SPLITPKGS = "${PN}-ping ${PN}-arping ${PN}-tracepath ${PN}-clockdiff ${PN}-rdisc \ + ${@bb.utils.contains('PACKAGECONFIG', 'rarpd', '${PN}-rarpd', '', d)} \ + ${@bb.utils.contains('PACKAGECONFIG', 'tftpd', '${PN}-tftpd', '', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', '${PN}-traceroute6 ${PN}-ninfod', '', d)}" +PACKAGES += "${SPLITPKGS}" + +ALLOW_EMPTY_${PN} = "1" +RDEPENDS_${PN} += "${SPLITPKGS}" + +FILES_${PN} = "" +FILES_${PN}-ping = "${base_bindir}/ping.${BPN}" +FILES_${PN}-arping = "${base_bindir}/arping" +FILES_${PN}-tracepath = "${base_bindir}/tracepath" +FILES_${PN}-traceroute6 = "${base_bindir}/traceroute6" +FILES_${PN}-clockdiff = "${base_bindir}/clockdiff" +FILES_${PN}-tftpd = "${base_bindir}/tftpd ${sysconfdir}/xinetd.d/tftp" +FILES_${PN}-rarpd = "${base_sbindir}/rarpd ${systemd_unitdir}/system/rarpd@.service" +FILES_${PN}-rdisc = "${base_sbindir}/rdisc" +FILES_${PN}-ninfod = "${base_sbindir}/ninfod ${sysconfdir}/init.d/ninfod.sh" + +SYSTEMD_PACKAGES = "${@bb.utils.contains('DISTRO_FEATURES', 'ipv6', '${PN}-ninfod', '', d)} \ + ${PN}-rdisc" +SYSTEMD_SERVICE_${PN}-ninfod = "ninfod.service" +SYSTEMD_SERVICE_${PN}-rdisc = "rdisc.service" diff --git a/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.2.bb b/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.2.bb deleted file mode 100644 index a18246160..000000000 --- a/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.2.bb +++ /dev/null @@ -1,15 +0,0 @@ -SUMMARY = "pipeline manipulation library" -DESCRIPTION = "This is a C library for setting up and running pipelines of processes, \ -without needing to involve shell command-line parsing which is often \ -error-prone and insecure." 
-HOMEPAGE = "http://libpipeline.nongnu.org/" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -SRC_URI = "${SAVANNAH_GNU_MIRROR}/libpipeline/libpipeline-${PV}.tar.gz" -SRC_URI[md5sum] = "169de4cc1f6f7f7d430a5bed858b2fd3" -SRC_URI[sha256sum] = "fd59c649c1ae9d67604d1644f116ad4d297eaa66f838e3dfab96b41e85b059fb" - -inherit pkgconfig autotools - -acpaths = "-I ${S}/gl/m4 -I ${S}/m4" diff --git a/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.3.bb b/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.3.bb new file mode 100644 index 000000000..ee15fd064 --- /dev/null +++ b/poky/meta/recipes-extended/libpipeline/libpipeline_1.5.3.bb @@ -0,0 +1,14 @@ +SUMMARY = "pipeline manipulation library" +DESCRIPTION = "This is a C library for setting up and running pipelines of processes, \ +without needing to involve shell command-line parsing which is often \ +error-prone and insecure." +HOMEPAGE = "http://libpipeline.nongnu.org/" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +SRC_URI = "${SAVANNAH_GNU_MIRROR}/libpipeline/libpipeline-${PV}.tar.gz" +SRC_URI[sha256sum] = "5dbf08faf50fad853754293e57fd4e6c69bb8e486f176596d682c67e02a0adb0" + +inherit pkgconfig autotools + +acpaths = "-I ${S}/gl/m4 -I ${S}/m4" diff --git a/poky/meta/recipes-extended/man-pages/man-pages_5.07.bb b/poky/meta/recipes-extended/man-pages/man-pages_5.07.bb deleted file mode 100644 index 04d0597b3..000000000 --- a/poky/meta/recipes-extended/man-pages/man-pages_5.07.bb +++ /dev/null @@ -1,37 +0,0 @@ -SUMMARY = "Linux man-pages" -DESCRIPTION = "The Linux man-pages project documents the Linux kernel and C library interfaces that are employed by user programs" -SECTION = "console/utils" -HOMEPAGE = "http://www.kernel.org/pub/linux/docs/man-pages" -LICENSE = "GPLv2+" - -LIC_FILES_CHKSUM = "file://README;md5=207f70f56526417514ac46b6680e314f" -SRC_URI = "${KERNELORG_MIRROR}/linux/docs/${BPN}/${BP}.tar.gz" - -SRC_URI[md5sum] = "193d9223e9c6dbe1d928eaaecc1f68db" -SRC_URI[sha256sum] = "5ba58d7b2b9a8929903b1cf791ed4edf72ab225678d5ea0a6b80dc090080ac33" - -inherit manpages - -MAN_PKG = "${PN}" - -PACKAGECONFIG ??= "" -PACKAGECONFIG[manpages] = "" - -do_configure[noexec] = "1" -do_compile[noexec] = "1" - -do_install() { - oe_runmake install DESTDIR=${D} -} - -# Only deliveres man-pages so FILES_${PN} gets everything -FILES_${PN}-doc = "" -FILES_${PN} = "${mandir}/*" - -inherit update-alternatives - -ALTERNATIVE_PRIORITY = "100" -ALTERNATIVE_${PN} = "passwd.5 getspnam.3 crypt.3" -ALTERNATIVE_LINK_NAME[passwd.5] = "${mandir}/man5/passwd.5" -ALTERNATIVE_LINK_NAME[getspnam.3] = "${mandir}/man3/getspnam.3" -ALTERNATIVE_LINK_NAME[crypt.3] = "${mandir}/man3/crypt.3" diff --git a/poky/meta/recipes-extended/man-pages/man-pages_5.08.bb b/poky/meta/recipes-extended/man-pages/man-pages_5.08.bb new file mode 100644 index 000000000..caf9320a6 --- /dev/null +++ b/poky/meta/recipes-extended/man-pages/man-pages_5.08.bb @@ -0,0 +1,36 @@ +SUMMARY = "Linux man-pages" +DESCRIPTION = "The Linux man-pages project documents the Linux kernel and C library interfaces that are employed by user programs" +SECTION = "console/utils" +HOMEPAGE = "http://www.kernel.org/pub/linux/docs/man-pages" +LICENSE = "GPLv2+" + +LIC_FILES_CHKSUM = "file://README;md5=207f70f56526417514ac46b6680e314f" +SRC_URI = "${KERNELORG_MIRROR}/linux/docs/${BPN}/${BP}.tar.gz" + +SRC_URI[sha256sum] = "6e0b8ae23ee9467cee701f23dea908257a93e5fffa9e261b19a23efbd27e84a2" + +inherit manpages + +MAN_PKG = 
"${PN}" + +PACKAGECONFIG ??= "" +PACKAGECONFIG[manpages] = "" + +do_configure[noexec] = "1" +do_compile[noexec] = "1" + +do_install() { + oe_runmake install DESTDIR=${D} +} + +# Only deliveres man-pages so FILES_${PN} gets everything +FILES_${PN}-doc = "" +FILES_${PN} = "${mandir}/*" + +inherit update-alternatives + +ALTERNATIVE_PRIORITY = "100" +ALTERNATIVE_${PN} = "passwd.5 getspnam.3 crypt.3" +ALTERNATIVE_LINK_NAME[passwd.5] = "${mandir}/man5/passwd.5" +ALTERNATIVE_LINK_NAME[getspnam.3] = "${mandir}/man3/getspnam.3" +ALTERNATIVE_LINK_NAME[crypt.3] = "${mandir}/man3/crypt.3" diff --git a/poky/meta/recipes-extended/mc/files/0001-Ticket-4070-misc-Makefile.am-install-mc.lib-only-onc.patch b/poky/meta/recipes-extended/mc/files/0001-Ticket-4070-misc-Makefile.am-install-mc.lib-only-onc.patch deleted file mode 100644 index 824c12f8d..000000000 --- a/poky/meta/recipes-extended/mc/files/0001-Ticket-4070-misc-Makefile.am-install-mc.lib-only-onc.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 58dd59637ac5c6340ddfe96ad8b76883e4da20ef Mon Sep 17 00:00:00 2001 -From: Sergei Trofimovich -Date: Tue, 3 Mar 2020 09:26:12 +0000 -Subject: [PATCH] Ticket #4070: misc/Makefile.am: install mc.lib only once. - -Before the change mc.lib was installed twice due to being -in two _DATA variables: - -dist_pkgdata_DATA = \ - mc.lib - -pkgdata_DATA = \ - $(dist_pkgdata_DATA) \ - $(PKGDATA_OUT) - -This causes occasional install failures when two parallel -`/usr/bin/install` calls race in installing the file: - -$ make -j20 DESTDIR=/var/tmp/portage/app-misc/mc-4.8.24/image install -... - /usr/lib/portage/python3.6/ebuild-helpers/xattr/install \ - -c -m 644 mc.lib '/var/tmp/portage/app-misc/mc-4.8.24/image/usr/share/mc' - /usr/lib/portage/python3.6/ebuild-helpers/xattr/install \ - -c -m 644 mc.lib mc.charsets '/var/tmp/portage/app-misc/mc-4.8.24/image/usr/share/mc' -... - /usr/bin/install: cannot create regular file - '/var/tmp/portage/app-misc/mc-4.8.24/image/usr/share/mc/mc.lib': File exists - -After the change mc.lib is present only in dist_pkgdata_DATA. 
- -Upstream-Status: Backport [https://github.com/MidnightCommander/mc/commit/afb09f7cd7024484845ade25e15b8b93d6cf2d2c] - -Signed-off-by: Sergei Trofimovich -Signed-off-by: Andrew Borodin -Signed-off-by: Alexander Kanavin ---- - misc/Makefile.am | 2 -- - 1 file changed, 2 deletions(-) - -diff --git a/misc/Makefile.am b/misc/Makefile.am -index 8ed1826..24f4a0e 100644 ---- a/misc/Makefile.am -+++ b/misc/Makefile.am -@@ -17,7 +17,6 @@ dist_pkgdata_DATA = \ - mc.lib - - pkgdata_DATA = \ -- $(dist_pkgdata_DATA) \ - $(PKGDATA_OUT) - - SCRIPTS_IN = \ -@@ -54,7 +53,6 @@ EXTRA_DIST = \ - $(LIBFILES_SCRIPT) \ - $(SCRIPTS_IN) \ - $(noinst_DATA) \ -- $(dist_pkgdata_DATA) \ - $(PKGDATA_IN) - - install-data-hook: diff --git a/poky/meta/recipes-extended/mc/mc_4.8.24.bb b/poky/meta/recipes-extended/mc/mc_4.8.24.bb deleted file mode 100644 index 034df2a42..000000000 --- a/poky/meta/recipes-extended/mc/mc_4.8.24.bb +++ /dev/null @@ -1,55 +0,0 @@ -SUMMARY = "Midnight Commander is an ncurses based file manager" -HOMEPAGE = "http://www.midnight-commander.org/" -LICENSE = "GPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=270bbafe360e73f9840bd7981621f9c2" -SECTION = "console/utils" -DEPENDS = "ncurses glib-2.0 util-linux" -RDEPENDS_${PN} = "ncurses-terminfo-base" -RRECOMMENDS_${PN} = "ncurses-terminfo" - -SRC_URI = "http://www.midnight-commander.org/downloads/${BPN}-${PV}.tar.bz2 \ - file://0001-mc-replace-perl-w-with-use-warnings.patch \ - file://nomandate.patch \ - file://0001-Ticket-4070-misc-Makefile.am-install-mc.lib-only-onc.patch \ - " -SRC_URI[md5sum] = "2621de1fa9058a9c41a4248becc969f9" -SRC_URI[sha256sum] = "cfcc4d0546d0c3a88645a8bf71612ed36647ea3264d973b1f28183a0c84bae34" - -inherit autotools gettext pkgconfig - -# -# Both Samba (smb) and sftp require package delivered from meta-openembedded -# -PACKAGECONFIG ??= "" -PACKAGECONFIG[smb] = "--enable-vfs-smb,--disable-vfs-smb,samba," -PACKAGECONFIG[sftp] = "--enable-vfs-sftp,--disable-vfs-sftp,libssh2," - -EXTRA_OECONF = "--with-screen=ncurses --without-gpm-mouse --without-x --disable-configure-args" - -CACHED_CONFIGUREVARS += "ac_cv_path_PERL='/usr/bin/env perl'" -CACHED_CONFIGUREVARS += "ac_cv_path_PYTHON='/usr/bin/env python'" -CACHED_CONFIGUREVARS += "ac_cv_path_GREP='/usr/bin/env grep'" -CACHED_CONFIGUREVARS += "mc_cv_have_zipinfo=yes" - -do_install_append () { - sed -i -e '1s,#!.*perl,#!${bindir}/env perl,' ${D}${libexecdir}/mc/extfs.d/* - - rm ${D}${libexecdir}/mc/extfs.d/s3+ ${D}${libexecdir}/mc/extfs.d/uc1541 -} - -PACKAGES =+ "${BPN}-helpers-perl ${BPN}-helpers ${BPN}-fish" - -SUMMARY_${BPN}-helpers-perl = "Midnight Commander Perl-based helper scripts" -FILES_${BPN}-helpers-perl = "${libexecdir}/mc/extfs.d/a+ ${libexecdir}/mc/extfs.d/apt+ \ - ${libexecdir}/mc/extfs.d/deb ${libexecdir}/mc/extfs.d/deba \ - ${libexecdir}/mc/extfs.d/debd ${libexecdir}/mc/extfs.d/dpkg+ \ - ${libexecdir}/mc/extfs.d/mailfs ${libexecdir}/mc/extfs.d/patchfs \ - ${libexecdir}/mc/extfs.d/rpms+ ${libexecdir}/mc/extfs.d/ulib \ - ${libexecdir}/mc/extfs.d/uzip" -RDEPENDS_${BPN}-helpers-perl = "perl" - -SUMMARY_${BPN}-helpers = "Midnight Commander shell helper scripts" -FILES_${BPN}-helpers = "${libexecdir}/mc/extfs.d/* ${libexecdir}/mc/ext.d/*" - -SUMMARY_${BPN}-fish = "Midnight Commander Fish scripts" -FILES_${BPN}-fish = "${libexecdir}/mc/fish" diff --git a/poky/meta/recipes-extended/mc/mc_4.8.25.bb b/poky/meta/recipes-extended/mc/mc_4.8.25.bb new file mode 100644 index 000000000..83c8a6ecf --- /dev/null +++ b/poky/meta/recipes-extended/mc/mc_4.8.25.bb @@ -0,0 +1,53 @@ 
+SUMMARY = "Midnight Commander is an ncurses based file manager" +HOMEPAGE = "http://www.midnight-commander.org/" +LICENSE = "GPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=270bbafe360e73f9840bd7981621f9c2" +SECTION = "console/utils" +DEPENDS = "ncurses glib-2.0 util-linux" +RDEPENDS_${PN} = "ncurses-terminfo-base" +RRECOMMENDS_${PN} = "ncurses-terminfo" + +SRC_URI = "http://www.midnight-commander.org/downloads/${BPN}-${PV}.tar.bz2 \ + file://0001-mc-replace-perl-w-with-use-warnings.patch \ + file://nomandate.patch \ + " +SRC_URI[sha256sum] = "407dc20f70082f26c7f5716406cb755cbb6cba3f2f13b841b79a991282d310c2" + +inherit autotools gettext pkgconfig + +# +# Both Samba (smb) and sftp require package delivered from meta-openembedded +# +PACKAGECONFIG ??= "" +PACKAGECONFIG[smb] = "--enable-vfs-smb,--disable-vfs-smb,samba," +PACKAGECONFIG[sftp] = "--enable-vfs-sftp,--disable-vfs-sftp,libssh2," + +EXTRA_OECONF = "--with-screen=ncurses --without-gpm-mouse --without-x --disable-configure-args" + +CACHED_CONFIGUREVARS += "ac_cv_path_PERL='/usr/bin/env perl'" +CACHED_CONFIGUREVARS += "ac_cv_path_PYTHON='/usr/bin/env python'" +CACHED_CONFIGUREVARS += "ac_cv_path_GREP='/usr/bin/env grep'" +CACHED_CONFIGUREVARS += "mc_cv_have_zipinfo=yes" + +do_install_append () { + sed -i -e '1s,#!.*perl,#!${bindir}/env perl,' ${D}${libexecdir}/mc/extfs.d/* + + rm ${D}${libexecdir}/mc/extfs.d/s3+ ${D}${libexecdir}/mc/extfs.d/uc1541 +} + +PACKAGES =+ "${BPN}-helpers-perl ${BPN}-helpers ${BPN}-fish" + +SUMMARY_${BPN}-helpers-perl = "Midnight Commander Perl-based helper scripts" +FILES_${BPN}-helpers-perl = "${libexecdir}/mc/extfs.d/a+ ${libexecdir}/mc/extfs.d/apt+ \ + ${libexecdir}/mc/extfs.d/deb ${libexecdir}/mc/extfs.d/deba \ + ${libexecdir}/mc/extfs.d/debd ${libexecdir}/mc/extfs.d/dpkg+ \ + ${libexecdir}/mc/extfs.d/mailfs ${libexecdir}/mc/extfs.d/patchfs \ + ${libexecdir}/mc/extfs.d/rpms+ ${libexecdir}/mc/extfs.d/ulib \ + ${libexecdir}/mc/extfs.d/uzip" +RDEPENDS_${BPN}-helpers-perl = "perl" + +SUMMARY_${BPN}-helpers = "Midnight Commander shell helper scripts" +FILES_${BPN}-helpers = "${libexecdir}/mc/extfs.d/* ${libexecdir}/mc/ext.d/*" + +SUMMARY_${BPN}-fish = "Midnight Commander Fish scripts" +FILES_${BPN}-fish = "${libexecdir}/mc/fish" diff --git a/poky/meta/recipes-extended/msmtp/msmtp_1.8.11.bb b/poky/meta/recipes-extended/msmtp/msmtp_1.8.11.bb deleted file mode 100644 index 804ed59b4..000000000 --- a/poky/meta/recipes-extended/msmtp/msmtp_1.8.11.bb +++ /dev/null @@ -1,27 +0,0 @@ -SUMMARY = "msmtp is an SMTP client" -DESCRIPTION = "A sendmail replacement for use in MTAs like mutt" -HOMEPAGE = "https://marlam.de/msmtp/" -SECTION = "console/network" - -LICENSE = "GPLv3" -DEPENDS = "zlib gnutls" - -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -UPSTREAM_CHECK_URI = "https://marlam.de/msmtp/download/" - -SRC_URI = "https://marlam.de/${BPN}/releases/${BP}.tar.xz" -SRC_URI[sha256sum] = "f25f0fa177ce9e0ad65c127e790a37f35fb64fee9e33d90345844c5c86780e60" - -inherit gettext autotools update-alternatives pkgconfig - -EXTRA_OECONF += "--without-libsecret --without-libgsasl --without-libidn" - -ALTERNATIVE_${PN} = "sendmail" -# /usr/lib/sendmial is required by LSB core test -ALTERNATIVE_${PN}_linuxstdbase = "sendmail usr-lib-sendmail" -ALTERNATIVE_TARGET[sendmail] = "${bindir}/msmtp" -ALTERNATIVE_LINK_NAME[sendmail] = "${sbindir}/sendmail" -ALTERNATIVE_TARGET[usr-lib-sendmail] = "${bindir}/msmtp" -ALTERNATIVE_LINK_NAME[usr-lib-sendmail] = "/usr/lib/sendmail" -ALTERNATIVE_PRIORITY = "100" diff --git 
a/poky/meta/recipes-extended/msmtp/msmtp_1.8.12.bb b/poky/meta/recipes-extended/msmtp/msmtp_1.8.12.bb new file mode 100644 index 000000000..54798f9e6 --- /dev/null +++ b/poky/meta/recipes-extended/msmtp/msmtp_1.8.12.bb @@ -0,0 +1,27 @@ +SUMMARY = "msmtp is an SMTP client" +DESCRIPTION = "A sendmail replacement for use in MTAs like mutt" +HOMEPAGE = "https://marlam.de/msmtp/" +SECTION = "console/network" + +LICENSE = "GPLv3" +DEPENDS = "zlib gnutls" + +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +UPSTREAM_CHECK_URI = "https://marlam.de/msmtp/download/" + +SRC_URI = "https://marlam.de/${BPN}/releases/${BP}.tar.xz" +SRC_URI[sha256sum] = "a86fef9477339923afefe974988a38e32d0feb90dfeeb88f7f55aac356a96354" + +inherit gettext autotools update-alternatives pkgconfig + +EXTRA_OECONF += "--without-libsecret --without-libgsasl --without-libidn" + +ALTERNATIVE_${PN} = "sendmail" +# /usr/lib/sendmial is required by LSB core test +ALTERNATIVE_${PN}_linuxstdbase = "sendmail usr-lib-sendmail" +ALTERNATIVE_TARGET[sendmail] = "${bindir}/msmtp" +ALTERNATIVE_LINK_NAME[sendmail] = "${sbindir}/sendmail" +ALTERNATIVE_TARGET[usr-lib-sendmail] = "${bindir}/msmtp" +ALTERNATIVE_LINK_NAME[usr-lib-sendmail] = "/usr/lib/sendmail" +ALTERNATIVE_PRIORITY = "100" diff --git a/poky/meta/recipes-extended/packagegroups/packagegroup-core-base-utils.bb b/poky/meta/recipes-extended/packagegroups/packagegroup-core-base-utils.bb index 750183793..1e63da7f1 100644 --- a/poky/meta/recipes-extended/packagegroups/packagegroup-core-base-utils.bb +++ b/poky/meta/recipes-extended/packagegroups/packagegroup-core-base-utils.bb @@ -21,8 +21,8 @@ RDEPENDS_${PN} = "\ coreutils \ cpio \ ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "", "debianutils-run-parts", d)} \ - dhcp-client \ - ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "", "dhcp-server", d)} \ + dhcpcd \ + ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "", "kea", d)} \ diffutils \ ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "", "dpkg-start-stop", d)} \ e2fsprogs \ diff --git a/poky/meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb b/poky/meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb index aff00e56e..ec8f9e48b 100644 --- a/poky/meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb +++ b/poky/meta/recipes-extended/rpcbind/rpcbind_1.2.5.bb @@ -19,7 +19,7 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/rpcbind/rpcbind-${PV}.tar.bz2 \ SRC_URI[md5sum] = "ed46f09b9c0fa2d49015f6431bc5ea7b" SRC_URI[sha256sum] = "2ce360683963b35c19c43f0ee2c7f18aa5b81ef41c3fdbd15ffcb00b8bffda7a" -inherit autotools update-rc.d systemd pkgconfig +inherit autotools update-rc.d systemd pkgconfig update-alternatives PACKAGECONFIG ??= "tcp-wrappers" PACKAGECONFIG[tcp-wrappers] = "--enable-libwrap,--disable-libwrap,tcp-wrappers" @@ -50,3 +50,6 @@ do_install_append () { ${WORKDIR}/init.d > ${D}${sysconfdir}/init.d/rpcbind chmod 0755 ${D}${sysconfdir}/init.d/rpcbind } + +ALTERNATIVE_${PN} = "rpcinfo" +ALTERNATIVE_LINK_NAME[rpcinfo] = "${bindir}/rpcinfo" diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb deleted file mode 100644 index c668d8cbd..000000000 --- a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.18.bb +++ /dev/null @@ -1,27 +0,0 @@ -SUMMARY = "System load testing utility" -DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ -imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." 
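To make the description above concrete, a typical run on a target image looks something like the following; the options are standard stress-ng options quoted from memory rather than from this exact 0.11.x release:

    # four CPU workers, two VM workers of 128 MiB each, stop after 60 seconds
    stress-ng --cpu 4 --vm 2 --vm-bytes 128M --timeout 60s --metrics-brief

The recipe also RPROVIDES and symlinks the older 'stress' name, as seen in the do_install below, so existing scripts that still invoke stress keep working.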
-HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" -LICENSE = "GPLv2" -LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" - -SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ - file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ - file://no_daddr_t.patch \ - " -SRC_URI[sha256sum] = "07c82a5c89538b5b696a79192faa70d0232352004c9e532946f7f3613d0adf23" - -DEPENDS = "coreutils-native" - -PROVIDES = "stress" -RPROVIDES_${PN} = "stress" -RREPLACES_${PN} = "stress" -RCONFLICTS_${PN} = "stress" - -inherit bash-completion - -do_install() { - oe_runmake DESTDIR=${D} install - ln -s stress-ng ${D}${bindir}/stress -} - diff --git a/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.19.bb b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.19.bb new file mode 100644 index 000000000..f1af99e51 --- /dev/null +++ b/poky/meta/recipes-extended/stress-ng/stress-ng_0.11.19.bb @@ -0,0 +1,27 @@ +SUMMARY = "System load testing utility" +DESCRIPTION = "Deliberately simple workload generator for POSIX systems. It \ +imposes a configurable amount of CPU, memory, I/O, and disk stress on the system." +HOMEPAGE = "https://kernel.ubuntu.com/~cking/stress-ng/" +LICENSE = "GPLv2" +LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" + +SRC_URI = "https://kernel.ubuntu.com/~cking/tarballs/${BPN}/${BP}.tar.xz \ + file://0001-Do-not-preserve-ownership-when-installing-example-jo.patch \ + file://no_daddr_t.patch \ + " +SRC_URI[sha256sum] = "a50b753f00a9c880eda4f9d72bb82e37149ac24fab4265212e101926a1c20868" + +DEPENDS = "coreutils-native" + +PROVIDES = "stress" +RPROVIDES_${PN} = "stress" +RREPLACES_${PN} = "stress" +RCONFLICTS_${PN} = "stress" + +inherit bash-completion + +do_install() { + oe_runmake DESTDIR=${D} install + ln -s stress-ng ${D}${bindir}/stress +} + diff --git a/poky/meta/recipes-extended/sysstat/sysstat.inc b/poky/meta/recipes-extended/sysstat/sysstat.inc index 8fd87b943..e5e134c03 100644 --- a/poky/meta/recipes-extended/sysstat/sysstat.inc +++ b/poky/meta/recipes-extended/sysstat/sysstat.inc @@ -62,6 +62,6 @@ pkg_postinst_${PN} () { fi } -FILES_${PN} += "${systemd_system_unitdir}" +FILES_${PN} += "${systemd_system_unitdir} ${nonarch_base_libdir}/systemd" TARGET_CC_ARCH += "${LDFLAGS}" diff --git a/poky/meta/recipes-extended/sysstat/sysstat_12.2.2.bb b/poky/meta/recipes-extended/sysstat/sysstat_12.2.2.bb deleted file mode 100644 index 333d10295..000000000 --- a/poky/meta/recipes-extended/sysstat/sysstat_12.2.2.bb +++ /dev/null @@ -1,8 +0,0 @@ -require sysstat.inc - -LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb" - -SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch" - -SRC_URI[md5sum] = "1073fdf7bba58483a467a8a0681beeda" -SRC_URI[sha256sum] = "78388b64acec81378b962e66b40e57acd4b18ff17a849bc9308f95d290248c9c" diff --git a/poky/meta/recipes-extended/sysstat/sysstat_12.4.0.bb b/poky/meta/recipes-extended/sysstat/sysstat_12.4.0.bb new file mode 100644 index 000000000..6773213a4 --- /dev/null +++ b/poky/meta/recipes-extended/sysstat/sysstat_12.4.0.bb @@ -0,0 +1,7 @@ +require sysstat.inc + +LIC_FILES_CHKSUM = "file://COPYING;md5=a23a74b3f4caf9616230789d94217acb" + +SRC_URI += "file://0001-configure.in-remove-check-for-chkconfig.patch" + +SRC_URI[sha256sum] = "78556c339795ecd07eb10ee09e3f5d52901d3a29f874ae92b45efd0de7b62d16" diff --git a/poky/meta/recipes-extended/timezone/tzdata.bb b/poky/meta/recipes-extended/timezone/tzdata.bb index 1e2b440fb..6aac516f6 100644 --- 
a/poky/meta/recipes-extended/timezone/tzdata.bb +++ b/poky/meta/recipes-extended/timezone/tzdata.bb @@ -37,6 +37,8 @@ do_install () { cp -pP "${S}/zone.tab" ${D}${datadir}/zoneinfo cp -pP "${S}/zone1970.tab" ${D}${datadir}/zoneinfo cp -pP "${S}/iso3166.tab" ${D}${datadir}/zoneinfo + cp -pP "${S}/leapseconds" ${D}${datadir}/zoneinfo + cp -pP "${S}/leap-seconds.list" ${D}${datadir}/zoneinfo # Install default timezone if [ -e ${D}${datadir}/zoneinfo/${DEFAULT_TIMEZONE} ]; then @@ -145,6 +147,8 @@ RPROVIDES_tzdata-misc = "tzdata-misc" FILES_tzdata-core += " \ ${sysconfdir}/localtime \ ${sysconfdir}/timezone \ + ${datadir}/zoneinfo/leapseconds \ + ${datadir}/zoneinfo/leap-seconds.list \ ${datadir}/zoneinfo/Pacific/Honolulu \ ${datadir}/zoneinfo/America/Anchorage \ ${datadir}/zoneinfo/America/Los_Angeles \ diff --git a/poky/meta/recipes-gnome/epiphany/epiphany_3.36.3.bb b/poky/meta/recipes-gnome/epiphany/epiphany_3.36.3.bb deleted file mode 100644 index 8eb4ac4fc..000000000 --- a/poky/meta/recipes-gnome/epiphany/epiphany_3.36.3.bb +++ /dev/null @@ -1,20 +0,0 @@ -SUMMARY = "WebKit based web browser for GNOME" -BUGTRACKER = "https://gitlab.gnome.org/GNOME/epiphany" -LICENSE = "GPLv3+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -DEPENDS = "libsoup-2.4 webkitgtk gtk+3 iso-codes avahi libnotify gcr \ - gsettings-desktop-schemas libxml2-native \ - glib-2.0 glib-2.0-native json-glib libdazzle libhandy" - -GNOMEBASEBUILDCLASS = "meson" -inherit gnomebase gsettings features_check upstream-version-is-even gettext mime-xdg -REQUIRED_DISTRO_FEATURES = "x11 opengl" - -SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive \ - file://0002-help-meson.build-disable-the-use-of-yelp.patch \ - " -SRC_URI[archive.sha256sum] = "621b5626374891769afec9c35946ab6c3910ea9cf9a2498bab166c99bd91e16f" - -FILES_${PN} += "${datadir}/dbus-1 ${datadir}/gnome-shell/search-providers ${datadir}/metainfo" -RDEPENDS_${PN} = "iso-codes adwaita-icon-theme gsettings-desktop-schemas" diff --git a/poky/meta/recipes-gnome/epiphany/epiphany_3.36.4.bb b/poky/meta/recipes-gnome/epiphany/epiphany_3.36.4.bb new file mode 100644 index 000000000..4c3b18331 --- /dev/null +++ b/poky/meta/recipes-gnome/epiphany/epiphany_3.36.4.bb @@ -0,0 +1,20 @@ +SUMMARY = "WebKit based web browser for GNOME" +BUGTRACKER = "https://gitlab.gnome.org/GNOME/epiphany" +LICENSE = "GPLv3+" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +DEPENDS = "libsoup-2.4 webkitgtk gtk+3 iso-codes avahi libnotify gcr \ + gsettings-desktop-schemas libxml2-native \ + glib-2.0 glib-2.0-native json-glib libdazzle libhandy" + +GNOMEBASEBUILDCLASS = "meson" +inherit gnomebase gsettings features_check upstream-version-is-even gettext mime-xdg +REQUIRED_DISTRO_FEATURES = "x11 opengl" + +SRC_URI = "${GNOME_MIRROR}/${GNOMEBN}/${@gnome_verdir("${PV}")}/${GNOMEBN}-${PV}.tar.${GNOME_COMPRESS_TYPE};name=archive \ + file://0002-help-meson.build-disable-the-use-of-yelp.patch \ + " +SRC_URI[archive.sha256sum] = "588a75b1588f5a509c33cf0be6a38a0f4fc1748eeb499a51d991ddef485242bf" + +FILES_${PN} += "${datadir}/dbus-1 ${datadir}/gnome-shell/search-providers ${datadir}/metainfo" +RDEPENDS_${PN} = "iso-codes adwaita-icon-theme gsettings-desktop-schemas" diff --git a/poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb b/poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb index d0df5015a..0405fa78b 100644 --- a/poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb +++ 
b/poky/meta/recipes-gnome/gdk-pixbuf/gdk-pixbuf_2.40.0.bb @@ -40,20 +40,20 @@ inherit meson pkgconfig gettext pixbufcache ptest-gnome upstream-version-is-even GIR_MESON_OPTION = 'gir' -EXTRA_OEMESON_append = " ${@bb.utils.contains('PTEST_ENABLED', '1', '-Dinstalled_tests=true', '-Dinstalled_tests=false', d)}" - LIBV = "2.10.0" GDK_PIXBUF_LOADERS ?= "png jpeg" -PACKAGECONFIG = "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)} ${GDK_PIXBUF_LOADERS}" +PACKAGECONFIG = "${GDK_PIXBUF_LOADERS} \ + ${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)} \ + ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}" PACKAGECONFIG_class-native = "${GDK_PIXBUF_LOADERS}" PACKAGECONFIG[png] = "-Dpng=true,-Dpng=false,libpng" PACKAGECONFIG[jpeg] = "-Djpeg=true,-Djpeg=false,jpeg" PACKAGECONFIG[tiff] = "-Dtiff=true,-Dtiff=false,tiff" PACKAGECONFIG[jpeg2000] = "-Djasper=true,-Djasper=false,jasper" - +PACKAGECONFIG[tests] = "-Dinstalled_tests=true,-Dinstalled_tests=false" PACKAGECONFIG[x11] = "-Dx11=true,-Dx11=false,virtual/libx11" PACKAGES =+ "${PN}-xlib" diff --git a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb deleted file mode 100644 index 70c0e66db..000000000 --- a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.21.bb +++ /dev/null @@ -1,19 +0,0 @@ -require gtk+3.inc - -MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" - -SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar.xz \ - file://0001-Hardcoded-libtool.patch \ - file://0002-Do-not-try-to-initialize-GL-without-libGL.patch \ - file://0003-Add-disable-opengl-configure-option.patch \ - file://link_fribidi.patch \ - " -SRC_URI[md5sum] = "95afed6c860d27de827db66434d681da" -SRC_URI[sha256sum] = "aeea6ae7cd35e83dfc7699be716519faefca346c62e784dd1a37d9df94c08f52" - -S = "${WORKDIR}/gtk+-${PV}" - -LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ - file://gtk/gtk.h;endline=25;md5=1d8dc0fccdbfa26287a271dce88af737 \ - file://gdk/gdk.h;endline=25;md5=c920ce39dc88c6f06d3e7c50e08086f2 \ - file://tests/testgtk.c;endline=25;md5=cb732daee1d82af7a2bf953cf3cf26f1" diff --git a/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.22.bb b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.22.bb new file mode 100644 index 000000000..6af2bd94c --- /dev/null +++ b/poky/meta/recipes-gnome/gtk+/gtk+3_3.24.22.bb @@ -0,0 +1,18 @@ +require gtk+3.inc + +MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" + +SRC_URI = "http://ftp.gnome.org/pub/gnome/sources/gtk+/${MAJ_VER}/gtk+-${PV}.tar.xz \ + file://0001-Hardcoded-libtool.patch \ + file://0002-Do-not-try-to-initialize-GL-without-libGL.patch \ + file://0003-Add-disable-opengl-configure-option.patch \ + file://link_fribidi.patch \ + " +SRC_URI[sha256sum] = "bf18a4a5dff28a7b02aaef1b949c2d09c96c18387eddab152bb4cd55a5b67dda" + +S = "${WORKDIR}/gtk+-${PV}" + +LIC_FILES_CHKSUM = "file://COPYING;md5=5f30f0716dfdd0d91eb439ebec522ec2 \ + file://gtk/gtk.h;endline=25;md5=1d8dc0fccdbfa26287a271dce88af737 \ + file://gdk/gdk.h;endline=25;md5=c920ce39dc88c6f06d3e7c50e08086f2 \ + file://tests/testgtk.c;endline=25;md5=cb732daee1d82af7a2bf953cf3cf26f1" diff --git a/poky/meta/recipes-gnome/json-glib/json-glib/0001-scanner-use-macro-instead-of-cast-to-convert-pointer.patch b/poky/meta/recipes-gnome/json-glib/json-glib/0001-scanner-use-macro-instead-of-cast-to-convert-pointer.patch new file mode 100644 index 000000000..2a834b674 --- /dev/null +++ b/poky/meta/recipes-gnome/json-glib/json-glib/0001-scanner-use-macro-instead-of-cast-to-convert-pointer.patch @@ -0,0 +1,33 @@ +From 
d60fcd5bd5c2675e4342775b910a2ea48ec0eccb Mon Sep 17 00:00:00 2001 +From: Dimitry Andric +Date: Wed, 19 Aug 2020 03:35:16 +0000 +Subject: [PATCH] scanner: use macro instead of cast to convert pointer to integer + +Clang 11 build failed due to a new warning (part of -Werror=pointer-to-int-cast): +../json-glib/json-scanner.c:928:13: error: cast to smaller integer type 'GTokenType' from 'gpointer' (aka 'void *') [-Werror,-Wvoid-pointer-to-enum-cast] + *token_p = (GTokenType) value_p->v_symbol; + ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Upstream-Status: Backport [https://gitlab.gnome.org/GNOME/json-glib/-/commit/8c5fabe962b7337066dac7a697d23fce257a5d64] +Signed-off-by: Jan Beich +Signed-off-by: Khem Raj +--- + json-glib/json-scanner.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/json-glib/json-scanner.c b/json-glib/json-scanner.c +index 0c9919f..59dd29c 100644 +--- a/json-glib/json-scanner.c ++++ b/json-glib/json-scanner.c +@@ -925,7 +925,7 @@ json_scanner_get_token_i (JsonScanner *scanner, + + case G_TOKEN_SYMBOL: + if (scanner->config->symbol_2_token) +- *token_p = (GTokenType) value_p->v_symbol; ++ *token_p = GPOINTER_TO_INT (value_p->v_symbol); + break; + + case G_TOKEN_BINARY: +-- +2.28.0 + diff --git a/poky/meta/recipes-gnome/json-glib/json-glib_1.4.4.bb b/poky/meta/recipes-gnome/json-glib/json-glib_1.4.4.bb index 5143d73ed..add9ff41a 100644 --- a/poky/meta/recipes-gnome/json-glib/json-glib_1.4.4.bb +++ b/poky/meta/recipes-gnome/json-glib/json-glib_1.4.4.bb @@ -12,9 +12,11 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=7fbc338309ac38fefcd64b04bb903e34" DEPENDS = "glib-2.0" GNOMEBASEBUILDCLASS = "meson" -inherit gnomebase lib_package gobject-introspection gtk-doc gettext ptest-gnome manpages +inherit gnomebase lib_package gobject-introspection gtk-doc gettext ptest-gnome manpages upstream-version-is-even -SRC_URI += "file://run-ptest" +SRC_URI += "file://run-ptest \ + file://0001-scanner-use-macro-instead-of-cast-to-convert-pointer.patch \ +" SRC_URI[archive.md5sum] = "4d4bb9837f6d31e32d0ce658ae135f68" SRC_URI[archive.sha256sum] = "720c5f4379513dc11fd97dc75336eb0c0d3338c53128044d9fabec4374f4bc47" diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Do-not-disable-introspection-in-cross-builds.patch b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Do-not-disable-introspection-in-cross-builds.patch new file mode 100644 index 000000000..d00656fc8 --- /dev/null +++ b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-Do-not-disable-introspection-in-cross-builds.patch @@ -0,0 +1,27 @@ +From df5848f423ec0f4b3e519900014349a0c00b1969 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Mon, 17 Aug 2020 16:36:03 +0200 +Subject: [PATCH] Do not disable introspection in cross builds. + +Yocto can and does support introspection cross builds, through +running target binaries under qemu emulation. 
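On the recipe side this pairs with the gobject-introspection class: the new harfbuzz 2.7.2 recipe later in this patch inherits the class and maps its decision onto meson feature options, roughly as quoted here from that recipe:

    inherit meson pkgconfig lib_package gtk-doc gobject-introspection

    GIR_MESON_ENABLE_FLAG = 'enabled'
    GIR_MESON_DISABLE_FLAG = 'disabled'

With that in place the class passes -Dintrospection=enabled or -Dintrospection=disabled depending on whether qemu-backed introspection data is enabled for the target, which is why the unconditional meson.is_cross_build() check removed below has to go.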
+ +Upstream-Status: Pending +Signed-off-by: Alexander Kanavin +--- + src/meson.build | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/meson.build b/src/meson.build +index 1865633..5e1787c 100644 +--- a/src/meson.build ++++ b/src/meson.build +@@ -634,7 +634,7 @@ if have_gobject + gir = find_program('g-ir-scanner', required: get_option('introspection')) + build_gir = gir.found() + +- build_gir = build_gir and not meson.is_cross_build() ++ build_gir = build_gir + if not build_gir and get_option('introspection').enabled() + error('Introspection support is requested but it isn\'t available in cross builds') + endif diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-src-hb-gobject-enums.cc.tmpl-write-out-only-the-file.patch b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-src-hb-gobject-enums.cc.tmpl-write-out-only-the-file.patch new file mode 100644 index 000000000..d9fccfac0 --- /dev/null +++ b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/0001-src-hb-gobject-enums.cc.tmpl-write-out-only-the-file.patch @@ -0,0 +1,28 @@ +From f316b794265f28e89821dfab0772caac1bb6b056 Mon Sep 17 00:00:00 2001 +From: Alexander Kanavin +Date: Tue, 18 Aug 2020 10:31:47 +0000 +Subject: [PATCH] src/hb-gobject-enums.cc.tmpl: write out only the filename, + not the full path + +This is beneficial for reproducible builds, as build paths can vary +between builds. + +Upstream-Status: Backport [https://github.com/harfbuzz/harfbuzz/commit/e876886d1e877ad90b6f02badefd6ebee1bc0b09] +Signed-off-by: Alexander Kanavin +--- + src/hb-gobject-enums.cc.tmpl | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/hb-gobject-enums.cc.tmpl b/src/hb-gobject-enums.cc.tmpl +index 2ffd1c9..87a11dd 100644 +--- a/src/hb-gobject-enums.cc.tmpl ++++ b/src/hb-gobject-enums.cc.tmpl +@@ -43,7 +43,7 @@ + /*** END file-header ***/ + + /*** BEGIN file-production ***/ +-/* enumerations from "@filename@" */ ++/* enumerations from "@basename@" */ + /*** END file-production ***/ + + /*** BEGIN file-tail ***/ diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz/version-race.patch b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/version-race.patch new file mode 100644 index 000000000..2d692f36b --- /dev/null +++ b/poky/meta/recipes-graphics/harfbuzz/harfbuzz/version-race.patch @@ -0,0 +1,121 @@ +Upstream-Status: Backport [https://github.com/harfbuzz/harfbuzz/commit/5aff83104e03d6d2617987d24a51e490ab7a5cd1] +Signed-off-by: Ross Burton + +From bc1c93fbe04459a4b12c76c713ba1b750d2d9108 Mon Sep 17 00:00:00 2001 +From: Ross Burton +Date: Mon, 7 Sep 2020 17:11:17 +0100 +Subject: [PATCH 1/2] [build] No need to pass source directory to + gen-hb-version + +The input file is by definition in the source directory, so dirname() +that instead of needing the directory to be passed. + +Needed because a follow-up commit will change when this is called, and the +source directory isn't trivially available at that point. 
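Put differently, after this first commit the generator takes three arguments instead of four. Judging from the meson command lines in the hunks below, the call changes roughly from

    gen-hb-version.py 2.7.2 hb-version.h <srcdir>/src <srcdir>/src/hb-version.h.in

to

    gen-hb-version.py 2.7.2 hb-version.h <srcdir>/src/hb-version.h.in

with the script now deriving the source directory itself via os.path.dirname() on the input path (the version and paths shown are illustrative; meson substitutes project_version(), @OUTPUT@ and @INPUT@ at build time).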
+--- + src/gen-hb-version.py | 6 +++--- + src/meson.build | 2 +- + 2 files changed, 4 insertions(+), 4 deletions(-) + +diff --git a/src/gen-hb-version.py b/src/gen-hb-version.py +index 15e56b93..bf16f88a 100755 +--- a/src/gen-hb-version.py ++++ b/src/gen-hb-version.py +@@ -4,15 +4,15 @@ + + import os, sys, shutil + +-if len (sys.argv) < 5: ++if len (sys.argv) < 4: + sys.exit(__doc__) + + version = sys.argv[1] + major, minor, micro = version.split (".") + + OUTPUT = sys.argv[2] +-CURRENT_SOURCE_DIR = sys.argv[3] +-INPUT = sys.argv[4] ++INPUT = sys.argv[3] ++CURRENT_SOURCE_DIR = os.path.dirname(INPUT) + + with open (INPUT, "r", encoding='utf-8') as template: + with open (OUTPUT, "wb") as output: +diff --git a/src/meson.build b/src/meson.build +index 5d7cd578..2d78c992 100644 +--- a/src/meson.build ++++ b/src/meson.build +@@ -286,7 +286,7 @@ custom_target('hb-version.h', + input: 'hb-version.h.in', + output: 'hb-version.h', + command: [find_program('gen-hb-version.py'), meson.project_version(), +- '@OUTPUT@', '@CURRENT_SOURCE_DIR@', '@INPUT@'], ++ '@OUTPUT@', '@INPUT@'], + ) + + ragel = find_program('ragel', required: false) +-- +2.28.0 + + +From 5aff83104e03d6d2617987d24a51e490ab7a5cd1 Mon Sep 17 00:00:00 2001 +From: Ross Burton +Date: Mon, 7 Sep 2020 10:55:33 +0100 +Subject: [PATCH 2/2] [build] generate hb-version.h once at configure time with + Meson + +Currently with Meson hb-version.h is generated during the build without +any explicit dependencies which can result in build failures due races +over the file. + +Change this to be generated at configure time, so that the file is always +generated once before the build itself. + +Closes #2667 +--- + src/meson.build | 17 ++++++++--------- + 1 file changed, 8 insertions(+), 9 deletions(-) + +diff --git a/src/meson.build b/src/meson.build +index 2d78c992..19290245 100644 +--- a/src/meson.build ++++ b/src/meson.build +@@ -1,3 +1,10 @@ ++hb_version_h = configure_file( ++ command: [find_program('gen-hb-version.py'), meson.project_version(), '@OUTPUT@', '@INPUT@'], ++ input: 'hb-version.h.in', ++ output: 'hb-version.h', ++ install: true, ++ install_dir: join_paths(get_option('includedir'), meson.project_name())) ++ + # Base and default-included sources and headers + hb_base_sources = files( + 'hb-aat-layout-ankr-table.hh', +@@ -214,9 +221,9 @@ hb_base_headers = files( + 'hb-shape.h', + 'hb-style.h', + 'hb-unicode.h', +- 'hb-version.h', + 'hb.h', + ) ++hb_base_headers += hb_version_h + + # Optional Sources and Headers with external deps + +@@ -281,14 +288,6 @@ hb_gobject_headers = files( + 'hb-gobject-structs.h', + ) + +-custom_target('hb-version.h', +- build_by_default: true, +- input: 'hb-version.h.in', +- output: 'hb-version.h', +- command: [find_program('gen-hb-version.py'), meson.project_version(), +- '@OUTPUT@', '@INPUT@'], +-) +- + ragel = find_program('ragel', required: false) + if not ragel.found() + warning('You have to install ragel if you are going to develop HarfBuzz itself') +-- +2.28.0 + diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb deleted file mode 100644 index 08c8f8323..000000000 --- a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.1.bb +++ /dev/null @@ -1,43 +0,0 @@ -SUMMARY = "Text shaping library" -DESCRIPTION = "HarfBuzz is an OpenType text shaping engine." 
-HOMEPAGE = "http://www.freedesktop.org/wiki/Software/HarfBuzz" -BUGTRACKER = "https://bugs.freedesktop.org/enter_bug.cgi?product=HarfBuzz" -SECTION = "libs" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=8f787620b7d3866d9552fd1924c07572 \ - file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc" - -UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" -UPSTREAM_CHECK_REGEX = "harfbuzz-(?P\d+(\.\d+)+).tar" - -SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "e95ee43b6bd0d3d1307e2aacf0f9c0050e5baceb21988b367b833028114aa569" - -inherit autotools pkgconfig lib_package gtk-doc - -PACKAGECONFIG ??= "cairo fontconfig freetype glib icu" -PACKAGECONFIG[cairo] = "--with-cairo,--without-cairo,cairo" -PACKAGECONFIG[fontconfig] = "--with-fontconfig,--without-fontconfig,fontconfig" -PACKAGECONFIG[freetype] = "--with-freetype,--without-freetype,freetype" -PACKAGECONFIG[glib] = "--with-glib,--without-glib,glib-2.0" -PACKAGECONFIG[graphite] = "--with-graphite2,--without-graphite2,graphite2" -PACKAGECONFIG[icu] = "--with-icu,--without-icu,icu" - -PACKAGES =+ "${PN}-icu ${PN}-icu-dev ${PN}-subset" - -LEAD_SONAME = "libharfbuzz.so" - -do_install_append() { - # If no tools are installed due to PACKAGECONFIG then this directory is - #still installed, so remove it to stop packaging wanings. - rmdir --ignore-fail-on-non-empty ${D}${bindir} -} - -FILES_${PN}-icu = "${libdir}/libharfbuzz-icu.so.*" -FILES_${PN}-icu-dev = "${libdir}/libharfbuzz-icu.la \ - ${libdir}/libharfbuzz-icu.so \ - ${libdir}/pkgconfig/harfbuzz-icu.pc \ -" -FILES_${PN}-subset = "${libdir}/libharfbuzz-subset.so.*" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.2.bb b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.2.bb new file mode 100644 index 000000000..f95273f68 --- /dev/null +++ b/poky/meta/recipes-graphics/harfbuzz/harfbuzz_2.7.2.bb @@ -0,0 +1,52 @@ +SUMMARY = "Text shaping library" +DESCRIPTION = "HarfBuzz is an OpenType text shaping engine." 
+HOMEPAGE = "http://www.freedesktop.org/wiki/Software/HarfBuzz" +BUGTRACKER = "https://bugs.freedesktop.org/enter_bug.cgi?product=HarfBuzz" +SECTION = "libs" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=8f787620b7d3866d9552fd1924c07572 \ + file://src/hb-ucd.cc;beginline=1;endline=15;md5=29d4dcb6410429195df67efe3382d8bc" + +UPSTREAM_CHECK_URI = "https://github.com/${BPN}/${BPN}/releases" +UPSTREAM_CHECK_REGEX = "harfbuzz-(?P\d+(\.\d+)+).tar" + +SRC_URI = "https://github.com/${BPN}/${BPN}/releases/download/${PV}/${BPN}-${PV}.tar.xz \ + file://0001-Do-not-disable-introspection-in-cross-builds.patch \ + file://0001-src-hb-gobject-enums.cc.tmpl-write-out-only-the-file.patch \ + file://version-race.patch \ + " +SRC_URI[sha256sum] = "b8c048d7c2964a12f2c80deb6634dfc836b603dd12bf0d0a3df1627698e220ce" + +inherit meson pkgconfig lib_package gtk-doc gobject-introspection + +GIR_MESON_ENABLE_FLAG = 'enabled' +GIR_MESON_DISABLE_FLAG = 'disabled' +GTKDOC_MESON_ENABLE_FLAG = 'enabled' +GTKDOC_MESON_DISABLE_FLAG = 'disabled' + +PACKAGECONFIG ??= "cairo fontconfig freetype glib icu" +PACKAGECONFIG[cairo] = "-Dcairo=enabled,-Dcairo=disabled,cairo" +PACKAGECONFIG[fontconfig] = "-Dfontconfig=enabled,-Dfontconfig=disabled,fontconfig" +PACKAGECONFIG[freetype] = "-Dfreetype=enabled,-Dfreetype=disabled,freetype" +PACKAGECONFIG[glib] = "-Dglib=enabled,-Dglib=disabled,glib-2.0" +PACKAGECONFIG[graphite] = "-Dgraphite=enabled,-Dgraphite=disabled,graphite2" +PACKAGECONFIG[icu] = "-Dicu=enabled,-Dicu=disabled,icu" + +PACKAGES =+ "${PN}-icu ${PN}-icu-dev ${PN}-subset" + +LEAD_SONAME = "libharfbuzz.so" + +do_install_append() { + # If no tools are installed due to PACKAGECONFIG then this directory is + #still installed, so remove it to stop packaging wanings. + rmdir --ignore-fail-on-non-empty ${D}${bindir} +} + +FILES_${PN}-icu = "${libdir}/libharfbuzz-icu.so.*" +FILES_${PN}-icu-dev = "${libdir}/libharfbuzz-icu.la \ + ${libdir}/libharfbuzz-icu.so \ + ${libdir}/pkgconfig/harfbuzz-icu.pc \ +" +FILES_${PN}-subset = "${libdir}/libharfbuzz-subset.so.*" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/images/core-image-weston.bb b/poky/meta/recipes-graphics/images/core-image-weston.bb index f5102e198..fa7e9ef42 100644 --- a/poky/meta/recipes-graphics/images/core-image-weston.bb +++ b/poky/meta/recipes-graphics/images/core-image-weston.bb @@ -10,3 +10,5 @@ REQUIRED_DISTRO_FEATURES = "wayland" CORE_IMAGE_BASE_INSTALL += "weston weston-init weston-examples gtk+3-demo clutter-1.0-examples" CORE_IMAGE_BASE_INSTALL += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'weston-xwayland matchbox-terminal', '', d)}" + +QB_MEM = "-m 512" diff --git a/poky/meta/recipes-graphics/libva/libva-utils_2.8.0.bb b/poky/meta/recipes-graphics/libva/libva-utils_2.8.0.bb index fbc5a0583..78e380533 100644 --- a/poky/meta/recipes-graphics/libva/libva-utils_2.8.0.bb +++ b/poky/meta/recipes-graphics/libva/libva-utils_2.8.0.bb @@ -18,7 +18,7 @@ SRC_URI = "git://github.com/intel/libva-utils.git;branch=v2.8-branch" SRCREV = "af101a46d24c3f71a12b9f1ddb0c63626b19b3d8" S = "${WORKDIR}/git" -UPSTREAM_CHECK_GITTAGREGEX = "(?P(\d+(\.\d+)+))" +UPSTREAM_CHECK_GITTAGREGEX = "(?P(\d+(\.\d+)+))$" DEPENDS = "libva" diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb deleted file mode 100644 index e50782be1..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.4.bb +++ /dev/null @@ -1,15 +0,0 @@ -require mesa.inc - -SUMMARY += " (OpenGL only, no EGL/GLES)" - 
-PROVIDES = "virtual/libgl virtual/mesa" - -S = "${WORKDIR}/mesa-${PV}" - -# At least one DRI rendering engine is required to build mesa. -# When no X11 is available, use osmesa for the rendering engine. -PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" -PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" - -# When NOT using X11, we need to make sure we have swrast available. -DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.6.bb b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.6.bb new file mode 100644 index 000000000..e50782be1 --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa-gl_20.1.6.bb @@ -0,0 +1,15 @@ +require mesa.inc + +SUMMARY += " (OpenGL only, no EGL/GLES)" + +PROVIDES = "virtual/libgl virtual/mesa" + +S = "${WORKDIR}/mesa-${PV}" + +# At least one DRI rendering engine is required to build mesa. +# When no X11 is available, use osmesa for the rendering engine. +PACKAGECONFIG ??= "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" +PACKAGECONFIG_class-target = "opengl dri ${@bb.utils.contains('DISTRO_FEATURES', 'x11', 'x11', 'osmesa', d)}" + +# When NOT using X11, we need to make sure we have swrast available. +DRIDRIVERS_append = "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '', ',swrast', d)}" diff --git a/poky/meta/recipes-graphics/mesa/mesa.inc b/poky/meta/recipes-graphics/mesa/mesa.inc index 8d8e5288a..af2a5c0f9 100644 --- a/poky/meta/recipes-graphics/mesa/mesa.inc +++ b/poky/meta/recipes-graphics/mesa/mesa.inc @@ -23,7 +23,7 @@ SRC_URI = "https://mesa.freedesktop.org/archive/mesa-${PV}.tar.xz \ file://0001-meson-misdetects-64bit-atomics-on-mips-clang.patch \ " -SRC_URI[sha256sum] = "6800271c2be2a0447510eb4e9b67edd9521859a4d565310617c4b359eb6799fe" +SRC_URI[sha256sum] = "23bed40114b03ad640c95bfe72cc879ed2f941d0d481b77b5204a1fc567fa93c" UPSTREAM_CHECK_GITTAGREGEX = "mesa-(?P\d+(\.\d+)+)" diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb b/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb deleted file mode 100644 index 96e8aa38d..000000000 --- a/poky/meta/recipes-graphics/mesa/mesa_20.1.4.bb +++ /dev/null @@ -1,2 +0,0 @@ -require ${BPN}.inc - diff --git a/poky/meta/recipes-graphics/mesa/mesa_20.1.6.bb b/poky/meta/recipes-graphics/mesa/mesa_20.1.6.bb new file mode 100644 index 000000000..96e8aa38d --- /dev/null +++ b/poky/meta/recipes-graphics/mesa/mesa_20.1.6.bb @@ -0,0 +1,2 @@ +require ${BPN}.inc + diff --git a/poky/meta/recipes-graphics/pango/pango/0001-Fix-build-reproducibility.patch b/poky/meta/recipes-graphics/pango/pango/0001-Fix-build-reproducibility.patch deleted file mode 100644 index 03abf8763..000000000 --- a/poky/meta/recipes-graphics/pango/pango/0001-Fix-build-reproducibility.patch +++ /dev/null @@ -1,31 +0,0 @@ -From f8b32901981a06a8db4169b82a704dcf7e8b6560 Mon Sep 17 00:00:00 2001 -From: Joshua Watt -Date: Wed, 20 Nov 2019 15:43:57 -0600 -Subject: [PATCH] Fix build reproducibility - -Changes the comment in pango-enum-types.c to reference the file basename -instead of the full path. This ensures that the generated file is -reproducible when it is included in source packages meant for debugging. 
- -Upstream-Status: Pending [https://gitlab.gnome.org/GNOME/pango/merge_requests/159] -Signed-off-by: Joshua Watt ---- - pango/pango-enum-types.c.template | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/pango/pango-enum-types.c.template b/pango/pango-enum-types.c.template -index d922c691..0d55ef74 100644 ---- a/pango/pango-enum-types.c.template -+++ b/pango/pango-enum-types.c.template -@@ -6,7 +6,7 @@ - /*** END file-header ***/ - - /*** BEGIN file-production ***/ --/* enumerations from "@filename@" */ -+/* enumerations from "@basename@" */ - /*** END file-production ***/ - - /*** BEGIN value-header ***/ --- -2.23.0 - diff --git a/poky/meta/recipes-graphics/pango/pango_1.44.7.bb b/poky/meta/recipes-graphics/pango/pango_1.44.7.bb deleted file mode 100644 index 0ee8abcc0..000000000 --- a/poky/meta/recipes-graphics/pango/pango_1.44.7.bb +++ /dev/null @@ -1,49 +0,0 @@ -SUMMARY = "Framework for layout and rendering of internationalized text" -DESCRIPTION = "Pango is a library for laying out and rendering of text, \ -with an emphasis on internationalization. Pango can be used anywhere \ -that text layout is needed, though most of the work on Pango so far has \ -been done in the context of the GTK+ widget toolkit. Pango forms the \ -core of text and font handling for GTK+-2.x." -HOMEPAGE = "http://www.pango.org/" -BUGTRACKER = "http://bugzilla.gnome.org" -SECTION = "libs" -LICENSE = "LGPLv2.0+" - -LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7" - -GNOMEBASEBUILDCLASS = "meson" - -inherit gnomebase gtk-doc ptest-gnome upstream-version-is-even gobject-introspection - -SRC_URI += "file://run-ptest \ - file://0001-Fix-build-reproducibility.patch" -SRC_URI[archive.md5sum] = "c75cc5b833d934d98e83343832e20e5d" -SRC_URI[archive.sha256sum] = "66a5b6cc13db73efed67b8e933584509f8ddb7b10a8a40c3850ca4a985ea1b1f" - -DEPENDS = "glib-2.0 glib-2.0-native fontconfig freetype virtual/libiconv cairo harfbuzz fribidi" - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)} \ - ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}" - -PACKAGECONFIG[x11] = ",,virtual/libx11 libxft" -PACKAGECONFIG[tests] = "-Dinstall-tests=true, -Dinstall-tests=false" -PACKAGECONFIG[thai] = ",,libthai" - -GTKDOC_MESON_OPTION = "gtk_doc" -GIR_MESON_OPTION = 'introspection' - -do_configure_prepend_toolchain-clang() { - sed -i -e "/Werror=implicit-fallthrough/d" ${S}/meson.build -} - -LEAD_SONAME = "libpango-1.0*" - -FILES_${PN} = "${bindir}/* ${libdir}/libpango*${SOLIBS}" - -RDEPENDS_${PN}-ptest += "cantarell-fonts" -RDEPENDS_${PN}-ptest_append_libc-glibc = " locale-base-en-us" - -RPROVIDES_${PN} += "pango-modules pango-module-indic-lang \ - pango-module-basic-fc pango-module-arabic-lang" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/pango/pango_1.46.1.bb b/poky/meta/recipes-graphics/pango/pango_1.46.1.bb new file mode 100644 index 000000000..cc6cc1e35 --- /dev/null +++ b/poky/meta/recipes-graphics/pango/pango_1.46.1.bb @@ -0,0 +1,47 @@ +SUMMARY = "Framework for layout and rendering of internationalized text" +DESCRIPTION = "Pango is a library for laying out and rendering of text, \ +with an emphasis on internationalization. Pango can be used anywhere \ +that text layout is needed, though most of the work on Pango so far has \ +been done in the context of the GTK+ widget toolkit. Pango forms the \ +core of text and font handling for GTK+-2.x." 
+HOMEPAGE = "http://www.pango.org/" +BUGTRACKER = "http://bugzilla.gnome.org" +SECTION = "libs" +LICENSE = "LGPLv2.0+" + +LIC_FILES_CHKSUM = "file://COPYING;md5=3bf50002aefd002f49e7bb854063f7e7" + +GNOMEBASEBUILDCLASS = "meson" + +inherit gnomebase gtk-doc ptest-gnome upstream-version-is-even gobject-introspection + +SRC_URI += " file://run-ptest" +SRC_URI[archive.sha256sum] = "fe516b10711bbb6fd75011d66dd08fabfce18f7931aed7415136d53c4aadf1c5" + +DEPENDS = "glib-2.0 glib-2.0-native fontconfig freetype virtual/libiconv cairo harfbuzz fribidi" + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)} \ + ${@bb.utils.contains('PTEST_ENABLED', '1', 'tests', '', d)}" + +PACKAGECONFIG[x11] = ",,virtual/libx11 libxft" +PACKAGECONFIG[tests] = "-Dinstall-tests=true, -Dinstall-tests=false" +PACKAGECONFIG[thai] = ",,libthai" + +GTKDOC_MESON_OPTION = "gtk_doc" +GIR_MESON_OPTION = 'introspection' + +do_configure_prepend_toolchain-clang() { + sed -i -e "/Werror=implicit-fallthrough/d" ${S}/meson.build +} + +LEAD_SONAME = "libpango-1.0*" + +FILES_${PN} = "${bindir}/* ${libdir}/libpango*${SOLIBS}" + +RDEPENDS_${PN}-ptest += "cantarell-fonts" +RDEPENDS_${PN}-ptest_append_libc-glibc = " locale-base-en-us" + +RPROVIDES_${PN} += "pango-modules pango-module-indic-lang \ + pango-module-basic-fc pango-module-arabic-lang" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/piglit/piglit_git.bb b/poky/meta/recipes-graphics/piglit/piglit_git.bb index e531ffc40..d35117a4f 100644 --- a/poky/meta/recipes-graphics/piglit/piglit_git.bb +++ b/poky/meta/recipes-graphics/piglit/piglit_git.bb @@ -10,7 +10,7 @@ SRC_URI = "git://gitlab.freedesktop.org/mesa/piglit.git;protocol=https \ " UPSTREAM_CHECK_COMMITS = "1" -SRCREV = "7d76fca56e8ca72ab6809f6f0234b5971af5690a" +SRCREV = "2a6a8f954dcd3f748055f28e019d2eca64635332" # (when PV goes above 1.0 remove the trailing r) PV = "1.0+gitr${SRCPV}" diff --git a/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb b/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb deleted file mode 100644 index baf5c1d16..000000000 --- a/poky/meta/recipes-graphics/wayland/libinput_1.16.0.bb +++ /dev/null @@ -1,50 +0,0 @@ -SUMMARY = "Library to handle input devices in Wayland compositors" -DESCRIPTION = "libinput is a library to handle input devices in Wayland \ -compositors and to provide a generic X.Org input driver. It provides \ -device detection, device handling, input device event processing and \ -abstraction so minimize the amount of custom input code compositors need to \ -provide the common set of functionality that users expect." 
-HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libinput/" -SECTION = "libs" - -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=1f2ea9ebff3a2c6d458faf58492efb63" - -DEPENDS = "libevdev udev mtdev libcheck" - -SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \ - file://run-ptest \ - file://determinism.patch \ - " -SRC_URI[md5sum] = "b518dae7f603040872739216971ee97b" -SRC_URI[sha256sum] = "83f6d0c94e5e0dd87094ce73f0edb631919617d24a60ee0ab9bd9197411d76e8" - -UPSTREAM_CHECK_REGEX = "libinput-(?P\d+\.\d+\.(?!9\d+)\d+)" - -inherit meson pkgconfig lib_package ptest - -# Patch out build directory, otherwise it leaks into ptest binary -do_configure_append() { - sed -i -e "s,${WORKDIR},,g" config.h - if [ -e "litest-config.h" ]; then - sed -i -e "s,${WORKDIR},,g" litest-config.h - fi -} - -PACKAGECONFIG ??= "" -PACKAGECONFIG[libwacom] = "-Dlibwacom=true,-Dlibwacom=false,libwacom" -PACKAGECONFIG[gui] = "-Ddebug-gui=true,-Ddebug-gui=false,cairo gtk+3" - -UDEVDIR = "`pkg-config --variable=udevdir udev`" - -EXTRA_OEMESON += "-Dudev-dir=${UDEVDIR} \ - -Ddocumentation=false \ - ${@bb.utils.contains('PTEST_ENABLED', '1', '-Dtests=true -Dinstall-tests=true', '-Dtests=false -Dinstall-tests=false', d)} \ - -Dzshcompletiondir=no" - -# package name changed in 1.8.1 upgrade: make sure package upgrades work -RPROVIDES_${PN} = "libinput" -RREPLACES_${PN} = "libinput" -RCONFLICTS_${PN} = "libinput" - -FILES_${PN}-ptest += "${libexecdir}/libinput/libinput-test-suite" diff --git a/poky/meta/recipes-graphics/wayland/libinput_1.16.1.bb b/poky/meta/recipes-graphics/wayland/libinput_1.16.1.bb new file mode 100644 index 000000000..149760918 --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/libinput_1.16.1.bb @@ -0,0 +1,49 @@ +SUMMARY = "Library to handle input devices in Wayland compositors" +DESCRIPTION = "libinput is a library to handle input devices in Wayland \ +compositors and to provide a generic X.Org input driver. It provides \ +device detection, device handling, input device event processing and \ +abstraction so minimize the amount of custom input code compositors need to \ +provide the common set of functionality that users expect." 
+HOMEPAGE = "http://www.freedesktop.org/wiki/Software/libinput/" +SECTION = "libs" + +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=1f2ea9ebff3a2c6d458faf58492efb63" + +DEPENDS = "libevdev udev mtdev libcheck" + +SRC_URI = "http://www.freedesktop.org/software/${BPN}/${BP}.tar.xz \ + file://run-ptest \ + file://determinism.patch \ + " +SRC_URI[sha256sum] = "7ba7d1aeedd15168bb21d17e9e628aa1c27957963a423a3fea3938a501758539" + +UPSTREAM_CHECK_REGEX = "libinput-(?P\d+\.\d+\.(?!9\d+)\d+)" + +inherit meson pkgconfig lib_package ptest + +# Patch out build directory, otherwise it leaks into ptest binary +do_configure_append() { + sed -i -e "s,${WORKDIR},,g" config.h + if [ -e "litest-config.h" ]; then + sed -i -e "s,${WORKDIR},,g" litest-config.h + fi +} + +PACKAGECONFIG ??= "" +PACKAGECONFIG[libwacom] = "-Dlibwacom=true,-Dlibwacom=false,libwacom" +PACKAGECONFIG[gui] = "-Ddebug-gui=true,-Ddebug-gui=false,cairo gtk+3" + +UDEVDIR = "`pkg-config --variable=udevdir udev`" + +EXTRA_OEMESON += "-Dudev-dir=${UDEVDIR} \ + -Ddocumentation=false \ + ${@bb.utils.contains('PTEST_ENABLED', '1', '-Dtests=true -Dinstall-tests=true', '-Dtests=false -Dinstall-tests=false', d)} \ + -Dzshcompletiondir=no" + +# package name changed in 1.8.1 upgrade: make sure package upgrades work +RPROVIDES_${PN} = "libinput" +RREPLACES_${PN} = "libinput" +RCONFLICTS_${PN} = "libinput" + +FILES_${PN}-ptest += "${libexecdir}/libinput/libinput-test-suite" diff --git a/poky/meta/recipes-graphics/wayland/weston-init.bb b/poky/meta/recipes-graphics/wayland/weston-init.bb index 40aa76295..07cec75fb 100644 --- a/poky/meta/recipes-graphics/wayland/weston-init.bb +++ b/poky/meta/recipes-graphics/wayland/weston-init.bb @@ -8,11 +8,19 @@ SRC_URI = "file://init \ file://weston.env \ file://weston.ini \ file://weston@.service \ + file://weston@.socket \ file://71-weston-drm.rules \ + file://weston-autologin \ file://weston-start" S = "${WORKDIR}" +DEFAULTBACKEND ??= "" +DEFAULTBACKEND_qemuall ?= "fbdev" +DEFAULTBACKEND_qemuarm64 = "drm" +DEFAULTBACKEND_qemux86 = "drm" +DEFAULTBACKEND_qemux86-64 = "drm" + do_install() { install -Dm755 ${WORKDIR}/init ${D}/${sysconfdir}/init.d/weston install -D -p -m0644 ${WORKDIR}/weston.ini ${D}${sysconfdir}/xdg/weston/weston.ini @@ -20,6 +28,10 @@ do_install() { # Install Weston systemd service and accompanying udev rule install -D -p -m0644 ${WORKDIR}/weston@.service ${D}${systemd_system_unitdir}/weston@.service + install -D -p -m0644 ${WORKDIR}/weston@.socket ${D}${systemd_system_unitdir}/weston@.socket + if [ "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" ]; then + install -D -p -m0644 ${WORKDIR}/weston-autologin ${D}${sysconfdir}/pam.d/weston-autologin + fi sed -i -e s:/etc:${sysconfdir}:g \ -e s:/usr/bin:${bindir}:g \ -e s:/var:${localstatedir}:g \ @@ -30,14 +42,9 @@ do_install() { install -Dm755 ${WORKDIR}/weston-start ${D}${bindir}/weston-start sed -i 's,@DATADIR@,${datadir},g' ${D}${bindir}/weston-start sed -i 's,@LOCALSTATEDIR@,${localstatedir},g' ${D}${bindir}/weston-start -} - -do_install_append_libc-musl_qemux86() { - echo "WESTON_DISABLE_ATOMIC=Y" >> ${D}${sysconfdir}/default/weston -} - -do_install_append_libc-musl_qemux86-64() { - echo "WESTON_DISABLE_ATOMIC=Y" >> ${D}${sysconfdir}/default/weston + if [ -n "${DEFAULTBACKEND}" ]; then + sed -i -e "/^\[core\]/a backend=${DEFAULTBACKEND}-backend.so" ${D}${sysconfdir}/xdg/weston/weston.ini + fi } inherit update-rc.d features_check systemd @@ -50,7 +57,7 @@ RDEPENDS_${PN} = "weston kbd" INITSCRIPT_NAME = "weston" INITSCRIPT_PARAMS = 
"start 9 5 2 . stop 20 0 1 6 ." -FILES_${PN} += "${sysconfdir}/xdg/weston/weston.ini ${systemd_system_unitdir}/weston@.service ${sysconfdir}/default/weston" +FILES_${PN} += "${sysconfdir}/xdg/weston/weston.ini ${systemd_system_unitdir}/weston@.service ${systemd_system_unitdir}/weston@.socket ${sysconfdir}/default/weston ${sysconfdir}/pam.d/" CONFFILES_${PN} += "${sysconfdir}/xdg/weston/weston.ini ${sysconfdir}/default/weston" diff --git a/poky/meta/recipes-graphics/wayland/weston-init/qemuall/weston.ini b/poky/meta/recipes-graphics/wayland/weston-init/qemuall/weston.ini deleted file mode 100644 index 17ebd7fda..000000000 --- a/poky/meta/recipes-graphics/wayland/weston-init/qemuall/weston.ini +++ /dev/null @@ -1,2 +0,0 @@ -[core] -backend=fbdev-backend.so diff --git a/poky/meta/recipes-graphics/wayland/weston-init/qemux86-64/weston.ini b/poky/meta/recipes-graphics/wayland/weston-init/qemux86-64/weston.ini deleted file mode 100644 index e69de29bb..000000000 diff --git a/poky/meta/recipes-graphics/wayland/weston-init/qemux86/weston.ini b/poky/meta/recipes-graphics/wayland/weston-init/qemux86/weston.ini deleted file mode 100644 index e69de29bb..000000000 diff --git a/poky/meta/recipes-graphics/wayland/weston-init/weston-autologin b/poky/meta/recipes-graphics/wayland/weston-init/weston-autologin new file mode 100644 index 000000000..f6e6d106d --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/weston-init/weston-autologin @@ -0,0 +1,11 @@ +auth required pam_nologin.so +auth required pam_unix.so try_first_pass nullok + +account required pam_nologin.so +account required pam_unix.so + +session required pam_env.so +session required pam_unix.so +-session optional pam_systemd.so type=wayland class=user desktop=weston +-session optional pam_loginuid.so + diff --git a/poky/meta/recipes-graphics/wayland/weston-init/weston.ini b/poky/meta/recipes-graphics/wayland/weston-init/weston.ini index 1e6dff68f..b48726d59 100644 --- a/poky/meta/recipes-graphics/wayland/weston-init/weston.ini +++ b/poky/meta/recipes-graphics/wayland/weston-init/weston.ini @@ -68,8 +68,8 @@ require-input=false #min_accel_factor = 0.16 #max_accel_factor = 1.0 -#[screen-share] -#command=/usr/bin/weston --backend=rdp-backend.so --shell=fullscreen-shell.so --no-clients-resize +[screen-share] +command=/usr/bin/weston --backend=rdp-backend.so --shell=fullscreen-shell.so --no-clients-resize #[xwayland] #path=/usr/bin/Xwayland diff --git a/poky/meta/recipes-graphics/wayland/weston-init/weston@.service b/poky/meta/recipes-graphics/wayland/weston-init/weston@.service index 39e193014..0a1df15bd 100644 --- a/poky/meta/recipes-graphics/wayland/weston-init/weston@.service +++ b/poky/meta/recipes-graphics/wayland/weston-init/weston@.service @@ -1,15 +1,64 @@ +# This is a system unit for launching Weston with auto-login as the +# user configured here. +# +# Weston must be built with systemd support, and your weston.ini must load +# the plugin systemd-notify.so. [Unit] -Description=Weston Wayland Compositor -RequiresMountsFor=/run -Conflicts=plymouth-quit.service -After=systemd-user-sessions.service plymouth-quit-wait.service +Description=Weston, a Wayland compositor, as a system service +Documentation=man:weston(1) man:weston.ini(5) +Documentation=http://wayland.freedesktop.org/ + +# Make sure we are started after logins are permitted. +After=systemd-user-sessions.service + +# If Plymouth is used, we want to start when it is on its way out. +After=plymouth-quit-wait.service + +# D-Bus is necessary for contacting logind. Logind is required. 
+Wants=dbus.socket +After=dbus.socket + +# Since we are part of the graphical session, make sure we are started before +# it is complete. +Before=graphical.target + +# Prevent starting on systems without virtual consoles, Weston requires one +# for now. +ConditionPathExists=/dev/tty0 [Service] -User=%i -PAMName=login -EnvironmentFile=-/etc/default/weston +# Requires systemd-notify.so Weston plugin. +Type=notify +ExecStart=/usr/bin/weston --modules=systemd-notify.so + +# Optional watchdog setup +TimeoutStartSec=60 +WatchdogSec=20 + +# The user to run Weston as. +User=%I + +# Make sure working directory is users home directory +WorkingDirectory=/home/%i + +# Set up a full user session for the user, required by Weston. +PAMName=weston-autologin + +# A virtual terminal is needed. +TTYPath=/dev/tty7 +TTYReset=yes +TTYVHangup=yes +TTYVTDisallocate=yes + +# Fail to start if not controlling the tty. +StandardInput=tty-fail +StandardOutput=journal StandardError=journal -PermissionsStartOnly=true -IgnoreSIGPIPE=no -ExecStart=/usr/bin/weston-start -v -e -- $OPTARGS +# Log this user with utmp, letting it show up with commands 'w' and 'who'. +UtmpIdentifier=tty7 +UtmpMode=user + +[Install] +WantedBy=graphical.target +DefaultInstance=tty7 diff --git a/poky/meta/recipes-graphics/wayland/weston-init/weston@.socket b/poky/meta/recipes-graphics/wayland/weston-init/weston@.socket new file mode 100644 index 000000000..f1790d74a --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/weston-init/weston@.socket @@ -0,0 +1,10 @@ +[Unit] +Description=Weston Wayland socket +After=user-runtime-dir@1000.service + +[Socket] +ListenStream=/run/user/1000/wayland-%I + +[Install] +WantedBy=sockets.target + diff --git a/poky/meta/recipes-graphics/wayland/weston/0001-tests-include-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch b/poky/meta/recipes-graphics/wayland/weston/0001-tests-include-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch new file mode 100644 index 000000000..6fe86ff3f --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/weston/0001-tests-include-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch @@ -0,0 +1,47 @@ +From 58760e09eed662a72da939ff4802d605489cff8e Mon Sep 17 00:00:00 2001 +From: Denys Dmytriyenko +Date: Tue, 8 Sep 2020 19:37:42 -0400 +Subject: [PATCH] tests: include fcntl.h for open(), O_RDWR, O_CLOEXEC and + O_CREAT + +musl libc (unlike glibc) requires explicitly incuding fcntl.h to define open(), +O_RDWR, O_CLOEXEC and O_CREAT. Otherwise the build fails with the errors: + +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c: In function 'wait_for_lock': +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c:135:7: warning: implicit declaration of function 'open'; did you mean 'popen'? 
[-Wimplicit-function-declaration] +| 135 | fd = open(lock_path, O_RDWR | O_CLOEXEC | O_CREAT, 00700); +| | ^~~~ +| | popen +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c:135:23: error: 'O_RDWR' undeclared (first use in this function) +| 135 | fd = open(lock_path, O_RDWR | O_CLOEXEC | O_CREAT, 00700); +| | ^~~~~~ +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c:135:23: note: each undeclared identifier is reported only once for each function it appears in +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c:135:32: error: 'O_CLOEXEC' undeclared (first use in this function) +| 135 | fd = open(lock_path, O_RDWR | O_CLOEXEC | O_CREAT, 00700); +| | ^~~~~~~~~ +| ../weston-9.0.0/tests/weston-test-fixture-compositor.c:135:44: error: 'O_CREAT' undeclared (first use in this function) +| 135 | fd = open(lock_path, O_RDWR | O_CLOEXEC | O_CREAT, 00700); +| | ^~~~~~~ + +Upstream-Status: Submitted [https://gitlab.freedesktop.org/wayland/weston/-/merge_requests/493/diffs?commit_id=b10c0e843dcb8148bbe869bb15261955b94ac98c] + +Signed-off-by: Denys Dmytriyenko +--- + tests/weston-test-fixture-compositor.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/tests/weston-test-fixture-compositor.c b/tests/weston-test-fixture-compositor.c +index 0c9855f..e0e32c9 100644 +--- a/tests/weston-test-fixture-compositor.c ++++ b/tests/weston-test-fixture-compositor.c +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + + #include "shared/helpers.h" + #include "weston-test-fixture-compositor.h" +-- +2.7.4 + diff --git a/poky/meta/recipes-graphics/wayland/weston/0001-weston-launch-Provide-a-default-version-that-doesn-t.patch b/poky/meta/recipes-graphics/wayland/weston/0001-weston-launch-Provide-a-default-version-that-doesn-t.patch index 62b864c13..3279a728c 100644 --- a/poky/meta/recipes-graphics/wayland/weston/0001-weston-launch-Provide-a-default-version-that-doesn-t.patch +++ b/poky/meta/recipes-graphics/wayland/weston/0001-weston-launch-Provide-a-default-version-that-doesn-t.patch @@ -1,7 +1,8 @@ -From 5f2d71998eb77068cbaee2d468cbb296a42d5739 Mon Sep 17 00:00:00 2001 +From a1548c742bf2dedbb47282d8a00407b60bbab669 Mon Sep 17 00:00:00 2001 From: Tom Hochstein Date: Wed, 22 Feb 2017 15:53:30 +0200 Subject: [PATCH] weston-launch: Provide a default version that doesn't require + PAM weston-launch requires PAM for starting weston as a non-root user. 
@@ -57,7 +58,7 @@ index 08d23ec..cb9fd3f 100644 install: true ) diff --git a/libweston/weston-launch.c b/libweston/weston-launch.c -index 8a711b4..54c567a 100644 +index 521cb2c..2d42d33 100644 --- a/libweston/weston-launch.c +++ b/libweston/weston-launch.c @@ -51,7 +51,9 @@ @@ -97,7 +98,7 @@ index 8a711b4..54c567a 100644 static int setup_launcher_socket(struct weston_launch *wl) -@@ -431,6 +437,7 @@ quit(struct weston_launch *wl, int status) +@@ -466,6 +472,7 @@ quit(struct weston_launch *wl, int status) close(wl->signalfd); close(wl->sock[0]); @@ -105,15 +106,15 @@ index 8a711b4..54c567a 100644 if (wl->new_user) { err = pam_close_session(wl->ph, 0); if (err) -@@ -438,6 +445,7 @@ quit(struct weston_launch *wl, int status) +@@ -473,6 +480,7 @@ quit(struct weston_launch *wl, int status) err, pam_strerror(wl->ph, err)); pam_end(wl->ph, err); } +#endif - if (ioctl(wl->tty, KDSKBMUTE, 0) && - ioctl(wl->tty, KDSKBMODE, wl->kb_mode)) -@@ -666,6 +674,7 @@ setup_session(struct weston_launch *wl, char **child_argv) + /* + * Get a fresh handle to the tty as the previous one is in +@@ -710,6 +718,7 @@ setup_session(struct weston_launch *wl, char **child_argv) setenv("HOME", wl->pw->pw_dir, 1); setenv("SHELL", wl->pw->pw_shell, 1); @@ -121,7 +122,7 @@ index 8a711b4..54c567a 100644 env = pam_getenvlist(wl->ph); if (env) { for (i = 0; env[i]; ++i) { -@@ -674,6 +683,7 @@ setup_session(struct weston_launch *wl, char **child_argv) +@@ -718,6 +727,7 @@ setup_session(struct weston_launch *wl, char **child_argv) } free(env); } @@ -129,7 +130,7 @@ index 8a711b4..54c567a 100644 /* * We open a new session, so it makes sense -@@ -745,8 +755,10 @@ static void +@@ -789,8 +799,10 @@ static void help(const char *name) { fprintf(stderr, "Usage: %s [args...] [-- [weston args..]]\n", name); @@ -140,7 +141,7 @@ index 8a711b4..54c567a 100644 fprintf(stderr, " -t, --tty Start session on alternative tty,\n" " e.g. 
-t /dev/tty4, requires -u option.\n"); fprintf(stderr, " -v, --verbose Be verbose\n"); -@@ -760,7 +772,9 @@ main(int argc, char *argv[]) +@@ -804,7 +816,9 @@ main(int argc, char *argv[]) int i, c; char *tty = NULL; struct option opts[] = { @@ -150,7 +151,7 @@ index 8a711b4..54c567a 100644 { "tty", required_argument, NULL, 't' }, { "verbose", no_argument, NULL, 'v' }, { "help", no_argument, NULL, 'h' }, -@@ -772,11 +786,16 @@ main(int argc, char *argv[]) +@@ -816,11 +830,16 @@ main(int argc, char *argv[]) while ((c = getopt_long(argc, argv, "u:t:vh", opts, &i)) != -1) { switch (c) { case 'u': @@ -167,7 +168,7 @@ index 8a711b4..54c567a 100644 break; case 't': tty = optarg; -@@ -828,8 +847,10 @@ main(int argc, char *argv[]) +@@ -872,8 +891,10 @@ main(int argc, char *argv[]) if (setup_tty(&wl, tty) < 0) exit(EXIT_FAILURE); @@ -179,7 +180,7 @@ index 8a711b4..54c567a 100644 if (setup_launcher_socket(&wl) < 0) exit(EXIT_FAILURE); diff --git a/meson_options.txt b/meson_options.txt -index c862ecc..73ef2c3 100644 +index 239bd2d..99e4ec3 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -73,6 +73,13 @@ option( diff --git a/poky/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch b/poky/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch new file mode 100644 index 000000000..a4444e5d1 --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/weston/dont-use-plane-add-prop.patch @@ -0,0 +1,23 @@ +Fix atomic modesetting with musl + +atomic modesetting seems to fail with drm weston backend and this patch fixes +it, below errors are seen before weston exits + +atomic: couldn't commit new state: Invalid argument + +Upstream-Status: Submitted [https://gitlab.freedesktop.org/wayland/weston/-/issues/158] +Signed-off-by: Khem Raj + +--- a/libweston/backend-drm/kms.c ++++ b/libweston/backend-drm/kms.c +@@ -1168,8 +1168,8 @@ drm_pending_state_apply_atomic(struct dr + wl_list_for_each(plane, &b->plane_list, link) { + drm_debug(b, "\t\t[atomic] starting with plane %lu disabled\n", + (unsigned long) plane->plane_id); +- plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0); +- plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0); ++ //plane_add_prop(req, plane, WDRM_PLANE_CRTC_ID, 0); ++ //plane_add_prop(req, plane, WDRM_PLANE_FB_ID, 0); + } + + flags |= DRM_MODE_ATOMIC_ALLOW_MODESET; diff --git a/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb b/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb deleted file mode 100644 index 8fef86482..000000000 --- a/poky/meta/recipes-graphics/wayland/weston_8.0.0.bb +++ /dev/null @@ -1,128 +0,0 @@ -SUMMARY = "Weston, a Wayland compositor" -DESCRIPTION = "Weston is the reference implementation of a Wayland compositor" -HOMEPAGE = "http://wayland.freedesktop.org" -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=d79ee9e66bb0f95d3386a7acae780b70 \ - file://libweston/compositor.c;endline=27;md5=6c53bbbd99273f4f7c4affa855c33c0a" - -SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \ - file://weston.png \ - file://weston.desktop \ - file://xwayland.weston-start \ - file://0001-weston-launch-Provide-a-default-version-that-doesn-t.patch \ -" -SRC_URI[md5sum] = "53e4810d852df0601d01fd986a5b22b3" -SRC_URI[sha256sum] = "7518b49b2eaa1c3091f24671bdcc124fd49fc8f1af51161927afa4329c027848" - -UPSTREAM_CHECK_URI = "https://wayland.freedesktop.org/releases.html" - -inherit meson pkgconfig useradd features_check -# depends on virtual/egl -REQUIRED_DISTRO_FEATURES = "opengl" - -DEPENDS = "libxkbcommon gdk-pixbuf pixman cairo glib-2.0" 
-DEPENDS += "wayland wayland-protocols libinput virtual/egl pango wayland-native" - -WESTON_MAJOR_VERSION = "${@'.'.join(d.getVar('PV').split('.')[0:1])}" - -EXTRA_OEMESON += "-Dbackend-default=auto -Dbackend-rdp=false -Dpipewire=false" - -PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'kms fbdev wayland egl clients', '', d)} \ - ${@bb.utils.contains('DISTRO_FEATURES', 'x11 wayland', 'xwayland', '', d)} \ - ${@bb.utils.filter('DISTRO_FEATURES', 'pam systemd x11', d)} \ - ${@bb.utils.contains_any('DISTRO_FEATURES', 'wayland x11', '', 'headless', d)} \ - launch \ - image-jpeg \ - screenshare \ - shell-desktop \ - shell-fullscreen \ - shell-ivi" - -# -# Compositor choices -# -# Weston on KMS -PACKAGECONFIG[kms] = "-Dbackend-drm=true,-Dbackend-drm=false,drm udev virtual/egl virtual/libgles2 virtual/libgbm mtdev" -# Weston on Wayland (nested Weston) -PACKAGECONFIG[wayland] = "-Dbackend-wayland=true,-Dbackend-wayland=false,virtual/egl virtual/libgles2" -# Weston on X11 -PACKAGECONFIG[x11] = "-Dbackend-x11=true,-Dbackend-x11=false,virtual/libx11 libxcb libxcb libxcursor cairo" -# Headless Weston -PACKAGECONFIG[headless] = "-Dbackend-headless=true,-Dbackend-headless=false" -# Weston on framebuffer -PACKAGECONFIG[fbdev] = "-Dbackend-fbdev=true,-Dbackend-fbdev=false,udev mtdev" -# weston-launch -PACKAGECONFIG[launch] = "-Dweston-launch=true,-Dweston-launch=false,drm" -# VA-API desktop recorder -PACKAGECONFIG[vaapi] = "-Dbackend-drm-screencast-vaapi=true,-Dbackend-drm-screencast-vaapi=false,libva" -# Weston with EGL support -PACKAGECONFIG[egl] = "-Drenderer-gl=true,-Drenderer-gl=false,virtual/egl" -# Weston with lcms support -PACKAGECONFIG[lcms] = "-Dcolor-management-lcms=true,-Dcolor-management-lcms=false,lcms" -# Weston with webp support -PACKAGECONFIG[webp] = "-Dimage-webp=true,-Dimage-webp=false,libwebp" -# Weston with systemd-login support -PACKAGECONFIG[systemd] = "-Dsystemd=true -Dlauncher-logind=true,-Dsystemd=false -Dlauncher-logind=false,systemd dbus" -# Weston with Xwayland support (requires X11 and Wayland) -PACKAGECONFIG[xwayland] = "-Dxwayland=true,-Dxwayland=false" -# colord CMS support -PACKAGECONFIG[colord] = "-Dcolor-management-colord=true,-Dcolor-management-colord=false,colord" -# Clients support -PACKAGECONFIG[clients] = "-Dsimple-clients=all -Ddemo-clients=true,-Dsimple-clients= -Ddemo-clients=false" -# Virtual remote output with GStreamer on DRM backend -PACKAGECONFIG[remoting] = "-Dremoting=true,-Dremoting=false,gstreamer-1.0" -# Weston with PAM support -PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam" -# Weston with screen-share support -PACKAGECONFIG[screenshare] = "-Dscreenshare=true,-Dscreenshare=false" -# Traditional desktop shell -PACKAGECONFIG[shell-desktop] = "-Dshell-desktop=true,-Dshell-desktop=false" -# Fullscreen shell -PACKAGECONFIG[shell-fullscreen] = "-Dshell-fullscreen=true,-Dshell-fullscreen=false" -# In-Vehicle Infotainment (IVI) shell -PACKAGECONFIG[shell-ivi] = "-Dshell-ivi=true,-Dshell-ivi=false" -# JPEG image loading support -PACKAGECONFIG[image-jpeg] = "-Dimage-jpeg=true,-Dimage-jpeg=false, jpeg" - -do_install_append() { - # Weston doesn't need the .la files to load modules, so wipe them - rm -f ${D}/${libdir}/libweston-${WESTON_MAJOR_VERSION}/*.la - - # If X11, ship a desktop file to launch it - if [ "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)}" ]; then - install -d ${D}${datadir}/applications - install ${WORKDIR}/weston.desktop ${D}${datadir}/applications - - install -d ${D}${datadir}/icons/hicolor/48x48/apps - install 
${WORKDIR}/weston.png ${D}${datadir}/icons/hicolor/48x48/apps - fi - - if [ "${@bb.utils.contains('PACKAGECONFIG', 'xwayland', 'yes', 'no', d)}" = "yes" ]; then - install -Dm 644 ${WORKDIR}/xwayland.weston-start ${D}${datadir}/weston-start/xwayland - fi - - if [ "${@bb.utils.contains('PACKAGECONFIG', 'launch', 'yes', 'no', d)}" = "yes" ]; then - chmod u+s ${D}${bindir}/weston-launch - fi -} - -PACKAGES += "${@bb.utils.contains('PACKAGECONFIG', 'xwayland', '${PN}-xwayland', '', d)} \ - libweston-${WESTON_MAJOR_VERSION} ${PN}-examples" - -FILES_${PN}-dev += "${libdir}/${BPN}/libexec_weston.so" -FILES_${PN} = "${bindir}/weston ${bindir}/weston-terminal ${bindir}/weston-info ${bindir}/weston-launch ${bindir}/wcap-decode ${libexecdir} ${libdir}/${BPN}/*.so* ${datadir}" - -FILES_libweston-${WESTON_MAJOR_VERSION} = "${libdir}/lib*${SOLIBS} ${libdir}/libweston-${WESTON_MAJOR_VERSION}/*.so" -SUMMARY_libweston-${WESTON_MAJOR_VERSION} = "Helper library for implementing 'wayland window managers'." - -FILES_${PN}-examples = "${bindir}/*" - -FILES_${PN}-xwayland = "${libdir}/libweston-${WESTON_MAJOR_VERSION}/xwayland.so" -RDEPENDS_${PN}-xwayland += "xserver-xorg-xwayland" - -RDEPENDS_${PN} += "xkeyboard-config" -RRECOMMENDS_${PN} = "weston-init liberation-fonts" -RRECOMMENDS_${PN}-dev += "wayland-protocols" - -USERADD_PACKAGES = "${PN}" -GROUPADD_PARAM_${PN} = "--system weston-launch" diff --git a/poky/meta/recipes-graphics/wayland/weston_9.0.0.bb b/poky/meta/recipes-graphics/wayland/weston_9.0.0.bb new file mode 100644 index 000000000..0b037a377 --- /dev/null +++ b/poky/meta/recipes-graphics/wayland/weston_9.0.0.bb @@ -0,0 +1,131 @@ +SUMMARY = "Weston, a Wayland compositor" +DESCRIPTION = "Weston is the reference implementation of a Wayland compositor" +HOMEPAGE = "http://wayland.freedesktop.org" +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=d79ee9e66bb0f95d3386a7acae780b70 \ + file://libweston/compositor.c;endline=27;md5=6c53bbbd99273f4f7c4affa855c33c0a" + +SRC_URI = "https://wayland.freedesktop.org/releases/${BPN}-${PV}.tar.xz \ + file://weston.png \ + file://weston.desktop \ + file://xwayland.weston-start \ + file://0001-weston-launch-Provide-a-default-version-that-doesn-t.patch \ + file://0001-tests-include-fcntl.h-for-open-O_RDWR-O_CLOEXEC-and-.patch \ +" + +SRC_URI_append_libc-musl = " file://dont-use-plane-add-prop.patch " + +SRC_URI[sha256sum] = "5cf5d6ce192e0eb15c1fc861a436bf21b5bb3b91dbdabbdebe83e1f83aa098fe" + +UPSTREAM_CHECK_URI = "https://wayland.freedesktop.org/releases.html" + +inherit meson pkgconfig useradd features_check +# depends on virtual/egl +REQUIRED_DISTRO_FEATURES = "opengl" + +DEPENDS = "libxkbcommon gdk-pixbuf pixman cairo glib-2.0" +DEPENDS += "wayland wayland-protocols libinput virtual/egl pango wayland-native" + +WESTON_MAJOR_VERSION = "${@'.'.join(d.getVar('PV').split('.')[0:1])}" + +EXTRA_OEMESON += "-Dbackend-default=auto -Dbackend-rdp=false -Dpipewire=false" + +PACKAGECONFIG ??= "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', 'kms fbdev wayland egl clients', '', d)} \ + ${@bb.utils.contains('DISTRO_FEATURES', 'x11 wayland', 'xwayland', '', d)} \ + ${@bb.utils.filter('DISTRO_FEATURES', 'pam systemd x11', d)} \ + ${@bb.utils.contains_any('DISTRO_FEATURES', 'wayland x11', '', 'headless', d)} \ + launch \ + image-jpeg \ + screenshare \ + shell-desktop \ + shell-fullscreen \ + shell-ivi" + +# +# Compositor choices +# +# Weston on KMS +PACKAGECONFIG[kms] = "-Dbackend-drm=true,-Dbackend-drm=false,drm udev virtual/egl virtual/libgles2 virtual/libgbm mtdev" +# 
Weston on Wayland (nested Weston) +PACKAGECONFIG[wayland] = "-Dbackend-wayland=true,-Dbackend-wayland=false,virtual/egl virtual/libgles2" +# Weston on X11 +PACKAGECONFIG[x11] = "-Dbackend-x11=true,-Dbackend-x11=false,virtual/libx11 libxcb libxcb libxcursor cairo" +# Headless Weston +PACKAGECONFIG[headless] = "-Dbackend-headless=true,-Dbackend-headless=false" +# Weston on framebuffer +PACKAGECONFIG[fbdev] = "-Dbackend-fbdev=true,-Dbackend-fbdev=false,udev mtdev" +# weston-launch +PACKAGECONFIG[launch] = "-Dweston-launch=true,-Dweston-launch=false,drm" +# VA-API desktop recorder +PACKAGECONFIG[vaapi] = "-Dbackend-drm-screencast-vaapi=true,-Dbackend-drm-screencast-vaapi=false,libva" +# Weston with EGL support +PACKAGECONFIG[egl] = "-Drenderer-gl=true,-Drenderer-gl=false,virtual/egl" +# Weston with lcms support +PACKAGECONFIG[lcms] = "-Dcolor-management-lcms=true,-Dcolor-management-lcms=false,lcms" +# Weston with webp support +PACKAGECONFIG[webp] = "-Dimage-webp=true,-Dimage-webp=false,libwebp" +# Weston with systemd-login support +PACKAGECONFIG[systemd] = "-Dsystemd=true -Dlauncher-logind=true,-Dsystemd=false -Dlauncher-logind=false,systemd dbus" +# Weston with Xwayland support (requires X11 and Wayland) +PACKAGECONFIG[xwayland] = "-Dxwayland=true,-Dxwayland=false" +# colord CMS support +PACKAGECONFIG[colord] = "-Dcolor-management-colord=true,-Dcolor-management-colord=false,colord" +# Clients support +PACKAGECONFIG[clients] = "-Dsimple-clients=all -Ddemo-clients=true,-Dsimple-clients= -Ddemo-clients=false" +# Virtual remote output with GStreamer on DRM backend +PACKAGECONFIG[remoting] = "-Dremoting=true,-Dremoting=false,gstreamer-1.0" +# Weston with PAM support +PACKAGECONFIG[pam] = "-Dpam=true,-Dpam=false,libpam" +# Weston with screen-share support +PACKAGECONFIG[screenshare] = "-Dscreenshare=true,-Dscreenshare=false" +# Traditional desktop shell +PACKAGECONFIG[shell-desktop] = "-Dshell-desktop=true,-Dshell-desktop=false" +# Fullscreen shell +PACKAGECONFIG[shell-fullscreen] = "-Dshell-fullscreen=true,-Dshell-fullscreen=false" +# In-Vehicle Infotainment (IVI) shell +PACKAGECONFIG[shell-ivi] = "-Dshell-ivi=true,-Dshell-ivi=false" +# JPEG image loading support +PACKAGECONFIG[image-jpeg] = "-Dimage-jpeg=true,-Dimage-jpeg=false, jpeg" + +do_install_append() { + # Weston doesn't need the .la files to load modules, so wipe them + rm -f ${D}/${libdir}/libweston-${WESTON_MAJOR_VERSION}/*.la + + # If X11, ship a desktop file to launch it + if [ "${@bb.utils.filter('DISTRO_FEATURES', 'x11', d)}" ]; then + install -d ${D}${datadir}/applications + install ${WORKDIR}/weston.desktop ${D}${datadir}/applications + + install -d ${D}${datadir}/icons/hicolor/48x48/apps + install ${WORKDIR}/weston.png ${D}${datadir}/icons/hicolor/48x48/apps + fi + + if [ "${@bb.utils.contains('PACKAGECONFIG', 'xwayland', 'yes', 'no', d)}" = "yes" ]; then + install -Dm 644 ${WORKDIR}/xwayland.weston-start ${D}${datadir}/weston-start/xwayland + fi + + if [ "${@bb.utils.contains('PACKAGECONFIG', 'launch', 'yes', 'no', d)}" = "yes" ]; then + chmod u+s ${D}${bindir}/weston-launch + fi +} + +PACKAGES += "${@bb.utils.contains('PACKAGECONFIG', 'xwayland', '${PN}-xwayland', '', d)} \ + libweston-${WESTON_MAJOR_VERSION} ${PN}-examples" + +FILES_${PN}-dev += "${libdir}/${BPN}/libexec_weston.so" +FILES_${PN} = "${bindir}/weston ${bindir}/weston-terminal ${bindir}/weston-info ${bindir}/weston-launch ${bindir}/wcap-decode ${libexecdir} ${libdir}/${BPN}/*.so* ${datadir}" + +FILES_libweston-${WESTON_MAJOR_VERSION} = "${libdir}/lib*${SOLIBS} 
${libdir}/libweston-${WESTON_MAJOR_VERSION}/*.so" +SUMMARY_libweston-${WESTON_MAJOR_VERSION} = "Helper library for implementing 'wayland window managers'." + +FILES_${PN}-examples = "${bindir}/*" + +FILES_${PN}-xwayland = "${libdir}/libweston-${WESTON_MAJOR_VERSION}/xwayland.so" +RDEPENDS_${PN}-xwayland += "xserver-xorg-xwayland" + +RDEPENDS_${PN} += "xkeyboard-config" +RRECOMMENDS_${PN} = "weston-init liberation-fonts" +RRECOMMENDS_${PN}-dev += "wayland-protocols" + +USERADD_PACKAGES = "${PN}" +GROUPADD_PARAM_${PN} = "--system weston-launch" diff --git a/poky/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb b/poky/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb index 4f831932e..d2a16643f 100644 --- a/poky/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb +++ b/poky/meta/recipes-graphics/xinput-calibrator/xinput-calibrator_git.bb @@ -11,8 +11,8 @@ inherit autotools pkgconfig features_check # depends on virtual/libx11 REQUIRED_DISTRO_FEATURES = "x11" -SRCREV = "03dadf55109bd43d3380f040debe9f82f66f2f35" -SRC_URI = "git://github.com/tias/xinput_calibrator.git \ +SRCREV = "18ec53f1cada39f905614ebfaffed5c7754ecf46" +SRC_URI = "git://github.com/kreijack/xinput_calibrator.git;branch=libinput \ file://30xinput_calibrate.sh \ file://Allow-xinput_calibrator_pointercal.sh-to-be-run-as-n.patch \ file://0001-calibrator.hh-Include-string-to-get-std-string.patch \ diff --git a/poky/meta/recipes-graphics/xorg-app/xev/diet-x11.patch b/poky/meta/recipes-graphics/xorg-app/xev/diet-x11.patch index 53c0ac2e6..361369b29 100644 --- a/poky/meta/recipes-graphics/xorg-app/xev/diet-x11.patch +++ b/poky/meta/recipes-graphics/xorg-app/xev/diet-x11.patch @@ -1,14 +1,19 @@ +From b9b2b8d1af283a13cdccea55562cf332de48dcb9 Mon Sep 17 00:00:00 2001 +From: Ross Burton +Date: Wed, 28 Mar 2007 16:10:50 +0000 +Subject: [PATCH] Add xev + Upstream-Status: Inappropriate [disable feature] --- - xev.c | 16 ++++++++-------- - 1 file changed, 8 insertions(+), 8 deletions(-) + xev.c | 64 +---------------------------------------------------------- + 1 file changed, 1 insertion(+), 63 deletions(-) -Index: xev-1.2.3/xev.c -=================================================================== ---- xev-1.2.3.orig/xev.c -+++ xev-1.2.3/xev.c -@@ -125,17 +125,6 @@ do_KeyPress(XEvent *eventp) +diff --git a/xev.c b/xev.c +index ea69234..6d5eb30 100644 +--- a/xev.c ++++ b/xev.c +@@ -175,17 +175,6 @@ do_KeyPress(XEvent *eventp) nbytes = XLookupString(e, str, 256, &ks, NULL); /* not supposed to call XmbLookupString on a key release event */ @@ -26,24 +31,24 @@ Index: xev-1.2.3/xev.c if (ks == NoSymbol) ksname = "NoSymbol"; -@@ -168,16 +157,6 @@ do_KeyPress(XEvent *eventp) +@@ -220,16 +209,6 @@ do_KeyPress(XEvent *eventp) } /* not supposed to call XmbLookupString on a key release event */ - if (e->type == KeyPress && xic) { -- printf(" XmbLookupString gives %d bytes: ", nmbbytes); +- output(Indent, "XmbLookupString gives %d bytes: ", nmbbytes); - if (nmbbytes > 0) { - dump(buf, nmbbytes); -- printf(" \"%s\"\n", buf); +- output(NewLine, " \"%s\"", buf); - } - else { -- printf("\n"); +- output_new_line(); - } - } - printf(" XFilterEvent returns: %s\n", + output(Indent | NewLine, "XFilterEvent returns: %s", XFilterEvent(eventp, e->window) ? 
"True" : "False"); -@@ -1141,7 +1120,7 @@ parse_event_mask(const char *s, long eve +@@ -1211,7 +1190,7 @@ parse_event_mask(const char *s, long event_masks[]) if (s) return True; } @@ -52,7 +57,7 @@ Index: xev-1.2.3/xev.c if (s != NULL) fprintf(stderr, "%s: unrecognized event mask '%s'\n", ProgramName, s); -@@ -1288,37 +1267,6 @@ main(int argc, char **argv) +@@ -1361,37 +1340,6 @@ main(int argc, char **argv) fprintf(stderr, "%s: XSetLocaleModifiers failed\n", ProgramName); } @@ -90,7 +95,7 @@ Index: xev-1.2.3/xev.c screen = DefaultScreen(dpy); attr.event_mask = event_masks[EVENT_MASK_INDEX_CORE]; -@@ -1373,16 +1321,6 @@ main(int argc, char **argv) +@@ -1446,16 +1394,6 @@ main(int argc, char **argv) printf("Outer window is 0x%lx, inner window is 0x%lx\n", w, subw); } diff --git a/poky/meta/recipes-graphics/xorg-app/xev_1.2.3.bb b/poky/meta/recipes-graphics/xorg-app/xev_1.2.3.bb deleted file mode 100644 index 6a69e747a..000000000 --- a/poky/meta/recipes-graphics/xorg-app/xev_1.2.3.bb +++ /dev/null @@ -1,18 +0,0 @@ -require xorg-app-common.inc - -SUMMARY = "X Event Viewer" -DESCRIPTION = "Xev creates a window and then asks the X server to send it events \ -whenever anything happens to the window (such as it being moved, resized, \ -typed in, clicked in, etc.). You can also attach it to an existing window." - -LIC_FILES_CHKSUM = "file://xev.c;beginline=1;endline=33;md5=577c99421f1803b891d2c79097ae4682" -LICENSE = "MIT" - -PE = "1" - -DEPENDS += "libxrandr xorgproto" - -SRC_URI += "file://diet-x11.patch" - -SRC_URI[md5sum] = "eec82a5d4b599736f0fa637e96136746" -SRC_URI[sha256sum] = "66bc4f1cfa1946d62612737815c34164e4ce40fcebd2c9e1d7e7a1117ad3ad09" diff --git a/poky/meta/recipes-graphics/xorg-app/xev_1.2.4.bb b/poky/meta/recipes-graphics/xorg-app/xev_1.2.4.bb new file mode 100644 index 000000000..9407fa65f --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-app/xev_1.2.4.bb @@ -0,0 +1,17 @@ +require xorg-app-common.inc + +SUMMARY = "X Event Viewer" +DESCRIPTION = "Xev creates a window and then asks the X server to send it events \ +whenever anything happens to the window (such as it being moved, resized, \ +typed in, clicked in, etc.). You can also attach it to an existing window." + +LIC_FILES_CHKSUM = "file://xev.c;beginline=1;endline=33;md5=577c99421f1803b891d2c79097ae4682" +LICENSE = "MIT" + +PE = "1" + +DEPENDS += "libxrandr xorgproto" + +SRC_URI += "file://diet-x11.patch" + +SRC_URI[sha256sum] = "d700e08bfe751ed2dbf802baa204b056d0e49348b6eb3c6f9cb035d8ae4885e2" diff --git a/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.3/nocompiler.patch b/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.3/nocompiler.patch deleted file mode 100644 index 0b9fb8ccc..000000000 --- a/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.3/nocompiler.patch +++ /dev/null @@ -1,32 +0,0 @@ -Upstream-Status: Inappropriate [configuration] - -XORG_DEFAULT_OPTIONS pulls in the following dependency chains: - -XORG_CWARNFLAGS -> AC_PROG_CC_C99 -XORG_STRICT_OPTION -> AC_PROG_CC_C99, XORG_CWARNFLAGS -XORG_MANPAGE_SECTIONS -> AC_CANONICAL_HOST -> Checks host - -each of which triggers the use of the host compiler. As an "all" -architecture package, it shouldn't need a compiler (and doesn't). 
- -RP 17/5/2011 - -diff -uNr font-alias-1.0.3.orig//configure.ac font-alias-1.0.3/configure.ac ---- font-alias-1.0.3.orig//configure.ac 2011-05-18 21:29:18.378258643 +0200 -+++ font-alias-1.0.3/configure.ac 2011-05-18 21:32:06.865258593 +0200 -@@ -28,12 +28,12 @@ - AM_INIT_AUTOMAKE([foreign dist-bzip2]) - AM_MAINTAINER_MODE - --# Require xorg-macros: XORG_DEFAULT_OPTIONS - m4_ifndef([XORG_MACROS_VERSION], - [m4_fatal([must install xorg-macros 1.3 or later before running autoconf/autogen])]) - XORG_MACROS_VERSION(1.3) --XORG_DEFAULT_OPTIONS -- -+XORG_RELEASE_VERSION -+XORG_CHANGELOG -+XORG_INSTALL - AC_PROG_INSTALL - - XORG_FONTROOTDIR diff --git a/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.4/nocompiler.patch b/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.4/nocompiler.patch new file mode 100644 index 000000000..e54eee4ea --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-font/font-alias-1.0.4/nocompiler.patch @@ -0,0 +1,42 @@ +From fa2bbd48a55d54bd2dae30edf7936e3ab7587c96 Mon Sep 17 00:00:00 2001 +From: Richard Purdie +Date: Tue, 17 May 2011 23:03:02 +0000 +Subject: [PATCH] Improve handling of 'all' architecture recipes and their + +Upstream-Status: Inappropriate [configuration] + +XORG_DEFAULT_OPTIONS pulls in the following dependency chains: + +XORG_CWARNFLAGS -> AC_PROG_CC_C99 +XORG_STRICT_OPTION -> AC_PROG_CC_C99, XORG_CWARNFLAGS +XORG_MANPAGE_SECTIONS -> AC_CANONICAL_HOST -> Checks host + +each of which triggers the use of the host compiler. As an "all" +architecture package, it shouldn't need a compiler (and doesn't). + +RP 17/5/2011 + +--- + configure.ac | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/configure.ac b/configure.ac +index 3407c69..9fe1f89 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -27,12 +27,12 @@ AC_INIT([font-alias], [1.0.4], + [font-alias]) + AM_INIT_AUTOMAKE([foreign dist-bzip2]) + +-# Require xorg-macros: XORG_DEFAULT_OPTIONS + m4_ifndef([XORG_MACROS_VERSION], + [m4_fatal([must install xorg-macros 1.3 or later before running autoconf/autogen])]) + XORG_MACROS_VERSION(1.3) +-XORG_DEFAULT_OPTIONS +- ++XORG_RELEASE_VERSION ++XORG_CHANGELOG ++XORG_INSTALL + AC_PROG_INSTALL + + # Require X.Org's font util macros 1.2 or later diff --git a/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.3.bb b/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.3.bb deleted file mode 100644 index c5990a9e4..000000000 --- a/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.3.bb +++ /dev/null @@ -1,24 +0,0 @@ -SUMMARY = "X font aliases" - -require xorg-font-common.inc - -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://COPYING;md5=bf0158b89be493d523d69d9f29265038 \ - file://cyrillic/fonts.alias;md5=f40795b0640d6785826aecd3b16f6124 \ - file://75dpi/fonts.alias;md5=6bc48023f2ae7f3bfc105db7b0ee6b49 \ - file://misc/fonts.alias;md5=a8ec05d528431d4c9703b55a7efd67a8 \ - file://100dpi/fonts.alias;md5=85bebd6ca213aa656c301a72eb4397cb" - -SRC_URI += "file://nocompiler.patch" - -DEPENDS = "util-macros-native font-util-native" -RDEPENDS_${PN} = "encodings font-util" -RDEPENDS_${PN}_class-native = "font-util-native" - -inherit allarch - -PE = "1" -PR = "r3" - -SRC_URI[md5sum] = "6d25f64796fef34b53b439c2e9efa562" -SRC_URI[sha256sum] = "8b453b2aae1cfa8090009ca037037b8c5e333550651d5a158b7264ce1d472c9a" diff --git a/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.4.bb b/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.4.bb new file mode 100644 index 000000000..e4b70c69d --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-font/font-alias_1.0.4.bb 
@@ -0,0 +1,23 @@ +SUMMARY = "X font aliases" + +require xorg-font-common.inc + +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://COPYING;md5=bf0158b89be493d523d69d9f29265038 \ + file://cyrillic/fonts.alias;md5=d27bc65a2655cacdbc2644b51c064c20 \ + file://75dpi/fonts.alias;md5=6bc48023f2ae7f3bfc105db7b0ee6b49 \ + file://misc/fonts.alias;md5=1bdafa7c31aa54f87f3531f2ef8ed5a6 \ + file://100dpi/fonts.alias;md5=85bebd6ca213aa656c301a72eb4397cb \ + " + +SRC_URI += "file://nocompiler.patch" + +DEPENDS = "util-macros-native font-util-native" +RDEPENDS_${PN} = "encodings font-util" +RDEPENDS_${PN}_class-native = "font-util-native" + +inherit allarch + +PE = "1" + +SRC_URI[sha256sum] = "f3111ae8bf2e980f5f56af400e8eefe5fc9f4207f4a412ea79637fd66c945276" diff --git a/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.12.bb b/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.12.bb new file mode 100644 index 000000000..de7f1c366 --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.12.bb @@ -0,0 +1,45 @@ +SUMMARY = "Xlib: C Language X Interface library" + +DESCRIPTION = "This package provides a client interface to the X Window \ +System, otherwise known as 'Xlib'. It provides a complete API for the \ +basic functions of the window system." + +require xorg-lib-common.inc + +FILESEXTRAPATHS =. "${FILE_DIRNAME}/libx11:" + +PE = "1" + +SRC_URI += "file://Fix-hanging-issue-in-_XReply.patch \ + file://disable_tests.patch \ + file://libx11-whitespace.patch" + +SRC_URI[sha256sum] = "f108227469419ac04d196df0f3b80ce1f7f65059bb54c0de811f4d8e03fd6ec7" + +PROVIDES = "virtual/libx11" + +XORG_PN = "libX11" +LICENSE = "MIT & MIT-style & BSD" +LIC_FILES_CHKSUM = "file://COPYING;md5=172255dee66bb0151435b2d5d709fcf7" + +DEPENDS += "xorgproto xtrans libxcb" +DEPENDS += "xorgproto-native" + +EXTRA_OECONF += "--with-keysymdefdir=${STAGING_INCDIR}/X11/ --disable-xf86bigfont" +EXTRA_OEMAKE += 'CWARNFLAGS=""' + +PACKAGECONFIG ??= "xcms" +PACKAGECONFIG[xcms] = "--enable-xcms,--disable-xcms" + +# src/util/makekeys is built natively but needs -D_GNU_SOURCE defined. +CPPFLAGS_FOR_BUILD += "-D_GNU_SOURCE" + +PACKAGES =+ "${PN}-xcb" + +inherit gettext + +FILES_${PN} += "${datadir}/X11/XKeysymDB ${datadir}/X11/XErrorDB ${datadir}/X11/Xcms.txt" +FILES_${PN}-xcb += "${libdir}/libX11-xcb.so.*" +FILES_${PN}-locale += "${datadir}/X11/locale ${libdir}/X11/locale" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb b/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb deleted file mode 100644 index ff60a4240..000000000 --- a/poky/meta/recipes-graphics/xorg-lib/libx11_1.6.9.bb +++ /dev/null @@ -1,46 +0,0 @@ -SUMMARY = "Xlib: C Language X Interface library" - -DESCRIPTION = "This package provides a client interface to the X Window \ -System, otherwise known as 'Xlib'. It provides a complete API for the \ -basic functions of the window system." - -require xorg-lib-common.inc - -FILESEXTRAPATHS =. 
"${FILE_DIRNAME}/libx11:" - -PE = "1" - -SRC_URI += "file://Fix-hanging-issue-in-_XReply.patch \ - file://disable_tests.patch \ - file://libx11-whitespace.patch" - -SRC_URI[md5sum] = "55adbfb6d4370ecac5e70598c4e7eed2" -SRC_URI[sha256sum] = "9cc7e8d000d6193fa5af580d50d689380b8287052270f5bb26a5fb6b58b2bed1" - -PROVIDES = "virtual/libx11" - -XORG_PN = "libX11" -LICENSE = "MIT & MIT-style & BSD" -LIC_FILES_CHKSUM = "file://COPYING;md5=172255dee66bb0151435b2d5d709fcf7" - -DEPENDS += "xorgproto xtrans libxcb" -DEPENDS += "xorgproto-native" - -EXTRA_OECONF += "--with-keysymdefdir=${STAGING_INCDIR}/X11/ --disable-xf86bigfont" -EXTRA_OEMAKE += 'CWARNFLAGS=""' - -PACKAGECONFIG ??= "xcms" -PACKAGECONFIG[xcms] = "--enable-xcms,--disable-xcms" - -# src/util/makekeys is built natively but needs -D_GNU_SOURCE defined. -CPPFLAGS_FOR_BUILD += "-D_GNU_SOURCE" - -PACKAGES =+ "${PN}-xcb" - -inherit gettext - -FILES_${PN} += "${datadir}/X11/XKeysymDB ${datadir}/X11/XErrorDB ${datadir}/X11/Xcms.txt" -FILES_${PN}-xcb += "${libdir}/libX11-xcb.so.*" -FILES_${PN}-locale += "${datadir}/X11/locale ${libdir}/X11/locale" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Fix-segfault-on-probing-a-non-PCI-platform-device-on.patch b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Fix-segfault-on-probing-a-non-PCI-platform-device-on.patch new file mode 100644 index 000000000..7b9e3b4ca --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg/0001-Fix-segfault-on-probing-a-non-PCI-platform-device-on.patch @@ -0,0 +1,34 @@ +From e50c85f4ebf559a3bac4817b41074c43d4691779 Mon Sep 17 00:00:00 2001 +From: Eric Anholt +Date: Fri, 26 Oct 2018 17:47:30 -0700 +Subject: [PATCH] Fix segfault on probing a non-PCI platform device on a system + with PCI. + +Some Broadcom set-top-box boards have PCI busses, but the GPU is still +probed through DT. We would dereference a null busid here in that +case. 
+ +Signed-off-by: Eric Anholt + +Upstream-status: Backport [https://github.com/freedesktop/xorg-xserver/commit/e50c85f4e] +Signed-off-by: Aníbal Limón +--- + hw/xfree86/common/xf86platformBus.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/hw/xfree86/common/xf86platformBus.c b/hw/xfree86/common/xf86platformBus.c +index cef47da03..dadbac6c8 100644 +--- a/hw/xfree86/common/xf86platformBus.c ++++ b/hw/xfree86/common/xf86platformBus.c +@@ -289,7 +289,7 @@ xf86platformProbe(void) + for (i = 0; i < xf86_num_platform_devices; i++) { + char *busid = xf86_platform_odev_attributes(i)->busid; + +- if (pci && (strncmp(busid, "pci:", 4) == 0)) { ++ if (pci && busid && (strncmp(busid, "pci:", 4) == 0)) { + platform_find_pci_info(&xf86_platform_devices[i], busid); + } + +-- +2.28.0 + diff --git a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb deleted file mode 100644 index 8c19692de..000000000 --- a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.8.bb +++ /dev/null @@ -1,33 +0,0 @@ -require xserver-xorg.inc - -SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.patch \ - file://pkgconfig.patch \ - file://0001-test-xtest-Initialize-array-with-braces.patch \ - file://sdksyms-no-build-path.patch \ - file://0001-drmmode_display.c-add-missing-mi.h-include.patch \ - file://0001-Avoid-duplicate-definitions-of-IOPortBase.patch \ - " -SRC_URI[md5sum] = "a770aec600116444a953ff632f51f839" -SRC_URI[sha256sum] = "d17b646bee4ba0fb7850c1cc55b18e3e8513ed5c02bdf38da7e107f84e2d0146" - -# These extensions are now integrated into the server, so declare the migration -# path for in-place upgrades. - -RREPLACES_${PN} = "${PN}-extension-dri \ - ${PN}-extension-dri2 \ - ${PN}-extension-record \ - ${PN}-extension-extmod \ - ${PN}-extension-dbe \ - " -RPROVIDES_${PN} = "${PN}-extension-dri \ - ${PN}-extension-dri2 \ - ${PN}-extension-record \ - ${PN}-extension-extmod \ - ${PN}-extension-dbe \ - " -RCONFLICTS_${PN} = "${PN}-extension-dri \ - ${PN}-extension-dri2 \ - ${PN}-extension-record \ - ${PN}-extension-extmod \ - ${PN}-extension-dbe \ - " diff --git a/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.9.bb b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.9.bb new file mode 100644 index 000000000..c83e43ed9 --- /dev/null +++ b/poky/meta/recipes-graphics/xorg-xserver/xserver-xorg_1.20.9.bb @@ -0,0 +1,33 @@ +require xserver-xorg.inc + +SRC_URI += "file://0001-xf86pciBus.c-use-Intel-ddx-only-for-pre-gen4-hardwar.patch \ + file://pkgconfig.patch \ + file://0001-test-xtest-Initialize-array-with-braces.patch \ + file://sdksyms-no-build-path.patch \ + file://0001-drmmode_display.c-add-missing-mi.h-include.patch \ + file://0001-Avoid-duplicate-definitions-of-IOPortBase.patch \ + file://0001-Fix-segfault-on-probing-a-non-PCI-platform-device-on.patch \ + " +SRC_URI[sha256sum] = "e219f2e0dfe455467939149d7cd2ee53b79b512cc1d2094ae4f5c9ed9ccd3571" + +# These extensions are now integrated into the server, so declare the migration +# path for in-place upgrades. 
+ +RREPLACES_${PN} = "${PN}-extension-dri \ + ${PN}-extension-dri2 \ + ${PN}-extension-record \ + ${PN}-extension-extmod \ + ${PN}-extension-dbe \ + " +RPROVIDES_${PN} = "${PN}-extension-dri \ + ${PN}-extension-dri2 \ + ${PN}-extension-record \ + ${PN}-extension-extmod \ + ${PN}-extension-dbe \ + " +RCONFLICTS_${PN} = "${PN}-extension-dri \ + ${PN}-extension-dri2 \ + ${PN}-extension-record \ + ${PN}-extension-extmod \ + ${PN}-extension-dbe \ + " diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.10.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.10.bb deleted file mode 100644 index c55577c66..000000000 --- a/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.10.bb +++ /dev/null @@ -1,13 +0,0 @@ -require cryptodev.inc - -SUMMARY = "A /dev/crypto device driver header file" - -do_compile[noexec] = "1" - -# Just install cryptodev.h which is the only header file needed to be exported -do_install() { - install -D ${S}/crypto/cryptodev.h ${D}${includedir}/crypto/cryptodev.h -} - -ALLOW_EMPTY_${PN} = "1" -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.11.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.11.bb new file mode 100644 index 000000000..c55577c66 --- /dev/null +++ b/poky/meta/recipes-kernel/cryptodev/cryptodev-linux_1.11.bb @@ -0,0 +1,13 @@ +require cryptodev.inc + +SUMMARY = "A /dev/crypto device driver header file" + +do_compile[noexec] = "1" + +# Just install cryptodev.h which is the only header file needed to be exported +do_install() { + install -D ${S}/crypto/cryptodev.h ${D}${includedir}/crypto/cryptodev.h +} + +ALLOW_EMPTY_${PN} = "1" +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb deleted file mode 100644 index 6474599c4..000000000 --- a/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.10.bb +++ /dev/null @@ -1,18 +0,0 @@ -require cryptodev.inc - -SUMMARY = "A /dev/crypto device driver kernel module" - -inherit module - -# Header file provided by a separate package -DEPENDS += "cryptodev-linux" - -SRC_URI += " \ -file://0001-Disable-installing-header-file-provided-by-another-p.patch \ -file://0001-Fix-build-for-Linux-5.8-rc1.patch \ -" - -EXTRA_OEMAKE='KERNEL_DIR="${STAGING_KERNEL_DIR}" PREFIX="${D}"' - -RCONFLICTS_${PN} = "ocf-linux" -RREPLACES_${PN} = "ocf-linux" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.11.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.11.bb new file mode 100644 index 000000000..b3b554c7d --- /dev/null +++ b/poky/meta/recipes-kernel/cryptodev/cryptodev-module_1.11.bb @@ -0,0 +1,16 @@ +require cryptodev.inc + +SUMMARY = "A /dev/crypto device driver kernel module" + +inherit module + +# Header file provided by a separate package +DEPENDS += "cryptodev-linux" + +SRC_URI += "file://0001-Disable-installing-header-file-provided-by-another-p.patch \ + " + +EXTRA_OEMAKE='KERNEL_DIR="${STAGING_KERNEL_DIR}" PREFIX="${D}"' + +RCONFLICTS_${PN} = "ocf-linux" +RREPLACES_${PN} = "ocf-linux" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.10.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.10.bb deleted file mode 100644 index 9cb5dcb94..000000000 --- a/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.10.bb +++ /dev/null @@ -1,21 +0,0 @@ -require cryptodev.inc - -SUMMARY = "A test suite for /dev/crypto device driver" - -DEPENDS += "openssl" - -SRC_URI += " \ 
-file://0001-Add-the-compile-and-install-rules-for-cryptodev-test.patch \ -" - -EXTRA_OEMAKE='KERNEL_DIR="${STAGING_EXECPREFIXDIR}" PREFIX="${D}"' - -do_compile() { - oe_runmake testprogs -} - -do_install() { - oe_runmake install_tests -} - -FILES_${PN} = "${bindir}/*" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.11.bb b/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.11.bb new file mode 100644 index 000000000..9cb5dcb94 --- /dev/null +++ b/poky/meta/recipes-kernel/cryptodev/cryptodev-tests_1.11.bb @@ -0,0 +1,21 @@ +require cryptodev.inc + +SUMMARY = "A test suite for /dev/crypto device driver" + +DEPENDS += "openssl" + +SRC_URI += " \ +file://0001-Add-the-compile-and-install-rules-for-cryptodev-test.patch \ +" + +EXTRA_OEMAKE='KERNEL_DIR="${STAGING_EXECPREFIXDIR}" PREFIX="${D}"' + +do_compile() { + oe_runmake testprogs +} + +do_install() { + oe_runmake install_tests +} + +FILES_${PN} = "${bindir}/*" diff --git a/poky/meta/recipes-kernel/cryptodev/cryptodev.inc b/poky/meta/recipes-kernel/cryptodev/cryptodev.inc index f99f8bc9f..c050eda49 100644 --- a/poky/meta/recipes-kernel/cryptodev/cryptodev.inc +++ b/poky/meta/recipes-kernel/cryptodev/cryptodev.inc @@ -5,7 +5,7 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=b234ee4d69f5fce4486a80fdaf4a4263" SRC_URI = "git://github.com/cryptodev-linux/cryptodev-linux \ " -SRCREV = "a87053bee5680878c295b7d23cf0d7065576ac2b" +SRCREV = "fabe5989a3dc9fba0cd0a40b612247cdde351c01" S = "${WORKDIR}/git" diff --git a/poky/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch b/poky/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch deleted file mode 100644 index 02c721a4f..000000000 --- a/poky/meta/recipes-kernel/cryptodev/files/0001-Fix-build-for-Linux-5.8-rc1.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 9e765068582aae3696520346a7500322ca6cc2de Mon Sep 17 00:00:00 2001 -From: Joan Bruguera -Date: Sat, 13 Jun 2020 19:46:44 +0200 -Subject: [PATCH] Fix build for Linux 5.8-rc1 - -See also: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=9740ca4e95b43b91a4a848694a20d01ba6818f7b - https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=da1c55f1b272f4bd54671d459b39ea7b54944ef9 - https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d8ed45c5dcd455fc5848d47f86883a1b872ac0d0 - -Signed-off-by: Joan Bruguera - -Upstream-Status: Backport [9e765068582aae3696520346a7500322ca6cc2de] - -Signed-off-by: He Zhe ---- - zc.c | 8 ++++++++ - 1 file changed, 8 insertions(+) - -diff --git a/zc.c b/zc.c -index ae464ff..2c286bb 100644 ---- a/zc.c -+++ b/zc.c -@@ -58,7 +58,11 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write, - return 0; - } - -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) - down_read(&mm->mmap_sem); -+#else -+ mmap_read_lock(mm); -+#endif - #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) - ret = get_user_pages(task, mm, - (unsigned long)addr, pgcount, write, 0, pg, NULL); -@@ -74,7 +78,11 @@ int __get_userbuf(uint8_t __user *addr, uint32_t len, int write, - (unsigned long)addr, pgcount, write ? 
FOLL_WRITE : 0, - pg, NULL, NULL); - #endif -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0)) - up_read(&mm->mmap_sem); -+#else -+ mmap_read_unlock(mm); -+#endif - if (ret != pgcount) - return -EINVAL; - --- -2.17.1 - diff --git a/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch b/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch index a3ba0912d..31f4d00b5 100644 --- a/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch +++ b/poky/meta/recipes-kernel/kexec/kexec-tools/0001-kexec-Fix-build-with-fno-common.patch @@ -66,3 +66,13 @@ Signed-off-by: Khem Raj int bzImage64_probe(const char *buf, off_t len) { +--- a/kexec/arch/ppc/kexec-elf-ppc.c ++++ b/kexec/arch/ppc/kexec-elf-ppc.c +@@ -33,7 +33,6 @@ + static const int probe_debug = 0; + + unsigned char reuse_initrd; +-const char *ramdisk; + int create_flatten_tree(struct kexec_info *, unsigned char **, unsigned long *, + char *); + diff --git a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb deleted file mode 100644 index bf25ff8b7..000000000 --- a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200619.bb +++ /dev/null @@ -1,951 +0,0 @@ -SUMMARY = "Firmware files for use with Linux kernel" -SECTION = "kernel" - -LICENSE = "\ - Firmware-Abilis \ - & Firmware-adsp_sst \ - & Firmware-agere \ - & Firmware-amdgpu \ - & Firmware-amd-ucode \ - & Firmware-amlogic_vdec \ - & Firmware-atheros_firmware \ - & Firmware-atmel \ - & Firmware-broadcom_bcm43xx \ - & Firmware-ca0132 \ - & Firmware-cavium \ - & Firmware-chelsio_firmware \ - & Firmware-cw1200 \ - & Firmware-cypress \ - & Firmware-dib0700 \ - & Firmware-e100 \ - & Firmware-ene_firmware \ - & Firmware-fw_sst_0f28 \ - & Firmware-go7007 \ - & Firmware-GPLv2 \ - & Firmware-hfi1_firmware \ - & Firmware-i2400m \ - & Firmware-i915 \ - & Firmware-ibt_firmware \ - & Firmware-ice \ - & Firmware-it913x \ - & Firmware-iwlwifi_firmware \ - & Firmware-IntcSST2 \ - & Firmware-kaweth \ - & Firmware-Marvell \ - & Firmware-moxa \ - & Firmware-myri10ge_firmware \ - & Firmware-netronome \ - & Firmware-nvidia \ - & Firmware-OLPC \ - & Firmware-ath9k-htc \ - & Firmware-phanfw \ - & Firmware-qat \ - & Firmware-qcom \ - & Firmware-qla1280 \ - & Firmware-qla2xxx \ - & Firmware-qualcommAthos_ar3k \ - & Firmware-qualcommAthos_ath10k \ - & Firmware-r8a779x_usb3 \ - & Firmware-radeon \ - & Firmware-ralink_a_mediatek_company_firmware \ - & Firmware-ralink-firmware \ - & Firmware-rtlwifi_firmware \ - & Firmware-imx-sdma_firmware \ - & Firmware-siano \ - & Firmware-tda7706-firmware \ - & Firmware-ti-connectivity \ - & Firmware-ti-keystone \ - & Firmware-ueagle-atm4-firmware \ - & Firmware-via_vt6656 \ - & Firmware-wl1251 \ - & Firmware-xc4000 \ - & Firmware-xc5000 \ - & Firmware-xc5000c \ - & WHENCE \ -" - -LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \ - file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \ - file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \ - file://LICENSE.amdgpu;md5=d357524f5099e2a3db3c1838921c593f \ - file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \ - file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \ - file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \ - file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \ - file://LICENCE.broadcom_bcm43xx;md5=3160c14df7228891b868060e1951dfbc \ - 
file://LICENCE.ca0132;md5=209b33e66ee5be0461f13d31da392198 \ - file://LICENCE.cadence;md5=009f46816f6956cfb75ede13d3e1cee0 \ - file://LICENCE.cavium;md5=c37aaffb1ebe5939b2580d073a95daea \ - file://LICENCE.chelsio_firmware;md5=819aa8c3fa453f1b258ed8d168a9d903 \ - file://LICENCE.cw1200;md5=f0f770864e7a8444a5c5aa9d12a3a7ed \ - file://LICENCE.cypress;md5=48cd9436c763bf873961f9ed7b5c147b \ - file://LICENSE.dib0700;md5=f7411825c8a555a1a3e5eab9ca773431 \ - file://LICENCE.e100;md5=ec0f84136766df159a3ae6d02acdf5a8 \ - file://LICENCE.ene_firmware;md5=ed67f0f62f8f798130c296720b7d3921 \ - file://LICENCE.fw_sst_0f28;md5=6353931c988ad52818ae733ac61cd293 \ - file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \ - file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ - file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \ - file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \ - file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \ - file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \ - file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \ - file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ - file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \ - file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \ - file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \ - file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \ - file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \ - file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \ - file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \ - file://LICENCE.Netronome;md5=4add08f2577086d44447996503cddf5f \ - file://LICENCE.nvidia;md5=4428a922ed3ba2ceec95f076a488ce07 \ - file://LICENCE.NXP;md5=58bb8ba632cd729b9ba6183bc6aed36f \ - file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \ - file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \ - file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \ - file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ - file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \ - file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \ - file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \ - file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \ - file://LICENSE.QualcommAtheros_ath10k;md5=cb42b686ee5f5cb890275e4321db60a8 \ - file://LICENCE.r8a779x_usb3;md5=4c1671656153025d7076105a5da7e498 \ - file://LICENSE.radeon;md5=68ec28bacb3613200bca44f404c69b16 \ - file://LICENCE.ralink_a_mediatek_company_firmware;md5=728f1a85fd53fd67fa8d7afb080bc435 \ - file://LICENCE.ralink-firmware.txt;md5=ab2c269277c45476fb449673911a2dfd \ - file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \ - file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \ - file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \ - file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \ - file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \ - file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \ - file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \ - file://LICENCE.via_vt6656;md5=e4159694cba42d4377a912e78a6e850f \ - file://LICENCE.wl1251;md5=ad3f81922bb9e197014bb187289d3b5b \ - file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \ - file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \ - file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \ - 
file://WHENCE;md5=d373d30188c38dabffec0d3cc87abbfd \ - " - -# These are not common licenses, set NO_GENERIC_LICENSE for them -# so that the license files will be copied from fetched source -NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENCE.Abilis" -NO_GENERIC_LICENSE[Firmware-adsp_sst] = "LICENCE.adsp_sst" -NO_GENERIC_LICENSE[Firmware-agere] = "LICENCE.agere" -NO_GENERIC_LICENSE[Firmware-amdgpu] = "LICENSE.amdgpu" -NO_GENERIC_LICENSE[Firmware-amd-ucode] = "LICENSE.amd-ucode" -NO_GENERIC_LICENSE[Firmware-amlogic_vdec] = "LICENSE.amlogic_vdec" -NO_GENERIC_LICENSE[Firmware-atheros_firmware] = "LICENCE.atheros_firmware" -NO_GENERIC_LICENSE[Firmware-atmel] = "LICENSE.atmel" -NO_GENERIC_LICENSE[Firmware-broadcom_bcm43xx] = "LICENCE.broadcom_bcm43xx" -NO_GENERIC_LICENSE[Firmware-ca0132] = "LICENCE.ca0132" -NO_GENERIC_LICENSE[Firmware-cadence] = "LICENCE.cadence" -NO_GENERIC_LICENSE[Firmware-cavium] = "LICENCE.cavium" -NO_GENERIC_LICENSE[Firmware-chelsio_firmware] = "LICENCE.chelsio_firmware" -NO_GENERIC_LICENSE[Firmware-cw1200] = "LICENCE.cw1200" -NO_GENERIC_LICENSE[Firmware-cypress] = "LICENCE.cypress" -NO_GENERIC_LICENSE[Firmware-dib0700] = "LICENSE.dib0700" -NO_GENERIC_LICENSE[Firmware-e100] = "LICENCE.e100" -NO_GENERIC_LICENSE[Firmware-ene_firmware] = "LICENCE.ene_firmware" -NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28" -NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007" -NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2" -NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware" -NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m" -NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915" -NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware" -NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice" -NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2" -NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x" -NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware" -NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth" -NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell" -NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek" -NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa" -NO_GENERIC_LICENSE[Firmware-myri10ge_firmware] = "LICENCE.myri10ge_firmware" -NO_GENERIC_LICENSE[Firmware-netronome] = "LICENCE.Netronome" -NO_GENERIC_LICENSE[Firmware-nvidia] = "LICENCE.nvidia" -NO_GENERIC_LICENSE[Firmware-OLPC] = "LICENCE.OLPC" -NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware" -NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw" -NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware" -NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom" -NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280" -NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx" -NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k" -NO_GENERIC_LICENSE[Firmware-qualcommAthos_ath10k] = "LICENSE.QualcommAtheros_ath10k" -NO_GENERIC_LICENSE[Firmware-r8a779x_usb3] = "LICENCE.r8a779x_usb3" -NO_GENERIC_LICENSE[Firmware-radeon] = "LICENSE.radeon" -NO_GENERIC_LICENSE[Firmware-ralink_a_mediatek_company_firmware] = "LICENCE.ralink_a_mediatek_company_firmware" -NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt" -NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt" -NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano" -NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware" -NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt" 
-NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity" -NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone" -NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware" -NO_GENERIC_LICENSE[Firmware-via_vt6656] = "LICENCE.via_vt6656" -NO_GENERIC_LICENSE[Firmware-wl1251] = "LICENCE.wl1251" -NO_GENERIC_LICENSE[Firmware-xc4000] = "LICENCE.xc4000" -NO_GENERIC_LICENSE[Firmware-xc5000] = "LICENCE.xc5000" -NO_GENERIC_LICENSE[Firmware-xc5000c] = "LICENCE.xc5000c" -NO_GENERIC_LICENSE[WHENCE] = "WHENCE" - -PE = "1" - -SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz" - -SRC_URI[sha256sum] = "962d3ae197d226c8259f9cc7746f7ef12a9d23787cd56bd27302021ba6339722" - -inherit allarch - -CLEANBROKEN = "1" - -do_compile() { - : -} - -do_install() { - oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install - cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/ -} - - -PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \ - ${PN}-mt7601u-license ${PN}-mt7601u \ - ${PN}-radeon-license ${PN}-radeon \ - ${PN}-marvell-license ${PN}-pcie8897 ${PN}-pcie8997 \ - ${PN}-sd8686 ${PN}-sd8688 ${PN}-sd8787 ${PN}-sd8797 ${PN}-sd8801 ${PN}-sd8887 ${PN}-sd8897 \ - ${PN}-usb8997 \ - ${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \ - ${PN}-vt6656-license ${PN}-vt6656 \ - ${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \ - ${PN}-rtl8168 \ - ${PN}-cypress-license \ - ${PN}-broadcom-license \ - ${PN}-bcm-0bb4-0306 \ - ${PN}-bcm43143 \ - ${PN}-bcm43236b \ - ${PN}-bcm43241b0 \ - ${PN}-bcm43241b4 \ - ${PN}-bcm43241b5 \ - ${PN}-bcm43242a \ - ${PN}-bcm4329 \ - ${PN}-bcm4329-fullmac \ - ${PN}-bcm4330 \ - ${PN}-bcm4334 \ - ${PN}-bcm43340 \ - ${PN}-bcm4335 \ - ${PN}-bcm43362 \ - ${PN}-bcm4339 \ - ${PN}-bcm43430 \ - ${PN}-bcm43430a0 \ - ${PN}-bcm43455 \ - ${PN}-bcm4350 \ - ${PN}-bcm4350c2 \ - ${PN}-bcm4354 \ - ${PN}-bcm4356 \ - ${PN}-bcm4356-pcie \ - ${PN}-bcm43569 \ - ${PN}-bcm43570 \ - ${PN}-bcm4358 \ - ${PN}-bcm43602 \ - ${PN}-bcm4366b \ - ${PN}-bcm4366c \ - ${PN}-bcm4371 \ - ${PN}-bcm4373 \ - ${PN}-bcm43xx \ - ${PN}-bcm43xx-hdr \ - ${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k \ - ${PN}-gplv2-license ${PN}-carl9170 \ - ${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-qca \ - \ - ${PN}-imx-sdma-license ${PN}-imx-sdma-imx6q ${PN}-imx-sdma-imx7d \ - \ - ${PN}-iwlwifi-license ${PN}-iwlwifi \ - ${PN}-iwlwifi-135-6 \ - ${PN}-iwlwifi-3160-7 ${PN}-iwlwifi-3160-8 ${PN}-iwlwifi-3160-9 \ - ${PN}-iwlwifi-3160-10 ${PN}-iwlwifi-3160-12 ${PN}-iwlwifi-3160-13 \ - ${PN}-iwlwifi-3160-16 ${PN}-iwlwifi-3160-17 \ - ${PN}-iwlwifi-6000-4 ${PN}-iwlwifi-6000g2a-5 ${PN}-iwlwifi-6000g2a-6 \ - ${PN}-iwlwifi-6000g2b-5 ${PN}-iwlwifi-6000g2b-6 \ - ${PN}-iwlwifi-6050-4 ${PN}-iwlwifi-6050-5 \ - ${PN}-iwlwifi-7260 \ - ${PN}-iwlwifi-7265 \ - ${PN}-iwlwifi-7265d ${PN}-iwlwifi-8000c ${PN}-iwlwifi-8265 \ - ${PN}-iwlwifi-9000 \ - ${PN}-iwlwifi-misc \ - ${PN}-ibt-license ${PN}-ibt \ - ${PN}-ibt-11-5 ${PN}-ibt-12-16 ${PN}-ibt-hw-37-7 ${PN}-ibt-hw-37-8 \ - ${PN}-ibt-17 \ - ${PN}-ibt-20 \ - ${PN}-ibt-misc \ - ${PN}-i915-license ${PN}-i915 \ - ${PN}-ice-license ${PN}-ice \ - ${PN}-adsp-sst-license ${PN}-adsp-sst \ - ${PN}-bnx2-mips \ - ${PN}-liquidio \ - ${PN}-netronome-license ${PN}-netronome \ - ${PN}-qat ${PN}-qat-license \ - ${PN}-qcom-license \ - ${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 \ - ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 
${PN}-qcom-adreno-a630 \ - ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \ - ${PN}-whence-license \ - ${PN}-license \ - " - -# For atheros -LICENSE_${PN}-ar9170 = "Firmware-atheros_firmware" -LICENSE_${PN}-ath6k = "Firmware-atheros_firmware" -LICENSE_${PN}-ath9k = "Firmware-atheros_firmware" -LICENSE_${PN}-atheros-license = "Firmware-atheros_firmware" - -FILES_${PN}-atheros-license = "${nonarch_base_libdir}/firmware/LICENCE.atheros_firmware" -FILES_${PN}-ar9170 = " \ - ${nonarch_base_libdir}/firmware/ar9170*.fw \ -" -FILES_${PN}-ath6k = " \ - ${nonarch_base_libdir}/firmware/ath6k \ -" -FILES_${PN}-ath9k = " \ - ${nonarch_base_libdir}/firmware/ar9271.fw \ - ${nonarch_base_libdir}/firmware/ar7010*.fw \ - ${nonarch_base_libdir}/firmware/htc_9271.fw \ - ${nonarch_base_libdir}/firmware/htc_7010.fw \ - ${nonarch_base_libdir}/firmware/ath9k_htc/htc_7010-1.4.0.fw \ - ${nonarch_base_libdir}/firmware/ath9k_htc/htc_9271-1.4.0.fw \ -" - -RDEPENDS_${PN}-ar9170 += "${PN}-atheros-license" -RDEPENDS_${PN}-ath6k += "${PN}-atheros-license" -RDEPENDS_${PN}-ath9k += "${PN}-atheros-license" - -# For carl9170 -LICENSE_${PN}-carl9170 = "Firmware-GPLv2" -LICENSE_${PN}-gplv2-license = "Firmware-GPLv2" - -FILES_${PN}-gplv2-license = "${nonarch_base_libdir}/firmware/GPL-2" -FILES_${PN}-carl9170 = " \ - ${nonarch_base_libdir}/firmware/carl9170*.fw \ -" - -RDEPENDS_${PN}-carl9170 += "${PN}-gplv2-license" - -# For QualCommAthos -LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k" -LICENSE_${PN}-ar3k-license = "Firmware-qualcommAthos_ar3k" -LICENSE_${PN}-ath10k = "Firmware-qualcommAthos_ath10k" -LICENSE_${PN}-ath10k-license = "Firmware-qualcommAthos_ath10k" -LICENSE_${PN}-qca = "Firmware-qualcommAthos_ath10k" - -FILES_${PN}-ar3k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ar3k" -FILES_${PN}-ar3k = " \ - ${nonarch_base_libdir}/firmware/ar3k \ -" - -FILES_${PN}-ath10k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ath10k" -FILES_${PN}-ath10k = " \ - ${nonarch_base_libdir}/firmware/ath10k \ -" - -FILES_${PN}-qca = " \ - ${nonarch_base_libdir}/firmware/qca \ -" - -RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license" -RDEPENDS_${PN}-ath10k += "${PN}-ath10k-license" -RDEPENDS_${PN}-qca += "${PN}-ath10k-license" - -# For ralink -LICENSE_${PN}-ralink = "Firmware-ralink-firmware" -LICENSE_${PN}-ralink-license = "Firmware-ralink-firmware" - -FILES_${PN}-ralink-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink-firmware.txt" -FILES_${PN}-ralink = " \ - ${nonarch_base_libdir}/firmware/rt*.bin \ -" - -RDEPENDS_${PN}-ralink += "${PN}-ralink-license" - -# For mediatek MT7601U -LICENSE_${PN}-mt7601u = "Firmware-ralink_a_mediatek_company_firmware" -LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware" - -FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware" -FILES_${PN}-mt7601u = " \ - ${nonarch_base_libdir}/firmware/mt7601u.bin \ -" - -RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license" - -# For radeon -LICENSE_${PN}-radeon = "Firmware-radeon" -LICENSE_${PN}-radeon-license = "Firmware-radeon" - -FILES_${PN}-radeon-license = "${nonarch_base_libdir}/firmware/LICENSE.radeon" -FILES_${PN}-radeon = " \ - ${nonarch_base_libdir}/firmware/radeon \ -" - -RDEPENDS_${PN}-radeon += "${PN}-radeon-license" - -# For marvell -LICENSE_${PN}-pcie8897 = "Firmware-Marvell" -LICENSE_${PN}-pcie8997 = "Firmware-Marvell" -LICENSE_${PN}-sd8686 = "Firmware-Marvell" -LICENSE_${PN}-sd8688 = "Firmware-Marvell" 
-LICENSE_${PN}-sd8787 = "Firmware-Marvell" -LICENSE_${PN}-sd8797 = "Firmware-Marvell" -LICENSE_${PN}-sd8801 = "Firmware-Marvell" -LICENSE_${PN}-sd8887 = "Firmware-Marvell" -LICENSE_${PN}-sd8897 = "Firmware-Marvell" -LICENSE_${PN}-usb8997 = "Firmware-Marvell" -LICENSE_${PN}-marvell-license = "Firmware-Marvell" - -FILES_${PN}-marvell-license = "${nonarch_base_libdir}/firmware/LICENCE.Marvell" -FILES_${PN}-pcie8897 = " \ - ${nonarch_base_libdir}/firmware/mrvl/pcie8897_uapsta.bin \ -" -FILES_${PN}-pcie8997 = " \ - ${nonarch_base_libdir}/firmware/mrvl/pcie8997_wlan_v4.bin \ - ${nonarch_base_libdir}/firmware/mrvl/pcieuart8997_combo_v4.bin \ - ${nonarch_base_libdir}/firmware/mrvl/pcieusb8997_combo_v4.bin \ -" -FILES_${PN}-sd8686 = " \ - ${nonarch_base_libdir}/firmware/libertas/sd8686_v9* \ - ${nonarch_base_libdir}/firmware/sd8686* \ -" -FILES_${PN}-sd8688 = " \ - ${nonarch_base_libdir}/firmware/libertas/sd8688* \ - ${nonarch_base_libdir}/firmware/mrvl/sd8688* \ -" -FILES_${PN}-sd8787 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8787_uapsta.bin \ -" -FILES_${PN}-sd8797 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8797_uapsta.bin \ -" -FILES_${PN}-sd8801 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8801_uapsta.bin \ -" -FILES_${PN}-sd8887 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8887_uapsta.bin \ -" -FILES_${PN}-sd8897 = " \ - ${nonarch_base_libdir}/firmware/mrvl/sd8897_uapsta.bin \ -" -FILES_${PN}-usb8997 = " \ - ${nonarch_base_libdir}/firmware/mrvl/usbusb8997_combo_v4.bin \ -" - -RDEPENDS_${PN}-sd8686 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8688 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8787 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8797 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8801 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8887 += "${PN}-marvell-license" -RDEPENDS_${PN}-sd8897 += "${PN}-marvell-license" -RDEPENDS_${PN}-usb8997 += "${PN}-marvell-license" - -# For netronome -LICENSE_${PN}-netronome = "Firmware-netronome" - -FILES_${PN}-netronome-license = " \ - ${nonarch_base_libdir}/firmware/LICENCE.Netronome \ -" -FILES_${PN}-netronome = " \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0081*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0096*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0097*.nffw \ - ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0099*.nffw \ -" - -RDEPENDS_${PN}-netronome += "${PN}-netronome-license" - -# For rtl -LICENSE_${PN}-rtl8188 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192cu = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware" -LICENSE_${PN}-rtl8168 = "WHENCE" - -FILES_${PN}-rtl-license = " \ - ${nonarch_base_libdir}/firmware/LICENCE.rtlwifi_firmware.txt \ -" -FILES_${PN}-rtl8188 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8188*.bin \ -" -FILES_${PN}-rtl8192cu = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cufw*.bin \ -" -FILES_${PN}-rtl8192ce = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cfw*.bin \ -" -FILES_${PN}-rtl8192su = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8712u.bin \ -" -FILES_${PN}-rtl8723 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8723*.bin \ -" -FILES_${PN}-rtl8821 = " \ - ${nonarch_base_libdir}/firmware/rtlwifi/rtl8821*.bin \ -" -FILES_${PN}-rtl8168 = " \ - 
${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \ -" - -RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license" -RDEPENDS_${PN}-rtl8168 += "${PN}-whence-license" - -# For ti-connectivity -LICENSE_${PN}-wlcommon = "Firmware-ti-connectivity" -LICENSE_${PN}-wl12xx = "Firmware-ti-connectivity" -LICENSE_${PN}-wl18xx = "Firmware-ti-connectivity" -LICENSE_${PN}-ti-connectivity-license = "Firmware-ti-connectivity" - -FILES_${PN}-ti-connectivity-license = "${nonarch_base_libdir}/firmware/LICENCE.ti-connectivity" -# wl18xx optionally needs wl1271-nvs.bin (which itself is a symlink to -# wl127x-nvs.bin) - see linux/drivers/net/wireless/ti/wlcore/sdio.c -# and drivers/net/wireless/ti/wlcore/spi.c. -# While they're optional and actually only used to override the MAC -# address on wl18xx, driver loading will delay (by udev timout - 60s) -# if not there. So let's make it available always. Because it's a -# symlink, both need to go to wlcommon. -FILES_${PN}-wlcommon = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/TI* \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl127x-nvs.bin \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl1271-nvs.bin \ -" -FILES_${PN}-wl12xx = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl12* \ -" -FILES_${PN}-wl18xx = " \ - ${nonarch_base_libdir}/firmware/ti-connectivity/wl18* \ -" - -RDEPENDS_${PN}-wl12xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" -RDEPENDS_${PN}-wl18xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" - -# For vt6656 -LICENSE_${PN}-vt6656 = "Firmware-via_vt6656" -LICENSE_${PN}-vt6656-license = "Firmware-via_vt6656" - -FILES_${PN}-vt6656-license = "${nonarch_base_libdir}/firmware/LICENCE.via_vt6656" -FILES_${PN}-vt6656 = " \ - ${nonarch_base_libdir}/firmware/vntwusb.fw \ -" - -RDEPENDS_${PN}-vt6656 = "${PN}-vt6656-license" - -# For broadcom - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e " \${PN}-$pkg \\"; done | sort -u - -LICENSE_${PN}-broadcom-license = "Firmware-broadcom_bcm43xx" -FILES_${PN}-broadcom-license = "${nonarch_base_libdir}/firmware/LICENCE.broadcom_bcm43xx" - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo "$i - $pkg"; echo -e "FILES_\${PN}-$pkg = \"\${nonarch_base_libdir}/firmware/brcm/$i\""; done | grep ^FILES - -FILES_${PN}-bcm43xx = "${nonarch_base_libdir}/firmware/brcm/bcm43xx-0.fw" -FILES_${PN}-bcm43xx-hdr = "${nonarch_base_libdir}/firmware/brcm/bcm43xx_hdr-0.fw" -FILES_${PN}-bcm4329-fullmac = "${nonarch_base_libdir}/firmware/brcm/bcm4329-fullmac-4.bin" -FILES_${PN}-bcm43236b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43236b.bin" -FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bin" -FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*" -FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin" -FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin" -FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin" -FILES_${PN}-bcm43241b0 = 
"${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b0-sdio.bin" -FILES_${PN}-bcm43241b4 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b4-sdio.bin" -FILES_${PN}-bcm43241b5 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b5-sdio.bin" -FILES_${PN}-bcm43242a = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43242a.bin" -FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \ -" -FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*" -FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*" -FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin" -FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin" -FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin" -FILES_${PN}-bcm43569 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43569.bin" -FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin" -FILES_${PN}-bcm4358 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4358-pcie.bin" -FILES_${PN}-bcm43602 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \ -" -FILES_${PN}-bcm4366b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366b-pcie.bin" -FILES_${PN}-bcm4366c = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366c-pcie.bin" -FILES_${PN}-bcm4371 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4371-pcie.bin" - -# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e "LICENSE_\${PN}-$pkg = \"Firmware-broadcom_bcm43xx\"\nRDEPENDS_\${PN}-$pkg += \"\${PN}-broadcom-license\""; done -# Currently 1st one and last 6 have cypress LICENSE - -LICENSE_${PN}-bcm43xx = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43xx += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43xx-hdr = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43xx-hdr += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4329-fullmac = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4329-fullmac += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43236b = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43236b += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4329 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4329 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4330 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4330 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4334 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4334 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4335 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4335 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4339 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4339 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b0 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b0 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b4 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b4 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43241b5 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43241b5 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43242a = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43242a += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43143 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43143 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43430a0 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43430a0 += 
"${PN}-broadcom-license" -LICENSE_${PN}-bcm43455 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43455 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4350c2 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4350c2 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4350 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4350 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4356 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4356 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43569 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43569 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43570 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43570 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4358 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4358 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm43602 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm43602 += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4366b = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4366b += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4366c = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4366c += "${PN}-broadcom-license" -LICENSE_${PN}-bcm4371 = "Firmware-broadcom_bcm43xx" -RDEPENDS_${PN}-bcm4371 += "${PN}-broadcom-license" - -# For broadcom cypress - -LICENSE_${PN}-cypress-license = "Firmware-cypress" -FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress" - -FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd" -FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*" -FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*" -FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*" -FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin" -FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*" -FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \ - ${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \ -" - -LICENSE_${PN}-bcm-0bb4-0306 = "Firmware-cypress" -RDEPENDS_${PN}-bcm-0bb4-0306 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43340 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43340 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43362 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43362 += "${PN}-cypress-license" -LICENSE_${PN}-bcm43430 = "Firmware-cypress" -RDEPENDS_${PN}-bcm43430 += "${PN}-cypress-license" -LICENSE_${PN}-bcm4354 = "Firmware-cypress" -RDEPENDS_${PN}-bcm4354 += "${PN}-cypress-license" -LICENSE_${PN}-bcm4356-pcie = "Firmware-cypress" -RDEPENDS_${PN}-bcm4356-pcie += "${PN}-cypress-license" -LICENSE_${PN}-bcm4373 = "Firmware-cypress" -RDEPENDS_${PN}-bcm4373 += "${PN}-cypress-license" - -# For Broadcom bnx2-mips -# -# which is a separate case to the other Broadcom firmwares since its -# license is contained in the shared WHENCE file. 
- -LICENSE_${PN}-bnx2-mips = "WHENCE" -LICENSE_${PN}-whence-license = "WHENCE" - -FILES_${PN}-bnx2-mips = "${nonarch_base_libdir}/firmware/bnx2/bnx2-mips-09-6.2.1b.fw" -FILES_${PN}-whence-license = "${nonarch_base_libdir}/firmware/WHENCE" - -RDEPENDS_${PN}-bnx2-mips += "${PN}-whence-license" - -# For imx-sdma -LICENSE_${PN}-imx-sdma-imx6q = "Firmware-imx-sdma_firmware" -LICENSE_${PN}-imx-sdma-imx7d = "Firmware-imx-sdma_firmware" -LICENSE_${PN}-imx-sdma-license = "Firmware-imx-sdma_firmware" - -FILES_${PN}-imx-sdma-imx6q = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx6q.bin" - -RPROVIDES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" -RREPLACES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" -RCONFLICTS_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" - -FILES_${PN}-imx-sdma-imx7d = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx7d.bin" - -FILES_${PN}-imx-sdma-license = "${nonarch_base_libdir}/firmware/LICENSE.sdma_firmware" - -RDEPENDS_${PN}-imx-sdma-imx6q += "${PN}-imx-sdma-license" -RDEPENDS_${PN}-imx-sdma-imx7d += "${PN}-imx-sdma-license" - -# For iwlwifi -LICENSE_${PN}-iwlwifi = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-135-6 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-7 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-8 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-9 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-10 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-12 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-13 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-16 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-3160-17 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000-4 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2a-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2a-6 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2b-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6000g2b-6 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6050-4 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-6050-5 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7260 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7265 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-7265d = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-8000c = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-8265 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-9000 = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-misc = "Firmware-iwlwifi_firmware" -LICENSE_${PN}-iwlwifi-license = "Firmware-iwlwifi_firmware" - - -FILES_${PN}-iwlwifi-license = "${nonarch_base_libdir}/firmware/LICENCE.iwlwifi_firmware" -FILES_${PN}-iwlwifi-135-6 = "${nonarch_base_libdir}/firmware/iwlwifi-135-6.ucode" -FILES_${PN}-iwlwifi-3160-7 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-7.ucode" -FILES_${PN}-iwlwifi-3160-8 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-8.ucode" -FILES_${PN}-iwlwifi-3160-9 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-9.ucode" -FILES_${PN}-iwlwifi-3160-10 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-10.ucode" -FILES_${PN}-iwlwifi-3160-12 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-12.ucode" -FILES_${PN}-iwlwifi-3160-13 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-13.ucode" -FILES_${PN}-iwlwifi-3160-16 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-16.ucode" -FILES_${PN}-iwlwifi-3160-17 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-17.ucode" -FILES_${PN}-iwlwifi-6000-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6000-4.ucode" 
-FILES_${PN}-iwlwifi-6000g2a-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-5.ucode" -FILES_${PN}-iwlwifi-6000g2a-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-6.ucode" -FILES_${PN}-iwlwifi-6000g2b-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-5.ucode" -FILES_${PN}-iwlwifi-6000g2b-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-6.ucode" -FILES_${PN}-iwlwifi-6050-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-4.ucode" -FILES_${PN}-iwlwifi-6050-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-5.ucode" -FILES_${PN}-iwlwifi-7260 = "${nonarch_base_libdir}/firmware/iwlwifi-7260-*.ucode" -FILES_${PN}-iwlwifi-7265 = "${nonarch_base_libdir}/firmware/iwlwifi-7265-*.ucode" -FILES_${PN}-iwlwifi-7265d = "${nonarch_base_libdir}/firmware/iwlwifi-7265D-*.ucode" -FILES_${PN}-iwlwifi-8000c = "${nonarch_base_libdir}/firmware/iwlwifi-8000C-*.ucode" -FILES_${PN}-iwlwifi-8265 = "${nonarch_base_libdir}/firmware/iwlwifi-8265-*.ucode" -FILES_${PN}-iwlwifi-9000 = "${nonarch_base_libdir}/firmware/iwlwifi-9000-*.ucode" -FILES_${PN}-iwlwifi-misc = "${nonarch_base_libdir}/firmware/iwlwifi-*.ucode" - -RDEPENDS_${PN}-iwlwifi-135-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-7 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-8 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-9 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-10 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-12 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-13 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-16 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-3160-17 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000-4 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2a-5 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2a-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2b-5 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6000g2b-6 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6050-4 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-6050-5 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-7265d = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-8000c = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-8265 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-9000 = "${PN}-iwlwifi-license" -RDEPENDS_${PN}-iwlwifi-misc = "${PN}-iwlwifi-license" - -# -iwlwifi-misc is a "catch all" package that includes all the iwlwifi -# firmwares that are not already included in other -iwlwifi- packages. -# -iwlwifi is a virtual package that depends upon all iwlwifi packages. -# These are distinct in order to allow the -misc firmwares to be installed -# without pulling in every other iwlwifi package. 
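(Editorial sketch of how these split packages are meant to be consumed; the image/local.conf lines below are an assumption for illustration, not part of the recipe:

    # install one specific microcode blob; its licence package comes in via RDEPENDS
    IMAGE_INSTALL_append = " linux-firmware-iwlwifi-8265"
    # or install the empty umbrella package, which recommends all split iwlwifi packages
    #IMAGE_INSTALL_append = " linux-firmware-iwlwifi"
)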
-ALLOW_EMPTY_${PN}-iwlwifi = "1" -ALLOW_EMPTY_${PN}-iwlwifi-misc = "1" - -# Handle package updating for the newly merged iwlwifi groupings -RPROVIDES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" -RREPLACES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" -RCONFLICTS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" - -RPROVIDES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" -RREPLACES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" -RCONFLICTS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" - -# For ibt -LICENSE_${PN}-ibt-license = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-hw-37-7 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-hw-37-8 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-11-5 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-12-16 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-17 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-20 = "Firmware-ibt_firmware" -LICENSE_${PN}-ibt-misc = "Firmware-ibt_firmware" - -FILES_${PN}-ibt-license = "${nonarch_base_libdir}/firmware/LICENCE.ibt_firmware" -FILES_${PN}-ibt-hw-37-7 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.7*.bseq" -FILES_${PN}-ibt-hw-37-8 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.8*.bseq" -FILES_${PN}-ibt-11-5 = "${nonarch_base_libdir}/firmware/intel/ibt-11-5.sfi ${nonarch_base_libdir}/firmware/intel/ibt-11-5.ddc" -FILES_${PN}-ibt-12-16 = "${nonarch_base_libdir}/firmware/intel/ibt-12-16.sfi ${nonarch_base_libdir}/firmware/intel/ibt-12-16.ddc" -FILES_${PN}-ibt-17 = "${nonarch_base_libdir}/firmware/intel/ibt-17-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-17-*.ddc" -FILES_${PN}-ibt-20 = "${nonarch_base_libdir}/firmware/intel/ibt-20-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-20-*.ddc" -FILES_${PN}-ibt-misc = "${nonarch_base_libdir}/firmware/intel/ibt-*" - -RDEPENDS_${PN}-ibt-hw-37-7 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-hw-37.8 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-11-5 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-12-16 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-17 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-20 = "${PN}-ibt-license" -RDEPENDS_${PN}-ibt-misc = "${PN}-ibt-license" - -ALLOW_EMPTY_${PN}-ibt= "1" -ALLOW_EMPTY_${PN}-ibt-misc = "1" - -LICENSE_${PN}-i915 = "Firmware-i915" -LICENSE_${PN}-i915-license = "Firmware-i915" -FILES_${PN}-i915-license = "${nonarch_base_libdir}/firmware/LICENSE.i915" -FILES_${PN}-i915 = "${nonarch_base_libdir}/firmware/i915" -RDEPENDS_${PN}-i915 = "${PN}-i915-license" - -LICENSE_${PN}-ice = "Firmware-ice" -LICENSE_${PN}-ice-license = "Firmware-ice" -FILES_${PN}-ice-license = "${nonarch_base_libdir}/firmware/LICENSE.ice" -FILES_${PN}-ice = "${nonarch_base_libdir}/firmware/intel/ice" -RDEPENDS_${PN}-ice = "${PN}-ice-license" - -FILES_${PN}-adsp-sst-license = "${nonarch_base_libdir}/firmware/LICENCE.adsp_sst" -LICENSE_${PN}-adsp-sst = "Firmware-adsp_sst" -LICENSE_${PN}-adsp-sst-license = "Firmware-adsp_sst" -FILES_${PN}-adsp-sst = "${nonarch_base_libdir}/firmware/intel/dsp_fw*" -RDEPENDS_${PN}-adsp-sst = "${PN}-adsp-sst-license" - -# For QAT -LICENSE_${PN}-qat = "Firmware-qat" -LICENSE_${PN}-qat-license = "Firmware-qat" -FILES_${PN}-qat-license = "${nonarch_base_libdir}/firmware/LICENCE.qat_firmware" -FILES_${PN}-qat = "${nonarch_base_libdir}/firmware/qat*.bin" -RDEPENDS_${PN}-qat = "${PN}-qat-license" - -# For QCOM VPU/GPU and SDM845 -LICENSE_${PN}-qcom-license = "Firmware-qcom" -FILES_${PN}-qcom-license = 
"${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt" -FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*" -FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*" -FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw" -FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*" -FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*" -FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*" -FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*" -FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn" -RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-venus-4.2 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license" -RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license" - -FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio" - -# For other firmwares -# Maybe split out to separate packages when needed. -LICENSE_${PN} = "\ - Firmware-Abilis \ - & Firmware-agere \ - & Firmware-amdgpu \ - & Firmware-amd-ucode \ - & Firmware-atmel \ - & Firmware-ca0132 \ - & Firmware-cavium \ - & Firmware-chelsio_firmware \ - & Firmware-cw1200 \ - & Firmware-dib0700 \ - & Firmware-e100 \ - & Firmware-ene_firmware \ - & Firmware-fw_sst_0f28 \ - & Firmware-go7007 \ - & Firmware-hfi1_firmware \ - & Firmware-i2400m \ - & Firmware-ibt_firmware \ - & Firmware-it913x \ - & Firmware-IntcSST2 \ - & Firmware-kaweth \ - & Firmware-moxa \ - & Firmware-myri10ge_firmware \ - & Firmware-nvidia \ - & Firmware-OLPC \ - & Firmware-ath9k-htc \ - & Firmware-phanfw \ - & Firmware-qat \ - & Firmware-qcom \ - & Firmware-qla1280 \ - & Firmware-qla2xxx \ - & Firmware-r8a779x_usb3 \ - & Firmware-radeon \ - & Firmware-ralink_a_mediatek_company_firmware \ - & Firmware-ralink-firmware \ - & Firmware-imx-sdma_firmware \ - & Firmware-siano \ - & Firmware-tda7706-firmware \ - & Firmware-ti-connectivity \ - & Firmware-ti-keystone \ - & Firmware-ueagle-atm4-firmware \ - & Firmware-wl1251 \ - & Firmware-xc4000 \ - & Firmware-xc5000 \ - & Firmware-xc5000c \ - & WHENCE \ -" - -FILES_${PN}-license += "${nonarch_base_libdir}/firmware/LICEN*" -FILES_${PN} += "${nonarch_base_libdir}/firmware/*" -RDEPENDS_${PN} += "${PN}-license" -RDEPENDS_${PN} += "${PN}-whence-license" - -# Make linux-firmware depend on all of the split-out packages. -# Make linux-firmware-iwlwifi depend on all of the split-out iwlwifi packages. -# Make linux-firmware-ibt depend on all of the split-out ibt packages. 
-python populate_packages_prepend () { - firmware_pkgs = oe.utils.packages_filter_out_system(d) - d.appendVar('RRECOMMENDS_linux-firmware', ' ' + ' '.join(firmware_pkgs)) - - iwlwifi_pkgs = filter(lambda x: x.find('-iwlwifi-') != -1, firmware_pkgs) - d.appendVar('RRECOMMENDS_linux-firmware-iwlwifi', ' ' + ' '.join(iwlwifi_pkgs)) - - ibt_pkgs = filter(lambda x: x.find('-ibt-') != -1, firmware_pkgs) - d.appendVar('RRECOMMENDS_linux-firmware-ibt', ' ' + ' '.join(ibt_pkgs)) -} - -# Firmware files are generally not ran on the CPU, so they can be -# allarch despite being architecture specific -INSANE_SKIP = "arch" diff --git a/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200817.bb b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200817.bb new file mode 100644 index 000000000..ffeb8e692 --- /dev/null +++ b/poky/meta/recipes-kernel/linux-firmware/linux-firmware_20200817.bb @@ -0,0 +1,955 @@ +SUMMARY = "Firmware files for use with Linux kernel" +SECTION = "kernel" + +LICENSE = "\ + Firmware-Abilis \ + & Firmware-adsp_sst \ + & Firmware-agere \ + & Firmware-amdgpu \ + & Firmware-amd-ucode \ + & Firmware-amlogic_vdec \ + & Firmware-atheros_firmware \ + & Firmware-atmel \ + & Firmware-broadcom_bcm43xx \ + & Firmware-ca0132 \ + & Firmware-cavium \ + & Firmware-chelsio_firmware \ + & Firmware-cw1200 \ + & Firmware-cypress \ + & Firmware-dib0700 \ + & Firmware-e100 \ + & Firmware-ene_firmware \ + & Firmware-fw_sst_0f28 \ + & Firmware-go7007 \ + & Firmware-GPLv2 \ + & Firmware-hfi1_firmware \ + & Firmware-i2400m \ + & Firmware-i915 \ + & Firmware-ibt_firmware \ + & Firmware-ice \ + & Firmware-it913x \ + & Firmware-iwlwifi_firmware \ + & Firmware-IntcSST2 \ + & Firmware-kaweth \ + & Firmware-Marvell \ + & Firmware-moxa \ + & Firmware-myri10ge_firmware \ + & Firmware-netronome \ + & Firmware-nvidia \ + & Firmware-OLPC \ + & Firmware-ath9k-htc \ + & Firmware-phanfw \ + & Firmware-qat \ + & Firmware-qcom \ + & Firmware-qla1280 \ + & Firmware-qla2xxx \ + & Firmware-qualcommAthos_ar3k \ + & Firmware-qualcommAthos_ath10k \ + & Firmware-r8a779x_usb3 \ + & Firmware-radeon \ + & Firmware-ralink_a_mediatek_company_firmware \ + & Firmware-ralink-firmware \ + & Firmware-rtlwifi_firmware \ + & Firmware-imx-sdma_firmware \ + & Firmware-siano \ + & Firmware-tda7706-firmware \ + & Firmware-ti-connectivity \ + & Firmware-ti-keystone \ + & Firmware-ueagle-atm4-firmware \ + & Firmware-via_vt6656 \ + & Firmware-wl1251 \ + & Firmware-xc4000 \ + & Firmware-xc5000 \ + & Firmware-xc5000c \ + & WHENCE \ +" + +LIC_FILES_CHKSUM = "file://LICENCE.Abilis;md5=b5ee3f410780e56711ad48eadc22b8bc \ + file://LICENCE.adsp_sst;md5=615c45b91a5a4a9fe046d6ab9a2df728 \ + file://LICENCE.agere;md5=af0133de6b4a9b2522defd5f188afd31 \ + file://LICENSE.amdgpu;md5=d357524f5099e2a3db3c1838921c593f \ + file://LICENSE.amd-ucode;md5=3c5399dc9148d7f0e1f41e34b69cf14f \ + file://LICENSE.amlogic_vdec;md5=dc44f59bf64a81643e500ad3f39a468a \ + file://LICENCE.atheros_firmware;md5=30a14c7823beedac9fa39c64fdd01a13 \ + file://LICENSE.atmel;md5=aa74ac0c60595dee4d4e239107ea77a3 \ + file://LICENCE.broadcom_bcm43xx;md5=3160c14df7228891b868060e1951dfbc \ + file://LICENCE.ca0132;md5=209b33e66ee5be0461f13d31da392198 \ + file://LICENCE.cadence;md5=009f46816f6956cfb75ede13d3e1cee0 \ + file://LICENCE.cavium;md5=c37aaffb1ebe5939b2580d073a95daea \ + file://LICENCE.chelsio_firmware;md5=819aa8c3fa453f1b258ed8d168a9d903 \ + file://LICENCE.cw1200;md5=f0f770864e7a8444a5c5aa9d12a3a7ed \ + file://LICENCE.cypress;md5=48cd9436c763bf873961f9ed7b5c147b \ + 
file://LICENSE.dib0700;md5=f7411825c8a555a1a3e5eab9ca773431 \ + file://LICENCE.e100;md5=ec0f84136766df159a3ae6d02acdf5a8 \ + file://LICENCE.ene_firmware;md5=ed67f0f62f8f798130c296720b7d3921 \ + file://LICENCE.fw_sst_0f28;md5=6353931c988ad52818ae733ac61cd293 \ + file://LICENCE.go7007;md5=c0bb9f6aaaba55b0529ee9b30aa66beb \ + file://GPL-2;md5=b234ee4d69f5fce4486a80fdaf4a4263 \ + file://LICENSE.hfi1_firmware;md5=5e7b6e586ce7339d12689e49931ad444 \ + file://LICENCE.i2400m;md5=14b901969e23c41881327c0d9e4b7d36 \ + file://LICENSE.i915;md5=2b0b2e0d20984affd4490ba2cba02570 \ + file://LICENCE.ibt_firmware;md5=fdbee1ddfe0fb7ab0b2fcd6b454a366b \ + file://LICENSE.ice;md5=742ab4850f2670792940e6d15c974b2f \ + file://LICENCE.IntcSST2;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ + file://LICENCE.it913x;md5=1fbf727bfb6a949810c4dbfa7e6ce4f8 \ + file://LICENCE.iwlwifi_firmware;md5=3fd842911ea93c29cd32679aa23e1c88 \ + file://LICENCE.kaweth;md5=b1d876e562f4b3b8d391ad8395dfe03f \ + file://LICENCE.Marvell;md5=28b6ed8bd04ba105af6e4dcd6e997772 \ + file://LICENCE.mediatek;md5=7c1976b63217d76ce47d0a11d8a79cf2 \ + file://LICENCE.moxa;md5=1086614767d8ccf744a923289d3d4261 \ + file://LICENCE.myri10ge_firmware;md5=42e32fb89f6b959ca222e25ac8df8fed \ + file://LICENCE.Netronome;md5=4add08f2577086d44447996503cddf5f \ + file://LICENCE.nvidia;md5=4428a922ed3ba2ceec95f076a488ce07 \ + file://LICENCE.NXP;md5=58bb8ba632cd729b9ba6183bc6aed36f \ + file://LICENCE.OLPC;md5=5b917f9d8c061991be4f6f5f108719cd \ + file://LICENCE.open-ath9k-htc-firmware;md5=1b33c9f4d17bc4d457bdb23727046837 \ + file://LICENCE.phanfw;md5=954dcec0e051f9409812b561ea743bfa \ + file://LICENCE.qat_firmware;md5=9e7d8bea77612d7cc7d9e9b54b623062 \ + file://LICENSE.qcom;md5=164e3362a538eb11d3ac51e8e134294b \ + file://LICENCE.qla1280;md5=d6895732e622d950609093223a2c4f5d \ + file://LICENCE.qla2xxx;md5=505855e921b75f1be4a437ad9b79dff0 \ + file://LICENSE.QualcommAtheros_ar3k;md5=b5fe244fb2b532311de1472a3bc06da5 \ + file://LICENSE.QualcommAtheros_ath10k;md5=cb42b686ee5f5cb890275e4321db60a8 \ + file://LICENCE.r8a779x_usb3;md5=4c1671656153025d7076105a5da7e498 \ + file://LICENSE.radeon;md5=68ec28bacb3613200bca44f404c69b16 \ + file://LICENCE.ralink_a_mediatek_company_firmware;md5=728f1a85fd53fd67fa8d7afb080bc435 \ + file://LICENCE.ralink-firmware.txt;md5=ab2c269277c45476fb449673911a2dfd \ + file://LICENCE.rtlwifi_firmware.txt;md5=00d06cfd3eddd5a2698948ead2ad54a5 \ + file://LICENSE.sdma_firmware;md5=51e8c19ecc2270f4b8ea30341ad63ce9 \ + file://LICENCE.siano;md5=4556c1bf830067f12ca151ad953ec2a5 \ + file://LICENCE.tda7706-firmware.txt;md5=835997cf5e3c131d0dddd695c7d9103e \ + file://LICENCE.ti-connectivity;md5=c5e02be633f1499c109d1652514d85ec \ + file://LICENCE.ti-keystone;md5=3a86335d32864b0bef996bee26cc0f2c \ + file://LICENCE.ueagle-atm4-firmware;md5=4ed7ea6b507ccc583b9d594417714118 \ + file://LICENCE.via_vt6656;md5=e4159694cba42d4377a912e78a6e850f \ + file://LICENCE.wl1251;md5=ad3f81922bb9e197014bb187289d3b5b \ + file://LICENCE.xc4000;md5=0ff51d2dc49fce04814c9155081092f0 \ + file://LICENCE.xc5000;md5=1e170c13175323c32c7f4d0998d53f66 \ + file://LICENCE.xc5000c;md5=12b02efa3049db65d524aeb418dd87ca \ + file://WHENCE;md5=4d229f79f8770b5b2c4aac655b9fabef \ + " + +# These are not common licenses, set NO_GENERIC_LICENSE for them +# so that the license files will be copied from fetched source +NO_GENERIC_LICENSE[Firmware-Abilis] = "LICENCE.Abilis" +NO_GENERIC_LICENSE[Firmware-adsp_sst] = "LICENCE.adsp_sst" +NO_GENERIC_LICENSE[Firmware-agere] = "LICENCE.agere" +NO_GENERIC_LICENSE[Firmware-amdgpu] 
= "LICENSE.amdgpu" +NO_GENERIC_LICENSE[Firmware-amd-ucode] = "LICENSE.amd-ucode" +NO_GENERIC_LICENSE[Firmware-amlogic_vdec] = "LICENSE.amlogic_vdec" +NO_GENERIC_LICENSE[Firmware-atheros_firmware] = "LICENCE.atheros_firmware" +NO_GENERIC_LICENSE[Firmware-atmel] = "LICENSE.atmel" +NO_GENERIC_LICENSE[Firmware-broadcom_bcm43xx] = "LICENCE.broadcom_bcm43xx" +NO_GENERIC_LICENSE[Firmware-ca0132] = "LICENCE.ca0132" +NO_GENERIC_LICENSE[Firmware-cadence] = "LICENCE.cadence" +NO_GENERIC_LICENSE[Firmware-cavium] = "LICENCE.cavium" +NO_GENERIC_LICENSE[Firmware-chelsio_firmware] = "LICENCE.chelsio_firmware" +NO_GENERIC_LICENSE[Firmware-cw1200] = "LICENCE.cw1200" +NO_GENERIC_LICENSE[Firmware-cypress] = "LICENCE.cypress" +NO_GENERIC_LICENSE[Firmware-dib0700] = "LICENSE.dib0700" +NO_GENERIC_LICENSE[Firmware-e100] = "LICENCE.e100" +NO_GENERIC_LICENSE[Firmware-ene_firmware] = "LICENCE.ene_firmware" +NO_GENERIC_LICENSE[Firmware-fw_sst_0f28] = "LICENCE.fw_sst_0f28" +NO_GENERIC_LICENSE[Firmware-go7007] = "LICENCE.go7007" +NO_GENERIC_LICENSE[Firmware-GPLv2] = "GPL-2" +NO_GENERIC_LICENSE[Firmware-hfi1_firmware] = "LICENSE.hfi1_firmware" +NO_GENERIC_LICENSE[Firmware-i2400m] = "LICENCE.i2400m" +NO_GENERIC_LICENSE[Firmware-i915] = "LICENSE.i915" +NO_GENERIC_LICENSE[Firmware-ibt_firmware] = "LICENCE.ibt_firmware" +NO_GENERIC_LICENSE[Firmware-ice] = "LICENSE.ice" +NO_GENERIC_LICENSE[Firmware-IntcSST2] = "LICENCE.IntcSST2" +NO_GENERIC_LICENSE[Firmware-it913x] = "LICENCE.it913x" +NO_GENERIC_LICENSE[Firmware-iwlwifi_firmware] = "LICENCE.iwlwifi_firmware" +NO_GENERIC_LICENSE[Firmware-kaweth] = "LICENCE.kaweth" +NO_GENERIC_LICENSE[Firmware-Marvell] = "LICENCE.Marvell" +NO_GENERIC_LICENSE[Firmware-mediatek] = "LICENCE.mediatek" +NO_GENERIC_LICENSE[Firmware-moxa] = "LICENCE.moxa" +NO_GENERIC_LICENSE[Firmware-myri10ge_firmware] = "LICENCE.myri10ge_firmware" +NO_GENERIC_LICENSE[Firmware-netronome] = "LICENCE.Netronome" +NO_GENERIC_LICENSE[Firmware-nvidia] = "LICENCE.nvidia" +NO_GENERIC_LICENSE[Firmware-OLPC] = "LICENCE.OLPC" +NO_GENERIC_LICENSE[Firmware-ath9k-htc] = "LICENCE.open-ath9k-htc-firmware" +NO_GENERIC_LICENSE[Firmware-phanfw] = "LICENCE.phanfw" +NO_GENERIC_LICENSE[Firmware-qat] = "LICENCE.qat_firmware" +NO_GENERIC_LICENSE[Firmware-qcom] = "LICENSE.qcom" +NO_GENERIC_LICENSE[Firmware-qla1280] = "LICENCE.qla1280" +NO_GENERIC_LICENSE[Firmware-qla2xxx] = "LICENCE.qla2xxx" +NO_GENERIC_LICENSE[Firmware-qualcommAthos_ar3k] = "LICENSE.QualcommAtheros_ar3k" +NO_GENERIC_LICENSE[Firmware-qualcommAthos_ath10k] = "LICENSE.QualcommAtheros_ath10k" +NO_GENERIC_LICENSE[Firmware-r8a779x_usb3] = "LICENCE.r8a779x_usb3" +NO_GENERIC_LICENSE[Firmware-radeon] = "LICENSE.radeon" +NO_GENERIC_LICENSE[Firmware-ralink_a_mediatek_company_firmware] = "LICENCE.ralink_a_mediatek_company_firmware" +NO_GENERIC_LICENSE[Firmware-ralink-firmware] = "LICENCE.ralink-firmware.txt" +NO_GENERIC_LICENSE[Firmware-rtlwifi_firmware] = "LICENCE.rtlwifi_firmware.txt" +NO_GENERIC_LICENSE[Firmware-siano] = "LICENCE.siano" +NO_GENERIC_LICENSE[Firmware-imx-sdma_firmware] = "LICENSE.sdma_firmware" +NO_GENERIC_LICENSE[Firmware-tda7706-firmware] = "LICENCE.tda7706-firmware.txt" +NO_GENERIC_LICENSE[Firmware-ti-connectivity] = "LICENCE.ti-connectivity" +NO_GENERIC_LICENSE[Firmware-ti-keystone] = "LICENCE.ti-keystone" +NO_GENERIC_LICENSE[Firmware-ueagle-atm4-firmware] = "LICENCE.ueagle-atm4-firmware" +NO_GENERIC_LICENSE[Firmware-via_vt6656] = "LICENCE.via_vt6656" +NO_GENERIC_LICENSE[Firmware-wl1251] = "LICENCE.wl1251" +NO_GENERIC_LICENSE[Firmware-xc4000] = "LICENCE.xc4000" 
+NO_GENERIC_LICENSE[Firmware-xc5000] = "LICENCE.xc5000" +NO_GENERIC_LICENSE[Firmware-xc5000c] = "LICENCE.xc5000c" +NO_GENERIC_LICENSE[WHENCE] = "WHENCE" + +PE = "1" + +SRC_URI = "${KERNELORG_MIRROR}/linux/kernel/firmware/${BPN}-${PV}.tar.xz" + +SRC_URI[sha256sum] = "76d05d5f1eff268d3b80675245fa596f557bd55ee2e16ddd54d18ffeae943887" + +inherit allarch + +CLEANBROKEN = "1" + +do_compile() { + : +} + +do_install() { + oe_runmake 'DESTDIR=${D}' 'FIRMWAREDIR=${nonarch_base_libdir}/firmware' install + cp GPL-2 LICEN[CS]E.* WHENCE ${D}${nonarch_base_libdir}/firmware/ +} + + +PACKAGES =+ "${PN}-ralink-license ${PN}-ralink \ + ${PN}-mt7601u-license ${PN}-mt7601u \ + ${PN}-radeon-license ${PN}-radeon \ + ${PN}-marvell-license ${PN}-pcie8897 ${PN}-pcie8997 \ + ${PN}-sd8686 ${PN}-sd8688 ${PN}-sd8787 ${PN}-sd8797 ${PN}-sd8801 ${PN}-sd8887 ${PN}-sd8897 \ + ${PN}-usb8997 \ + ${PN}-ti-connectivity-license ${PN}-wlcommon ${PN}-wl12xx ${PN}-wl18xx \ + ${PN}-vt6656-license ${PN}-vt6656 \ + ${PN}-rtl-license ${PN}-rtl8188 ${PN}-rtl8192cu ${PN}-rtl8192ce ${PN}-rtl8192su ${PN}-rtl8723 ${PN}-rtl8821 \ + ${PN}-rtl8168 \ + ${PN}-cypress-license \ + ${PN}-broadcom-license \ + ${PN}-bcm-0bb4-0306 \ + ${PN}-bcm43143 \ + ${PN}-bcm43236b \ + ${PN}-bcm43241b0 \ + ${PN}-bcm43241b4 \ + ${PN}-bcm43241b5 \ + ${PN}-bcm43242a \ + ${PN}-bcm4329 \ + ${PN}-bcm4329-fullmac \ + ${PN}-bcm4330 \ + ${PN}-bcm4334 \ + ${PN}-bcm43340 \ + ${PN}-bcm4335 \ + ${PN}-bcm43362 \ + ${PN}-bcm4339 \ + ${PN}-bcm43430 \ + ${PN}-bcm43430a0 \ + ${PN}-bcm43455 \ + ${PN}-bcm4350 \ + ${PN}-bcm4350c2 \ + ${PN}-bcm4354 \ + ${PN}-bcm4356 \ + ${PN}-bcm4356-pcie \ + ${PN}-bcm43569 \ + ${PN}-bcm43570 \ + ${PN}-bcm4358 \ + ${PN}-bcm43602 \ + ${PN}-bcm4366b \ + ${PN}-bcm4366c \ + ${PN}-bcm4371 \ + ${PN}-bcm4373 \ + ${PN}-bcm43xx \ + ${PN}-bcm43xx-hdr \ + ${PN}-atheros-license ${PN}-ar9170 ${PN}-ath6k ${PN}-ath9k \ + ${PN}-gplv2-license ${PN}-carl9170 \ + ${PN}-ar3k-license ${PN}-ar3k ${PN}-ath10k-license ${PN}-ath10k ${PN}-qca \ + \ + ${PN}-imx-sdma-license ${PN}-imx-sdma-imx6q ${PN}-imx-sdma-imx7d \ + \ + ${PN}-iwlwifi-license ${PN}-iwlwifi \ + ${PN}-iwlwifi-135-6 \ + ${PN}-iwlwifi-3160-7 ${PN}-iwlwifi-3160-8 ${PN}-iwlwifi-3160-9 \ + ${PN}-iwlwifi-3160-10 ${PN}-iwlwifi-3160-12 ${PN}-iwlwifi-3160-13 \ + ${PN}-iwlwifi-3160-16 ${PN}-iwlwifi-3160-17 \ + ${PN}-iwlwifi-6000-4 ${PN}-iwlwifi-6000g2a-5 ${PN}-iwlwifi-6000g2a-6 \ + ${PN}-iwlwifi-6000g2b-5 ${PN}-iwlwifi-6000g2b-6 \ + ${PN}-iwlwifi-6050-4 ${PN}-iwlwifi-6050-5 \ + ${PN}-iwlwifi-7260 \ + ${PN}-iwlwifi-7265 \ + ${PN}-iwlwifi-7265d ${PN}-iwlwifi-8000c ${PN}-iwlwifi-8265 \ + ${PN}-iwlwifi-9000 \ + ${PN}-iwlwifi-misc \ + ${PN}-ibt-license ${PN}-ibt \ + ${PN}-ibt-11-5 ${PN}-ibt-12-16 ${PN}-ibt-hw-37-7 ${PN}-ibt-hw-37-8 \ + ${PN}-ibt-17 \ + ${PN}-ibt-20 \ + ${PN}-ibt-misc \ + ${PN}-i915-license ${PN}-i915 \ + ${PN}-ice-license ${PN}-ice \ + ${PN}-adsp-sst-license ${PN}-adsp-sst \ + ${PN}-bnx2-mips \ + ${PN}-liquidio \ + ${PN}-netronome-license ${PN}-netronome \ + ${PN}-qat ${PN}-qat-license \ + ${PN}-qcom-license \ + ${PN}-qcom-venus-1.8 ${PN}-qcom-venus-4.2 ${PN}-qcom-venus-5.2 ${PN}-qcom-venus-5.4 \ + ${PN}-qcom-adreno-a3xx ${PN}-qcom-adreno-a530 ${PN}-qcom-adreno-a630 \ + ${PN}-qcom-sdm845-audio ${PN}-qcom-sdm845-compute ${PN}-qcom-sdm845-modem \ + ${PN}-whence-license \ + ${PN}-license \ + " + +# For atheros +LICENSE_${PN}-ar9170 = "Firmware-atheros_firmware" +LICENSE_${PN}-ath6k = "Firmware-atheros_firmware" +LICENSE_${PN}-ath9k = "Firmware-atheros_firmware" +LICENSE_${PN}-atheros-license = 
"Firmware-atheros_firmware" + +FILES_${PN}-atheros-license = "${nonarch_base_libdir}/firmware/LICENCE.atheros_firmware" +FILES_${PN}-ar9170 = " \ + ${nonarch_base_libdir}/firmware/ar9170*.fw \ +" +FILES_${PN}-ath6k = " \ + ${nonarch_base_libdir}/firmware/ath6k \ +" +FILES_${PN}-ath9k = " \ + ${nonarch_base_libdir}/firmware/ar9271.fw \ + ${nonarch_base_libdir}/firmware/ar7010*.fw \ + ${nonarch_base_libdir}/firmware/htc_9271.fw \ + ${nonarch_base_libdir}/firmware/htc_7010.fw \ + ${nonarch_base_libdir}/firmware/ath9k_htc/htc_7010-1.4.0.fw \ + ${nonarch_base_libdir}/firmware/ath9k_htc/htc_9271-1.4.0.fw \ +" + +RDEPENDS_${PN}-ar9170 += "${PN}-atheros-license" +RDEPENDS_${PN}-ath6k += "${PN}-atheros-license" +RDEPENDS_${PN}-ath9k += "${PN}-atheros-license" + +# For carl9170 +LICENSE_${PN}-carl9170 = "Firmware-GPLv2" +LICENSE_${PN}-gplv2-license = "Firmware-GPLv2" + +FILES_${PN}-gplv2-license = "${nonarch_base_libdir}/firmware/GPL-2" +FILES_${PN}-carl9170 = " \ + ${nonarch_base_libdir}/firmware/carl9170*.fw \ +" + +RDEPENDS_${PN}-carl9170 += "${PN}-gplv2-license" + +# For QualCommAthos +LICENSE_${PN}-ar3k = "Firmware-qualcommAthos_ar3k" +LICENSE_${PN}-ar3k-license = "Firmware-qualcommAthos_ar3k" +LICENSE_${PN}-ath10k = "Firmware-qualcommAthos_ath10k" +LICENSE_${PN}-ath10k-license = "Firmware-qualcommAthos_ath10k" +LICENSE_${PN}-qca = "Firmware-qualcommAthos_ath10k" + +FILES_${PN}-ar3k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ar3k" +FILES_${PN}-ar3k = " \ + ${nonarch_base_libdir}/firmware/ar3k \ +" + +FILES_${PN}-ath10k-license = "${nonarch_base_libdir}/firmware/LICENSE.QualcommAtheros_ath10k" +FILES_${PN}-ath10k = " \ + ${nonarch_base_libdir}/firmware/ath10k \ +" + +FILES_${PN}-qca = " \ + ${nonarch_base_libdir}/firmware/qca \ +" + +RDEPENDS_${PN}-ar3k += "${PN}-ar3k-license" +RDEPENDS_${PN}-ath10k += "${PN}-ath10k-license" +RDEPENDS_${PN}-qca += "${PN}-ath10k-license" + +# For ralink +LICENSE_${PN}-ralink = "Firmware-ralink-firmware" +LICENSE_${PN}-ralink-license = "Firmware-ralink-firmware" + +FILES_${PN}-ralink-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink-firmware.txt" +FILES_${PN}-ralink = " \ + ${nonarch_base_libdir}/firmware/rt*.bin \ +" + +RDEPENDS_${PN}-ralink += "${PN}-ralink-license" + +# For mediatek MT7601U +LICENSE_${PN}-mt7601u = "Firmware-ralink_a_mediatek_company_firmware" +LICENSE_${PN}-mt7601u-license = "Firmware-ralink_a_mediatek_company_firmware" + +FILES_${PN}-mt7601u-license = "${nonarch_base_libdir}/firmware/LICENCE.ralink_a_mediatek_company_firmware" +FILES_${PN}-mt7601u = " \ + ${nonarch_base_libdir}/firmware/mt7601u.bin \ +" + +RDEPENDS_${PN}-mt7601u += "${PN}-mt7601u-license" + +# For radeon +LICENSE_${PN}-radeon = "Firmware-radeon" +LICENSE_${PN}-radeon-license = "Firmware-radeon" + +FILES_${PN}-radeon-license = "${nonarch_base_libdir}/firmware/LICENSE.radeon" +FILES_${PN}-radeon = " \ + ${nonarch_base_libdir}/firmware/radeon \ +" + +RDEPENDS_${PN}-radeon += "${PN}-radeon-license" + +# For marvell +LICENSE_${PN}-pcie8897 = "Firmware-Marvell" +LICENSE_${PN}-pcie8997 = "Firmware-Marvell" +LICENSE_${PN}-sd8686 = "Firmware-Marvell" +LICENSE_${PN}-sd8688 = "Firmware-Marvell" +LICENSE_${PN}-sd8787 = "Firmware-Marvell" +LICENSE_${PN}-sd8797 = "Firmware-Marvell" +LICENSE_${PN}-sd8801 = "Firmware-Marvell" +LICENSE_${PN}-sd8887 = "Firmware-Marvell" +LICENSE_${PN}-sd8897 = "Firmware-Marvell" +LICENSE_${PN}-usb8997 = "Firmware-Marvell" +LICENSE_${PN}-marvell-license = "Firmware-Marvell" + +FILES_${PN}-marvell-license = 
"${nonarch_base_libdir}/firmware/LICENCE.Marvell" +FILES_${PN}-pcie8897 = " \ + ${nonarch_base_libdir}/firmware/mrvl/pcie8897_uapsta.bin \ +" +FILES_${PN}-pcie8997 = " \ + ${nonarch_base_libdir}/firmware/mrvl/pcie8997_wlan_v4.bin \ + ${nonarch_base_libdir}/firmware/mrvl/pcieuart8997_combo_v4.bin \ + ${nonarch_base_libdir}/firmware/mrvl/pcieusb8997_combo_v4.bin \ +" +FILES_${PN}-sd8686 = " \ + ${nonarch_base_libdir}/firmware/libertas/sd8686_v9* \ + ${nonarch_base_libdir}/firmware/sd8686* \ +" +FILES_${PN}-sd8688 = " \ + ${nonarch_base_libdir}/firmware/libertas/sd8688* \ + ${nonarch_base_libdir}/firmware/mrvl/sd8688* \ +" +FILES_${PN}-sd8787 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8787_uapsta.bin \ +" +FILES_${PN}-sd8797 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8797_uapsta.bin \ +" +FILES_${PN}-sd8801 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8801_uapsta.bin \ +" +FILES_${PN}-sd8887 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8887_uapsta.bin \ +" +FILES_${PN}-sd8897 = " \ + ${nonarch_base_libdir}/firmware/mrvl/sd8897_uapsta.bin \ +" +FILES_${PN}-usb8997 = " \ + ${nonarch_base_libdir}/firmware/mrvl/usbusb8997_combo_v4.bin \ +" + +RDEPENDS_${PN}-sd8686 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8688 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8787 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8797 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8801 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8887 += "${PN}-marvell-license" +RDEPENDS_${PN}-sd8897 += "${PN}-marvell-license" +RDEPENDS_${PN}-usb8997 += "${PN}-marvell-license" + +# For netronome +LICENSE_${PN}-netronome = "Firmware-netronome" + +FILES_${PN}-netronome-license = " \ + ${nonarch_base_libdir}/firmware/LICENCE.Netronome \ +" +FILES_${PN}-netronome = " \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0081*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0096*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0097*.nffw \ + ${nonarch_base_libdir}/firmware/netronome/nic_AMDA0099*.nffw \ +" + +RDEPENDS_${PN}-netronome += "${PN}-netronome-license" + +# For rtl +LICENSE_${PN}-rtl8188 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192cu = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192ce = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8192su = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8723 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8821 = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl-license = "Firmware-rtlwifi_firmware" +LICENSE_${PN}-rtl8168 = "WHENCE" + +FILES_${PN}-rtl-license = " \ + ${nonarch_base_libdir}/firmware/LICENCE.rtlwifi_firmware.txt \ +" +FILES_${PN}-rtl8188 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8188*.bin \ +" +FILES_${PN}-rtl8192cu = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cufw*.bin \ +" +FILES_${PN}-rtl8192ce = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8192cfw*.bin \ +" +FILES_${PN}-rtl8192su = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8712u.bin \ +" +FILES_${PN}-rtl8723 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8723*.bin \ +" +FILES_${PN}-rtl8821 = " \ + ${nonarch_base_libdir}/firmware/rtlwifi/rtl8821*.bin \ +" +FILES_${PN}-rtl8168 = " \ + ${nonarch_base_libdir}/firmware/rtl_nic/rtl8168*.fw \ +" + +RDEPENDS_${PN}-rtl8188 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192ce += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192cu += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8192su = "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8723 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8821 += "${PN}-rtl-license" +RDEPENDS_${PN}-rtl8168 += 
"${PN}-whence-license" + +# For ti-connectivity +LICENSE_${PN}-wlcommon = "Firmware-ti-connectivity" +LICENSE_${PN}-wl12xx = "Firmware-ti-connectivity" +LICENSE_${PN}-wl18xx = "Firmware-ti-connectivity" +LICENSE_${PN}-ti-connectivity-license = "Firmware-ti-connectivity" + +FILES_${PN}-ti-connectivity-license = "${nonarch_base_libdir}/firmware/LICENCE.ti-connectivity" +# wl18xx optionally needs wl1271-nvs.bin (which itself is a symlink to +# wl127x-nvs.bin) - see linux/drivers/net/wireless/ti/wlcore/sdio.c +# and drivers/net/wireless/ti/wlcore/spi.c. +# While they're optional and actually only used to override the MAC +# address on wl18xx, driver loading will delay (by udev timout - 60s) +# if not there. So let's make it available always. Because it's a +# symlink, both need to go to wlcommon. +FILES_${PN}-wlcommon = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/TI* \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl127x-nvs.bin \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl1271-nvs.bin \ +" +FILES_${PN}-wl12xx = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl12* \ +" +FILES_${PN}-wl18xx = " \ + ${nonarch_base_libdir}/firmware/ti-connectivity/wl18* \ +" + +RDEPENDS_${PN}-wl12xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" +RDEPENDS_${PN}-wl18xx = "${PN}-ti-connectivity-license ${PN}-wlcommon" + +# For vt6656 +LICENSE_${PN}-vt6656 = "Firmware-via_vt6656" +LICENSE_${PN}-vt6656-license = "Firmware-via_vt6656" + +FILES_${PN}-vt6656-license = "${nonarch_base_libdir}/firmware/LICENCE.via_vt6656" +FILES_${PN}-vt6656 = " \ + ${nonarch_base_libdir}/firmware/vntwusb.fw \ +" + +RDEPENDS_${PN}-vt6656 = "${PN}-vt6656-license" + +# For broadcom + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e " \${PN}-$pkg \\"; done | sort -u + +LICENSE_${PN}-broadcom-license = "Firmware-broadcom_bcm43xx" +FILES_${PN}-broadcom-license = "${nonarch_base_libdir}/firmware/LICENCE.broadcom_bcm43xx" + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo "$i - $pkg"; echo -e "FILES_\${PN}-$pkg = \"\${nonarch_base_libdir}/firmware/brcm/$i\""; done | grep ^FILES + +FILES_${PN}-bcm43xx = "${nonarch_base_libdir}/firmware/brcm/bcm43xx-0.fw" +FILES_${PN}-bcm43xx-hdr = "${nonarch_base_libdir}/firmware/brcm/bcm43xx_hdr-0.fw" +FILES_${PN}-bcm4329-fullmac = "${nonarch_base_libdir}/firmware/brcm/bcm4329-fullmac-4.bin" +FILES_${PN}-bcm43236b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43236b.bin" +FILES_${PN}-bcm4329 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4329-sdio.bin" +FILES_${PN}-bcm4330 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4330-sdio.*" +FILES_${PN}-bcm4334 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4334-sdio.bin" +FILES_${PN}-bcm4335 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4335-sdio.bin" +FILES_${PN}-bcm4339 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4339-sdio.bin" +FILES_${PN}-bcm43241b0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b0-sdio.bin" +FILES_${PN}-bcm43241b4 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b4-sdio.bin" +FILES_${PN}-bcm43241b5 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43241b5-sdio.bin" +FILES_${PN}-bcm43242a = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43242a.bin" +FILES_${PN}-bcm43143 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43143.bin \ + 
${nonarch_base_libdir}/firmware/brcm/brcmfmac43143-sdio.bin \ +" +FILES_${PN}-bcm43430a0 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430a0-sdio.*" +FILES_${PN}-bcm43455 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43455-sdio.*" +FILES_${PN}-bcm4350c2 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350c2-pcie.bin" +FILES_${PN}-bcm4350 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4350-pcie.bin" +FILES_${PN}-bcm4356 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-sdio.bin" +FILES_${PN}-bcm43569 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43569.bin" +FILES_${PN}-bcm43570 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43570-pcie.bin" +FILES_${PN}-bcm4358 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4358-pcie.bin" +FILES_${PN}-bcm43602 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.bin \ + ${nonarch_base_libdir}/firmware/brcm/brcmfmac43602-pcie.ap.bin \ +" +FILES_${PN}-bcm4366b = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366b-pcie.bin" +FILES_${PN}-bcm4366c = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4366c-pcie.bin" +FILES_${PN}-bcm4371 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4371-pcie.bin" + +# for i in `grep brcm WHENCE | grep ^File | sed 's/File: brcm.//g'`; do pkg=`echo $i | sed 's/-[sp40].*//g; s/\.bin//g; s/brcmfmac/bcm/g; s/_hdr/-hdr/g; s/BCM/bcm-0bb4-0306/g'`; echo -e "LICENSE_\${PN}-$pkg = \"Firmware-broadcom_bcm43xx\"\nRDEPENDS_\${PN}-$pkg += \"\${PN}-broadcom-license\""; done +# Currently 1st one and last 6 have cypress LICENSE + +LICENSE_${PN}-bcm43xx = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43xx += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43xx-hdr = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43xx-hdr += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4329-fullmac = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4329-fullmac += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43236b = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43236b += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4329 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4329 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4330 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4330 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4334 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4334 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4335 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4335 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4339 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4339 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b0 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b0 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b4 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b4 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43241b5 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43241b5 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43242a = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43242a += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43143 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43143 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43430a0 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43430a0 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43455 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43455 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4350c2 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4350c2 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4350 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4350 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4356 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4356 += 
"${PN}-broadcom-license" +LICENSE_${PN}-bcm43569 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43569 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43570 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43570 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4358 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4358 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm43602 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm43602 += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4366b = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4366b += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4366c = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4366c += "${PN}-broadcom-license" +LICENSE_${PN}-bcm4371 = "Firmware-broadcom_bcm43xx" +RDEPENDS_${PN}-bcm4371 += "${PN}-broadcom-license" + +# For broadcom cypress + +LICENSE_${PN}-cypress-license = "Firmware-cypress" +FILES_${PN}-cypress-license = "${nonarch_base_libdir}/firmware/LICENCE.cypress" + +FILES_${PN}-bcm-0bb4-0306 = "${nonarch_base_libdir}/firmware/brcm/BCM-0bb4-0306.hcd" +FILES_${PN}-bcm43340 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43340-sdio.*" +FILES_${PN}-bcm43362 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43362-sdio.*" +FILES_${PN}-bcm43430 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac43430-sdio.*" +FILES_${PN}-bcm4354 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4354-sdio.bin" +FILES_${PN}-bcm4356-pcie = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4356-pcie.*" +FILES_${PN}-bcm4373 = "${nonarch_base_libdir}/firmware/brcm/brcmfmac4373-sdio.bin \ + ${nonarch_base_libdir}/firmware/brcm/brcmfmac4373.bin \ +" + +LICENSE_${PN}-bcm-0bb4-0306 = "Firmware-cypress" +RDEPENDS_${PN}-bcm-0bb4-0306 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43340 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43340 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43362 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43362 += "${PN}-cypress-license" +LICENSE_${PN}-bcm43430 = "Firmware-cypress" +RDEPENDS_${PN}-bcm43430 += "${PN}-cypress-license" +LICENSE_${PN}-bcm4354 = "Firmware-cypress" +RDEPENDS_${PN}-bcm4354 += "${PN}-cypress-license" +LICENSE_${PN}-bcm4356-pcie = "Firmware-cypress" +RDEPENDS_${PN}-bcm4356-pcie += "${PN}-cypress-license" +LICENSE_${PN}-bcm4373 = "Firmware-cypress" +RDEPENDS_${PN}-bcm4373 += "${PN}-cypress-license" + +# For Broadcom bnx2-mips +# +# which is a separate case to the other Broadcom firmwares since its +# license is contained in the shared WHENCE file. 
+ +LICENSE_${PN}-bnx2-mips = "WHENCE" +LICENSE_${PN}-whence-license = "WHENCE" + +FILES_${PN}-bnx2-mips = "${nonarch_base_libdir}/firmware/bnx2/bnx2-mips-09-6.2.1b.fw" +FILES_${PN}-whence-license = "${nonarch_base_libdir}/firmware/WHENCE" + +RDEPENDS_${PN}-bnx2-mips += "${PN}-whence-license" + +# For imx-sdma +LICENSE_${PN}-imx-sdma-imx6q = "Firmware-imx-sdma_firmware" +LICENSE_${PN}-imx-sdma-imx7d = "Firmware-imx-sdma_firmware" +LICENSE_${PN}-imx-sdma-license = "Firmware-imx-sdma_firmware" + +FILES_${PN}-imx-sdma-imx6q = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx6q.bin" + +RPROVIDES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" +RREPLACES_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" +RCONFLICTS_${PN}-imx-sdma-imx6q = "firmware-imx-sdma-imx6q" + +FILES_${PN}-imx-sdma-imx7d = "${nonarch_base_libdir}/firmware/imx/sdma/sdma-imx7d.bin" + +FILES_${PN}-imx-sdma-license = "${nonarch_base_libdir}/firmware/LICENSE.sdma_firmware" + +RDEPENDS_${PN}-imx-sdma-imx6q += "${PN}-imx-sdma-license" +RDEPENDS_${PN}-imx-sdma-imx7d += "${PN}-imx-sdma-license" + +# For iwlwifi +LICENSE_${PN}-iwlwifi = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-135-6 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-7 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-8 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-9 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-10 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-12 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-13 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-16 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-3160-17 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000-4 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2a-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2a-6 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2b-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6000g2b-6 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6050-4 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-6050-5 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7260 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7265 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-7265d = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-8000c = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-8265 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-9000 = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-misc = "Firmware-iwlwifi_firmware" +LICENSE_${PN}-iwlwifi-license = "Firmware-iwlwifi_firmware" + + +FILES_${PN}-iwlwifi-license = "${nonarch_base_libdir}/firmware/LICENCE.iwlwifi_firmware" +FILES_${PN}-iwlwifi-135-6 = "${nonarch_base_libdir}/firmware/iwlwifi-135-6.ucode" +FILES_${PN}-iwlwifi-3160-7 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-7.ucode" +FILES_${PN}-iwlwifi-3160-8 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-8.ucode" +FILES_${PN}-iwlwifi-3160-9 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-9.ucode" +FILES_${PN}-iwlwifi-3160-10 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-10.ucode" +FILES_${PN}-iwlwifi-3160-12 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-12.ucode" +FILES_${PN}-iwlwifi-3160-13 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-13.ucode" +FILES_${PN}-iwlwifi-3160-16 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-16.ucode" +FILES_${PN}-iwlwifi-3160-17 = "${nonarch_base_libdir}/firmware/iwlwifi-3160-17.ucode" +FILES_${PN}-iwlwifi-6000-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6000-4.ucode" 
+FILES_${PN}-iwlwifi-6000g2a-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-5.ucode" +FILES_${PN}-iwlwifi-6000g2a-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2a-6.ucode" +FILES_${PN}-iwlwifi-6000g2b-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-5.ucode" +FILES_${PN}-iwlwifi-6000g2b-6 = "${nonarch_base_libdir}/firmware/iwlwifi-6000g2b-6.ucode" +FILES_${PN}-iwlwifi-6050-4 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-4.ucode" +FILES_${PN}-iwlwifi-6050-5 = "${nonarch_base_libdir}/firmware/iwlwifi-6050-5.ucode" +FILES_${PN}-iwlwifi-7260 = "${nonarch_base_libdir}/firmware/iwlwifi-7260-*.ucode" +FILES_${PN}-iwlwifi-7265 = "${nonarch_base_libdir}/firmware/iwlwifi-7265-*.ucode" +FILES_${PN}-iwlwifi-7265d = "${nonarch_base_libdir}/firmware/iwlwifi-7265D-*.ucode" +FILES_${PN}-iwlwifi-8000c = "${nonarch_base_libdir}/firmware/iwlwifi-8000C-*.ucode" +FILES_${PN}-iwlwifi-8265 = "${nonarch_base_libdir}/firmware/iwlwifi-8265-*.ucode" +FILES_${PN}-iwlwifi-9000 = "${nonarch_base_libdir}/firmware/iwlwifi-9000-*.ucode" +FILES_${PN}-iwlwifi-misc = "${nonarch_base_libdir}/firmware/iwlwifi-*.ucode" + +RDEPENDS_${PN}-iwlwifi-135-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-7 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-8 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-9 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-10 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-12 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-13 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-16 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-3160-17 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000-4 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2a-5 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2a-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2b-5 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6000g2b-6 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6050-4 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-6050-5 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-7265d = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-8000c = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-8265 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-9000 = "${PN}-iwlwifi-license" +RDEPENDS_${PN}-iwlwifi-misc = "${PN}-iwlwifi-license" + +# -iwlwifi-misc is a "catch all" package that includes all the iwlwifi +# firmwares that are not already included in other -iwlwifi- packages. +# -iwlwifi is a virtual package that depends upon all iwlwifi packages. +# These are distinct in order to allow the -misc firmwares to be installed +# without pulling in every other iwlwifi package. 
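+# Illustration only (a hedged sketch, not part of the upstream recipe): an
+# image that needs just the catch-all firmware could, for example, append
+#   IMAGE_INSTALL_append = " linux-firmware-iwlwifi-misc"
+# to local.conf or an image recipe, whereas installing "linux-firmware-iwlwifi"
+# pulls in all split iwlwifi packages through the RRECOMMENDS populated further
+# below. The package names shown assume the default PN of "linux-firmware".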
+ALLOW_EMPTY_${PN}-iwlwifi = "1" +ALLOW_EMPTY_${PN}-iwlwifi-misc = "1" + +# Handle package updating for the newly merged iwlwifi groupings +RPROVIDES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" +RREPLACES_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" +RCONFLICTS_${PN}-iwlwifi-7265 = "${PN}-iwlwifi-7265-8 ${PN}-iwlwifi-7265-9" + +RPROVIDES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" +RREPLACES_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" +RCONFLICTS_${PN}-iwlwifi-7260 = "${PN}-iwlwifi-7260-7 ${PN}-iwlwifi-7260-8 ${PN}-iwlwifi-7260-9" + +# For ibt +LICENSE_${PN}-ibt-license = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-hw-37-7 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-hw-37-8 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-11-5 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-12-16 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-17 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-20 = "Firmware-ibt_firmware" +LICENSE_${PN}-ibt-misc = "Firmware-ibt_firmware" + +FILES_${PN}-ibt-license = "${nonarch_base_libdir}/firmware/LICENCE.ibt_firmware" +FILES_${PN}-ibt-hw-37-7 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.7*.bseq" +FILES_${PN}-ibt-hw-37-8 = "${nonarch_base_libdir}/firmware/intel/ibt-hw-37.8*.bseq" +FILES_${PN}-ibt-11-5 = "${nonarch_base_libdir}/firmware/intel/ibt-11-5.sfi ${nonarch_base_libdir}/firmware/intel/ibt-11-5.ddc" +FILES_${PN}-ibt-12-16 = "${nonarch_base_libdir}/firmware/intel/ibt-12-16.sfi ${nonarch_base_libdir}/firmware/intel/ibt-12-16.ddc" +FILES_${PN}-ibt-17 = "${nonarch_base_libdir}/firmware/intel/ibt-17-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-17-*.ddc" +FILES_${PN}-ibt-20 = "${nonarch_base_libdir}/firmware/intel/ibt-20-*.sfi ${nonarch_base_libdir}/firmware/intel/ibt-20-*.ddc" +FILES_${PN}-ibt-misc = "${nonarch_base_libdir}/firmware/intel/ibt-*" + +RDEPENDS_${PN}-ibt-hw-37-7 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-hw-37.8 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-11-5 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-12-16 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-17 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-20 = "${PN}-ibt-license" +RDEPENDS_${PN}-ibt-misc = "${PN}-ibt-license" + +ALLOW_EMPTY_${PN}-ibt= "1" +ALLOW_EMPTY_${PN}-ibt-misc = "1" + +LICENSE_${PN}-i915 = "Firmware-i915" +LICENSE_${PN}-i915-license = "Firmware-i915" +FILES_${PN}-i915-license = "${nonarch_base_libdir}/firmware/LICENSE.i915" +FILES_${PN}-i915 = "${nonarch_base_libdir}/firmware/i915" +RDEPENDS_${PN}-i915 = "${PN}-i915-license" + +LICENSE_${PN}-ice = "Firmware-ice" +LICENSE_${PN}-ice-license = "Firmware-ice" +FILES_${PN}-ice-license = "${nonarch_base_libdir}/firmware/LICENSE.ice" +FILES_${PN}-ice = "${nonarch_base_libdir}/firmware/intel/ice" +RDEPENDS_${PN}-ice = "${PN}-ice-license" + +FILES_${PN}-adsp-sst-license = "${nonarch_base_libdir}/firmware/LICENCE.adsp_sst" +LICENSE_${PN}-adsp-sst = "Firmware-adsp_sst" +LICENSE_${PN}-adsp-sst-license = "Firmware-adsp_sst" +FILES_${PN}-adsp-sst = "${nonarch_base_libdir}/firmware/intel/dsp_fw*" +RDEPENDS_${PN}-adsp-sst = "${PN}-adsp-sst-license" + +# For QAT +LICENSE_${PN}-qat = "Firmware-qat" +LICENSE_${PN}-qat-license = "Firmware-qat" +FILES_${PN}-qat-license = "${nonarch_base_libdir}/firmware/LICENCE.qat_firmware" +FILES_${PN}-qat = "${nonarch_base_libdir}/firmware/qat*.bin" +RDEPENDS_${PN}-qat = "${PN}-qat-license" + +# For QCOM VPU/GPU and SDM845 +LICENSE_${PN}-qcom-license = "Firmware-qcom" +FILES_${PN}-qcom-license = 
"${nonarch_base_libdir}/firmware/LICENSE.qcom ${nonarch_base_libdir}/firmware/qcom/NOTICE.txt" +FILES_${PN}-qcom-venus-1.8 = "${nonarch_base_libdir}/firmware/qcom/venus-1.8/*" +FILES_${PN}-qcom-venus-4.2 = "${nonarch_base_libdir}/firmware/qcom/venus-4.2/*" +FILES_${PN}-qcom-venus-5.2 = "${nonarch_base_libdir}/firmware/qcom/venus-5.2/*" +FILES_${PN}-qcom-venus-5.4 = "${nonarch_base_libdir}/firmware/qcom/venus-5.4/*" +FILES_${PN}-qcom-adreno-a3xx = "${nonarch_base_libdir}/firmware/qcom/a300_*.fw ${nonarch_base_libdir}/firmware/a300_*.fw" +FILES_${PN}-qcom-adreno-a530 = "${nonarch_base_libdir}/firmware/qcom/a530*.*" +FILES_${PN}-qcom-adreno-a630 = "${nonarch_base_libdir}/firmware/qcom/a630*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/a630*.*" +FILES_${PN}-qcom-sdm845-audio = "${nonarch_base_libdir}/firmware/qcom/sdm845/adsp*.*" +FILES_${PN}-qcom-sdm845-compute = "${nonarch_base_libdir}/firmware/qcom/sdm845/cdsp*.*" +FILES_${PN}-qcom-sdm845-modem = "${nonarch_base_libdir}/firmware/qcom/sdm845/mba.mbn ${nonarch_base_libdir}/firmware/qcom/sdm845/modem*.* ${nonarch_base_libdir}/firmware/qcom/sdm845/wlanmdsp.mbn" +RDEPENDS_${PN}-qcom-venus-1.8 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-venus-4.2 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-venus-5.2 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-venus-5.4 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a3xx = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a530 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-adreno-a630 = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-audio = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-compute = "${PN}-qcom-license" +RDEPENDS_${PN}-qcom-sdm845-modem = "${PN}-qcom-license" + +FILES_${PN}-liquidio = "${nonarch_base_libdir}/firmware/liquidio" + +# For other firmwares +# Maybe split out to separate packages when needed. +LICENSE_${PN} = "\ + Firmware-Abilis \ + & Firmware-agere \ + & Firmware-amdgpu \ + & Firmware-amd-ucode \ + & Firmware-atmel \ + & Firmware-ca0132 \ + & Firmware-cavium \ + & Firmware-chelsio_firmware \ + & Firmware-cw1200 \ + & Firmware-dib0700 \ + & Firmware-e100 \ + & Firmware-ene_firmware \ + & Firmware-fw_sst_0f28 \ + & Firmware-go7007 \ + & Firmware-hfi1_firmware \ + & Firmware-i2400m \ + & Firmware-ibt_firmware \ + & Firmware-it913x \ + & Firmware-IntcSST2 \ + & Firmware-kaweth \ + & Firmware-moxa \ + & Firmware-myri10ge_firmware \ + & Firmware-nvidia \ + & Firmware-OLPC \ + & Firmware-ath9k-htc \ + & Firmware-phanfw \ + & Firmware-qat \ + & Firmware-qcom \ + & Firmware-qla1280 \ + & Firmware-qla2xxx \ + & Firmware-r8a779x_usb3 \ + & Firmware-radeon \ + & Firmware-ralink_a_mediatek_company_firmware \ + & Firmware-ralink-firmware \ + & Firmware-imx-sdma_firmware \ + & Firmware-siano \ + & Firmware-tda7706-firmware \ + & Firmware-ti-connectivity \ + & Firmware-ti-keystone \ + & Firmware-ueagle-atm4-firmware \ + & Firmware-wl1251 \ + & Firmware-xc4000 \ + & Firmware-xc5000 \ + & Firmware-xc5000c \ + & WHENCE \ +" + +FILES_${PN}-license += "${nonarch_base_libdir}/firmware/LICEN*" +FILES_${PN} += "${nonarch_base_libdir}/firmware/*" +RDEPENDS_${PN} += "${PN}-license" +RDEPENDS_${PN} += "${PN}-whence-license" + +# Make linux-firmware depend on all of the split-out packages. +# Make linux-firmware-iwlwifi depend on all of the split-out iwlwifi packages. +# Make linux-firmware-ibt depend on all of the split-out ibt packages. 
+python populate_packages_prepend () { + firmware_pkgs = oe.utils.packages_filter_out_system(d) + d.appendVar('RRECOMMENDS_linux-firmware', ' ' + ' '.join(firmware_pkgs)) + + iwlwifi_pkgs = filter(lambda x: x.find('-iwlwifi-') != -1, firmware_pkgs) + d.appendVar('RRECOMMENDS_linux-firmware-iwlwifi', ' ' + ' '.join(iwlwifi_pkgs)) + + ibt_pkgs = filter(lambda x: x.find('-ibt-') != -1, firmware_pkgs) + d.appendVar('RRECOMMENDS_linux-firmware-ibt', ' ' + ' '.join(ibt_pkgs)) +} + +# Firmware files are generally not run on the CPU, so they can be +# allarch despite being architecture specific +INSANE_SKIP = "arch" diff --git a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc index b1cb553c7..3bceac40c 100644 --- a/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc +++ b/poky/meta/recipes-kernel/linux-libc-headers/linux-libc-headers.inc @@ -16,7 +16,7 @@ LICENSE = "GPLv2" # and have a machine specific libc. # # But you have some kernel headers you need for some driver? That is fine -# but get them from STAGING_KERNEL_DIR where the kernel installs itself. +# but get them from STAGING_KERNEL_BUILDDIR where the kernel installs itself. # This will make the package using them machine specific but this is much # better than having a machine specific C library. This does mean your # recipe needs a diff --git a/poky/meta/recipes-kernel/linux/kernel-devsrc.bb b/poky/meta/recipes-kernel/linux/kernel-devsrc.bb index a9c7be0f8..aa8e162f4 100644 --- a/poky/meta/recipes-kernel/linux/kernel-devsrc.bb +++ b/poky/meta/recipes-kernel/linux/kernel-devsrc.bb @@ -261,6 +261,8 @@ do_install() { if [ -e "$kerneldir/build/include/config/auto.conf.cmd" ]; then sed -i 's/ifneq "$(CC)" ".*-linux-.*gcc.*$/ifneq "$(CC)" "gcc"/' "$kerneldir/build/include/config/auto.conf.cmd" sed -i 's/ifneq "$(LD)" ".*-linux-.*ld.bfd.*$/ifneq "$(LD)" "ld"/' "$kerneldir/build/include/config/auto.conf.cmd" + sed -i 's/ifneq "$(HOSTCXX)" ".*$/ifneq "$(HOSTCXX)" "g++"/' "$kerneldir/build/include/config/auto.conf.cmd" + sed -i 's/ifneq "$(HOSTCC)" ".*$/ifneq "$(HOSTCC)" "gcc"/' "$kerneldir/build/include/config/auto.conf.cmd" sed -i 's/ifneq "$(CC_VERSION_TEXT)".*\(gcc.*\)"/ifneq "$(CC_VERSION_TEXT)" "\1"/' "$kerneldir/build/include/config/auto.conf.cmd" sed -i 's/ifneq "$(srctree)" ".*"/ifneq "$(srctree)" "."/' "$kerneldir/build/include/config/auto.conf.cmd" # we don't build against the defconfig, so make sure it isn't the trigger for syncconfig diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb b/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb index 175836ef9..c937173d0 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-dev.bb @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto-dev.git;branch=${KBRANCH};name SRCREV_machine ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' SRCREV_meta ?= '${@oe.utils.conditional("PREFERRED_PROVIDER_virtual/kernel", "linux-yocto-dev", "${AUTOREV}", "29594404d7fe73cd80eaa4ee8c43dcc53970c60e", d)}' -LINUX_VERSION ?= "5.8-rc+" +LINUX_VERSION ?= "5.9-rc+" LINUX_VERSION_EXTENSION ?= "-yoctodev-${LINUX_KERNEL_TYPE}" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb index cfe3277e8..73876bb99 100644 --- 
a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.4.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "22664d170488313b5c2713b6d9c8df6563387728" -SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" +SRCREV_machine ?= "1d9e25c4f35155580cef313ff2a76de545124a1d" +SRCREV_meta ?= "0d860e075788a92601dff3eb9b615ee41e465040" SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" -LINUX_VERSION ?= "5.4.58" +LINUX_VERSION ?= "5.4.65" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb index e23e7dcfb..d29c5985c 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-rt_5.8.bb @@ -11,13 +11,13 @@ python () { raise bb.parse.SkipRecipe("Set PREFERRED_PROVIDER_virtual/kernel to linux-yocto-rt to enable it") } -SRCREV_machine ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" +SRCREV_machine ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_meta ?= "a933cb2f91915dceb138775c3878212e228d3eff" SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;branch=${KBRANCH};name=machine \ git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.8;destsuffix=${KMETA}" -LINUX_VERSION ?= "5.8.1" +LINUX_VERSION ?= "5.8.9" LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb index b90b1259a..853fc9369 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.4.bb @@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "5.4.58" +LINUX_VERSION ?= "5.4.65" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" @@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native" KMETA = "kernel-meta" KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine_qemuarm ?= "d192ae0b9995a7be2a33b12005a95348ec6aae94" -SRCREV_machine ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" +SRCREV_machine_qemuarm ?= "bb77791bc00cfa70211dd238d312b4db950c0808" +SRCREV_machine ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_meta ?= "0d860e075788a92601dff3eb9b615ee41e465040" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb index 36a8ae457..d32e5d372 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto-tiny_5.8.bb @@ -6,7 +6,7 @@ KCONFIG_MODE = "--allnoconfig" require recipes-kernel/linux/linux-yocto.inc -LINUX_VERSION ?= "5.8.1" +LINUX_VERSION ?= "5.8.9" LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" @@ -15,9 +15,9 @@ DEPENDS += "openssl-native util-linux-native" KMETA = "kernel-meta" 
KCONF_BSP_AUDIT_LEVEL = "2" -SRCREV_machine_qemuarm ?= "566e869df9400258b6f162bf34933f5b6dcd0115" -SRCREV_machine ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" +SRCREV_machine_qemuarm ?= "830cb9af40e856615b7a435a4fac57b748ba56d6" +SRCREV_machine ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_meta ?= "a933cb2f91915dceb138775c3878212e228d3eff" PV = "${LINUX_VERSION}+git${SRCPV}" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb index f85e37d2c..fe9369196 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto_5.4.bb @@ -12,16 +12,16 @@ KBRANCH_qemux86 ?= "v5.4/standard/base" KBRANCH_qemux86-64 ?= "v5.4/standard/base" KBRANCH_qemumips64 ?= "v5.4/standard/mti-malta64" -SRCREV_machine_qemuarm ?= "7bbd138602fda3d69d74674460e73bffdec73cd2" -SRCREV_machine_qemuarm64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_machine_qemumips ?= "e43ed1586cd85a007b0fae3c63d6980d4f5cb336" -SRCREV_machine_qemuppc ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_machine_qemuriscv64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_machine_qemux86 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_machine_qemux86-64 ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_machine_qemumips64 ?= "d1ff96887c64f70de00add62eb91d4c36f1b181a" -SRCREV_machine ?= "706efec4c1e270ec5dda92275898cd465dfdc7dd" -SRCREV_meta ?= "83311f062f4aede9928eca82a34ddf73f264fe2a" +SRCREV_machine_qemuarm ?= "894e63d3256613faa39931a6ae505cfd196df067" +SRCREV_machine_qemuarm64 ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_machine_qemumips ?= "aa13fdb2c5f8c4e4e432bfee6df9c8f76ec8ac70" +SRCREV_machine_qemuppc ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_machine_qemuriscv64 ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_machine_qemux86 ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_machine_qemux86-64 ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_machine_qemumips64 ?= "4fb21d604fc54db63221ea28ab90622c29d74202" +SRCREV_machine ?= "406008bf3232dfc9e63b6e7bf745ca883c45041e" +SRCREV_meta ?= "0d860e075788a92601dff3eb9b615ee41e465040" # remap qemuarm to qemuarma15 for the 5.4 kernel # KMACHINE_qemuarm ?= "qemuarma15" @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.4;destsuffix=${KMETA}" LIC_FILES_CHKSUM = "file://COPYING;md5=bbea815ee2795b2f4230826c0c6b8814" -LINUX_VERSION ?= "5.4.58" +LINUX_VERSION ?= "5.4.65" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" DEPENDS += "openssl-native util-linux-native" diff --git a/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb b/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb index aad689590..9ff1d5da8 100644 --- a/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb +++ b/poky/meta/recipes-kernel/linux/linux-yocto_5.8.bb @@ -12,16 +12,16 @@ KBRANCH_qemux86 ?= "v5.8/standard/base" KBRANCH_qemux86-64 ?= "v5.8/standard/base" KBRANCH_qemumips64 ?= "v5.8/standard/mti-malta64" -SRCREV_machine_qemuarm ?= "097417e785af04be0cbe757bc6e24456a3f701fd" -SRCREV_machine_qemuarm64 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_machine_qemumips ?= "1fc5490bef8322680d73f6ab2c7b666eccc3bce1" -SRCREV_machine_qemuppc ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_machine_qemuriscv64 ?= 
"d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_machine_qemux86 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_machine_qemux86-64 ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_machine_qemumips64 ?= "e61fc06792254eed92c6908a9b35790ed54b0ace" -SRCREV_machine ?= "d3c69e89ee5b5d4c3c19b8614bdcdc3f5dc7a8b3" -SRCREV_meta ?= "a3138cb23c3b7409c516d5d2115da9534c120a0c" +SRCREV_machine_qemuarm ?= "d351bf87c9c0e96a1f27f87f16d298bc4470e0b5" +SRCREV_machine_qemuarm64 ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_machine_qemumips ?= "93d29a70890b19fb5482ebcab5f3a49301851daf" +SRCREV_machine_qemuppc ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_machine_qemuriscv64 ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_machine_qemux86 ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_machine_qemux86-64 ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_machine_qemumips64 ?= "4faa049b6b7b51c5d12d20c5e9fcf8e0a3ba8d42" +SRCREV_machine ?= "31fafe701e2adec65d2b2a74a3e592a358915c67" +SRCREV_meta ?= "a933cb2f91915dceb138775c3878212e228d3eff" # remap qemuarm to qemuarma15 for the 5.8 kernel # KMACHINE_qemuarm ?= "qemuarma15" @@ -30,7 +30,7 @@ SRC_URI = "git://git.yoctoproject.org/linux-yocto.git;name=machine;branch=${KBRA git://git.yoctoproject.org/yocto-kernel-cache;type=kmeta;name=meta;branch=yocto-5.8;destsuffix=${KMETA}" LIC_FILES_CHKSUM = "file://COPYING;md5=6bc538ed5bd9a7fc9398086aedcd7e46" -LINUX_VERSION ?= "5.8.1" +LINUX_VERSION ?= "5.8.9" DEPENDS += "${@bb.utils.contains('ARCH', 'x86', 'elfutils-native', '', d)}" DEPENDS += "openssl-native util-linux-native" diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Kconfig-fix-dependency-issue-when-building-in-tree-w.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Kconfig-fix-dependency-issue-when-building-in-tree-w.patch new file mode 100644 index 000000000..ae8bec45d --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0001-Kconfig-fix-dependency-issue-when-building-in-tree-w.patch @@ -0,0 +1,54 @@ +From ff4d1d7e85be94ef43709cd698f0ec9a12f247d1 Mon Sep 17 00:00:00 2001 +From: Beniamin Sandu +Date: Thu, 13 Aug 2020 16:24:39 +0300 +Subject: [PATCH 01/10] Kconfig: fix dependency issue when building in-tree + without CONFIG_FTRACE + +When building in-tree, one could disable CONFIG_FTRACE from kernel +config which will leave CONFIG_TRACEPOINTS selected by LTTNG modules, +but generate a lot of linker errors like below because it leaves out +other stuff, e.g.: + +trace.c:(.text+0xd86b): undefined reference to `trace_event_buffer_reserve' +ld: trace.c:(.text+0xd8de): undefined reference to `trace_event_buffer_commit' +ld: trace.c:(.text+0xd926): undefined reference to `event_triggers_call' +ld: trace.c:(.text+0xd942): undefined reference to `trace_event_ignore_this_pid' +ld: net/mac80211/trace.o: in function `trace_event_raw_event_drv_tdls_cancel_channel_switch': + +It appears to be caused by the fact that TRACE_EVENT macros in the Linux +kernel depend on the Ftrace ring buffer as soon as CONFIG_TRACEPOINTS is +enabled. 
+ +Steps to reproduce: + +- Get a clone of an upstream stable kernel and use scripts/built-in.sh on it + +- Configure a standard x86-64 build, enable built-in LTTNG but disable + CONFIG_FTRACE from Kernel Hacking-->Tracers using menuconfig + +- Build will fail at linking stage + +Upstream-Status: Backport + +Signed-off-by: Beniamin Sandu +Signed-off-by: Mathieu Desnoyers +--- + Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/Kconfig b/Kconfig +index acdab73..10eccff 100644 +--- a/Kconfig ++++ b/Kconfig +@@ -2,7 +2,7 @@ + + config LTTNG + tristate "LTTng support" +- select TRACEPOINTS ++ select TRACING + help + LTTng is an open source tracing framework for Linux. + +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0002-fix-Move-mmutrace.h-into-the-mmu-sub-directory-v5.9.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0002-fix-Move-mmutrace.h-into-the-mmu-sub-directory-v5.9.patch new file mode 100644 index 000000000..fab673b85 --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0002-fix-Move-mmutrace.h-into-the-mmu-sub-directory-v5.9.patch @@ -0,0 +1,41 @@ +From e10ab43dd0e425df5bc0ac763447664ed075ba05 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 10 Aug 2020 11:22:05 -0400 +Subject: [PATCH 02/10] fix: Move mmutrace.h into the mmu/ sub-directory (v5.9) + + commit 33e3042dac6bcc33b80835f7d7b502b1d74c457c + Author: Sean Christopherson + Date: Mon Jun 22 13:20:29 2020 -0700 + + KVM: x86/mmu: Move mmu_audit.c and mmutrace.h into the mmu/ sub-directory + + Move mmu_audit.c and mmutrace.h under mmu/ where they belong. + +Upstream-Status: Backport + +Change-Id: I582525ccca34e1e3bd62870364108a7d3e9df2e4 +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +--- + probes/lttng-probe-kvm-x86-mmu.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/probes/lttng-probe-kvm-x86-mmu.c b/probes/lttng-probe-kvm-x86-mmu.c +index 37384a2..5a7ef1e 100644 +--- a/probes/lttng-probe-kvm-x86-mmu.c ++++ b/probes/lttng-probe-kvm-x86-mmu.c +@@ -24,7 +24,11 @@ + */ + #include + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++#include <../../arch/x86/kvm/mmu/mmutrace.h> ++#else + #include <../../arch/x86/kvm/mmutrace.h> ++#endif + + #undef TRACE_INCLUDE_PATH + #undef TRACE_INCLUDE_FILE +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0003-fix-KVM-x86-mmu-Make-kvm_mmu_page-definition-and-acc.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0003-fix-KVM-x86-mmu-Make-kvm_mmu_page-definition-and-acc.patch new file mode 100644 index 000000000..524631cc7 --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0003-fix-KVM-x86-mmu-Make-kvm_mmu_page-definition-and-acc.patch @@ -0,0 +1,39 @@ +From f16315cc45c4c6b880de541bb092ca18a13952b7 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 10 Aug 2020 11:36:03 -0400 +Subject: [PATCH 03/10] fix: KVM: x86/mmu: Make kvm_mmu_page definition and + accessor internal-only (v5.9) + + commit 985ab2780164698ec6e7d73fad523d50449261dd + Author: Sean Christopherson + Date: Mon Jun 22 13:20:32 2020 -0700 + + KVM: x86/mmu: Make kvm_mmu_page definition and accessor internal-only + + Make 'struct kvm_mmu_page' MMU-only, nothing outside of the MMU should + be poking into the gory details of shadow pages. 
+ +Upstream-Status: Backport + +Change-Id: Ia5c1b9c49c2b00dad1d5b17c50c3dc730dafda20 +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +--- + probes/lttng-probe-kvm-x86-mmu.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/probes/lttng-probe-kvm-x86-mmu.c b/probes/lttng-probe-kvm-x86-mmu.c +index 5a7ef1e..8f98186 100644 +--- a/probes/lttng-probe-kvm-x86-mmu.c ++++ b/probes/lttng-probe-kvm-x86-mmu.c +@@ -25,6 +25,7 @@ + #include + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++#include <../../arch/x86/kvm/mmu/mmu_internal.h> + #include <../../arch/x86/kvm/mmu/mmutrace.h> + #else + #include <../../arch/x86/kvm/mmutrace.h> +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0004-fix-ext4-limit-the-length-of-per-inode-prealloc-list.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0004-fix-ext4-limit-the-length-of-per-inode-prealloc-list.patch new file mode 100644 index 000000000..e29c07252 --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0004-fix-ext4-limit-the-length-of-per-inode-prealloc-list.patch @@ -0,0 +1,84 @@ +From 8fe742807e65af29dac3fea568ff93cbc5dd9a56 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 24 Aug 2020 15:26:04 -0400 +Subject: [PATCH 04/10] fix: ext4: limit the length of per-inode prealloc list + (v5.9) +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +See upstream commit: + + commit 27bc446e2def38db3244a6eb4bb1d6312936610a + Author: brookxu + Date: Mon Aug 17 15:36:15 2020 +0800 + + ext4: limit the length of per-inode prealloc list + + In the scenario of writing sparse files, the per-inode prealloc list may + be very long, resulting in high overhead for ext4_mb_use_preallocated(). + To circumvent this problem, we limit the maximum length of per-inode + prealloc list to 512 and allow users to modify it. + + After patching, we observed that the sys ratio of cpu has dropped, and + the system throughput has increased significantly. 
We created a process + to write the sparse file, and the running time of the process on the + fixed kernel was significantly reduced, as follows: + + Running time on unfixed kernel: + [root@TENCENT64 ~]# time taskset 0x01 ./sparse /data1/sparce.dat + real 0m2.051s + user 0m0.008s + sys 0m2.026s + + Running time on fixed kernel: + [root@TENCENT64 ~]# time taskset 0x01 ./sparse /data1/sparce.dat + real 0m0.471s + user 0m0.004s + sys 0m0.395s + +Upstream-Status: Backport + +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +Change-Id: I5169cb24853d4da32e2862a6626f1f058689b053 +--- + instrumentation/events/lttng-module/ext4.h | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h +index 5f7ab28..72ad4c9 100644 +--- a/instrumentation/events/lttng-module/ext4.h ++++ b/instrumentation/events/lttng-module/ext4.h +@@ -460,6 +460,20 @@ LTTNG_TRACEPOINT_EVENT(ext4_mb_release_group_pa, + ) + #endif + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++LTTNG_TRACEPOINT_EVENT(ext4_discard_preallocations, ++ TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), ++ ++ TP_ARGS(inode, len, needed), ++ ++ TP_FIELDS( ++ ctf_integer(dev_t, dev, inode->i_sb->s_dev) ++ ctf_integer(ino_t, ino, inode->i_ino) ++ ctf_integer(unsigned int, len, len) ++ ctf_integer(unsigned int, needed, needed) ++ ) ++) ++#else + LTTNG_TRACEPOINT_EVENT(ext4_discard_preallocations, + TP_PROTO(struct inode *inode), + +@@ -470,6 +484,7 @@ LTTNG_TRACEPOINT_EVENT(ext4_discard_preallocations, + ctf_integer(ino_t, ino, inode->i_ino) + ) + ) ++#endif + + LTTNG_TRACEPOINT_EVENT(ext4_mb_discard_preallocations, + TP_PROTO(struct super_block *sb, int needed), +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-indicate-via-a-block-bitmap-read-is-prefetc.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-indicate-via-a-block-bitmap-read-is-prefetc.patch new file mode 100644 index 000000000..f76e9698c --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0005-fix-ext4-indicate-via-a-block-bitmap-read-is-prefetc.patch @@ -0,0 +1,63 @@ +From 52563d02a9234215b62c5f519aa1b5d8589ccd0a Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 24 Aug 2020 15:37:50 -0400 +Subject: [PATCH 05/10] =?UTF-8?q?fix:=20ext4:=20indicate=20via=20a=20block?= + =?UTF-8?q?=20bitmap=20read=20is=20prefetched=E2=80=A6=20(v5.9)?= +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +See upstream commit: + + commit ab74c7b23f3770935016e3eb3ecdf1e42b73efaa + Author: Theodore Ts'o + Date: Wed Jul 15 11:48:55 2020 -0400 + + ext4: indicate via a block bitmap read is prefetched via a tracepoint + + Modify the ext4_read_block_bitmap_load tracepoint so that it tells us + whether a block bitmap is being prefetched. 
+ +Upstream-Status: Backport + +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +Change-Id: I0e5e2c5b8004223d0928235c092449ee16a940e1 +--- + instrumentation/events/lttng-module/ext4.h | 14 ++++++++++++++ + 1 file changed, 14 insertions(+) + +diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h +index 72ad4c9..4476abb 100644 +--- a/instrumentation/events/lttng-module/ext4.h ++++ b/instrumentation/events/lttng-module/ext4.h +@@ -893,12 +893,26 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(ext4__bitmap_load, ext4_mb_buddy_bitmap_load, + TP_ARGS(sb, group) + ) + ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++LTTNG_TRACEPOINT_EVENT(ext4_read_block_bitmap_load, ++ TP_PROTO(struct super_block *sb, unsigned long group, bool prefetch), ++ ++ TP_ARGS(sb, group, prefetch), ++ ++ TP_FIELDS( ++ ctf_integer(dev_t, dev, sb->s_dev) ++ ctf_integer(__u32, group, group) ++ ctf_integer(bool, prefetch, prefetch) ++ ) ++) ++#else + LTTNG_TRACEPOINT_EVENT_INSTANCE(ext4__bitmap_load, ext4_read_block_bitmap_load, + + TP_PROTO(struct super_block *sb, unsigned long group), + + TP_ARGS(sb, group) + ) ++#endif + + LTTNG_TRACEPOINT_EVENT_INSTANCE(ext4__bitmap_load, ext4_load_inode_bitmap, + +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0006-fix-removal-of-smp_-read_barrier_depends-v5.9.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0006-fix-removal-of-smp_-read_barrier_depends-v5.9.patch new file mode 100644 index 000000000..0970dd30a --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0006-fix-removal-of-smp_-read_barrier_depends-v5.9.patch @@ -0,0 +1,391 @@ +From 57ccbfa6a8a79c7b84394c2097efaf7935607aa5 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Tue, 25 Aug 2020 10:56:29 -0400 +Subject: [PATCH 06/10] fix: removal of [smp_]read_barrier_depends (v5.9) + +See upstream commits: + + commit 76ebbe78f7390aee075a7f3768af197ded1bdfbb + Author: Will Deacon + Date: Tue Oct 24 11:22:47 2017 +0100 + + locking/barriers: Add implicit smp_read_barrier_depends() to READ_ONCE() + + In preparation for the removal of lockless_dereference(), which is the + same as READ_ONCE() on all architectures other than Alpha, add an + implicit smp_read_barrier_depends() to READ_ONCE() so that it can be + used to head dependency chains on all architectures. + + commit 76ebbe78f7390aee075a7f3768af197ded1bdfbb + Author: Will Deacon + Date: Tue Oct 24 11:22:47 2017 +0100 + + locking/barriers: Add implicit smp_read_barrier_depends() to READ_ONCE() + + In preparation for the removal of lockless_dereference(), which is the + same as READ_ONCE() on all architectures other than Alpha, add an + implicit smp_read_barrier_depends() to READ_ONCE() so that it can be + used to head dependency chains on all architectures. 
+ +Upstream-Status: Backport + +Change-Id: Ife8880bd9378dca2972da8838f40fc35ccdfaaac +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +--- + instrumentation/events/lttng-module/i2c.h | 4 ++-- + lib/ringbuffer/backend.h | 2 +- + lib/ringbuffer/backend_internal.h | 2 +- + lib/ringbuffer/frontend.h | 4 ++-- + lib/ringbuffer/ring_buffer_frontend.c | 4 ++-- + lib/ringbuffer/ring_buffer_iterator.c | 2 +- + lttng-events.c | 8 ++++---- + probes/lttng-kprobes.c | 6 +++--- + probes/lttng-kretprobes.c | 6 +++--- + probes/lttng-tracepoint-event-impl.h | 12 ++++++------ + probes/lttng-uprobes.c | 6 +++--- + wrapper/compiler.h | 18 ++++++++++++++++++ + wrapper/trace-clock.h | 15 +++++---------- + 13 files changed, 51 insertions(+), 38 deletions(-) + +diff --git a/instrumentation/events/lttng-module/i2c.h b/instrumentation/events/lttng-module/i2c.h +index dcbabf6..131d134 100644 +--- a/instrumentation/events/lttng-module/i2c.h ++++ b/instrumentation/events/lttng-module/i2c.h +@@ -23,7 +23,7 @@ LTTNG_TRACEPOINT_EVENT_CODE(i2c_write, + + TP_code_pre( + tp_locvar->extract_sensitive_payload = +- READ_ONCE(extract_sensitive_payload); ++ LTTNG_READ_ONCE(extract_sensitive_payload); + ), + + TP_FIELDS( +@@ -78,7 +78,7 @@ LTTNG_TRACEPOINT_EVENT_CODE(i2c_reply, + + TP_code_pre( + tp_locvar->extract_sensitive_payload = +- READ_ONCE(extract_sensitive_payload); ++ LTTNG_READ_ONCE(extract_sensitive_payload); + ), + + TP_FIELDS( +diff --git a/lib/ringbuffer/backend.h b/lib/ringbuffer/backend.h +index da937f2..43e1d47 100644 +--- a/lib/ringbuffer/backend.h ++++ b/lib/ringbuffer/backend.h +@@ -156,7 +156,7 @@ size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config, + * Only read source character once, in case it is + * modified concurrently. + */ +- c = READ_ONCE(src[count]); ++ c = LTTNG_READ_ONCE(src[count]); + if (!c) + break; + lib_ring_buffer_do_copy(config, &dest[count], &c, 1); +diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h +index 2d6a345..1226fd8 100644 +--- a/lib/ringbuffer/backend_internal.h ++++ b/lib/ringbuffer/backend_internal.h +@@ -367,7 +367,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config, + * Performing a volatile access to read the sb_pages, because we want to + * read a coherent version of the pointer and the associated noref flag. + */ +- id = READ_ONCE(bufb->buf_wsb[idx].id); ++ id = LTTNG_READ_ONCE(bufb->buf_wsb[idx].id); + for (;;) { + /* This check is called on the fast path for each record. */ + if (likely(!subbuffer_id_is_noref(config, id))) { +diff --git a/lib/ringbuffer/frontend.h b/lib/ringbuffer/frontend.h +index 6f516d9..41382fe 100644 +--- a/lib/ringbuffer/frontend.h ++++ b/lib/ringbuffer/frontend.h +@@ -79,7 +79,7 @@ void *channel_destroy(struct channel *chan); + #define for_each_channel_cpu(cpu, chan) \ + for ((cpu) = -1; \ + ({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask); \ +- smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });) ++ smp_rmb(); (cpu) < nr_cpu_ids; });) + + extern struct lib_ring_buffer *channel_get_ring_buffer( + const struct lib_ring_buffer_config *config, +@@ -155,7 +155,7 @@ static inline + int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config, + struct lib_ring_buffer *buf) + { +- int finalized = READ_ONCE(buf->finalized); ++ int finalized = LTTNG_READ_ONCE(buf->finalized); + /* + * Read finalized before counters. 
+ */ +diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c +index 3cab365..4980d20 100644 +--- a/lib/ringbuffer/ring_buffer_frontend.c ++++ b/lib/ringbuffer/ring_buffer_frontend.c +@@ -1074,7 +1074,7 @@ int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf, + int finalized; + + retry: +- finalized = READ_ONCE(buf->finalized); ++ finalized = LTTNG_READ_ONCE(buf->finalized); + /* + * Read finalized before counters. + */ +@@ -1245,7 +1245,7 @@ int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf, + return -EBUSY; + } + retry: +- finalized = READ_ONCE(buf->finalized); ++ finalized = LTTNG_READ_ONCE(buf->finalized); + /* + * Read finalized before counters. + */ +diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c +index d25db72..7b4f20a 100644 +--- a/lib/ringbuffer/ring_buffer_iterator.c ++++ b/lib/ringbuffer/ring_buffer_iterator.c +@@ -46,7 +46,7 @@ restart: + switch (iter->state) { + case ITER_GET_SUBBUF: + ret = lib_ring_buffer_get_next_subbuf(buf); +- if (ret && !READ_ONCE(buf->finalized) ++ if (ret && !LTTNG_READ_ONCE(buf->finalized) + && config->alloc == RING_BUFFER_ALLOC_GLOBAL) { + /* + * Use "pull" scheme for global buffers. The reader +diff --git a/lttng-events.c b/lttng-events.c +index be7e389..d719294 100644 +--- a/lttng-events.c ++++ b/lttng-events.c +@@ -1719,7 +1719,7 @@ int lttng_metadata_printf(struct lttng_session *session, + size_t len; + va_list ap; + +- WARN_ON_ONCE(!READ_ONCE(session->active)); ++ WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active)); + + va_start(ap, fmt); + str = kvasprintf(GFP_KERNEL, fmt, ap); +@@ -2305,7 +2305,7 @@ int _lttng_event_metadata_statedump(struct lttng_session *session, + { + int ret = 0; + +- if (event->metadata_dumped || !READ_ONCE(session->active)) ++ if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active)) + return 0; + if (chan->channel_type == METADATA_CHANNEL) + return 0; +@@ -2377,7 +2377,7 @@ int _lttng_channel_metadata_statedump(struct lttng_session *session, + { + int ret = 0; + +- if (chan->metadata_dumped || !READ_ONCE(session->active)) ++ if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active)) + return 0; + + if (chan->channel_type == METADATA_CHANNEL) +@@ -2604,7 +2604,7 @@ int _lttng_session_metadata_statedump(struct lttng_session *session) + struct lttng_event *event; + int ret = 0; + +- if (!READ_ONCE(session->active)) ++ if (!LTTNG_READ_ONCE(session->active)) + return 0; + + lttng_metadata_begin(session); +diff --git a/probes/lttng-kprobes.c b/probes/lttng-kprobes.c +index a44eaa1..38fb72e 100644 +--- a/probes/lttng-kprobes.c ++++ b/probes/lttng-kprobes.c +@@ -31,11 +31,11 @@ int lttng_kprobes_handler_pre(struct kprobe *p, struct pt_regs *regs) + int ret; + unsigned long data = (unsigned long) p->addr; + +- if (unlikely(!READ_ONCE(chan->session->active))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->session->active))) + return 0; +- if (unlikely(!READ_ONCE(chan->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->enabled))) + return 0; +- if (unlikely(!READ_ONCE(event->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(event->enabled))) + return 0; + + lib_ring_buffer_ctx_init(&ctx, chan->chan, <tng_probe_ctx, sizeof(data), +diff --git a/probes/lttng-kretprobes.c b/probes/lttng-kretprobes.c +index ab98ff2..a6bcd21 100644 +--- a/probes/lttng-kretprobes.c ++++ b/probes/lttng-kretprobes.c +@@ -51,11 +51,11 @@ int _lttng_kretprobes_handler(struct kretprobe_instance *krpi, + unsigned long parent_ip; + } payload; + +- if 
(unlikely(!READ_ONCE(chan->session->active))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->session->active))) + return 0; +- if (unlikely(!READ_ONCE(chan->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->enabled))) + return 0; +- if (unlikely(!READ_ONCE(event->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(event->enabled))) + return 0; + + payload.ip = (unsigned long) krpi->rp->kp.addr; +diff --git a/probes/lttng-tracepoint-event-impl.h b/probes/lttng-tracepoint-event-impl.h +index 77b8638..72a669e 100644 +--- a/probes/lttng-tracepoint-event-impl.h ++++ b/probes/lttng-tracepoint-event-impl.h +@@ -1132,11 +1132,11 @@ static void __event_probe__##_name(void *__data, _proto) \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ +- if (unlikely(!READ_ONCE(__session->active))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__session->active))) \ + return; \ +- if (unlikely(!READ_ONCE(__chan->enabled))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \ + return; \ +- if (unlikely(!READ_ONCE(__event->enabled))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \ + return; \ + __lf = lttng_rcu_dereference(__session->pid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \ +@@ -1225,11 +1225,11 @@ static void __event_probe__##_name(void *__data) \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ +- if (unlikely(!READ_ONCE(__session->active))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__session->active))) \ + return; \ +- if (unlikely(!READ_ONCE(__chan->enabled))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \ + return; \ +- if (unlikely(!READ_ONCE(__event->enabled))) \ ++ if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \ + return; \ + __lf = lttng_rcu_dereference(__session->pid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \ +diff --git a/probes/lttng-uprobes.c b/probes/lttng-uprobes.c +index bc10128..bda1d9b 100644 +--- a/probes/lttng-uprobes.c ++++ b/probes/lttng-uprobes.c +@@ -40,11 +40,11 @@ int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs) + unsigned long ip; + } payload; + +- if (unlikely(!READ_ONCE(chan->session->active))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->session->active))) + return 0; +- if (unlikely(!READ_ONCE(chan->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(chan->enabled))) + return 0; +- if (unlikely(!READ_ONCE(event->enabled))) ++ if (unlikely(!LTTNG_READ_ONCE(event->enabled))) + return 0; + + lib_ring_buffer_ctx_init(&ctx, chan->chan, <tng_probe_ctx, +diff --git a/wrapper/compiler.h b/wrapper/compiler.h +index 1496f33..b9f8c51 100644 +--- a/wrapper/compiler.h ++++ b/wrapper/compiler.h +@@ -9,6 +9,7 @@ + #define _LTTNG_WRAPPER_COMPILER_H + + #include ++#include + + /* + * Don't allow compiling with buggy compiler. +@@ -39,4 +40,21 @@ + # define WRITE_ONCE(x, val) ({ ACCESS_ONCE(x) = val; }) + #endif + ++/* ++ * In v4.15 a smp read barrier was added to READ_ONCE to replace ++ * lockless_dereference(), replicate this behavior on prior kernels ++ * and remove calls to smp_read_barrier_depends which was dropped ++ * in v5.9. 
++ */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) ++#define LTTNG_READ_ONCE(x) READ_ONCE(x) ++#else ++#define LTTNG_READ_ONCE(x) \ ++({ \ ++ typeof(x) __val = READ_ONCE(x); \ ++ smp_read_barrier_depends(); \ ++ __val; \ ++}) ++#endif ++ + #endif /* _LTTNG_WRAPPER_COMPILER_H */ +diff --git a/wrapper/trace-clock.h b/wrapper/trace-clock.h +index 9f4e366..187fc82 100644 +--- a/wrapper/trace-clock.h ++++ b/wrapper/trace-clock.h +@@ -160,33 +160,30 @@ static inline void put_trace_clock(void) + + static inline u64 trace_clock_read64(void) + { +- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); ++ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock); + + if (likely(!ltc)) { + return trace_clock_read64_monotonic(); + } else { +- read_barrier_depends(); /* load ltc before content */ + return ltc->read64(); + } + } + + static inline u64 trace_clock_freq(void) + { +- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); ++ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock); + + if (!ltc) { + return trace_clock_freq_monotonic(); + } else { +- read_barrier_depends(); /* load ltc before content */ + return ltc->freq(); + } + } + + static inline int trace_clock_uuid(char *uuid) + { +- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); ++ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock); + +- read_barrier_depends(); /* load ltc before content */ + /* Use default UUID cb when NULL */ + if (!ltc || !ltc->uuid) { + return trace_clock_uuid_monotonic(uuid); +@@ -197,24 +194,22 @@ static inline int trace_clock_uuid(char *uuid) + + static inline const char *trace_clock_name(void) + { +- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); ++ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock); + + if (!ltc) { + return trace_clock_name_monotonic(); + } else { +- read_barrier_depends(); /* load ltc before content */ + return ltc->name(); + } + } + + static inline const char *trace_clock_description(void) + { +- struct lttng_trace_clock *ltc = READ_ONCE(lttng_trace_clock); ++ struct lttng_trace_clock *ltc = LTTNG_READ_ONCE(lttng_trace_clock); + + if (!ltc) { + return trace_clock_description_monotonic(); + } else { +- read_barrier_depends(); /* load ltc before content */ + return ltc->description(); + } + } +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0007-fix-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0007-fix-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch new file mode 100644 index 000000000..2843c9cb6 --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0007-fix-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch @@ -0,0 +1,59 @@ +From eae02feb58064eee5ce15a9f6bdffd107c47da05 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 31 Aug 2020 11:41:38 -0400 +Subject: [PATCH 07/10] fix: writeback: Drop I_DIRTY_TIME_EXPIRE (v5.9) + +See upstream commit: + + commit 5fcd57505c002efc5823a7355e21f48dd02d5a51 + Author: Jan Kara + Date: Fri May 29 16:24:43 2020 +0200 + + writeback: Drop I_DIRTY_TIME_EXPIRE + + The only use of I_DIRTY_TIME_EXPIRE is to detect in + __writeback_single_inode() that inode got there because flush worker + decided it's time to writeback the dirty inode time stamps (either + because we are syncing or because of age). However we can detect this + directly in __writeback_single_inode() and there's no need for the + strange propagation with I_DIRTY_TIME_EXPIRE flag. 
+ +Upstream-Status: Backport + +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +Change-Id: I92e37c2ff3ec36d431e8f9de5c8e37c5a2da55ea +--- + instrumentation/events/lttng-module/writeback.h | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +diff --git a/instrumentation/events/lttng-module/writeback.h b/instrumentation/events/lttng-module/writeback.h +index affb4eb..ece67ad 100644 +--- a/instrumentation/events/lttng-module/writeback.h ++++ b/instrumentation/events/lttng-module/writeback.h +@@ -46,7 +46,21 @@ static inline struct backing_dev_info *lttng_inode_to_bdi(struct inode *inode) + + #endif + +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++#define show_inode_state(state) \ ++ __print_flags(state, "|", \ ++ {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ ++ {I_DIRTY_DATASYNC, "I_DIRTY_DATASYNC"}, \ ++ {I_DIRTY_PAGES, "I_DIRTY_PAGES"}, \ ++ {I_NEW, "I_NEW"}, \ ++ {I_WILL_FREE, "I_WILL_FREE"}, \ ++ {I_FREEING, "I_FREEING"}, \ ++ {I_CLEAR, "I_CLEAR"}, \ ++ {I_SYNC, "I_SYNC"}, \ ++ {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ ++ {I_REFERENCED, "I_REFERENCED"} \ ++ ) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0)) + #define show_inode_state(state) \ + __print_flags(state, "|", \ + {I_DIRTY_SYNC, "I_DIRTY_SYNC"}, \ +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0008-fix-writeback-Fix-sync-livelock-due-to-b_dirty_time-.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0008-fix-writeback-Fix-sync-livelock-due-to-b_dirty_time-.patch new file mode 100644 index 000000000..7a0d9a38b --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0008-fix-writeback-Fix-sync-livelock-due-to-b_dirty_time-.patch @@ -0,0 +1,117 @@ +From 87b2affc3eb06f3fb2d0923f18af37713eb6814b Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Mon, 31 Aug 2020 14:16:01 -0400 +Subject: [PATCH 08/10] fix: writeback: Fix sync livelock due to b_dirty_time + processing (v5.9) + +See upstream commit: + + commit f9cae926f35e8230330f28c7b743ad088611a8de + Author: Jan Kara + Date: Fri May 29 16:08:58 2020 +0200 + + writeback: Fix sync livelock due to b_dirty_time processing + + When we are processing writeback for sync(2), move_expired_inodes() + didn't set any inode expiry value (older_than_this). This can result in + writeback never completing if there's steady stream of inodes added to + b_dirty_time list as writeback rechecks dirty lists after each writeback + round whether there's more work to be done. Fix the problem by using + sync(2) start time is inode expiry value when processing b_dirty_time + list similarly as for ordinarily dirtied inodes. This requires some + refactoring of older_than_this handling which simplifies the code + noticeably as a bonus. 
+ +Upstream-Status: Backport + +Change-Id: I8b894b13ccc14d9b8983ee4c2810a927c319560b +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +--- + .../events/lttng-module/writeback.h | 39 ++++++++++++------- + 1 file changed, 26 insertions(+), 13 deletions(-) + +diff --git a/instrumentation/events/lttng-module/writeback.h b/instrumentation/events/lttng-module/writeback.h +index ece67ad..e9018dd 100644 +--- a/instrumentation/events/lttng-module/writeback.h ++++ b/instrumentation/events/lttng-module/writeback.h +@@ -384,34 +384,48 @@ LTTNG_TRACEPOINT_EVENT_WBC_INSTANCE(wbc_balance_dirty_wait, writeback_wbc_balanc + #endif + LTTNG_TRACEPOINT_EVENT_WBC_INSTANCE(wbc_writepage, writeback_wbc_writepage) + +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++LTTNG_TRACEPOINT_EVENT(writeback_queue_io, ++ TP_PROTO(struct bdi_writeback *wb, ++ struct wb_writeback_work *work, ++ unsigned long dirtied_before, ++ int moved), ++ TP_ARGS(wb, work, dirtied_before, moved), ++ TP_FIELDS( ++ ctf_array_text(char, name, dev_name(wb->bdi->dev), 32) ++ ctf_integer(unsigned long, older, dirtied_before) ++ ctf_integer(int, moved, moved) ++ ) ++) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + LTTNG_TRACEPOINT_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + struct wb_writeback_work *work, +-#else +- unsigned long *older_than_this, +-#endif + int moved), +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + TP_ARGS(wb, work, moved), +-#else ++ TP_FIELDS( ++ ctf_array_text(char, name, dev_name(wb->bdi->dev), 32) ++ ctf_integer(int, moved, moved) ++ ) ++) ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) ++LTTNG_TRACEPOINT_EVENT(writeback_queue_io, ++ TP_PROTO(struct bdi_writeback *wb, ++ unsigned long *older_than_this, ++ int moved), + TP_ARGS(wb, older_than_this, moved), +-#endif + TP_FIELDS( + ctf_array_text(char, name, dev_name(wb->bdi->dev), 32) +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) +-#else + ctf_integer(unsigned long, older, + older_than_this ? *older_than_this : 0) + ctf_integer(long, age, + older_than_this ? 
+ (jiffies - *older_than_this) * 1000 / HZ + : -1) +-#endif + ctf_integer(int, moved, moved) + ) + ) ++#endif + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0)) + LTTNG_TRACEPOINT_EVENT_MAP(global_dirty_state, +@@ -460,7 +474,7 @@ LTTNG_TRACEPOINT_EVENT_MAP(global_dirty_state, + ctf_integer(unsigned long, dirty_limit, global_dirty_limit) + ) + ) +-#else ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) + LTTNG_TRACEPOINT_EVENT_MAP(global_dirty_state, + + writeback_global_dirty_state, +@@ -485,7 +499,6 @@ LTTNG_TRACEPOINT_EVENT_MAP(global_dirty_state, + ) + ) + #endif +-#endif + + #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) + +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0009-fix-version-ranges-for-ext4_discard_preallocations-a.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0009-fix-version-ranges-for-ext4_discard_preallocations-a.patch new file mode 100644 index 000000000..346e1d63a --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0009-fix-version-ranges-for-ext4_discard_preallocations-a.patch @@ -0,0 +1,52 @@ +From b74b25f349e92d7b5bdc8684e406d6a889f13773 Mon Sep 17 00:00:00 2001 +From: Michael Jeanson +Date: Fri, 4 Sep 2020 11:52:51 -0400 +Subject: [PATCH 09/10] fix: version ranges for ext4_discard_preallocations and + writeback_queue_io + +Upstream-Status: Backport + +Signed-off-by: Michael Jeanson +Signed-off-by: Mathieu Desnoyers +Change-Id: Id4fa53cb2e713cbda651e1a75deed91013115592 +--- + instrumentation/events/lttng-module/ext4.h | 3 ++- + instrumentation/events/lttng-module/writeback.h | 8 +++++++- + 2 files changed, 9 insertions(+), 2 deletions(-) + +diff --git a/instrumentation/events/lttng-module/ext4.h b/instrumentation/events/lttng-module/ext4.h +index 4476abb..b172c8d 100644 +--- a/instrumentation/events/lttng-module/ext4.h ++++ b/instrumentation/events/lttng-module/ext4.h +@@ -460,7 +460,8 @@ LTTNG_TRACEPOINT_EVENT(ext4_mb_release_group_pa, + ) + #endif + +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) || \ ++ LTTNG_KERNEL_RANGE(5,8,6, 5,9,0)) + LTTNG_TRACEPOINT_EVENT(ext4_discard_preallocations, + TP_PROTO(struct inode *inode, unsigned int len, unsigned int needed), + +diff --git a/instrumentation/events/lttng-module/writeback.h b/instrumentation/events/lttng-module/writeback.h +index e9018dd..09637d7 100644 +--- a/instrumentation/events/lttng-module/writeback.h ++++ b/instrumentation/events/lttng-module/writeback.h +@@ -384,7 +384,13 @@ LTTNG_TRACEPOINT_EVENT_WBC_INSTANCE(wbc_balance_dirty_wait, writeback_wbc_balanc + #endif + LTTNG_TRACEPOINT_EVENT_WBC_INSTANCE(wbc_writepage, writeback_wbc_writepage) + +-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0) || \ ++ LTTNG_KERNEL_RANGE(5,8,6, 5,9,0) || \ ++ LTTNG_KERNEL_RANGE(5,4,62, 5,5,0) || \ ++ LTTNG_KERNEL_RANGE(4,19,143, 4,20,0) || \ ++ LTTNG_KERNEL_RANGE(4,14,196, 4,15,0) || \ ++ LTTNG_KERNEL_RANGE(4,9,235, 4,10,0) || \ ++ LTTNG_KERNEL_RANGE(4,4,235, 4,5,0)) + LTTNG_TRACEPOINT_EVENT(writeback_queue_io, + TP_PROTO(struct bdi_writeback *wb, + struct wb_writeback_work *work, +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules/0010-Fix-system-call-filter-table.patch b/poky/meta/recipes-kernel/lttng/lttng-modules/0010-Fix-system-call-filter-table.patch new file mode 100644 index 000000000..a16750ddb --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-modules/0010-Fix-system-call-filter-table.patch @@ -0,0 +1,918 @@ +From 
ad594e3a953db1b0c3c059fde45b5a5494f6be78 Mon Sep 17 00:00:00 2001 +From: Mathieu Desnoyers +Date: Tue, 28 Jan 2020 16:02:44 -0500 +Subject: [PATCH 10/10] Fix: system call filter table + +The system call filter table has effectively been unused for a long +time due to system call name prefix mismatch. This means the overhead of +selective system call tracing was larger than it should have been because +the event payload preparation would be done for all system calls as soon +as a single system call is traced. + +However, fixing this underlying issue unearths several issues that crept +unnoticed when the "enabler" concept was introduced (after the original +implementation of the system call filter table). + +Here is a list of the issues which are resolved here: + +- Split lttng_syscalls_unregister into an unregister and destroy + function, thus awaiting for a grace period (and therefore quiescence + of the users) after unregistering the system call tracepoints before + freeing the system call filter data structures. This effectively fixes + a use-after-free. + +- The state for enabling "all" system calls vs enabling specific system + calls (and sequences of enable-disable) was incorrect with respect to + the "enablers" semantic. This is solved by always tracking the + bitmap of enabled system calls, and keeping this bitmap even when + enabling all system calls. The sc_filter is now always allocated + before system call tracing is registered to tracepoints, which means + it does not need to be RCU dereferenced anymore. + +Padding fields in the ABI are reserved to select whether to: + +- Trace either native or compat system call (or both, which is the + behavior currently implemented), +- Trace either system call entry or exit (or both, which is the + behavior currently implemented), +- Select the system call to trace by name (behavior currently + implemented) or by system call number, + +Upstream-Status: Backport + +Signed-off-by: Mathieu Desnoyers +--- + lttng-abi.c | 43 ++++++ + lttng-abi.h | 26 ++++ + lttng-events.c | 112 +++++++++++++-- + lttng-events.h | 31 ++++- + lttng-syscalls.c | 348 +++++++++++++++++++++++++---------------------- + 5 files changed, 380 insertions(+), 180 deletions(-) + +diff --git a/lttng-abi.c b/lttng-abi.c +index 64ea99d..b33879d 100644 +--- a/lttng-abi.c ++++ b/lttng-abi.c +@@ -1264,6 +1264,46 @@ nomem: + return ret; + } + ++static ++int lttng_abi_validate_event_param(struct lttng_kernel_event *event_param) ++{ ++ /* Limit ABI to implemented features. 
*/ ++ switch (event_param->instrumentation) { ++ case LTTNG_KERNEL_SYSCALL: ++ switch (event_param->u.syscall.entryexit) { ++ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT: ++ break; ++ default: ++ return -EINVAL; ++ } ++ switch (event_param->u.syscall.abi) { ++ case LTTNG_KERNEL_SYSCALL_ABI_ALL: ++ break; ++ default: ++ return -EINVAL; ++ } ++ switch (event_param->u.syscall.match) { ++ case LTTNG_SYSCALL_MATCH_NAME: ++ break; ++ default: ++ return -EINVAL; ++ } ++ break; ++ ++ case LTTNG_KERNEL_TRACEPOINT: /* Fallthrough */ ++ case LTTNG_KERNEL_KPROBE: /* Fallthrough */ ++ case LTTNG_KERNEL_KRETPROBE: /* Fallthrough */ ++ case LTTNG_KERNEL_NOOP: /* Fallthrough */ ++ case LTTNG_KERNEL_UPROBE: ++ break; ++ ++ case LTTNG_KERNEL_FUNCTION: /* Fallthrough */ ++ default: ++ return -EINVAL; ++ } ++ return 0; ++} ++ + static + int lttng_abi_create_event(struct file *channel_file, + struct lttng_kernel_event *event_param) +@@ -1305,6 +1345,9 @@ int lttng_abi_create_event(struct file *channel_file, + ret = -EOVERFLOW; + goto refcount_error; + } ++ ret = lttng_abi_validate_event_param(event_param); ++ if (ret) ++ goto event_error; + if (event_param->instrumentation == LTTNG_KERNEL_TRACEPOINT + || event_param->instrumentation == LTTNG_KERNEL_SYSCALL) { + struct lttng_enabler *enabler; +diff --git a/lttng-abi.h b/lttng-abi.h +index 1d356ab..51d60e5 100644 +--- a/lttng-abi.h ++++ b/lttng-abi.h +@@ -90,6 +90,31 @@ struct lttng_kernel_event_callsite { + } u; + } __attribute__((packed)); + ++enum lttng_kernel_syscall_entryexit { ++ LTTNG_KERNEL_SYSCALL_ENTRYEXIT = 0, ++ LTTNG_KERNEL_SYSCALL_ENTRY = 1, /* Not implemented. */ ++ LTTNG_KERNEL_SYSCALL_EXIT = 2, /* Not implemented. */ ++}; ++ ++enum lttng_kernel_syscall_abi { ++ LTTNG_KERNEL_SYSCALL_ABI_ALL = 0, ++ LTTNG_KERNEL_SYSCALL_ABI_NATIVE = 1, /* Not implemented. */ ++ LTTNG_KERNEL_SYSCALL_ABI_COMPAT = 2, /* Not implemented. */ ++}; ++ ++enum lttng_kernel_syscall_match { ++ LTTNG_SYSCALL_MATCH_NAME = 0, ++ LTTNG_SYSCALL_MATCH_NR = 1, /* Not implemented. */ ++}; ++ ++struct lttng_kernel_syscall { ++ uint8_t entryexit; /* enum lttng_kernel_syscall_entryexit */ ++ uint8_t abi; /* enum lttng_kernel_syscall_abi */ ++ uint8_t match; /* enum lttng_kernel_syscall_match */ ++ uint8_t padding; ++ uint32_t nr; /* For LTTNG_SYSCALL_MATCH_NR */ ++} __attribute__((packed)); ++ + /* + * For syscall tracing, name = "*" means "enable all". 
+ */ +@@ -106,6 +131,7 @@ struct lttng_kernel_event { + struct lttng_kernel_kprobe kprobe; + struct lttng_kernel_function_tracer ftrace; + struct lttng_kernel_uprobe uprobe; ++ struct lttng_kernel_syscall syscall; + char padding[LTTNG_KERNEL_EVENT_PADDING2]; + } u; + } __attribute__((packed)); +diff --git a/lttng-events.c b/lttng-events.c +index d719294..4c0b04a 100644 +--- a/lttng-events.c ++++ b/lttng-events.c +@@ -201,6 +201,10 @@ void lttng_session_destroy(struct lttng_session *session) + WARN_ON(ret); + } + synchronize_trace(); /* Wait for in-flight events to complete */ ++ list_for_each_entry(chan, &session->chan, list) { ++ ret = lttng_syscalls_destroy(chan); ++ WARN_ON(ret); ++ } + list_for_each_entry_safe(enabler, tmpenabler, + &session->enablers_head, node) + lttng_enabler_destroy(enabler); +@@ -740,6 +744,28 @@ struct lttng_event *_lttng_event_create(struct lttng_channel *chan, + event->enabled = 0; + event->registered = 0; + event->desc = event_desc; ++ switch (event_param->u.syscall.entryexit) { ++ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT: ++ ret = -EINVAL; ++ goto register_error; ++ case LTTNG_KERNEL_SYSCALL_ENTRY: ++ event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY; ++ break; ++ case LTTNG_KERNEL_SYSCALL_EXIT: ++ event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT; ++ break; ++ } ++ switch (event_param->u.syscall.abi) { ++ case LTTNG_KERNEL_SYSCALL_ABI_ALL: ++ ret = -EINVAL; ++ goto register_error; ++ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE: ++ event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE; ++ break; ++ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT: ++ event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT; ++ break; ++ } + if (!event->desc) { + ret = -EINVAL; + goto register_error; +@@ -826,8 +852,7 @@ void register_event(struct lttng_event *event) + event); + break; + case LTTNG_KERNEL_SYSCALL: +- ret = lttng_syscall_filter_enable(event->chan, +- desc->name); ++ ret = lttng_syscall_filter_enable(event->chan, event); + break; + case LTTNG_KERNEL_KPROBE: + case LTTNG_KERNEL_UPROBE: +@@ -870,8 +895,7 @@ int _lttng_event_unregister(struct lttng_event *event) + ret = 0; + break; + case LTTNG_KERNEL_SYSCALL: +- ret = lttng_syscall_filter_disable(event->chan, +- desc->name); ++ ret = lttng_syscall_filter_disable(event->chan, event); + break; + case LTTNG_KERNEL_NOOP: + ret = 0; +@@ -1203,39 +1227,87 @@ int lttng_desc_match_enabler(const struct lttng_event_desc *desc, + struct lttng_enabler *enabler) + { + const char *desc_name, *enabler_name; ++ bool compat = false, entry = false; + + enabler_name = enabler->event_param.name; + switch (enabler->event_param.instrumentation) { + case LTTNG_KERNEL_TRACEPOINT: + desc_name = desc->name; ++ switch (enabler->type) { ++ case LTTNG_ENABLER_STAR_GLOB: ++ return lttng_match_enabler_star_glob(desc_name, enabler_name); ++ case LTTNG_ENABLER_NAME: ++ return lttng_match_enabler_name(desc_name, enabler_name); ++ default: ++ return -EINVAL; ++ } + break; + case LTTNG_KERNEL_SYSCALL: + desc_name = desc->name; +- if (!strncmp(desc_name, "compat_", strlen("compat_"))) ++ if (!strncmp(desc_name, "compat_", strlen("compat_"))) { + desc_name += strlen("compat_"); ++ compat = true; ++ } + if (!strncmp(desc_name, "syscall_exit_", + strlen("syscall_exit_"))) { + desc_name += strlen("syscall_exit_"); + } else if (!strncmp(desc_name, "syscall_entry_", + strlen("syscall_entry_"))) { + desc_name += strlen("syscall_entry_"); ++ entry = true; + } else { + WARN_ON_ONCE(1); + return -EINVAL; + } ++ switch (enabler->event_param.u.syscall.entryexit) { ++ case LTTNG_KERNEL_SYSCALL_ENTRYEXIT: 
++ break; ++ case LTTNG_KERNEL_SYSCALL_ENTRY: ++ if (!entry) ++ return 0; ++ break; ++ case LTTNG_KERNEL_SYSCALL_EXIT: ++ if (entry) ++ return 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ switch (enabler->event_param.u.syscall.abi) { ++ case LTTNG_KERNEL_SYSCALL_ABI_ALL: ++ break; ++ case LTTNG_KERNEL_SYSCALL_ABI_NATIVE: ++ if (compat) ++ return 0; ++ break; ++ case LTTNG_KERNEL_SYSCALL_ABI_COMPAT: ++ if (!compat) ++ return 0; ++ break; ++ default: ++ return -EINVAL; ++ } ++ switch (enabler->event_param.u.syscall.match) { ++ case LTTNG_SYSCALL_MATCH_NAME: ++ switch (enabler->type) { ++ case LTTNG_ENABLER_STAR_GLOB: ++ return lttng_match_enabler_star_glob(desc_name, enabler_name); ++ case LTTNG_ENABLER_NAME: ++ return lttng_match_enabler_name(desc_name, enabler_name); ++ default: ++ return -EINVAL; ++ } ++ break; ++ case LTTNG_SYSCALL_MATCH_NR: ++ return -EINVAL; /* Not implemented. */ ++ default: ++ return -EINVAL; ++ } + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } +- switch (enabler->type) { +- case LTTNG_ENABLER_STAR_GLOB: +- return lttng_match_enabler_star_glob(desc_name, enabler_name); +- case LTTNG_ENABLER_NAME: +- return lttng_match_enabler_name(desc_name, enabler_name); +- default: +- return -EINVAL; +- } + } + + static +@@ -1361,9 +1433,21 @@ void lttng_create_event_if_missing(struct lttng_enabler *enabler) + static + int lttng_enabler_ref_events(struct lttng_enabler *enabler) + { +- struct lttng_session *session = enabler->chan->session; ++ struct lttng_channel *chan = enabler->chan; ++ struct lttng_session *session = chan->session; + struct lttng_event *event; + ++ if (enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL && ++ enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT && ++ enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL && ++ enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME && ++ !strcmp(enabler->event_param.name, "*")) { ++ if (enabler->enabled) ++ WRITE_ONCE(chan->syscall_all, 1); ++ else ++ WRITE_ONCE(chan->syscall_all, 0); ++ } ++ + /* First ensure that probe events are created for this enabler. */ + lttng_create_event_if_missing(enabler); + +diff --git a/lttng-events.h b/lttng-events.h +index a36a312..d4d9976 100644 +--- a/lttng-events.h ++++ b/lttng-events.h +@@ -292,6 +292,16 @@ struct lttng_uprobe_handler { + struct list_head node; + }; + ++enum lttng_syscall_entryexit { ++ LTTNG_SYSCALL_ENTRY, ++ LTTNG_SYSCALL_EXIT, ++}; ++ ++enum lttng_syscall_abi { ++ LTTNG_SYSCALL_ABI_NATIVE, ++ LTTNG_SYSCALL_ABI_COMPAT, ++}; ++ + /* + * lttng_event structure is referred to by the tracing fast path. It must be + * kept small. 
+@@ -318,6 +328,11 @@ struct lttng_event { + struct inode *inode; + struct list_head head; + } uprobe; ++ struct { ++ char *syscall_name; ++ enum lttng_syscall_entryexit entryexit; ++ enum lttng_syscall_abi abi; ++ } syscall; + } u; + struct list_head list; /* Event list in session */ + unsigned int metadata_dumped:1; +@@ -457,10 +472,10 @@ struct lttng_channel { + struct lttng_syscall_filter *sc_filter; + int header_type; /* 0: unset, 1: compact, 2: large */ + enum channel_type channel_type; ++ int syscall_all; + unsigned int metadata_dumped:1, + sys_enter_registered:1, + sys_exit_registered:1, +- syscall_all:1, + tstate:1; /* Transient enable state */ + }; + +@@ -653,10 +668,11 @@ void lttng_clock_unref(void); + #if defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS) + int lttng_syscalls_register(struct lttng_channel *chan, void *filter); + int lttng_syscalls_unregister(struct lttng_channel *chan); ++int lttng_syscalls_destroy(struct lttng_channel *chan); + int lttng_syscall_filter_enable(struct lttng_channel *chan, +- const char *name); ++ struct lttng_event *event); + int lttng_syscall_filter_disable(struct lttng_channel *chan, +- const char *name); ++ struct lttng_event *event); + long lttng_channel_syscall_mask(struct lttng_channel *channel, + struct lttng_kernel_syscall_mask __user *usyscall_mask); + #else +@@ -670,14 +686,19 @@ static inline int lttng_syscalls_unregister(struct lttng_channel *chan) + return 0; + } + ++static inline int lttng_syscalls_destroy(struct lttng_channel *chan) ++{ ++ return 0; ++} ++ + static inline int lttng_syscall_filter_enable(struct lttng_channel *chan, +- const char *name) ++ struct lttng_event *event); + { + return -ENOSYS; + } + + static inline int lttng_syscall_filter_disable(struct lttng_channel *chan, +- const char *name) ++ struct lttng_event *event); + { + return -ENOSYS; + } +diff --git a/lttng-syscalls.c b/lttng-syscalls.c +index 97f1ba9..26cead6 100644 +--- a/lttng-syscalls.c ++++ b/lttng-syscalls.c +@@ -367,8 +367,10 @@ const struct trace_syscall_entry compat_sc_exit_table[] = { + #undef CREATE_SYSCALL_TABLE + + struct lttng_syscall_filter { +- DECLARE_BITMAP(sc, NR_syscalls); +- DECLARE_BITMAP(sc_compat, NR_compat_syscalls); ++ DECLARE_BITMAP(sc_entry, NR_syscalls); ++ DECLARE_BITMAP(sc_exit, NR_syscalls); ++ DECLARE_BITMAP(sc_compat_entry, NR_compat_syscalls); ++ DECLARE_BITMAP(sc_compat_exit, NR_compat_syscalls); + }; + + static void syscall_entry_unknown(struct lttng_event *event, +@@ -391,29 +393,23 @@ void syscall_entry_probe(void *__data, struct pt_regs *regs, long id) + size_t table_len; + + if (unlikely(in_compat_syscall())) { +- struct lttng_syscall_filter *filter; +- +- filter = lttng_rcu_dereference(chan->sc_filter); +- if (filter) { +- if (id < 0 || id >= NR_compat_syscalls +- || !test_bit(id, filter->sc_compat)) { +- /* System call filtered out. */ +- return; +- } ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ ++ if (id < 0 || id >= NR_compat_syscalls ++ || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_compat_entry))) { ++ /* System call filtered out. */ ++ return; + } + table = compat_sc_table; + table_len = ARRAY_SIZE(compat_sc_table); + unknown_event = chan->sc_compat_unknown; + } else { +- struct lttng_syscall_filter *filter; +- +- filter = lttng_rcu_dereference(chan->sc_filter); +- if (filter) { +- if (id < 0 || id >= NR_syscalls +- || !test_bit(id, filter->sc)) { +- /* System call filtered out. 
*/ +- return; +- } ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ ++ if (id < 0 || id >= NR_syscalls ++ || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_entry))) { ++ /* System call filtered out. */ ++ return; + } + table = sc_table; + table_len = ARRAY_SIZE(sc_table); +@@ -545,29 +541,23 @@ void syscall_exit_probe(void *__data, struct pt_regs *regs, long ret) + + id = syscall_get_nr(current, regs); + if (unlikely(in_compat_syscall())) { +- struct lttng_syscall_filter *filter; +- +- filter = lttng_rcu_dereference(chan->sc_filter); +- if (filter) { +- if (id < 0 || id >= NR_compat_syscalls +- || !test_bit(id, filter->sc_compat)) { +- /* System call filtered out. */ +- return; +- } ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ ++ if (id < 0 || id >= NR_compat_syscalls ++ || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_compat_exit))) { ++ /* System call filtered out. */ ++ return; + } + table = compat_sc_exit_table; + table_len = ARRAY_SIZE(compat_sc_exit_table); + unknown_event = chan->compat_sc_exit_unknown; + } else { +- struct lttng_syscall_filter *filter; +- +- filter = lttng_rcu_dereference(chan->sc_filter); +- if (filter) { +- if (id < 0 || id >= NR_syscalls +- || !test_bit(id, filter->sc)) { +- /* System call filtered out. */ +- return; +- } ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ ++ if (id < 0 || id >= NR_syscalls ++ || (!READ_ONCE(chan->syscall_all) && !test_bit(id, filter->sc_exit))) { ++ /* System call filtered out. */ ++ return; + } + table = sc_exit_table; + table_len = ARRAY_SIZE(sc_exit_table); +@@ -713,27 +703,23 @@ int fill_table(const struct trace_syscall_entry *table, size_t table_len, + memset(&ev, 0, sizeof(ev)); + switch (type) { + case SC_TYPE_ENTRY: +- strncpy(ev.name, SYSCALL_ENTRY_STR, +- LTTNG_KERNEL_SYM_NAME_LEN); ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_ENTRY; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_NATIVE; + break; + case SC_TYPE_EXIT: +- strncpy(ev.name, SYSCALL_EXIT_STR, +- LTTNG_KERNEL_SYM_NAME_LEN); ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_EXIT; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_NATIVE; + break; + case SC_TYPE_COMPAT_ENTRY: +- strncpy(ev.name, COMPAT_SYSCALL_ENTRY_STR, +- LTTNG_KERNEL_SYM_NAME_LEN); ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_ENTRY; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_COMPAT; + break; + case SC_TYPE_COMPAT_EXIT: +- strncpy(ev.name, COMPAT_SYSCALL_EXIT_STR, +- LTTNG_KERNEL_SYM_NAME_LEN); +- break; +- default: +- BUG_ON(1); ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_EXIT; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_COMPAT; + break; + } +- strncat(ev.name, desc->name, +- LTTNG_KERNEL_SYM_NAME_LEN - strlen(ev.name) - 1); ++ strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); + ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; + ev.instrumentation = LTTNG_KERNEL_SYSCALL; + chan_table[i] = _lttng_event_create(chan, &ev, filter, +@@ -803,6 +789,8 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter) + strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); + ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; + ev.instrumentation = LTTNG_KERNEL_SYSCALL; ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_ENTRY; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_NATIVE; + chan->sc_unknown = _lttng_event_create(chan, &ev, filter, + desc, + ev.instrumentation); +@@ -820,6 +808,8 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter) + strncpy(ev.name, desc->name, 
LTTNG_KERNEL_SYM_NAME_LEN); + ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; + ev.instrumentation = LTTNG_KERNEL_SYSCALL; ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_ENTRY; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_COMPAT; + chan->sc_compat_unknown = _lttng_event_create(chan, &ev, filter, + desc, + ev.instrumentation); +@@ -837,6 +827,8 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter) + strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); + ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; + ev.instrumentation = LTTNG_KERNEL_SYSCALL; ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_EXIT; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_COMPAT; + chan->compat_sc_exit_unknown = _lttng_event_create(chan, &ev, + filter, desc, + ev.instrumentation); +@@ -854,6 +846,8 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter) + strncpy(ev.name, desc->name, LTTNG_KERNEL_SYM_NAME_LEN); + ev.name[LTTNG_KERNEL_SYM_NAME_LEN - 1] = '\0'; + ev.instrumentation = LTTNG_KERNEL_SYSCALL; ++ ev.u.syscall.entryexit = LTTNG_KERNEL_SYSCALL_EXIT; ++ ev.u.syscall.abi = LTTNG_KERNEL_SYSCALL_ABI_NATIVE; + chan->sc_exit_unknown = _lttng_event_create(chan, &ev, filter, + desc, ev.instrumentation); + WARN_ON_ONCE(!chan->sc_exit_unknown); +@@ -883,6 +877,14 @@ int lttng_syscalls_register(struct lttng_channel *chan, void *filter) + if (ret) + return ret; + #endif ++ ++ if (!chan->sc_filter) { ++ chan->sc_filter = kzalloc(sizeof(struct lttng_syscall_filter), ++ GFP_KERNEL); ++ if (!chan->sc_filter) ++ return -ENOMEM; ++ } ++ + if (!chan->sys_enter_registered) { + ret = lttng_wrapper_tracepoint_probe_register("sys_enter", + (void *) syscall_entry_probe, chan); +@@ -930,7 +932,11 @@ int lttng_syscalls_unregister(struct lttng_channel *chan) + return ret; + chan->sys_exit_registered = 0; + } +- /* lttng_event destroy will be performed by lttng_session_destroy() */ ++ return 0; ++} ++ ++int lttng_syscalls_destroy(struct lttng_channel *chan) ++{ + kfree(chan->sc_table); + kfree(chan->sc_exit_table); + #ifdef CONFIG_COMPAT +@@ -993,136 +999,150 @@ uint32_t get_sc_tables_len(void) + return ARRAY_SIZE(sc_table) + ARRAY_SIZE(compat_sc_table); + } + +-int lttng_syscall_filter_enable(struct lttng_channel *chan, +- const char *name) ++static ++const char *get_syscall_name(struct lttng_event *event) + { +- int syscall_nr, compat_syscall_nr, ret; +- struct lttng_syscall_filter *filter; ++ size_t prefix_len = 0; + +- WARN_ON_ONCE(!chan->sc_table); ++ WARN_ON_ONCE(event->instrumentation != LTTNG_KERNEL_SYSCALL); + +- if (!name) { +- /* Enable all system calls by removing filter */ +- if (chan->sc_filter) { +- filter = chan->sc_filter; +- rcu_assign_pointer(chan->sc_filter, NULL); +- synchronize_trace(); +- kfree(filter); ++ switch (event->u.syscall.entryexit) { ++ case LTTNG_SYSCALL_ENTRY: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ prefix_len = strlen(SYSCALL_ENTRY_STR); ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ prefix_len = strlen(COMPAT_SYSCALL_ENTRY_STR); ++ break; + } +- chan->syscall_all = 1; +- return 0; +- } +- +- if (!chan->sc_filter) { +- if (chan->syscall_all) { +- /* +- * All syscalls are already enabled. 
+- */ +- return -EEXIST; ++ break; ++ case LTTNG_SYSCALL_EXIT: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ prefix_len = strlen(SYSCALL_EXIT_STR); ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ prefix_len = strlen(COMPAT_SYSCALL_EXIT_STR); ++ break; + } +- filter = kzalloc(sizeof(struct lttng_syscall_filter), +- GFP_KERNEL); +- if (!filter) +- return -ENOMEM; +- } else { +- filter = chan->sc_filter; ++ break; + } +- syscall_nr = get_syscall_nr(name); +- compat_syscall_nr = get_compat_syscall_nr(name); +- if (syscall_nr < 0 && compat_syscall_nr < 0) { +- ret = -ENOENT; +- goto error; ++ WARN_ON_ONCE(prefix_len == 0); ++ return event->desc->name + prefix_len; ++} ++ ++int lttng_syscall_filter_enable(struct lttng_channel *chan, ++ struct lttng_event *event) ++{ ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ const char *syscall_name; ++ unsigned long *bitmap; ++ int syscall_nr; ++ ++ WARN_ON_ONCE(!chan->sc_table); ++ ++ syscall_name = get_syscall_name(event); ++ ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ syscall_nr = get_syscall_nr(syscall_name); ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ syscall_nr = get_compat_syscall_nr(syscall_name); ++ break; ++ default: ++ return -EINVAL; + } +- if (syscall_nr >= 0) { +- if (test_bit(syscall_nr, filter->sc)) { +- ret = -EEXIST; +- goto error; ++ if (syscall_nr < 0) ++ return -ENOENT; ++ ++ ++ switch (event->u.syscall.entryexit) { ++ case LTTNG_SYSCALL_ENTRY: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ bitmap = filter->sc_entry; ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ bitmap = filter->sc_compat_entry; ++ break; + } +- bitmap_set(filter->sc, syscall_nr, 1); +- } +- if (compat_syscall_nr >= 0) { +- if (test_bit(compat_syscall_nr, filter->sc_compat)) { +- ret = -EEXIST; +- goto error; ++ break; ++ case LTTNG_SYSCALL_EXIT: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ bitmap = filter->sc_exit; ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ bitmap = filter->sc_compat_exit; ++ break; + } +- bitmap_set(filter->sc_compat, compat_syscall_nr, 1); ++ break; ++ default: ++ return -EINVAL; + } +- if (!chan->sc_filter) +- rcu_assign_pointer(chan->sc_filter, filter); ++ if (test_bit(syscall_nr, bitmap)) ++ return -EEXIST; ++ bitmap_set(bitmap, syscall_nr, 1); + return 0; +- +-error: +- if (!chan->sc_filter) +- kfree(filter); +- return ret; + } + + int lttng_syscall_filter_disable(struct lttng_channel *chan, +- const char *name) ++ struct lttng_event *event) + { +- int syscall_nr, compat_syscall_nr, ret; +- struct lttng_syscall_filter *filter; ++ struct lttng_syscall_filter *filter = chan->sc_filter; ++ const char *syscall_name; ++ unsigned long *bitmap; ++ int syscall_nr; + + WARN_ON_ONCE(!chan->sc_table); + +- if (!chan->sc_filter) { +- if (!chan->syscall_all) +- return -EEXIST; +- filter = kzalloc(sizeof(struct lttng_syscall_filter), +- GFP_KERNEL); +- if (!filter) +- return -ENOMEM; +- /* Trace all system calls, then apply disable. 
*/ +- bitmap_set(filter->sc, 0, NR_syscalls); +- bitmap_set(filter->sc_compat, 0, NR_compat_syscalls); +- } else { +- filter = chan->sc_filter; ++ syscall_name = get_syscall_name(event); ++ ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ syscall_nr = get_syscall_nr(syscall_name); ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ syscall_nr = get_compat_syscall_nr(syscall_name); ++ break; ++ default: ++ return -EINVAL; + } ++ if (syscall_nr < 0) ++ return -ENOENT; + +- if (!name) { +- /* Fail if all syscalls are already disabled. */ +- if (bitmap_empty(filter->sc, NR_syscalls) +- && bitmap_empty(filter->sc_compat, +- NR_compat_syscalls)) { +- ret = -EEXIST; +- goto error; +- } + +- /* Disable all system calls */ +- bitmap_clear(filter->sc, 0, NR_syscalls); +- bitmap_clear(filter->sc_compat, 0, NR_compat_syscalls); +- goto apply_filter; +- } +- syscall_nr = get_syscall_nr(name); +- compat_syscall_nr = get_compat_syscall_nr(name); +- if (syscall_nr < 0 && compat_syscall_nr < 0) { +- ret = -ENOENT; +- goto error; +- } +- if (syscall_nr >= 0) { +- if (!test_bit(syscall_nr, filter->sc)) { +- ret = -EEXIST; +- goto error; ++ switch (event->u.syscall.entryexit) { ++ case LTTNG_SYSCALL_ENTRY: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ bitmap = filter->sc_entry; ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ bitmap = filter->sc_compat_entry; ++ break; + } +- bitmap_clear(filter->sc, syscall_nr, 1); +- } +- if (compat_syscall_nr >= 0) { +- if (!test_bit(compat_syscall_nr, filter->sc_compat)) { +- ret = -EEXIST; +- goto error; ++ break; ++ case LTTNG_SYSCALL_EXIT: ++ switch (event->u.syscall.abi) { ++ case LTTNG_SYSCALL_ABI_NATIVE: ++ bitmap = filter->sc_exit; ++ break; ++ case LTTNG_SYSCALL_ABI_COMPAT: ++ bitmap = filter->sc_compat_exit; ++ break; + } +- bitmap_clear(filter->sc_compat, compat_syscall_nr, 1); ++ break; ++ default: ++ return -EINVAL; + } +-apply_filter: +- if (!chan->sc_filter) +- rcu_assign_pointer(chan->sc_filter, filter); +- chan->syscall_all = 0; +- return 0; ++ if (!test_bit(syscall_nr, bitmap)) ++ return -EEXIST; ++ bitmap_clear(bitmap, syscall_nr, 1); + +-error: +- if (!chan->sc_filter) +- kfree(filter); +- return ret; ++ return 0; + } + + static +@@ -1236,6 +1256,9 @@ const struct file_operations lttng_syscall_list_fops = { + .release = seq_release, + }; + ++/* ++ * A syscall is enabled if it is traced for either entry or exit. 
++ */ + long lttng_channel_syscall_mask(struct lttng_channel *channel, + struct lttng_kernel_syscall_mask __user *usyscall_mask) + { +@@ -1262,8 +1285,9 @@ long lttng_channel_syscall_mask(struct lttng_channel *channel, + char state; + + if (channel->sc_table) { +- if (filter) +- state = test_bit(bit, filter->sc); ++ if (!READ_ONCE(channel->syscall_all) && filter) ++ state = test_bit(bit, filter->sc_entry) ++ || test_bit(bit, filter->sc_exit); + else + state = 1; + } else { +@@ -1275,9 +1299,11 @@ long lttng_channel_syscall_mask(struct lttng_channel *channel, + char state; + + if (channel->compat_sc_table) { +- if (filter) ++ if (!READ_ONCE(channel->syscall_all) && filter) + state = test_bit(bit - ARRAY_SIZE(sc_table), +- filter->sc_compat); ++ filter->sc_compat_entry) ++ || test_bit(bit - ARRAY_SIZE(sc_table), ++ filter->sc_compat_exit); + else + state = 1; + } else { +-- +2.19.1 + diff --git a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb index 49b7a116b..e36b327a0 100644 --- a/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb +++ b/poky/meta/recipes-kernel/lttng/lttng-modules_2.12.2.bb @@ -6,11 +6,21 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=3f882d431dc0f32f1f44c0707aa41128" inherit module -COMPATIBLE_HOST = '(x86_64|i.86|powerpc|aarch64|mips|nios2|arm|riscv).*-linux' +include lttng-platforms.inc SRC_URI = "https://lttng.org/files/${BPN}/${BPN}-${PV}.tar.bz2 \ file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ + file://0001-Kconfig-fix-dependency-issue-when-building-in-tree-w.patch \ + file://0002-fix-Move-mmutrace.h-into-the-mmu-sub-directory-v5.9.patch \ + file://0003-fix-KVM-x86-mmu-Make-kvm_mmu_page-definition-and-acc.patch \ + file://0004-fix-ext4-limit-the-length-of-per-inode-prealloc-list.patch \ + file://0005-fix-ext4-indicate-via-a-block-bitmap-read-is-prefetc.patch \ + file://0006-fix-removal-of-smp_-read_barrier_depends-v5.9.patch \ + file://0007-fix-writeback-Drop-I_DIRTY_TIME_EXPIRE-v5.9.patch \ + file://0008-fix-writeback-Fix-sync-livelock-due-to-b_dirty_time-.patch \ + file://0009-fix-version-ranges-for-ext4_discard_preallocations-a.patch \ + file://0010-Fix-system-call-filter-table.patch \ " SRC_URI[sha256sum] = "df50bc3bd58679705714f17721acf619a8b0cedc694f8a97052aa5099626feca" @@ -36,7 +46,7 @@ SRC_URI_class-devupstream = "git://git.lttng.org/lttng-modules;branch=stable-2.1 file://Makefile-Do-not-fail-if-CONFIG_TRACEPOINTS-is-not-en.patch \ file://BUILD_RUNTIME_BUG_ON-vs-gcc7.patch \ " -SRCREV_class-devupstream = "11441f8f17f7825f529e2f6c54d3605771709260" -PV_class-devupstream = "2.12.1+git${SRCPV}" +SRCREV_class-devupstream = "ad594e3a953db1b0c3c059fde45b5a5494f6be78" +PV_class-devupstream = "2.12.2+git${SRCPV}" S_class-devupstream = "${WORKDIR}/git" SRCREV_FORMAT ?= "lttng_git" diff --git a/poky/meta/recipes-kernel/lttng/lttng-platforms.inc b/poky/meta/recipes-kernel/lttng/lttng-platforms.inc new file mode 100644 index 000000000..aa8220bbb --- /dev/null +++ b/poky/meta/recipes-kernel/lttng/lttng-platforms.inc @@ -0,0 +1,17 @@ +# +# Whether the platform supports kernel tracing +# +LTTNGMODULES = "lttng-modules" +LTTNGMODULES_arc = "" +LTTNGMODULES_riscv64 = "" + +COMPATIBLE_HOST_riscv64_pn-lttng-modules = "null" +COMPATIBLE_HOST_arc_pn-lttng-modules = "null" + +# Whether the platform supports userspace tracing +# lttng-ust uses sched_getcpu() which is not there on for some platforms. 
+LTTNGUST = "lttng-ust" +LTTNGUST_arc = "" + +COMPATIBLE_HOST_arc_pn-lttng-ust = "null" + diff --git a/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb b/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb index e9c8e18e2..0787e04d1 100644 --- a/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb +++ b/poky/meta/recipes-kernel/lttng/lttng-tools_2.12.2.bb @@ -9,9 +9,12 @@ LIC_FILES_CHKSUM = "file://LICENSE;md5=40ef17463fbd6f377db3c47b1cbaded8 \ file://LICENSES/GPL-2.0;md5=e68f69a54b44ba526ad7cb963e18fbce \ file://LICENSES/LGPL-2.1;md5=9920968d0f2ff585ce61fae30344dd95" +include lttng-platforms.inc + DEPENDS = "liburcu popt libxml2 util-linux" RDEPENDS_${PN} = "libgcc" -RDEPENDS_${PN}-ptest += "make perl bash gawk babeltrace procps perl-module-overloading coreutils util-linux kmod lttng-modules sed python3-core" +RRECOMMENDS_${PN} += "${LTTNGMODULES}" +RDEPENDS_${PN}-ptest += "make perl bash gawk babeltrace procps perl-module-overloading coreutils util-linux kmod ${LTTNGMODULES} sed python3-core" RDEPENDS_${PN}-ptest_append_libc-glibc = " glibc-utils" RDEPENDS_${PN}-ptest_append_libc-musl = " musl-utils" # babelstats.pl wants getopt-long @@ -21,12 +24,11 @@ PYTHON_OPTION = "am_cv_python_pyexecdir='${PYTHON_SITEPACKAGES_DIR}' \ am_cv_python_pythondir='${PYTHON_SITEPACKAGES_DIR}' \ PYTHON_INCLUDE='-I${STAGING_INCDIR}/python${PYTHON_BASEVERSION}${PYTHON_ABI}' \ " -PACKAGECONFIG ??= "lttng-ust" +PACKAGECONFIG ??= "${LTTNGUST}" PACKAGECONFIG[python] = "--enable-python-bindings ${PYTHON_OPTION},,python3 swig-native" PACKAGECONFIG[lttng-ust] = "--with-lttng-ust, --without-lttng-ust, lttng-ust" PACKAGECONFIG[kmod] = "--with-kmod, --without-kmod, kmod" PACKAGECONFIG[manpages] = "--enable-man-pages, --disable-man-pages, asciidoc-native xmlto-native libxslt-native" -PACKAGECONFIG_remove_arc = "lttng-ust" SRC_URI = "https://lttng.org/files/lttng-tools/lttng-tools-${PV}.tar.bz2 \ file://0001-tests-do-not-strip-a-helper-library.patch \ diff --git a/poky/meta/recipes-kernel/lttng/lttng-ust_2.12.0.bb b/poky/meta/recipes-kernel/lttng/lttng-ust_2.12.0.bb index ad544d1b4..67a4307c7 100644 --- a/poky/meta/recipes-kernel/lttng/lttng-ust_2.12.0.bb +++ b/poky/meta/recipes-kernel/lttng/lttng-ust_2.12.0.bb @@ -15,6 +15,8 @@ PYTHON_OPTION = "am_cv_python_pyexecdir='${PYTHON_SITEPACKAGES_DIR}' \ inherit autotools lib_package manpages python3native +include lttng-platforms.inc + EXTRA_OECONF = "--disable-numa" DEPENDS = "liburcu util-linux" diff --git a/poky/meta/recipes-multimedia/alsa/alsa-plugins_1.2.2.bb b/poky/meta/recipes-multimedia/alsa/alsa-plugins_1.2.2.bb index 52dee3012..e8402a6fc 100644 --- a/poky/meta/recipes-multimedia/alsa/alsa-plugins_1.2.2.bb +++ b/poky/meta/recipes-multimedia/alsa/alsa-plugins_1.2.2.bb @@ -51,7 +51,7 @@ FILES_${PN} = "" ALLOW_EMPTY_${PN} = "1" do_install_append() { - rm ${D}${libdir}/alsa-lib/*.la + rm -f ${D}${libdir}/alsa-lib/*.la if [ "${@bb.utils.contains('PACKAGECONFIG', 'pulseaudio', 'yes', 'no', d)}" = "yes" ]; then # We use the example as is. 
Upstream installs the file under diff --git a/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb b/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb index 5c23e8520..e7d8a284b 100644 --- a/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb +++ b/poky/meta/recipes-sato/matchbox-desktop/matchbox-desktop_2.2.bb @@ -13,7 +13,7 @@ SECTION = "x11/wm" # SRCREV tagged 2.2 SRCREV = "6bc67d09da4147e5552fe30011a05a2c59d2f777" SRC_URI = "git://git.yoctoproject.org/${BPN}-2 \ - file://vfolders/* \ + file://vfolders/ \ " EXTRA_OECONF = "--enable-startup-notification --with-dbus" diff --git a/poky/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb b/poky/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb index f6dac2cf8..2b1f513f1 100644 --- a/poky/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb +++ b/poky/meta/recipes-sato/sato-screenshot/sato-screenshot_0.3.bb @@ -21,7 +21,7 @@ inherit autotools pkgconfig features_check FILES_${PN} += "${libdir}/matchbox-panel/*.so" do_install_append () { - rm ${D}${libdir}/matchbox-panel/*.la + rm -f ${D}${libdir}/matchbox-panel/*.la } # The matchbox-panel-2 requires x11 in DISTRO_FEATURES diff --git a/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.6.1.bb b/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.6.1.bb deleted file mode 100644 index 2fdab42c3..000000000 --- a/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.6.1.bb +++ /dev/null @@ -1,17 +0,0 @@ -SUMMARY = "WPE's backend based on a freedesktop.org stack." -HOMEPAGE = "https://github.com/Igalia/WPEBackend-fdo" -BUGTRACKER = "https://github.com/Igalia/WPEBackend-fdo/issues" - -LICENSE = "BSD" -LIC_FILES_CHKSUM = "file://COPYING;md5=1f62cef2e3645e3e74eb05fd389d7a66" -DEPENDS = "glib-2.0 libxkbcommon wayland virtual/egl libwpe" - -DEPENDS_append_class-target = " wayland-native" - -inherit cmake features_check - -REQUIRED_DISTRO_FEATURES = "opengl" - -SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "740eee3327acfb462b8460519a219e30dc0a870326e88e2ddc4fe2c8de20b1c9" - diff --git a/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.7.1.bb b/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.7.1.bb new file mode 100644 index 000000000..519762d12 --- /dev/null +++ b/poky/meta/recipes-sato/webkit/wpebackend-fdo_1.7.1.bb @@ -0,0 +1,17 @@ +SUMMARY = "WPE's backend based on a freedesktop.org stack." 
+HOMEPAGE = "https://github.com/Igalia/WPEBackend-fdo" +BUGTRACKER = "https://github.com/Igalia/WPEBackend-fdo/issues" + +LICENSE = "BSD" +LIC_FILES_CHKSUM = "file://COPYING;md5=1f62cef2e3645e3e74eb05fd389d7a66" +DEPENDS = "glib-2.0 libxkbcommon wayland virtual/egl libwpe libepoxy" + +DEPENDS_append_class-target = " wayland-native" + +inherit meson features_check + +REQUIRED_DISTRO_FEATURES = "opengl" + +SRC_URI = "https://wpewebkit.org/releases/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "9b980a73ea4e3762266c48f81ded56d9dcad4acf32bad9bd05d0dffdd454c6f5" + diff --git a/poky/meta/recipes-support/atk/at-spi2-core_2.36.0.bb b/poky/meta/recipes-support/atk/at-spi2-core_2.36.0.bb deleted file mode 100644 index c5d01c929..000000000 --- a/poky/meta/recipes-support/atk/at-spi2-core_2.36.0.bb +++ /dev/null @@ -1,36 +0,0 @@ -SUMMARY = "Assistive Technology Service Provider Interface (dbus core)" -HOMEPAGE = "https://wiki.linuxfoundation.org/accessibility/d-bus" -LICENSE = "LGPL-2.1+" -LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" - -MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" - -SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz" - -SRC_URI[md5sum] = "f101d111b06293d15738afc904c1d931" -SRC_URI[sha256sum] = "88da57de0a7e3c60bc341a974a80fdba091612db3547c410d6deab039ca5c05a" - -X11DEPENDS = "virtual/libx11 libxi libxtst" - -DEPENDS = "dbus glib-2.0" -DEPENDS += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '${X11DEPENDS}', '', d)}" - -inherit meson gtk-doc gettext systemd pkgconfig upstream-version-is-even gobject-introspection - -EXTRA_OEMESON = " -Dsystemd_user_dir=${systemd_user_unitdir} \ - -Ddbus_daemon=${bindir}/dbus-daemon \ - ${@bb.utils.contains('DISTRO_FEATURES', 'x11', '-Dx11=yes', '-Dx11=no', d)} \ -" - -GTKDOC_MESON_OPTION = "docs" - -GIR_MESON_OPTION = 'introspection' -GIR_MESON_ENABLE_FLAG = 'yes' -GIR_MESON_DISABLE_FLAG = 'no' - -FILES_${PN} += "${datadir}/dbus-1/services/*.service \ - ${datadir}/dbus-1/accessibility-services/*.service \ - ${datadir}/defaults/at-spi2 \ - ${systemd_user_unitdir}/at-spi-dbus-bus.service \ - " -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/atk/at-spi2-core_2.36.1.bb b/poky/meta/recipes-support/atk/at-spi2-core_2.36.1.bb new file mode 100644 index 000000000..218af6afa --- /dev/null +++ b/poky/meta/recipes-support/atk/at-spi2-core_2.36.1.bb @@ -0,0 +1,36 @@ +SUMMARY = "Assistive Technology Service Provider Interface (dbus core)" +HOMEPAGE = "https://wiki.linuxfoundation.org/accessibility/d-bus" +LICENSE = "LGPL-2.1+" +LIC_FILES_CHKSUM = "file://COPYING;md5=4fbd65380cdd255951079008b364516c" + +MAJ_VER = "${@oe.utils.trim_version("${PV}", 2)}" + +SRC_URI = "${GNOME_MIRROR}/${BPN}/${MAJ_VER}/${BPN}-${PV}.tar.xz" + +SRC_URI[md5sum] = "d01e5326b4eb15ac3c27eed73ecf26f1" +SRC_URI[sha256sum] = "97417b909dbbf000e7b21062a13b2f1fd52a336f5a53925bb26d27b65ace6c54" + +X11DEPENDS = "virtual/libx11 libxi libxtst" + +DEPENDS = "dbus glib-2.0" +DEPENDS += "${@bb.utils.contains('DISTRO_FEATURES', 'x11', '${X11DEPENDS}', '', d)}" + +inherit meson gtk-doc gettext systemd pkgconfig upstream-version-is-even gobject-introspection + +EXTRA_OEMESON = " -Dsystemd_user_dir=${systemd_user_unitdir} \ + -Ddbus_daemon=${bindir}/dbus-daemon \ + ${@bb.utils.contains('DISTRO_FEATURES', 'x11', '-Dx11=yes', '-Dx11=no', d)} \ +" + +GTKDOC_MESON_OPTION = "docs" + +GIR_MESON_OPTION = 'introspection' +GIR_MESON_ENABLE_FLAG = 'yes' +GIR_MESON_DISABLE_FLAG = 'no' + +FILES_${PN} += "${datadir}/dbus-1/services/*.service \ + 
${datadir}/dbus-1/accessibility-services/*.service \ + ${datadir}/defaults/at-spi2 \ + ${systemd_user_unitdir}/at-spi-dbus-bus.service \ + " +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/boost/bjam-native_1.73.0.bb b/poky/meta/recipes-support/boost/bjam-native_1.73.0.bb deleted file mode 100644 index d843eb038..000000000 --- a/poky/meta/recipes-support/boost/bjam-native_1.73.0.bb +++ /dev/null @@ -1,20 +0,0 @@ -require boost-${PV}.inc - -SUMMARY = "Portable Boost.Jam build tool for boost" -SECTION = "devel" - -inherit native - -SRC_URI += "file://0001-Build-debug-version-of-bjam.patch \ - file://0001-build.sh-use-DNDEBUG-also-in-debug-builds.patch \ - " - -do_compile() { - ./bootstrap.sh --with-toolset=gcc -} - -do_install() { - install -d ${D}${bindir}/ - # install unstripped version for bjam - install -c -m 755 b2 ${D}${bindir}/bjam -} diff --git a/poky/meta/recipes-support/boost/bjam-native_1.74.0.bb b/poky/meta/recipes-support/boost/bjam-native_1.74.0.bb new file mode 100644 index 000000000..d843eb038 --- /dev/null +++ b/poky/meta/recipes-support/boost/bjam-native_1.74.0.bb @@ -0,0 +1,20 @@ +require boost-${PV}.inc + +SUMMARY = "Portable Boost.Jam build tool for boost" +SECTION = "devel" + +inherit native + +SRC_URI += "file://0001-Build-debug-version-of-bjam.patch \ + file://0001-build.sh-use-DNDEBUG-also-in-debug-builds.patch \ + " + +do_compile() { + ./bootstrap.sh --with-toolset=gcc +} + +do_install() { + install -d ${D}${bindir}/ + # install unstripped version for bjam + install -c -m 755 b2 ${D}${bindir}/bjam +} diff --git a/poky/meta/recipes-support/boost/boost-1.73.0.inc b/poky/meta/recipes-support/boost/boost-1.73.0.inc deleted file mode 100644 index b57475719..000000000 --- a/poky/meta/recipes-support/boost/boost-1.73.0.inc +++ /dev/null @@ -1,21 +0,0 @@ -# The Boost web site provides free peer-reviewed portable -# C++ source libraries. The emphasis is on libraries which -# work well with the C++ Standard Library. The libraries are -# intended to be widely useful, and are in regular use by -# thousands of programmers across a broad spectrum of applications. -HOMEPAGE = "http://www.boost.org/" -LICENSE = "BSL-1.0 & MIT & Python-2.0" -LIC_FILES_CHKSUM = "file://LICENSE_1_0.txt;md5=e4224ccaecb14d942c71d31bef20d78c" - -BOOST_VER = "${@"_".join(d.getVar("PV").split("."))}" -BOOST_MAJ = "${@"_".join(d.getVar("PV").split(".")[0:2])}" -BOOST_P = "boost_${BOOST_VER}" - -SRC_URI = "https://dl.bintray.com/boostorg/release/${PV}/source/${BOOST_P}.tar.bz2" -SRC_URI[md5sum] = "9273c8c4576423562bbe84574b07b2bd" -SRC_URI[sha256sum] = "4eb3b8d442b426dc35346235c8733b5ae35ba431690e38c6a8263dce9fcbb402" - -UPSTREAM_CHECK_URI = "http://www.boost.org/users/download/" -UPSTREAM_CHECK_REGEX = "boostorg/release/(?P.*)/source/" - -S = "${WORKDIR}/${BOOST_P}" diff --git a/poky/meta/recipes-support/boost/boost-1.74.0.inc b/poky/meta/recipes-support/boost/boost-1.74.0.inc new file mode 100644 index 000000000..b47fdaf09 --- /dev/null +++ b/poky/meta/recipes-support/boost/boost-1.74.0.inc @@ -0,0 +1,20 @@ +# The Boost web site provides free peer-reviewed portable +# C++ source libraries. The emphasis is on libraries which +# work well with the C++ Standard Library. The libraries are +# intended to be widely useful, and are in regular use by +# thousands of programmers across a broad spectrum of applications. 
+HOMEPAGE = "http://www.boost.org/" +LICENSE = "BSL-1.0 & MIT & Python-2.0" +LIC_FILES_CHKSUM = "file://LICENSE_1_0.txt;md5=e4224ccaecb14d942c71d31bef20d78c" + +BOOST_VER = "${@"_".join(d.getVar("PV").split("."))}" +BOOST_MAJ = "${@"_".join(d.getVar("PV").split(".")[0:2])}" +BOOST_P = "boost_${BOOST_VER}" + +SRC_URI = "https://dl.bintray.com/boostorg/release/${PV}/source/${BOOST_P}.tar.bz2" +SRC_URI[sha256sum] = "83bfc1507731a0906e387fc28b7ef5417d591429e51e788417fe9ff025e116b1" + +UPSTREAM_CHECK_URI = "http://www.boost.org/users/download/" +UPSTREAM_CHECK_REGEX = "boostorg/release/(?P.*)/source/" + +S = "${WORKDIR}/${BOOST_P}" diff --git a/poky/meta/recipes-support/boost/boost/0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch b/poky/meta/recipes-support/boost/boost/0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch index 7c6b62e38..91ab53efd 100644 --- a/poky/meta/recipes-support/boost/boost/0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch +++ b/poky/meta/recipes-support/boost/boost/0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch @@ -1,7 +1,8 @@ -From f4d3fad43d67808d71325ba0df1457555b9a0086 Mon Sep 17 00:00:00 2001 +From 8e1d30454afde37eaa3c593ec19d108cd5ed10d0 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Tue, 18 Dec 2018 15:42:57 +0100 Subject: [PATCH] Don't set up arch/instruction-set flags, we do that + ourselves Upstream-Status: Inappropriate @@ -9,14 +10,14 @@ Signed-off-by: Christopher Larson Signed-off-by: Alexander Kanavin --- - tools/build/src/tools/gcc.jam | 133 ---------------------------------- - 1 file changed, 133 deletions(-) + tools/build/src/tools/gcc.jam | 139 ---------------------------------- + 1 file changed, 139 deletions(-) diff --git a/tools/build/src/tools/gcc.jam b/tools/build/src/tools/gcc.jam -index 8910a55f2..ed7e7b20c 100644 +index ff3209f7b..70cbc39a5 100644 --- a/tools/build/src/tools/gcc.jam +++ b/tools/build/src/tools/gcc.jam -@@ -1195,136 +1195,3 @@ local rule cpu-flags ( toolset variable : architecture : instruction-set + : +@@ -1217,142 +1217,3 @@ local rule cpu-flags ( toolset variable : architecture : instruction-set + : $(architecture)/$(instruction-set) : $(values) ; } @@ -66,7 +67,11 @@ index 8910a55f2..ed7e7b20c 100644 -cpu-flags gcc OPTIONS : x86 : skylake : -march=skylake ; -cpu-flags gcc OPTIONS : x86 : skylake-avx512 : -march=skylake-avx512 ; -cpu-flags gcc OPTIONS : x86 : cannonlake : -march=skylake-avx512 -mavx512vbmi -mavx512ifma -msha ; --cpu-flags gcc OPTIONS : x86 : icelake : -march=icelake ; +-cpu-flags gcc OPTIONS : x86 : icelake-client : -march=icelake-client ; +-cpu-flags gcc OPTIONS : x86 : icelake-server : -march=icelake-server ; +-cpu-flags gcc OPTIONS : x86 : cascadelake : -march=skylake-avx512 -mavx512vnni ; +-cpu-flags gcc OPTIONS : x86 : cooperlake : -march=cooperlake ; +-cpu-flags gcc OPTIONS : x86 : tigerlake : -march=tigerlake ; -cpu-flags gcc OPTIONS : x86 : k6 : -march=k6 ; -cpu-flags gcc OPTIONS : x86 : k6-2 : -march=k6-2 ; -cpu-flags gcc OPTIONS : x86 : k6-3 : -march=k6-3 ; @@ -92,10 +97,12 @@ index 8910a55f2..ed7e7b20c 100644 -cpu-flags gcc OPTIONS : x86 : btver1 : -march=btver1 ; -cpu-flags gcc OPTIONS : x86 : btver2 : -march=btver2 ; -cpu-flags gcc OPTIONS : x86 : znver1 : -march=znver1 ; +-cpu-flags gcc OPTIONS : x86 : znver2 : -march=znver2 ; -cpu-flags gcc OPTIONS : x86 : winchip-c6 : -march=winchip-c6 ; -cpu-flags gcc OPTIONS : x86 : winchip2 : -march=winchip2 ; -cpu-flags gcc OPTIONS : x86 : c3 : -march=c3 ; -cpu-flags gcc OPTIONS : x86 : c3-2 
: -march=c3-2 ; +-cpu-flags gcc OPTIONS : x86 : c7 : -march=c7 ; -## -cpu-flags gcc OPTIONS : x86 : atom : -march=atom ; -# Sparc @@ -153,6 +160,3 @@ index 8910a55f2..ed7e7b20c 100644 -cpu-flags gcc OPTIONS : s390x : z15 : -march=z15 ; -# AIX variant of RS/6000 & PowerPC -toolset.flags gcc AROPTIONS 64/aix : "-X64" ; --- -2.17.1 - diff --git a/poky/meta/recipes-support/boost/boost/0001-Make-index-detail-rtree-visitors-insert-base-class-p.patch b/poky/meta/recipes-support/boost/boost/0001-Make-index-detail-rtree-visitors-insert-base-class-p.patch deleted file mode 100644 index 1edad329c..000000000 --- a/poky/meta/recipes-support/boost/boost/0001-Make-index-detail-rtree-visitors-insert-base-class-p.patch +++ /dev/null @@ -1,30 +0,0 @@ -From aafbceccc76dccb75963dd4f596fd1f10ee34b03 Mon Sep 17 00:00:00 2001 -From: Jonathan Wakely -Date: Fri, 5 Jun 2020 19:29:27 +0100 -Subject: [PATCH] Make index::detail::rtree::visitors::insert base class public - -Fixes #721 - -Upstream-Status: Backport [https://github.com/boostorg/geometry/commit/aafbceccc76dccb75963dd4f596fd1f10ee34b03] - -Signed-off-by: Andreas Müller ---- - boost/geometry/index/detail/rtree/visitors/insert.hpp | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/boost/geometry/index/detail/rtree/visitors/insert.hpp b/boost/geometry/index/detail/rtree/visitors/insert.hpp -index 2d324cb7f..5709a930c 100644 ---- a/boost/geometry/index/detail/rtree/visitors/insert.hpp -+++ b/boost/geometry/index/detail/rtree/visitors/insert.hpp -@@ -265,7 +265,7 @@ struct insert_traverse_data - // Default insert visitor - template - class insert -- : MembersHolder::visitor -+ : public MembersHolder::visitor - { - protected: - typedef typename MembersHolder::box_type box_type; --- -2.26.2 - diff --git a/poky/meta/recipes-support/boost/boost_1.73.0.bb b/poky/meta/recipes-support/boost/boost_1.73.0.bb deleted file mode 100644 index 995c14e8c..000000000 --- a/poky/meta/recipes-support/boost/boost_1.73.0.bb +++ /dev/null @@ -1,11 +0,0 @@ -require boost-${PV}.inc -require boost.inc - -SRC_URI += "file://arm-intrinsics.patch \ - file://boost-CVE-2012-2677.patch \ - file://boost-math-disable-pch-for-gcc.patch \ - file://0001-Apply-boost-1.62.0-no-forced-flags.patch.patch \ - file://0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch \ - file://0001-dont-setup-compiler-flags-m32-m64.patch \ - file://0001-Make-index-detail-rtree-visitors-insert-base-class-p.patch \ - " diff --git a/poky/meta/recipes-support/boost/boost_1.74.0.bb b/poky/meta/recipes-support/boost/boost_1.74.0.bb new file mode 100644 index 000000000..5e9e0d87d --- /dev/null +++ b/poky/meta/recipes-support/boost/boost_1.74.0.bb @@ -0,0 +1,10 @@ +require boost-${PV}.inc +require boost.inc + +SRC_URI += "file://arm-intrinsics.patch \ + file://boost-CVE-2012-2677.patch \ + file://boost-math-disable-pch-for-gcc.patch \ + file://0001-Apply-boost-1.62.0-no-forced-flags.patch.patch \ + file://0001-Don-t-set-up-arch-instruction-set-flags-we-do-that-o.patch \ + file://0001-dont-setup-compiler-flags-m32-m64.patch \ + " diff --git a/poky/meta/recipes-support/curl/curl_7.72.0.bb b/poky/meta/recipes-support/curl/curl_7.72.0.bb index e7f549269..a2ae0b690 100644 --- a/poky/meta/recipes-support/curl/curl_7.72.0.bb +++ b/poky/meta/recipes-support/curl/curl_7.72.0.bb @@ -11,7 +11,9 @@ SRC_URI = "http://curl.haxx.se/download/curl-${PV}.tar.bz2 \ SRC_URI[sha256sum] = "ad91970864102a59765e20ce16216efc9d6ad381471f7accceceab7d905703ef" -CVE_PRODUCT = "curl libcurl" +# Curl has used many names over 
the years... +CVE_PRODUCT = "haxx:curl haxx:libcurl curl:curl curl:libcurl libcurl:libcurl daniel_stenberg:curl" + inherit autotools pkgconfig binconfig multilib_header PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'ipv6', d)} gnutls libidn proxy threaded-resolver verbose zlib" diff --git a/poky/meta/recipes-support/debianutils/debianutils_4.11.1.bb b/poky/meta/recipes-support/debianutils/debianutils_4.11.1.bb new file mode 100644 index 000000000..ef7aa5040 --- /dev/null +++ b/poky/meta/recipes-support/debianutils/debianutils_4.11.1.bb @@ -0,0 +1,54 @@ +SUMMARY = "Miscellaneous utilities specific to Debian" +SECTION = "base" +LICENSE = "GPLv2 & SMAIL_GPL" +LIC_FILES_CHKSUM = "file://debian/copyright;md5=9b912cd0cc654134c0ef3424a0705b94" + +SRC_URI = "http://snapshot.debian.org/archive/debian/20200525T145753Z/pool/main/d/${BPN}/${BPN}_${PV}.tar.xz" +# the package is taken from snapshots.debian.org; that source is static and goes stale +# so we check the latest upstream from a directory that does get updated +UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/d/${BPN}/" + +SRC_URI[sha256sum] = "8be869f19c55c18d53d9f0414b68bb966a068b2154e9fbbfc6193827d6af983c" + +inherit autotools update-alternatives + +do_configure_prepend() { + sed -i -e 's:tempfile.1 which.1:which.1:g' ${S}/Makefile.am +} + +do_install_append() { + if [ "${base_bindir}" != "${bindir}" ]; then + # Debian places some utils into ${base_bindir} as does busybox + install -d ${D}${base_bindir} + for app in run-parts tempfile; do + mv ${D}${bindir}/$app ${D}${base_bindir}/$app + done + fi +} + +# Note that we package the update-alternatives name. +# +PACKAGES =+ "${PN}-run-parts" +FILES_${PN}-run-parts = "${base_bindir}/run-parts.debianutils" + +RDEPENDS_${PN} += "${PN}-run-parts" +RDEPENDS_${PN}_class-native = "" + +ALTERNATIVE_PRIORITY = "30" +ALTERNATIVE_${PN} = "add-shell installkernel remove-shell savelog tempfile which" + +ALTERNATIVE_PRIORITY_${PN}-run-parts = "60" +ALTERNATIVE_${PN}-run-parts = "run-parts" + +ALTERNATIVE_${PN}-doc = "which.1" +ALTERNATIVE_LINK_NAME[which.1] = "${mandir}/man1/which.1" + +ALTERNATIVE_LINK_NAME[add-shell] = "${sbindir}/add-shell" +ALTERNATIVE_LINK_NAME[installkernel] = "${sbindir}/installkernel" +ALTERNATIVE_LINK_NAME[remove-shell] = "${sbindir}/remove-shell" +ALTERNATIVE_LINK_NAME[run-parts] = "${base_bindir}/run-parts" +ALTERNATIVE_LINK_NAME[savelog] = "${bindir}/savelog" +ALTERNATIVE_LINK_NAME[tempfile] = "${base_bindir}/tempfile" +ALTERNATIVE_LINK_NAME[which] = "${bindir}/which" + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/debianutils/debianutils_4.11.bb b/poky/meta/recipes-support/debianutils/debianutils_4.11.bb deleted file mode 100644 index ac3d5b026..000000000 --- a/poky/meta/recipes-support/debianutils/debianutils_4.11.bb +++ /dev/null @@ -1,54 +0,0 @@ -SUMMARY = "Miscellaneous utilities specific to Debian" -SECTION = "base" -LICENSE = "GPLv2 & SMAIL_GPL" -LIC_FILES_CHKSUM = "file://debian/copyright;md5=9b912cd0cc654134c0ef3424a0705b94" - -SRC_URI = "${DEBIAN_MIRROR}/main/d/${BPN}/${BPN}_${PV}.tar.xz" -# the package is taken from snapshots.debian.org; that source is static and goes stale -# so we check the latest upstream from a directory that does get updated -UPSTREAM_CHECK_URI = "${DEBIAN_MIRROR}/main/d/${BPN}/" - -SRC_URI[sha256sum] = "bb5ce6290696b0d623377521ed217f484aa98f7346c5f7c48f9ae3e1acfb7151" - -inherit autotools update-alternatives - -do_configure_prepend() { - sed -i -e 's:tempfile.1 which.1:which.1:g' ${S}/Makefile.am -} - 
-do_install_append() { - if [ "${base_bindir}" != "${bindir}" ]; then - # Debian places some utils into ${base_bindir} as does busybox - install -d ${D}${base_bindir} - for app in run-parts tempfile; do - mv ${D}${bindir}/$app ${D}${base_bindir}/$app - done - fi -} - -# Note that we package the update-alternatives name. -# -PACKAGES =+ "${PN}-run-parts" -FILES_${PN}-run-parts = "${base_bindir}/run-parts.debianutils" - -RDEPENDS_${PN} += "${PN}-run-parts" -RDEPENDS_${PN}_class-native = "" - -ALTERNATIVE_PRIORITY = "30" -ALTERNATIVE_${PN} = "add-shell installkernel remove-shell savelog tempfile which" - -ALTERNATIVE_PRIORITY_${PN}-run-parts = "60" -ALTERNATIVE_${PN}-run-parts = "run-parts" - -ALTERNATIVE_${PN}-doc = "which.1" -ALTERNATIVE_LINK_NAME[which.1] = "${mandir}/man1/which.1" - -ALTERNATIVE_LINK_NAME[add-shell] = "${sbindir}/add-shell" -ALTERNATIVE_LINK_NAME[installkernel] = "${sbindir}/installkernel" -ALTERNATIVE_LINK_NAME[remove-shell] = "${sbindir}/remove-shell" -ALTERNATIVE_LINK_NAME[run-parts] = "${base_bindir}/run-parts" -ALTERNATIVE_LINK_NAME[savelog] = "${bindir}/savelog" -ALTERNATIVE_LINK_NAME[tempfile] = "${base_bindir}/tempfile" -ALTERNATIVE_LINK_NAME[which] = "${bindir}/which" - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_153.bb b/poky/meta/recipes-support/diffoscope/diffoscope_153.bb deleted file mode 100644 index 77f5254ae..000000000 --- a/poky/meta/recipes-support/diffoscope/diffoscope_153.bb +++ /dev/null @@ -1,17 +0,0 @@ -SUMMARY = "in-depth comparison of files, archives, and directories" -HOMEPAGE = "https://diffoscope.org/" -LICENSE = "GPL-3.0+" -LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" - -PYPI_PACKAGE = "diffoscope" - -inherit pypi setuptools3 - -SRC_URI[sha256sum] = "b5104b5e72252df45ba6b7cbb0169e2e3407715b6b063fa5b38a2649b0d719a2" - -RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" - -# Dependencies don't build for musl -COMPATIBLE_HOST_libc-musl = 'null' - -BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/diffoscope/diffoscope_160.bb b/poky/meta/recipes-support/diffoscope/diffoscope_160.bb new file mode 100644 index 000000000..2c3da3bbd --- /dev/null +++ b/poky/meta/recipes-support/diffoscope/diffoscope_160.bb @@ -0,0 +1,17 @@ +SUMMARY = "in-depth comparison of files, archives, and directories" +HOMEPAGE = "https://diffoscope.org/" +LICENSE = "GPL-3.0+" +LIC_FILES_CHKSUM = "file://COPYING;md5=d32239bcb673463ab874e80d47fae504" + +PYPI_PACKAGE = "diffoscope" + +inherit pypi setuptools3 + +SRC_URI[sha256sum] = "f164b5e74cc11f6238ad8d62c92d3a819fa4c8b618683fc0533e04f21acae6b2" + +RDEPENDS_${PN} += "binutils vim squashfs-tools python3-libarchive-c python3-magic" + +# Dependencies don't build for musl +COMPATIBLE_HOST_libc-musl = 'null' + +BBCLASSEXTEND = "native" diff --git a/poky/meta/recipes-support/enchant/enchant2_2.2.8.bb b/poky/meta/recipes-support/enchant/enchant2_2.2.8.bb deleted file mode 100644 index 4ddbe55da..000000000 --- a/poky/meta/recipes-support/enchant/enchant2_2.2.8.bb +++ /dev/null @@ -1,29 +0,0 @@ -SUMMARY = "Enchant Spell checker API Library" -SECTION = "libs" -HOMEPAGE = "https://abiword.github.io/enchant/" -LICENSE = "LGPLv2.1+" -LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=a916467b91076e631dd8edb7424769c7" - -DEPENDS = "glib-2.0" - -inherit autotools pkgconfig - -SRC_URI = "https://github.com/AbiWord/enchant/releases/download/v${PV}/enchant-${PV}.tar.gz" -SRC_URI[md5sum] = "c7b9d6a392ecb8758e499f783e8dc883" 
-SRC_URI[sha256sum] = "c7b5e2853f0dd0b1aafea2f9e071941affeec3a76df8e3f6d67a718c89293555" - -UPSTREAM_CHECK_URI = "https://github.com/AbiWord/enchant/releases" - -S = "${WORKDIR}/enchant-${PV}" - -EXTRA_OEMAKE = "pkgdatadir=${datadir}/enchant-2" - -PACKAGECONFIG ??= "aspell" -PACKAGECONFIG[aspell] = "--with-aspell,--without-aspell,aspell,aspell" -PACKAGECONFIG[hunspell] = "--with-hunspell,--without-hunspell,hunspell,hunspell" - -FILES_${PN} += " \ - ${datadir}/enchant-2 \ - ${libdir}/enchant-2 \ -" -FILES_${PN}-staticdev += "${libdir}/enchant-2/*.a" diff --git a/poky/meta/recipes-support/enchant/enchant2_2.2.9.bb b/poky/meta/recipes-support/enchant/enchant2_2.2.9.bb new file mode 100644 index 000000000..784fd14ee --- /dev/null +++ b/poky/meta/recipes-support/enchant/enchant2_2.2.9.bb @@ -0,0 +1,28 @@ +SUMMARY = "Enchant Spell checker API Library" +SECTION = "libs" +HOMEPAGE = "https://abiword.github.io/enchant/" +LICENSE = "LGPLv2.1+" +LIC_FILES_CHKSUM = "file://COPYING.LIB;md5=a916467b91076e631dd8edb7424769c7" + +DEPENDS = "glib-2.0" + +inherit autotools pkgconfig + +SRC_URI = "https://github.com/AbiWord/enchant/releases/download/v${PV}/enchant-${PV}.tar.gz" +SRC_URI[sha256sum] = "b29a3d2273f5edcbdbbb565e94bfd8ea3f9526886fcb6327b4b0f72f0d722f3c" + +UPSTREAM_CHECK_URI = "https://github.com/AbiWord/enchant/releases" + +S = "${WORKDIR}/enchant-${PV}" + +EXTRA_OEMAKE = "pkgdatadir=${datadir}/enchant-2" + +PACKAGECONFIG ??= "aspell" +PACKAGECONFIG[aspell] = "--with-aspell,--without-aspell,aspell,aspell" +PACKAGECONFIG[hunspell] = "--with-hunspell,--without-hunspell,hunspell,hunspell" + +FILES_${PN} += " \ + ${datadir}/enchant-2 \ + ${libdir}/enchant-2 \ +" +FILES_${PN}-staticdev += "${libdir}/enchant-2/*.a" diff --git a/poky/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch b/poky/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch index 0e78f5679..c641a1961 100644 --- a/poky/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch +++ b/poky/meta/recipes-support/gnupg/gnupg/0001-configure.ac-use-a-custom-value-for-the-location-of-.patch @@ -1,4 +1,4 @@ -From 0e51c62706a8c54e90a2d98c5250ecc894c65182 Mon Sep 17 00:00:00 2001 +From 56343af532389c31eab32c096c9a989c53c78ce0 Mon Sep 17 00:00:00 2001 From: Alexander Kanavin Date: Mon, 22 Jan 2018 18:00:21 +0200 Subject: [PATCH] configure.ac: use a custom value for the location of @@ -14,10 +14,10 @@ Signed-off-by: Alexander Kanavin 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac -index f3c9863..a57f559 100644 +index 1d05d39..eaaf33c 100644 --- a/configure.ac +++ b/configure.ac -@@ -1856,7 +1856,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf", +@@ -1858,7 +1858,7 @@ AC_DEFINE_UNQUOTED(GPGCONF_DISP_NAME, "GPGConf", AC_DEFINE_UNQUOTED(GPGTAR_NAME, "gpgtar", [The name of the gpgtar tool]) diff --git a/poky/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch b/poky/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch index 3e798efd0..607a09f18 100644 --- a/poky/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch +++ b/poky/meta/recipes-support/gnupg/gnupg/0003-dirmngr-uses-libgpg-error.patch @@ -1,7 +1,7 @@ -From 9c3858ffda6246bf9e1e6aeeb920532a56b19408 Mon Sep 17 00:00:00 2001 +From 9a901dbb1c48685f2db6d7b55916c9484e871f16 Mon Sep 17 00:00:00 2001 From: Saul Wold Date: Wed, 16 Aug 2017 11:18:01 +0800 -Subject: [PATCH 3/4] dirmngr 
uses libgpg error +Subject: [PATCH] dirmngr uses libgpg error Upstream-Status: Pending Signed-off-by: Saul Wold @@ -9,24 +9,22 @@ Signed-off-by: Saul Wold Rebase to 2.1.23 Signed-off-by: Hongxu Jia + --- dirmngr/Makefile.am | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dirmngr/Makefile.am b/dirmngr/Makefile.am -index b404165..d3f916e 100644 +index 208a813..292c036 100644 --- a/dirmngr/Makefile.am +++ b/dirmngr/Makefile.am -@@ -82,7 +82,8 @@ endif +@@ -90,7 +90,8 @@ endif dirmngr_LDADD = $(libcommonpth) \ $(DNSLIBS) $(LIBASSUAN_LIBS) \ $(LIBGCRYPT_LIBS) $(KSBA_LIBS) $(NPTH_LIBS) \ -- $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) -+ $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) \ +- $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) $(NETLIBS) ++ $(NTBTLS_LIBS) $(LIBGNUTLS_LIBS) $(LIBINTL) $(LIBICONV) $(NETLIBS) \ + $(GPG_ERROR_LIBS) if USE_LDAP dirmngr_LDADD += $(ldaplibs) endif --- -1.8.3.1 - diff --git a/poky/meta/recipes-support/gnupg/gnupg/relocate.patch b/poky/meta/recipes-support/gnupg/gnupg/relocate.patch index 25732a827..aa8d1e3cc 100644 --- a/poky/meta/recipes-support/gnupg/gnupg/relocate.patch +++ b/poky/meta/recipes-support/gnupg/gnupg/relocate.patch @@ -1,4 +1,4 @@ -From 6e3b1d89758c3ee7072aeefa305ce5fe76f2e439 Mon Sep 17 00:00:00 2001 +From 4005b3342db06749453835720b5a5c2392a90810 Mon Sep 17 00:00:00 2001 From: Ross Burton Date: Wed, 19 Sep 2018 14:44:40 +0100 Subject: [PATCH] Allow the environment to override where gnupg looks for its diff --git a/poky/meta/recipes-support/gnupg/gnupg_2.2.21.bb b/poky/meta/recipes-support/gnupg/gnupg_2.2.21.bb deleted file mode 100644 index 701d769e6..000000000 --- a/poky/meta/recipes-support/gnupg/gnupg_2.2.21.bb +++ /dev/null @@ -1,80 +0,0 @@ -SUMMARY = "GNU Privacy Guard - encryption and signing tools (2.x)" -HOMEPAGE = "http://www.gnupg.org/" -LICENSE = "GPLv3 & LGPLv3" -LIC_FILES_CHKSUM = "file://COPYING;md5=189af8afca6d6075ba6c9e0aa8077626 \ - file://COPYING.LGPL3;md5=a2b6bf2cb38ee52619e60f30a1fc7257" - -DEPENDS = "npth libassuan libksba zlib bzip2 readline libgcrypt" - -inherit autotools gettext texinfo pkgconfig - -UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html" -SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \ - file://0001-Use-pkg-config-to-find-pth-instead-of-pth-config.patch \ - file://0002-use-pkgconfig-instead-of-npth-config.patch \ - file://0003-dirmngr-uses-libgpg-error.patch \ - file://0004-autogen.sh-fix-find-version-for-beta-checking.patch \ - file://0001-Woverride-init-is-not-needed-with-gcc-9.patch \ - " -SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \ - file://relocate.patch" -SRC_URI_append_class-nativesdk = " file://relocate.patch" - -SRC_URI[sha256sum] = "61e83278fb5fa7336658a8b73ab26f379d41275bb1c7c6e694dd9f9a6e8e76ec" - -EXTRA_OECONF = "--disable-ldap \ - --disable-ccid-driver \ - --with-zlib=${STAGING_LIBDIR}/.. \ - --with-bzip2=${STAGING_LIBDIR}/.. \ - --with-readline=${STAGING_LIBDIR}/.. \ - --enable-gpg-is-gpg2 \ - " - -# A minimal package containing just enough to run gpg+gpgagent (E.g. use gpgme in opkg) -PACKAGES =+ "${PN}-gpg" -FILES_${PN}-gpg = " \ - ${bindir}/gpg \ - ${bindir}/gpg2 \ - ${bindir}/gpg-agent \ -" - -# Normal package (gnupg) should depend on minimal package (gnupg-gpg) -# to ensure all tools are included. This is done only in non-native -# builds. Native builds don't have sub-packages, so appending RDEPENDS -# in this case breaks recipe parsing. 
-RDEPENDS_${PN} += "${@ "" if ("native" in d.getVar("PN")) else (d.getVar("PN") + "-gpg")}" - -RRECOMMENDS_${PN} = "pinentry" - -do_configure_prepend () { - # Else these could be used in prefernce to those in aclocal-copy - rm -f ${S}/m4/gpg-error.m4 - rm -f ${S}/m4/libassuan.m4 - rm -f ${S}/m4/ksba.m4 - rm -f ${S}/m4/libgcrypt.m4 -} - -do_install_append() { - ln -sf gpg2 ${D}${bindir}/gpg - ln -sf gpgv2 ${D}${bindir}/gpgv -} - -do_install_append_class-native() { - create_wrappers ${STAGING_BINDIR_NATIVE} -} - -do_install_append_class-nativesdk() { - create_wrappers ${SDKPATHNATIVE}${bindir_nativesdk} -} - -create_wrappers() { - for i in gpg2 gpgconf gpg-agent gpg-connect-agent; do - create_wrapper ${D}${bindir}/$i GNUPG_BINDIR=$1 - done -} - -PACKAGECONFIG ??= "gnutls" -PACKAGECONFIG[gnutls] = "--enable-gnutls, --disable-gnutls, gnutls" -PACKAGECONFIG[sqlite3] = "--enable-sqlite, --disable-sqlite, sqlite3" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/gnupg/gnupg_2.2.23.bb b/poky/meta/recipes-support/gnupg/gnupg_2.2.23.bb new file mode 100644 index 000000000..c624b67a0 --- /dev/null +++ b/poky/meta/recipes-support/gnupg/gnupg_2.2.23.bb @@ -0,0 +1,80 @@ +SUMMARY = "GNU Privacy Guard - encryption and signing tools (2.x)" +HOMEPAGE = "http://www.gnupg.org/" +LICENSE = "GPLv3 & LGPLv3" +LIC_FILES_CHKSUM = "file://COPYING;md5=189af8afca6d6075ba6c9e0aa8077626 \ + file://COPYING.LGPL3;md5=a2b6bf2cb38ee52619e60f30a1fc7257" + +DEPENDS = "npth libassuan libksba zlib bzip2 readline libgcrypt" + +inherit autotools gettext texinfo pkgconfig + +UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html" +SRC_URI = "${GNUPG_MIRROR}/${BPN}/${BPN}-${PV}.tar.bz2 \ + file://0001-Use-pkg-config-to-find-pth-instead-of-pth-config.patch \ + file://0002-use-pkgconfig-instead-of-npth-config.patch \ + file://0003-dirmngr-uses-libgpg-error.patch \ + file://0004-autogen.sh-fix-find-version-for-beta-checking.patch \ + file://0001-Woverride-init-is-not-needed-with-gcc-9.patch \ + " +SRC_URI_append_class-native = " file://0001-configure.ac-use-a-custom-value-for-the-location-of-.patch \ + file://relocate.patch" +SRC_URI_append_class-nativesdk = " file://relocate.patch" + +SRC_URI[sha256sum] = "10b55e49d78b3e49f1edb58d7541ecbdad92ddaeeb885b6f486ed23d1cd1da5c" + +EXTRA_OECONF = "--disable-ldap \ + --disable-ccid-driver \ + --with-zlib=${STAGING_LIBDIR}/.. \ + --with-bzip2=${STAGING_LIBDIR}/.. \ + --with-readline=${STAGING_LIBDIR}/.. \ + --enable-gpg-is-gpg2 \ + " + +# A minimal package containing just enough to run gpg+gpgagent (E.g. use gpgme in opkg) +PACKAGES =+ "${PN}-gpg" +FILES_${PN}-gpg = " \ + ${bindir}/gpg \ + ${bindir}/gpg2 \ + ${bindir}/gpg-agent \ +" + +# Normal package (gnupg) should depend on minimal package (gnupg-gpg) +# to ensure all tools are included. This is done only in non-native +# builds. Native builds don't have sub-packages, so appending RDEPENDS +# in this case breaks recipe parsing. 
+RDEPENDS_${PN} += "${@ "" if ("native" in d.getVar("PN")) else (d.getVar("PN") + "-gpg")}" + +RRECOMMENDS_${PN} = "pinentry" + +do_configure_prepend () { + # Else these could be used in prefernce to those in aclocal-copy + rm -f ${S}/m4/gpg-error.m4 + rm -f ${S}/m4/libassuan.m4 + rm -f ${S}/m4/ksba.m4 + rm -f ${S}/m4/libgcrypt.m4 +} + +do_install_append() { + ln -sf gpg2 ${D}${bindir}/gpg + ln -sf gpgv2 ${D}${bindir}/gpgv +} + +do_install_append_class-native() { + create_wrappers ${STAGING_BINDIR_NATIVE} +} + +do_install_append_class-nativesdk() { + create_wrappers ${SDKPATHNATIVE}${bindir_nativesdk} +} + +create_wrappers() { + for i in gpg2 gpgconf gpg-agent gpg-connect-agent; do + create_wrapper ${D}${bindir}/$i GNUPG_BINDIR=$1 + done +} + +PACKAGECONFIG ??= "gnutls" +PACKAGECONFIG[gnutls] = "--enable-gnutls, --disable-gnutls, gnutls" +PACKAGECONFIG[sqlite3] = "--enable-sqlite, --disable-sqlite, sqlite3" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch b/poky/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch new file mode 100644 index 000000000..1702325e6 --- /dev/null +++ b/poky/meta/recipes-support/gnutls/gnutls/CVE-2020-24659.patch @@ -0,0 +1,117 @@ +From 29ee67c205855e848a0a26e6d0e4f65b6b943e0a Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Sat, 22 Aug 2020 17:19:39 +0200 +Subject: [PATCH] handshake: reject no_renegotiation alert if handshake is + incomplete + +If the initial handshake is incomplete and the server sends a +no_renegotiation alert, the client should treat it as a fatal error +even if its level is warning. Otherwise the same handshake +state (e.g., DHE parameters) are reused in the next gnutls_handshake +call, if it is called in the loop idiom: + + do { + ret = gnutls_handshake(session); + } while (ret < 0 && gnutls_error_is_fatal(ret) == 0); + +Signed-off-by: Daiki Ueno +CVE: CVE-2020-24659 +Upstream-Status: Backport [https://gitlab.com/gnutls/gnutls.git] +Signed-off-by: Zhixiong Chi +--- + lib/gnutls_int.h | 1 + + lib/handshake.c | 48 +++++++++++++----- + 2 files changed, 36 insertions(+), 13 deletions(-) + +diff --git a/lib/gnutls_int.h b/lib/gnutls_int.h +index bb6c19713..31cec5c0c 100644 +--- a/lib/gnutls_int.h ++++ b/lib/gnutls_int.h +@@ -1370,6 +1370,7 @@ typedef struct { + #define HSK_RECORD_SIZE_LIMIT_RECEIVED (1<<26) /* server: record_size_limit extension was seen but not accepted yet */ + #define HSK_OCSP_REQUESTED (1<<27) /* server: client requested OCSP stapling */ + #define HSK_CLIENT_OCSP_REQUESTED (1<<28) /* client: server requested OCSP stapling */ ++#define HSK_SERVER_HELLO_RECEIVED (1<<29) /* client: Server Hello message has been received */ + + /* The hsk_flags are for use within the ongoing handshake; + * they are reset to zero prior to handshake start by gnutls_handshake. */ +diff --git a/lib/handshake.c b/lib/handshake.c +index b40f84b3d..ce2d160e2 100644 +--- a/lib/handshake.c ++++ b/lib/handshake.c +@@ -2051,6 +2051,8 @@ read_server_hello(gnutls_session_t session, + if (ret < 0) + return gnutls_assert_val(ret); + ++ session->internals.hsk_flags |= HSK_SERVER_HELLO_RECEIVED; ++ + return 0; + } + +@@ -2575,16 +2577,42 @@ int gnutls_rehandshake(gnutls_session_t session) + return 0; + } + ++/* This function checks whether the error code should be treated fatal ++ * or not, and also does the necessary state transition. In ++ * particular, in the case of a rehandshake abort it resets the ++ * handshake's internal state. 
++ */ + inline static int + _gnutls_abort_handshake(gnutls_session_t session, int ret) + { +- if (((ret == GNUTLS_E_WARNING_ALERT_RECEIVED) && +- (gnutls_alert_get(session) == GNUTLS_A_NO_RENEGOTIATION)) +- || ret == GNUTLS_E_GOT_APPLICATION_DATA) +- return 0; ++ switch (ret) { ++ case GNUTLS_E_WARNING_ALERT_RECEIVED: ++ if (gnutls_alert_get(session) == GNUTLS_A_NO_RENEGOTIATION) { ++ /* The server always toleretes a "no_renegotiation" alert. */ ++ if (session->security_parameters.entity == GNUTLS_SERVER) { ++ STATE = STATE0; ++ return ret; ++ } ++ ++ /* The client should tolerete a "no_renegotiation" alert only if: ++ * - the initial handshake has completed, or ++ * - a Server Hello is not yet received ++ */ ++ if (session->internals.initial_negotiation_completed || ++ !(session->internals.hsk_flags & HSK_SERVER_HELLO_RECEIVED)) { ++ STATE = STATE0; ++ return ret; ++ } + +- /* this doesn't matter */ +- return GNUTLS_E_INTERNAL_ERROR; ++ return gnutls_assert_val(GNUTLS_E_UNEXPECTED_PACKET); ++ } ++ return ret; ++ case GNUTLS_E_GOT_APPLICATION_DATA: ++ STATE = STATE0; ++ return ret; ++ default: ++ return ret; ++ } + } + + +@@ -2747,13 +2774,7 @@ int gnutls_handshake(gnutls_session_t session) + } + + if (ret < 0) { +- /* In the case of a rehandshake abort +- * we should reset the handshake's internal state. +- */ +- if (_gnutls_abort_handshake(session, ret) == 0) +- STATE = STATE0; +- +- return ret; ++ return _gnutls_abort_handshake(session, ret); + } + + /* clear handshake buffer */ +-- +2.17.0 + diff --git a/poky/meta/recipes-support/gnutls/gnutls_3.6.14.bb b/poky/meta/recipes-support/gnutls/gnutls_3.6.14.bb index cc0454a56..51578b4b3 100644 --- a/poky/meta/recipes-support/gnutls/gnutls_3.6.14.bb +++ b/poky/meta/recipes-support/gnutls/gnutls_3.6.14.bb @@ -20,6 +20,7 @@ SHRT_VER = "${@d.getVar('PV').split('.')[0]}.${@d.getVar('PV').split('.')[1]}" SRC_URI = "https://www.gnupg.org/ftp/gcrypt/gnutls/v${SHRT_VER}/gnutls-${PV}.tar.xz \ file://arm_eabi.patch \ file://0001-Modied-the-license-to-GPLv2.1-to-keep-with-LICENSE-f.patch \ + file://CVE-2020-24659.patch \ " SRC_URI[sha256sum] = "5630751adec7025b8ef955af4d141d00d252a985769f51b4059e5affa3d39d63" diff --git a/poky/meta/recipes-support/gpgme/gpgme/0008-do-not-auto-check-var-PYTHON.patch b/poky/meta/recipes-support/gpgme/gpgme/0008-do-not-auto-check-var-PYTHON.patch index 3afbfc863..dfea1bf78 100644 --- a/poky/meta/recipes-support/gpgme/gpgme/0008-do-not-auto-check-var-PYTHON.patch +++ b/poky/meta/recipes-support/gpgme/gpgme/0008-do-not-auto-check-var-PYTHON.patch @@ -1,4 +1,4 @@ -From fc3d9cc218e60582fd158d21a1cd537a3dc1b007 Mon Sep 17 00:00:00 2001 +From 5bbf7a048b6d81d23186340e839f9f65b5b514b6 Mon Sep 17 00:00:00 2001 From: Hongxu Jia Date: Fri, 10 May 2019 16:19:54 +0800 Subject: [PATCH] do not auto check var-PYTHON @@ -12,25 +12,23 @@ only check specific python 2.7 and 3.7 Upstream-Status: Inappropriate [oe-core specific] Signed-off-by: Hongxu Jia + --- configure.ac | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.ac b/configure.ac -index cb2f073..7d74a6d 100644 +index 5ef00c0..bbcff93 100644 --- a/configure.ac +++ b/configure.ac -@@ -423,8 +423,8 @@ if test "$found_py" = "1"; then +@@ -425,8 +425,8 @@ if test "$found_py" = "1"; then if test "$found_py" = "1" -o "$found_py3" = "1"; then # Reset everything, so that we can look for another Python. 
m4_foreach([mym4pythonver], -- [[2.7],[3.4],[3.5],[3.6],[3.7],[3.8],[all]], +- [[2.7],[3.4],[3.5],[3.6],[3.7],[3.8],[3.9],[all]], - [unset PYTHON + [[2.7],[3.7]], + [ unset PYTHON_VERSION unset PYTHON_CPPFLAGS unset PYTHON_LDFLAGS --- -2.7.4 - diff --git a/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb b/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb deleted file mode 100644 index b51534351..000000000 --- a/poky/meta/recipes-support/gpgme/gpgme_1.13.1.bb +++ /dev/null @@ -1,89 +0,0 @@ -SUMMARY = "High-level GnuPG encryption/signing API" -DESCRIPTION = "GnuPG Made Easy (GPGME) is a library designed to make access to GnuPG easier for applications. It provides a High-Level Crypto API for encryption, decryption, signing, signature verification and key management" -HOMEPAGE = "http://www.gnupg.org/gpgme.html" -BUGTRACKER = "https://bugs.g10code.com/gnupg/index" - -LICENSE = "GPLv2+ & LGPLv2.1+" -LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ - file://COPYING.LESSER;md5=bbb461211a33b134d42ed5ee802b37ff \ - file://src/gpgme.h.in;endline=23;md5=2f0bf06d1c7dcb28532a9d0f94a7ca1d \ - file://src/engine.h;endline=22;md5=4b6d8ba313d9b564cc4d4cfb1640af9d" - -UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html" -SRC_URI = "${GNUPG_MIRROR}/gpgme/${BP}.tar.bz2 \ - file://0001-Revert-build-Make-gpgme.m4-use-gpgrt-config-with-.pc.patch \ - file://0001-pkgconfig.patch \ - file://0002-gpgme-lang-python-gpg-error-config-should-not-be-use.patch \ - file://0003-Correctly-install-python-modules.patch \ - file://0004-python-import.patch \ - file://0005-gpgme-config-skip-all-lib-or-usr-lib-directories-in-.patch \ - file://0006-fix-build-path-issue.patch \ - file://0007-python-Add-variables-to-tests.patch \ - file://0008-do-not-auto-check-var-PYTHON.patch \ - " - -SRC_URI[md5sum] = "198f0a908ec3cd8f0ce9a4f3a4489645" -SRC_URI[sha256sum] = "c4e30b227682374c23cddc7fdb9324a99694d907e79242a25a4deeedb393be46" - -DEPENDS = "libgpg-error libassuan" -RDEPENDS_${PN}-cpp += "libstdc++" - -RDEPENDS_python2-gpg += "python-unixadmin" -RDEPENDS_python3-gpg += "python3-unixadmin" - -BINCONFIG = "${bindir}/gpgme-config" - -# Note select python2 or python3, but you can't select both at the same time -PACKAGECONFIG ??= "python3" -PACKAGECONFIG[python2] = ",,python swig-native," -PACKAGECONFIG[python3] = ",,python3 swig-native," - -# Default in configure.ac: "cl cpp python qt" -# Supported: "cl cpp python python2 python3 qt" -# python says 'search and find python2 or python3' - -# Building the C++ bindings for native requires a C++ compiler with C++11 -# support. Since these bindings are currently not needed, we can disable them. -DEFAULT_LANGUAGES = "" -DEFAULT_LANGUAGES_class-target = "cpp" -LANGUAGES ?= "${DEFAULT_LANGUAGES} python" - -PYTHON_INHERIT = "${@bb.utils.contains('PACKAGECONFIG', 'python2', 'pythonnative', '', d)}" -PYTHON_INHERIT .= "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native', '', d)}" - -EXTRA_OECONF += '--enable-languages="${LANGUAGES}" \ - --disable-gpgconf-test \ - --disable-gpg-test \ - --disable-gpgsm-test \ - --disable-g13-test \ - --disable-lang-python-test \ -' - -inherit autotools texinfo binconfig-disabled pkgconfig distutils-common-base ${PYTHON_INHERIT} multilib_header - -export PKG_CONFIG='pkg-config' - -BBCLASSEXTEND = "native nativesdk" - -PACKAGES =+ "${PN}-cpp" -PACKAGES =. "${@bb.utils.contains('PACKAGECONFIG', 'python2', 'python2-gpg ', '', d)}" -PACKAGES =. 
"${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-gpg ', '', d)}" - -FILES_${PN}-cpp = "${libdir}/libgpgmepp.so.*" -FILES_python2-gpg = "${PYTHON_SITEPACKAGES_DIR}/*" -FILES_python3-gpg = "${PYTHON_SITEPACKAGES_DIR}/*" -FILES_${PN}-dev += "${datadir}/common-lisp/source/gpgme/* \ - ${libdir}/cmake/* \ -" - -CFLAGS_append_libc-musl = " -D__error_t_defined " -do_configure_prepend () { - # Else these could be used in preference to those in aclocal-copy - rm -f ${S}/m4/gpg-error.m4 - rm -f ${S}/m4/libassuan.m4 - rm -f ${S}/m4/python.m4 -} - -do_install_append() { - oe_multilib_header gpgme.h -} diff --git a/poky/meta/recipes-support/gpgme/gpgme_1.14.0.bb b/poky/meta/recipes-support/gpgme/gpgme_1.14.0.bb new file mode 100644 index 000000000..9fa821280 --- /dev/null +++ b/poky/meta/recipes-support/gpgme/gpgme_1.14.0.bb @@ -0,0 +1,88 @@ +SUMMARY = "High-level GnuPG encryption/signing API" +DESCRIPTION = "GnuPG Made Easy (GPGME) is a library designed to make access to GnuPG easier for applications. It provides a High-Level Crypto API for encryption, decryption, signing, signature verification and key management" +HOMEPAGE = "http://www.gnupg.org/gpgme.html" +BUGTRACKER = "https://bugs.g10code.com/gnupg/index" + +LICENSE = "GPLv2+ & LGPLv2.1+" +LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ + file://COPYING.LESSER;md5=bbb461211a33b134d42ed5ee802b37ff \ + file://src/gpgme.h.in;endline=23;md5=2f0bf06d1c7dcb28532a9d0f94a7ca1d \ + file://src/engine.h;endline=22;md5=4b6d8ba313d9b564cc4d4cfb1640af9d" + +UPSTREAM_CHECK_URI = "https://gnupg.org/download/index.html" +SRC_URI = "${GNUPG_MIRROR}/gpgme/${BP}.tar.bz2 \ + file://0001-Revert-build-Make-gpgme.m4-use-gpgrt-config-with-.pc.patch \ + file://0001-pkgconfig.patch \ + file://0002-gpgme-lang-python-gpg-error-config-should-not-be-use.patch \ + file://0003-Correctly-install-python-modules.patch \ + file://0004-python-import.patch \ + file://0005-gpgme-config-skip-all-lib-or-usr-lib-directories-in-.patch \ + file://0006-fix-build-path-issue.patch \ + file://0007-python-Add-variables-to-tests.patch \ + file://0008-do-not-auto-check-var-PYTHON.patch \ + " + +SRC_URI[sha256sum] = "cef1f710a6b0d28f5b44242713ad373702d1466dcbe512eb4e754d7f35cd4307" + +DEPENDS = "libgpg-error libassuan" +RDEPENDS_${PN}-cpp += "libstdc++" + +RDEPENDS_python2-gpg += "python-unixadmin" +RDEPENDS_python3-gpg += "python3-unixadmin" + +BINCONFIG = "${bindir}/gpgme-config" + +# Note select python2 or python3, but you can't select both at the same time +PACKAGECONFIG ??= "python3" +PACKAGECONFIG[python2] = ",,python swig-native," +PACKAGECONFIG[python3] = ",,python3 swig-native," + +# Default in configure.ac: "cl cpp python qt" +# Supported: "cl cpp python python2 python3 qt" +# python says 'search and find python2 or python3' + +# Building the C++ bindings for native requires a C++ compiler with C++11 +# support. Since these bindings are currently not needed, we can disable them. 
+DEFAULT_LANGUAGES = "" +DEFAULT_LANGUAGES_class-target = "cpp" +LANGUAGES ?= "${DEFAULT_LANGUAGES} python" + +PYTHON_INHERIT = "${@bb.utils.contains('PACKAGECONFIG', 'python2', 'pythonnative', '', d)}" +PYTHON_INHERIT .= "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3native', '', d)}" + +EXTRA_OECONF += '--enable-languages="${LANGUAGES}" \ + --disable-gpgconf-test \ + --disable-gpg-test \ + --disable-gpgsm-test \ + --disable-g13-test \ + --disable-lang-python-test \ +' + +inherit autotools texinfo binconfig-disabled pkgconfig distutils-common-base ${PYTHON_INHERIT} multilib_header + +export PKG_CONFIG='pkg-config' + +BBCLASSEXTEND = "native nativesdk" + +PACKAGES =+ "${PN}-cpp" +PACKAGES =. "${@bb.utils.contains('PACKAGECONFIG', 'python2', 'python2-gpg ', '', d)}" +PACKAGES =. "${@bb.utils.contains('PACKAGECONFIG', 'python3', 'python3-gpg ', '', d)}" + +FILES_${PN}-cpp = "${libdir}/libgpgmepp.so.*" +FILES_python2-gpg = "${PYTHON_SITEPACKAGES_DIR}/*" +FILES_python3-gpg = "${PYTHON_SITEPACKAGES_DIR}/*" +FILES_${PN}-dev += "${datadir}/common-lisp/source/gpgme/* \ + ${libdir}/cmake/* \ +" + +CFLAGS_append_libc-musl = " -D__error_t_defined " +do_configure_prepend () { + # Else these could be used in preference to those in aclocal-copy + rm -f ${S}/m4/gpg-error.m4 + rm -f ${S}/m4/libassuan.m4 + rm -f ${S}/m4/python.m4 +} + +do_install_append() { + oe_multilib_header gpgme.h +} diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.10.bb b/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.10.bb deleted file mode 100644 index 43f76dc56..000000000 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.10.bb +++ /dev/null @@ -1,29 +0,0 @@ -require libcap-ng.inc - -FILESEXTRAPATHS_prepend := "${THISDIR}/libcap-ng:" - -SUMMARY .= " - python" - -inherit lib_package autotools python3native - -DEPENDS += "libcap-ng python3 swig-native" - -S = "${WORKDIR}/libcap-ng-${PV}" - -EXTRA_OECONF += "--with-python --with-python3" -EXTRA_OEMAKE += "PYLIBVER='python${PYTHON_BASEVERSION}${PYTHON_ABI}' PYINC='${STAGING_INCDIR}/${PYLIBVER}'" - -do_install_append() { - rm -rf ${D}${bindir} - rm -rf ${D}${libdir}/.debug - rm -f ${D}${libdir}/lib* - rm -rf ${D}${libdir}/pkgconfig - rm -rf ${D}${datadir} - rm -rf ${D}${includedir} -} - -# PACKAGES = "${PN}" - -FILES_${PN} = "${libdir}/python${PYTHON_BASEVERSION}" -FILES_${PN}-dbg =+ "${PYTHON_SITEPACKAGES_DIR}/.debug/_capng.so" - diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.11.bb b/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.11.bb new file mode 100644 index 000000000..43f76dc56 --- /dev/null +++ b/poky/meta/recipes-support/libcap-ng/libcap-ng-python_0.7.11.bb @@ -0,0 +1,29 @@ +require libcap-ng.inc + +FILESEXTRAPATHS_prepend := "${THISDIR}/libcap-ng:" + +SUMMARY .= " - python" + +inherit lib_package autotools python3native + +DEPENDS += "libcap-ng python3 swig-native" + +S = "${WORKDIR}/libcap-ng-${PV}" + +EXTRA_OECONF += "--with-python --with-python3" +EXTRA_OEMAKE += "PYLIBVER='python${PYTHON_BASEVERSION}${PYTHON_ABI}' PYINC='${STAGING_INCDIR}/${PYLIBVER}'" + +do_install_append() { + rm -rf ${D}${bindir} + rm -rf ${D}${libdir}/.debug + rm -f ${D}${libdir}/lib* + rm -rf ${D}${libdir}/pkgconfig + rm -rf ${D}${datadir} + rm -rf ${D}${includedir} +} + +# PACKAGES = "${PN}" + +FILES_${PN} = "${libdir}/python${PYTHON_BASEVERSION}" +FILES_${PN}-dbg =+ "${PYTHON_SITEPACKAGES_DIR}/.debug/_capng.so" + diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng.inc 
b/poky/meta/recipes-support/libcap-ng/libcap-ng.inc index 002915ad4..6d6fa644a 100644 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng.inc +++ b/poky/meta/recipes-support/libcap-ng/libcap-ng.inc @@ -9,11 +9,9 @@ LIC_FILES_CHKSUM = "file://COPYING;md5=94d55d512a9ba36caa9b7df079bae19f \ SRC_URI = "http://people.redhat.com/sgrubb/libcap-ng/libcap-ng-${PV}.tar.gz \ file://python.patch \ - file://0001-configure.ac-add-library-if-header-found.patch \ - file://0002-Wrap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch \ " -SRC_URI[md5sum] = "57dc267e2949cdecb651a929f9206572" -SRC_URI[sha256sum] = "a84ca7b4e0444283ed269b7a29f5b6187f647c82e2b876636b49b9a744f0ffbf" +SRC_URI[md5sum] = "5883ed10b621c87e29a05cff36d2928e" +SRC_URI[sha256sum] = "85815c711862d01a440db471f12fba462c9949e923966f5859607e652d9c0ae9" BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng/0001-configure.ac-add-library-if-header-found.patch b/poky/meta/recipes-support/libcap-ng/libcap-ng/0001-configure.ac-add-library-if-header-found.patch deleted file mode 100644 index 6b155ccea..000000000 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng/0001-configure.ac-add-library-if-header-found.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 0230e2e374bb71aed0181ccd9ebd13c0c5125a5d Mon Sep 17 00:00:00 2001 -From: Trevor Woerner -Date: Fri, 25 Oct 2019 17:01:20 -0400 -Subject: [PATCH] configure.ac: add library if header found - -If the pthread.h header is found, make sure library containing -"pthread_atfork" is added to the list of libraries against which to link. -On some hosts (e.g. openSUSE 15.1) "-lpthread" needs to be explicitly added -in order for the code to compile correctly. - -Upstream-Status: Submitted [https://github.com/stevegrubb/libcap-ng/pull/10] -Signed-off-by: Trevor Woerner ---- - configure.ac | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/configure.ac b/configure.ac -index 63088f4..639b464 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -56,7 +56,9 @@ AC_CHECK_HEADERS(sys/xattr.h, [], [ - AC_CHECK_HEADERS(attr/xattr.h, [], [AC_MSG_WARN(attr/xattr.h not found, disabling file system capabilities.)]) - ]) - AC_CHECK_HEADERS(linux/securebits.h, [], []) --AC_CHECK_HEADERS(pthread.h, [], [AC_MSG_WARN(pthread.h not found, disabling pthread_atfork.)]) -+AC_CHECK_HEADERS(pthread.h, -+ [AC_SEARCH_LIBS(pthread_atfork, pthread)], -+ [AC_MSG_WARN(pthread.h not found, disabling pthread_atfork.)]) - - AC_C_CONST - AC_C_INLINE diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng/0002-Wrap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch b/poky/meta/recipes-support/libcap-ng/libcap-ng/0002-Wrap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch deleted file mode 100644 index c68254ff3..000000000 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng/0002-Wrap-pthread_atfork-usage-in-HAVE_PTHREAD_H.patch +++ /dev/null @@ -1,25 +0,0 @@ -From d95c4018ad57c37f6272dbedfa5217776567c329 Mon Sep 17 00:00:00 2001 -From: Christopher Larson -Date: Tue, 26 Nov 2019 22:34:34 +0500 -Subject: [PATCH] Wrap pthread_atfork usage in HAVE_PTHREAD_H - -Upstream-Status: Pending -Signed-off-by: Christopher Larson ---- - src/cap-ng.c | 2 ++ - 1 file changed, 2 insertions(+) - -diff --git a/src/cap-ng.c b/src/cap-ng.c -index 35fcd7a..97a3dbd 100644 ---- a/src/cap-ng.c -+++ b/src/cap-ng.c -@@ -166,7 +166,9 @@ static void deinit(void) - static void init_lib(void) __attribute__ ((constructor)); - static void init_lib(void) - { -+#ifdef HAVE_PTHREAD_H - pthread_atfork(NULL, NULL, deinit); -+#endif - } - - 
static void init(void) diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng/python.patch b/poky/meta/recipes-support/libcap-ng/libcap-ng/python.patch index d60a0a39b..fcd6f9cd3 100644 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng/python.patch +++ b/poky/meta/recipes-support/libcap-ng/libcap-ng/python.patch @@ -1,6 +1,3 @@ -From b4a354ae8d4f7c2ec3ec421c7d8a790cc57e77a9 Mon Sep 17 00:00:00 2001 -From: Li xin -Date: Sat, 18 Jul 2015 23:03:30 +0900 Subject: [PATCH] configure.ac - Avoid an incorrect check for python. Makefile.am - avoid hard coded host include paths. @@ -9,6 +6,8 @@ Upstream-Status: pending Signed-off-by: Mark Hatle Signed-off-by: Li Xin Signed-off-by: Yi Zhao +Signed-off-by: Zang Ruochen + --- bindings/python/Makefile.am | 4 +++- configure.ac | 17 ++--------------- @@ -30,12 +29,12 @@ index 999b184..c8e49db 100644 SWIG_INCLUDES = ${AM_CPPFLAGS} pyexec_PYTHON = capng.py diff --git a/configure.ac b/configure.ac -index 7f66179..079d026 100644 +index 8b46f51..2d7e00c 100644 --- a/configure.ac +++ b/configure.ac -@@ -123,21 +123,8 @@ if test x$use_python = xno ; then - else - AC_MSG_RESULT(testing) +@@ -141,21 +141,8 @@ fi + + # Setup Python2 with the interpreter found previously. AM_PATH_PYTHON -PYINCLUDEDIR=`python${am_cv_python_version} -c "from distutils import sysconfig; print(sysconfig.get_config_var('INCLUDEPY'))"` -if test -f ${PYINCLUDEDIR}/Python.h ; then @@ -46,7 +45,7 @@ index 7f66179..079d026 100644 - AC_MSG_NOTICE(Python bindings will be built) -else - python_found="no" -- if test x$use_python = xyes ; then +- if test "x$use_python" = xyes ; then - AC_MSG_ERROR([Python explicitly requested and python headers were not found]) - else - AC_MSG_WARN("Python headers not found - python bindings will not be made") @@ -58,5 +57,4 @@ index 7f66179..079d026 100644 AM_CONDITIONAL(HAVE_PYTHON, test ${python_found} = "yes") -- -2.7.4 - +2.25.1 diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.10.bb b/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.10.bb deleted file mode 100644 index 6e6de4549..000000000 --- a/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.10.bb +++ /dev/null @@ -1,17 +0,0 @@ -require libcap-ng.inc - -inherit lib_package autotools - -EXTRA_OECONF += "--without-python --without-python3" - -BBCLASSEXTEND = "native nativesdk" - -do_install_append() { - # Moving libcap-ng to base_libdir - if [ ! ${D}${libdir} -ef ${D}${base_libdir} ]; then - mkdir -p ${D}/${base_libdir}/ - mv -f ${D}${libdir}/libcap-ng.so.* ${D}${base_libdir}/ - relpath=${@os.path.relpath("${base_libdir}", "${libdir}")} - ln -sf ${relpath}/libcap-ng.so.0.0.0 ${D}${libdir}/libcap-ng.so - fi -} diff --git a/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.11.bb b/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.11.bb new file mode 100644 index 000000000..6e6de4549 --- /dev/null +++ b/poky/meta/recipes-support/libcap-ng/libcap-ng_0.7.11.bb @@ -0,0 +1,17 @@ +require libcap-ng.inc + +inherit lib_package autotools + +EXTRA_OECONF += "--without-python --without-python3" + +BBCLASSEXTEND = "native nativesdk" + +do_install_append() { + # Moving libcap-ng to base_libdir + if [ ! 
${D}${libdir} -ef ${D}${base_libdir} ]; then + mkdir -p ${D}/${base_libdir}/ + mv -f ${D}${libdir}/libcap-ng.so.* ${D}${base_libdir}/ + relpath=${@os.path.relpath("${base_libdir}", "${libdir}")} + ln -sf ${relpath}/libcap-ng.so.0.0.0 ${D}${libdir}/libcap-ng.so + fi +} diff --git a/poky/meta/recipes-support/libcap/libcap_2.42.bb b/poky/meta/recipes-support/libcap/libcap_2.42.bb deleted file mode 100644 index 48f8f9262..000000000 --- a/poky/meta/recipes-support/libcap/libcap_2.42.bb +++ /dev/null @@ -1,76 +0,0 @@ -SUMMARY = "Library for getting/setting POSIX.1e capabilities" -HOMEPAGE = "http://sites.google.com/site/fullycapable/" - -# no specific GPL version required -LICENSE = "BSD | GPLv2" -LIC_FILES_CHKSUM = "file://License;md5=3f84fd6f29d453a56514cb7e4ead25f1" - -DEPENDS = "hostperl-runtime-native gperf-native" - -SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${PV}.tar.xz \ - file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \ - file://0002-tests-do-not-run-target-executables.patch \ - file://0001-tests-do-not-statically-link-a-test.patch \ - " -SRC_URI[sha256sum] = "3605a9cb60076547ea9f64989e0ba576da9508e4653e8dc40ae54c0d6f443dfd" - -UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/linux/libs/security/linux-privs/${BPN}2/" - -inherit lib_package - -# do NOT pass target cflags to host compilations -# -do_configure() { - # libcap uses := for compilers, fortunately, it gives us a hint - # on what should be replaced with ?= - sed -e 's,:=,?=,g' -i Make.Rules - sed -e 's,^BUILD_CFLAGS ?= ,BUILD_CFLAGS := $(BUILD_CFLAGS) ,' -i Make.Rules -} - -PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" -PACKAGECONFIG_class-native ??= "" - -PACKAGECONFIG[pam] = "PAM_CAP=yes,PAM_CAP=no,libpam" - -EXTRA_OEMAKE = " \ - INDENT= \ - lib='${baselib}' \ - RAISE_SETFCAP=no \ - DYNAMIC=yes \ - BUILD_GPERF=yes \ -" - -EXTRA_OEMAKE_append_class-target = " SYSTEM_HEADERS=${STAGING_INCDIR}" - -# these are present in the libcap defaults, so include in our CFLAGS too -CFLAGS += "-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" - -do_compile() { - oe_runmake ${PACKAGECONFIG_CONFARGS} -} - -do_install() { - oe_runmake install \ - ${PACKAGECONFIG_CONFARGS} \ - DESTDIR="${D}" \ - prefix="${prefix}" \ - SBINDIR="${sbindir}" -} - -do_install_append() { - # Move the library to base_libdir - install -d ${D}${base_libdir} - if [ ! 
${D}${libdir} -ef ${D}${base_libdir} ]; then - mv ${D}${libdir}/libcap* ${D}${base_libdir} - if [ -d ${D}${libdir}/security ]; then - mv ${D}${libdir}/security ${D}${base_libdir} - fi - fi -} - -FILES_${PN}-dev += "${base_libdir}/*.so" - -# pam files -FILES_${PN} += "${base_libdir}/security/*.so" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libcap/libcap_2.43.bb b/poky/meta/recipes-support/libcap/libcap_2.43.bb new file mode 100644 index 000000000..c712c2594 --- /dev/null +++ b/poky/meta/recipes-support/libcap/libcap_2.43.bb @@ -0,0 +1,76 @@ +SUMMARY = "Library for getting/setting POSIX.1e capabilities" +HOMEPAGE = "http://sites.google.com/site/fullycapable/" + +# no specific GPL version required +LICENSE = "BSD | GPLv2" +LIC_FILES_CHKSUM = "file://License;md5=3f84fd6f29d453a56514cb7e4ead25f1" + +DEPENDS = "hostperl-runtime-native gperf-native" + +SRC_URI = "${KERNELORG_MIRROR}/linux/libs/security/linux-privs/${BPN}2/${BPN}-${PV}.tar.xz \ + file://0001-ensure-the-XATTR_NAME_CAPS-is-defined-when-it-is-use.patch \ + file://0002-tests-do-not-run-target-executables.patch \ + file://0001-tests-do-not-statically-link-a-test.patch \ + " +SRC_URI[sha256sum] = "512a0e5fc4c1e06d472a20da26aa96a9b9bf2a26b23f094f77f1b8da56cc427f" + +UPSTREAM_CHECK_URI = "https://www.kernel.org/pub/linux/libs/security/linux-privs/${BPN}2/" + +inherit lib_package + +# do NOT pass target cflags to host compilations +# +do_configure() { + # libcap uses := for compilers, fortunately, it gives us a hint + # on what should be replaced with ?= + sed -e 's,:=,?=,g' -i Make.Rules + sed -e 's,^BUILD_CFLAGS ?= ,BUILD_CFLAGS := $(BUILD_CFLAGS) ,' -i Make.Rules +} + +PACKAGECONFIG ??= "${@bb.utils.filter('DISTRO_FEATURES', 'pam', d)}" +PACKAGECONFIG_class-native ??= "" + +PACKAGECONFIG[pam] = "PAM_CAP=yes,PAM_CAP=no,libpam" + +EXTRA_OEMAKE = " \ + INDENT= \ + lib='${baselib}' \ + RAISE_SETFCAP=no \ + DYNAMIC=yes \ + BUILD_GPERF=yes \ +" + +EXTRA_OEMAKE_append_class-target = " SYSTEM_HEADERS=${STAGING_INCDIR}" + +# these are present in the libcap defaults, so include in our CFLAGS too +CFLAGS += "-D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64" + +do_compile() { + oe_runmake ${PACKAGECONFIG_CONFARGS} +} + +do_install() { + oe_runmake install \ + ${PACKAGECONFIG_CONFARGS} \ + DESTDIR="${D}" \ + prefix="${prefix}" \ + SBINDIR="${sbindir}" +} + +do_install_append() { + # Move the library to base_libdir + install -d ${D}${base_libdir} + if [ ! 
${D}${libdir} -ef ${D}${base_libdir} ]; then + mv ${D}${libdir}/libcap* ${D}${base_libdir} + if [ -d ${D}${libdir}/security ]; then + mv ${D}${libdir}/security ${D}${base_libdir} + fi + fi +} + +FILES_${PN}-dev += "${base_libdir}/*.so" + +# pam files +FILES_${PN} += "${base_libdir}/security/*.so" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb b/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb deleted file mode 100644 index 177c0edc8..000000000 --- a/poky/meta/recipes-support/libcheck/libcheck_0.15.0.bb +++ /dev/null @@ -1,28 +0,0 @@ -SUMMARY = "Check - unit testing framework for C code" -HOMEPAGE = "https://libcheck.github.io/check/" -SECTION = "devel" - -LICENSE = "LGPLv2.1+" -LIC_FILES_CHKSUM = "file://COPYING.LESSER;md5=2d5025d4aa3495befef8f17206a5b0a1" - -SRC_URI = "https://github.com/${BPN}/check/releases/download/${PV}/check-${PV}.tar.gz \ - file://not-echo-compiler-info-to-check_stdint.h.patch" -SRC_URI[sha256sum] = "aea2e3c68fa6e1e92378e744b1c0db350ccda4b6bd0d19530d0ae185b3d1ac60" -UPSTREAM_CHECK_URI = "https://github.com/libcheck/check/releases/" - -S = "${WORKDIR}/check-${PV}" - -inherit autotools pkgconfig texinfo - -CACHED_CONFIGUREVARS += "ac_cv_path_AWK_PATH=${bindir}/gawk" - -RREPLACES_${PN} = "check (<= 0.9.5)" - -BBCLASSEXTEND = "native nativesdk" - -PACKAGES =+ "checkmk" - -FILES_checkmk = "${bindir}/checkmk" - -RDEPENDS_checkmk = "gawk" - diff --git a/poky/meta/recipes-support/libcheck/libcheck_0.15.2.bb b/poky/meta/recipes-support/libcheck/libcheck_0.15.2.bb new file mode 100644 index 000000000..33e07db93 --- /dev/null +++ b/poky/meta/recipes-support/libcheck/libcheck_0.15.2.bb @@ -0,0 +1,28 @@ +SUMMARY = "Check - unit testing framework for C code" +HOMEPAGE = "https://libcheck.github.io/check/" +SECTION = "devel" + +LICENSE = "LGPLv2.1+" +LIC_FILES_CHKSUM = "file://COPYING.LESSER;md5=2d5025d4aa3495befef8f17206a5b0a1" + +SRC_URI = "https://github.com/${BPN}/check/releases/download/${PV}/check-${PV}.tar.gz \ + file://not-echo-compiler-info-to-check_stdint.h.patch" +SRC_URI[sha256sum] = "a8de4e0bacfb4d76dd1c618ded263523b53b85d92a146d8835eb1a52932fa20a" +UPSTREAM_CHECK_URI = "https://github.com/libcheck/check/releases/" + +S = "${WORKDIR}/check-${PV}" + +inherit autotools pkgconfig texinfo + +CACHED_CONFIGUREVARS += "ac_cv_path_AWK_PATH=${bindir}/gawk" + +RREPLACES_${PN} = "check (<= 0.9.5)" + +BBCLASSEXTEND = "native nativesdk" + +PACKAGES =+ "checkmk" + +FILES_checkmk = "${bindir}/checkmk" + +RDEPENDS_checkmk = "gawk" + diff --git a/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch b/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch deleted file mode 100644 index 20610bee5..000000000 --- a/poky/meta/recipes-support/libgpg-error/libgpg-error/0003-build-Fix-cross-compiling-into-a-separate-build-dir.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 6efe006e99a7e739afbf7fe8937445c82630fc8f Mon Sep 17 00:00:00 2001 -From: David Michael -Date: Mon, 1 Jun 2020 10:24:53 -0400 -Subject: [PATCH] build: Fix cross-compiling into a separate build dir. - -* configure.ac: Create the src directory before writing into it. -* src/Makefile.am (EXTRA_DIST): Add gen-lock-obj.sh. 
- --- - -Upstream-Status: Backport -Signed-off-by: David Michael -Signed-off-by: Alexander Kanavin ---- - configure.ac | 1 + - src/Makefile.am | 2 +- - 2 files changed, 2 insertions(+), 1 deletion(-) - -diff --git a/configure.ac b/configure.ac -index def8bba..8c0d845 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -598,6 +598,7 @@ if test x$cross_compiling = xyes; then - case $host in - *-*-linux*) - lock_obj_h_generated=yes -+ mkdir src - LOCK_ABI_VERSION=1 host=$host host_alias=$host_alias \ - CC=$CC OBJDUMP=$host_alias-objdump \ - ac_ext=$ac_ext ac_objext=$ac_objext \ -diff --git a/src/Makefile.am b/src/Makefile.am -index 2fb83c0..d773877 100644 ---- a/src/Makefile.am -+++ b/src/Makefile.am -@@ -102,7 +102,7 @@ EXTRA_DIST = mkstrtable.awk err-sources.h.in err-codes.h.in \ - gpg-error.vers gpg-error.def.in \ - versioninfo.rc.in gpg-error.w32-manifest.in \ - gpg-error-config-test.sh gpg-error.pc.in \ -- $(lock_obj_pub) -+ gen-lock-obj.sh $(lock_obj_pub) - - BUILT_SOURCES = $(srcdir)/err-sources.h $(srcdir)/err-codes.h \ - code-to-errno.h code-from-errno.h \ diff --git a/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch b/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch deleted file mode 100644 index e6f6c09ba..000000000 --- a/poky/meta/recipes-support/libgpg-error/libgpg-error/0005-src-gen-lock-obj.sh-add-a-file.patch +++ /dev/null @@ -1,134 +0,0 @@ -From fcb414abb62223e66dba413d0ca86eab3ea5bbc3 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Sun, 21 Jun 2020 13:54:47 +0000 -Subject: [PATCH] src-gen-lock-obj.sh: add a file - -This is erroneously missing from the tarball; it will show -up in the next release tarball, as upstream has fixed the -packaging in master. - -Upstream-Status: Inappropriate -Signed-off-by: Alexander Kanavin ---- - src/gen-lock-obj.sh | 112 ++++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 112 insertions(+) - create mode 100755 src/gen-lock-obj.sh - -diff --git a/src/gen-lock-obj.sh b/src/gen-lock-obj.sh -new file mode 100755 -index 0000000..13858cf ---- /dev/null -+++ b/src/gen-lock-obj.sh -@@ -0,0 +1,112 @@ -+#! /bin/sh -+# -+# gen-lock-obj.sh - Build tool to construct the lock object. -+# -+# Copyright (C) 2020 g10 Code GmbH -+# -+# This file is part of libgpg-error. -+# -+# libgpg-error is free software; you can redistribute it and/or -+# modify it under the terms of the GNU Lesser General Public License -+# as published by the Free Software Foundation; either version 2.1 of -+# the License, or (at your option) any later version. -+# -+# libgpg-error is distributed in the hope that it will be useful, but -+# WITHOUT ANY WARRANTY; without even the implied warranty of -+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+# Lesser General Public License for more details. -+# -+# You should have received a copy of the GNU Lesser General Public -+# License along with this program; if not, see . -+# -+ -+# -+# Following variables should be defined to invoke this script -+# -+# CC -+# OBJDUMP -+# AWK -+# ac_ext -+# ac_object -+# host -+# LOCK_ABI_VERSION -+# -+# An example: -+# -+# LOCK_ABI_VERSION=1 host=x86_64-pc-linux-gnu host_alias=x86_64-linux-gnu \ -+# CC=$host_alias-gcc OBJDUMP=$host_alias-objdump ac_ext=c ac_objext=o \ -+# AWK=gawk ./gen-lock-obj.sh -+# -+ -+AWK_VERSION_OUTPUT=$($AWK 'BEGIN { print PROCINFO["version"] }') -+if test -n "$AWK_VERSION_OUTPUT"; then -+ # It's GNU awk, which supports PROCINFO. 
-+ AWK_OPTION=--non-decimal-data -+fi -+ -+cat <<'EOF' >conftest.$ac_ext -+#include -+pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; -+EOF -+ -+if $CC -c conftest.$ac_ext; then : -+ ac_mtx_size=$($OBJDUMP -j .bss -t conftest.$ac_objext \ -+ | $AWK $AWK_OPTION ' -+/mtx$/ { mtx_size = int("0x" $5) } -+END { print mtx_size }') -+else -+ echo "Can't determine mutex size" -+ exit 1 -+fi -+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -+ -+cat < -Date: Thu, 11 Jul 2019 17:47:11 +0100 -Subject: [PATCH] Makefile.am: use PYTHON when invoking psl-make-dafsa - -In an environment where only Python 3 is installed, configure.ac finds and sets -PYTHON=python3 correctly but src/psl-make-dafsa is called directly, so the hashbang of -`#!/usr/bin/env python` is used which doesn't exist. - -Fix this by explicitly running $(PYTHON) when using the tool. - -Upstream-Status: Backport [https://github.com/rockdaboot/libpsl/commit/b4fec5d0ddb70fc4f5360eb14f2f5c5e91194333] -Signed-off-by: Alexander Kanavin ---- - src/Makefile.am | 2 +- - tests/Makefile.am | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - -diff --git a/src/Makefile.am b/src/Makefile.am -index f9c0a3d..a05ea05 100644 ---- a/src/Makefile.am -+++ b/src/Makefile.am -@@ -19,7 +19,7 @@ libpsl_la_LDFLAGS = -no-undefined -version-info $(LIBPSL_SO_VERSION) - # Build rule for suffix_dafsa.c - # PSL_FILE can be set by ./configure --with-psl-file=[PATH] - suffixes_dafsa.h: $(PSL_FILE) $(srcdir)/psl-make-dafsa -- $(srcdir)/psl-make-dafsa --output-format=cxx+ "$(PSL_FILE)" suffixes_dafsa.h -+ $(PYTHON) $(srcdir)/psl-make-dafsa --output-format=cxx+ "$(PSL_FILE)" suffixes_dafsa.h - - EXTRA_DIST = psl-make-dafsa LICENSE.chromium - -diff --git a/tests/Makefile.am b/tests/Makefile.am -index 8b29b61..28a9a0b 100644 ---- a/tests/Makefile.am -+++ b/tests/Makefile.am -@@ -34,9 +34,9 @@ TESTS = $(PSL_TESTS) - # check-local target works in parallel to the tests, so the test suite will likely fail - BUILT_SOURCES = psl.dafsa psl_ascii.dafsa - psl.dafsa: $(top_srcdir)/list/public_suffix_list.dat -- $(top_srcdir)/src/psl-make-dafsa --output-format=binary "$(PSL_FILE)" psl.dafsa -+ $(PYTHON) $(top_srcdir)/src/psl-make-dafsa --output-format=binary "$(PSL_FILE)" psl.dafsa - psl_ascii.dafsa: $(top_srcdir)/list/public_suffix_list.dat -- $(top_srcdir)/src/psl-make-dafsa --output-format=binary --encoding=ascii "$(PSL_FILE)" psl_ascii.dafsa -+ $(PYTHON) $(top_srcdir)/src/psl-make-dafsa --output-format=binary --encoding=ascii "$(PSL_FILE)" psl_ascii.dafsa - - clean-local: - rm -f psl.dafsa psl_ascii.dafsa --- -2.17.1 - diff --git a/poky/meta/recipes-support/libpsl/libpsl/0001-gtk-doc-do-not-include-tree_index.sgml.patch b/poky/meta/recipes-support/libpsl/libpsl/0001-gtk-doc-do-not-include-tree_index.sgml.patch deleted file mode 100644 index c78d6fd98..000000000 --- a/poky/meta/recipes-support/libpsl/libpsl/0001-gtk-doc-do-not-include-tree_index.sgml.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 8691105e1808ec9b031b6c25f70204a0908ae9c1 Mon Sep 17 00:00:00 2001 -From: Alexander Kanavin -Date: Sat, 1 Jun 2019 13:09:41 +0200 -Subject: [PATCH] gtk-doc: do not include tree_index.sgml - -gtk-doc 1.30 no longer generates the file if the object tree is empty - -Upstream-Status: Backport [87d1add318b5e5d09977f7f374e923577b6ff3be] -Signed-off-by: Alexander Kanavin ---- - docs/libpsl/libpsl-docs.sgml | 4 ---- - 1 file changed, 4 deletions(-) - -diff --git a/docs/libpsl/libpsl-docs.sgml b/docs/libpsl/libpsl-docs.sgml -index 1eca9a2..1c38dbc 100644 ---- 
a/docs/libpsl/libpsl-docs.sgml -+++ b/docs/libpsl/libpsl-docs.sgml -@@ -22,10 +22,6 @@ - - - -- -- Object Hierarchy -- -- - - API Index - diff --git a/poky/meta/recipes-support/libpsl/libpsl_0.21.0.bb b/poky/meta/recipes-support/libpsl/libpsl_0.21.0.bb deleted file mode 100644 index 9831b4b94..000000000 --- a/poky/meta/recipes-support/libpsl/libpsl_0.21.0.bb +++ /dev/null @@ -1,23 +0,0 @@ -SUMMARY = "Public Suffix List library" - -LICENSE = "MIT" -LIC_FILES_CHKSUM = "file://LICENSE;md5=5437030d9e4fbe7267ced058ddb8a7f5 \ - file://COPYING;md5=f41d10997a12da5ee3c24ceeb0148d18" - -SRC_URI = "https://github.com/rockdaboot/${BPN}/releases/download/${BP}/${BP}.tar.gz \ - file://0001-gtk-doc-do-not-include-tree_index.sgml.patch \ - file://0001-Makefile.am-use-PYTHON-when-invoking-psl-make-dafsa.patch \ - " -SRC_URI[md5sum] = "171e96d887709e36a57f4ee627bf82d2" -SRC_URI[sha256sum] = "41bd1c75a375b85c337b59783f5deb93dbb443fb0a52d257f403df7bd653ee12" - -UPSTREAM_CHECK_URI = "https://github.com/rockdaboot/libpsl/releases" - -DEPENDS = "libidn2" - -inherit autotools gettext gtk-doc manpages pkgconfig lib_package - -PACKAGECONFIG ??= "" -PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native" - -BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/libpsl/libpsl_0.21.1.bb b/poky/meta/recipes-support/libpsl/libpsl_0.21.1.bb new file mode 100644 index 000000000..0a48db65c --- /dev/null +++ b/poky/meta/recipes-support/libpsl/libpsl_0.21.1.bb @@ -0,0 +1,20 @@ +SUMMARY = "Public Suffix List library" + +LICENSE = "MIT" +LIC_FILES_CHKSUM = "file://LICENSE;md5=5437030d9e4fbe7267ced058ddb8a7f5 \ + file://COPYING;md5=f41d10997a12da5ee3c24ceeb0148d18" + +SRC_URI = "https://github.com/rockdaboot/${BPN}/releases/download/${PV}/${BP}.tar.gz \ + " +SRC_URI[sha256sum] = "ac6ce1e1fbd4d0254c4ddb9d37f1fa99dec83619c1253328155206b896210d4c" + +UPSTREAM_CHECK_URI = "https://github.com/rockdaboot/libpsl/releases" + +DEPENDS = "libidn2" + +inherit autotools gettext gtk-doc manpages pkgconfig lib_package + +PACKAGECONFIG ??= "" +PACKAGECONFIG[manpages] = "--enable-man,--disable-man,libxslt-native" + +BBCLASSEXTEND = "native nativesdk" diff --git a/poky/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb b/poky/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb deleted file mode 100644 index 4ba93f998..000000000 --- a/poky/meta/recipes-support/p11-kit/p11-kit_0.23.20.bb +++ /dev/null @@ -1,29 +0,0 @@ -SUMMARY = "Provides a way to load and enumerate PKCS#11 modules" -LICENSE = "BSD-3-Clause" -LIC_FILES_CHKSUM = "file://COPYING;md5=02933887f609807fbb57aa4237d14a50" - -inherit meson gettext pkgconfig gtk-doc bash-completion - -DEPENDS = "libtasn1 libtasn1-native libffi" - -DEPENDS_append = "${@' glib-2.0' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}" - -SRC_URI = "git://github.com/p11-glue/p11-kit" -SRCREV = "762cdaa2cd5c5ec09cc844f9a6bdc551c7f6c8ed" -S = "${WORKDIR}/git" - -PACKAGECONFIG ??= "" -PACKAGECONFIG[trust-paths] = "-Dtrust_paths=/etc/ssl/certs/ca-certificates.crt,,,ca-certificates" - -GTKDOC_MESON_OPTION = 'gtk_doc' - -FILES_${PN} += " \ - ${libdir}/p11-kit-proxy.so \ - ${libdir}/pkcs11/*.so \ - ${libdir}/pkcs11/*.la \ - ${systemd_user_unitdir}/*" - -# PN contains p11-kit-proxy.so, a symlink to a loadable module -INSANE_SKIP_${PN} = "dev-so" - -BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-support/p11-kit/p11-kit_0.23.21.bb b/poky/meta/recipes-support/p11-kit/p11-kit_0.23.21.bb new file mode 100644 index 000000000..b1fd2334b --- /dev/null +++ 
b/poky/meta/recipes-support/p11-kit/p11-kit_0.23.21.bb @@ -0,0 +1,29 @@ +SUMMARY = "Provides a way to load and enumerate PKCS#11 modules" +LICENSE = "BSD-3-Clause" +LIC_FILES_CHKSUM = "file://COPYING;md5=02933887f609807fbb57aa4237d14a50" + +inherit meson gettext pkgconfig gtk-doc bash-completion + +DEPENDS = "libtasn1 libtasn1-native libffi" + +DEPENDS_append = "${@' glib-2.0' if d.getVar('GTKDOC_ENABLED') == 'True' else ''}" + +SRC_URI = "git://github.com/p11-glue/p11-kit" +SRCREV = "fd8b56f3ee971f94dc6fc95411fc01e1c12153ab" +S = "${WORKDIR}/git" + +PACKAGECONFIG ??= "" +PACKAGECONFIG[trust-paths] = "-Dtrust_paths=/etc/ssl/certs/ca-certificates.crt,,,ca-certificates" + +GTKDOC_MESON_OPTION = 'gtk_doc' + +FILES_${PN} += " \ + ${libdir}/p11-kit-proxy.so \ + ${libdir}/pkcs11/*.so \ + ${libdir}/pkcs11/*.la \ + ${systemd_user_unitdir}/*" + +# PN contains p11-kit-proxy.so, a symlink to a loadable module +INSANE_SKIP_${PN} = "dev-so" + +BBCLASSEXTEND = "nativesdk" diff --git a/poky/meta/recipes-support/re2c/re2c_2.0.3.bb b/poky/meta/recipes-support/re2c/re2c_2.0.3.bb new file mode 100644 index 000000000..a0b521ce5 --- /dev/null +++ b/poky/meta/recipes-support/re2c/re2c_2.0.3.bb @@ -0,0 +1,14 @@ +SUMMARY = "Tool for writing very fast and very flexible scanners" +HOMEPAGE = "http://re2c.sourceforge.net/" +AUTHOR = "Marcus Börger " +SECTION = "devel" +LICENSE = "PD" +LIC_FILES_CHKSUM = "file://LICENSE;md5=64eca4d8a3b67f9dc7656094731a2c8d" + +SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.xz" +SRC_URI[sha256sum] = "b2bc1eb8aaaa21ff2fcd26507b7e6e72c5e3d887e58aa515c2155fb17d744278" +UPSTREAM_CHECK_URI = "https://github.com/skvadrik/re2c/releases" + +BBCLASSEXTEND = "native nativesdk" + +inherit autotools diff --git a/poky/meta/recipes-support/re2c/re2c_2.0.bb b/poky/meta/recipes-support/re2c/re2c_2.0.bb deleted file mode 100644 index b73b02407..000000000 --- a/poky/meta/recipes-support/re2c/re2c_2.0.bb +++ /dev/null @@ -1,14 +0,0 @@ -SUMMARY = "Tool for writing very fast and very flexible scanners" -HOMEPAGE = "http://re2c.sourceforge.net/" -AUTHOR = "Marcus Börger " -SECTION = "devel" -LICENSE = "PD" -LIC_FILES_CHKSUM = "file://LICENSE;md5=64eca4d8a3b67f9dc7656094731a2c8d" - -SRC_URI = "https://github.com/skvadrik/re2c/releases/download/${PV}/${BPN}-${PV}.tar.xz" -SRC_URI[sha256sum] = "89a9d7ee14be10e3779ea7b2c8ea4a964afce6e76b8dbcd5479940681db46d20" -UPSTREAM_CHECK_URI = "https://github.com/skvadrik/re2c/releases" - -BBCLASSEXTEND = "native nativesdk" - -inherit autotools diff --git a/poky/scripts/buildhistory-diff b/poky/scripts/buildhistory-diff index 833f7c33a..3bd40a2a1 100755 --- a/poky/scripts/buildhistory-diff +++ b/poky/scripts/buildhistory-diff @@ -28,10 +28,12 @@ def get_args_parser(): %(prog)s [options] [from-revision [to-revision]] (if not specified, from-revision defaults to build-minus-1, and to-revision defaults to HEAD)""") + default_dir = os.path.join(os.environ.get('BUILDDIR', '.'), 'buildhistory') + parser.add_argument('-p', '--buildhistory-dir', action='store', dest='buildhistory_dir', - default='buildhistory/', + default=default_dir, help="Specify path to buildhistory directory (defaults to buildhistory/ under cwd)") parser.add_argument('-v', '--report-version', action='store_true', @@ -80,11 +82,6 @@ def main(): parser.print_help() sys.exit(1) - if not os.path.exists(args.buildhistory_dir): - if args.buildhistory_dir == 'buildhistory/': - cwd = os.getcwd() - if os.path.basename(cwd) == 'buildhistory': - args.buildhistory_dir = cwd if not 
os.path.exists(args.buildhistory_dir): sys.stderr.write('Buildhistory directory "%s" does not exist\n\n' % args.buildhistory_dir) diff --git a/poky/scripts/lib/devtool/deploy.py b/poky/scripts/lib/devtool/deploy.py index b1749ce67..e5af2c95a 100644 --- a/poky/scripts/lib/devtool/deploy.py +++ b/poky/scripts/lib/devtool/deploy.py @@ -177,13 +177,19 @@ def deploy(args, config, basepath, workspace): rd.getVar('base_libdir'), rd) filelist = [] + inodes = set({}) ftotalsize = 0 for root, _, files in os.walk(recipe_outdir): for fn in files: + fstat = os.lstat(os.path.join(root, fn)) # Get the size in kiB (since we'll be comparing it to the output of du -k) # MUST use lstat() here not stat() or getfilesize() since we don't want to # dereference symlinks - fsize = int(math.ceil(float(os.lstat(os.path.join(root, fn)).st_size)/1024)) + if fstat.st_ino in inodes: + fsize = 0 + else: + fsize = int(math.ceil(float(fstat.st_size)/1024)) + inodes.add(fstat.st_ino) ftotalsize += fsize # The path as it would appear on the target fpath = os.path.join(destdir, os.path.relpath(root, recipe_outdir), fn) diff --git a/poky/scripts/lib/devtool/standard.py b/poky/scripts/lib/devtool/standard.py index bab644b83..d140b97de 100644 --- a/poky/scripts/lib/devtool/standard.py +++ b/poky/scripts/lib/devtool/standard.py @@ -1711,7 +1711,7 @@ def _update_recipe_patch(recipename, workspace, srctree, rd, appendlayerdir, wil def _guess_recipe_update_mode(srctree, rdata): """Guess the recipe update mode to use""" - src_uri = (rdata.getVar('SRC_URI', False) or '').split() + src_uri = (rdata.getVar('SRC_URI') or '').split() git_uris = [uri for uri in src_uri if uri.startswith('git://')] if not git_uris: return 'patch' diff --git a/poky/scripts/lib/wic/ksparser.py b/poky/scripts/lib/wic/ksparser.py index 3453d9cb9..913e3283d 100644 --- a/poky/scripts/lib/wic/ksparser.py +++ b/poky/scripts/lib/wic/ksparser.py @@ -51,11 +51,11 @@ class KickStartParser(ArgumentParser): def error(self, message): raise ArgumentError(None, message) -def sizetype(default): +def sizetype(default, size_in_bytes=False): def f(arg): """ Custom type for ArgumentParser - Converts size string in [K|k|M|G] format into the integer value + Converts size string in [S|s|K|k|M|G] format into the integer value """ try: suffix = default @@ -67,12 +67,20 @@ def sizetype(default): except ValueError: raise ArgumentTypeError("Invalid size: %r" % arg) + + if size_in_bytes: + if suffix == 's' or suffix == 'S': + return size * 512 + mult = 1024 + else: + mult = 1 + if suffix == "k" or suffix == "K": - return size + return size * mult if suffix == "M": - return size * 1024 + return size * mult * 1024 if suffix == "G": - return size * 1024 * 1024 + return size * mult * 1024 * 1024 raise ArgumentTypeError("Invalid size: %r" % arg) return f @@ -141,7 +149,7 @@ class KickStart(): part.add_argument('mountpoint', nargs='?') part.add_argument('--active', action='store_true') part.add_argument('--align', type=int) - part.add_argument('--offset', type=sizetype("K")) + part.add_argument('--offset', type=sizetype("K", True)) part.add_argument('--exclude-path', nargs='+') part.add_argument('--include-path', nargs='+', action='append') part.add_argument('--change-directory') diff --git a/poky/scripts/lib/wic/misc.py b/poky/scripts/lib/wic/misc.py index 91975ba15..4b08d649c 100644 --- a/poky/scripts/lib/wic/misc.py +++ b/poky/scripts/lib/wic/misc.py @@ -138,8 +138,9 @@ def exec_native_cmd(cmd_and_args, native_sysroot, pseudo=""): if pseudo: cmd_and_args = pseudo + cmd_and_args - 
native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin" % \ - (native_sysroot, native_sysroot, native_sysroot) + native_paths = "%s/sbin:%s/usr/sbin:%s/usr/bin:%s/bin" % \ + (native_sysroot, native_sysroot, + native_sysroot, native_sysroot) native_cmd_and_args = "export PATH=%s:$PATH;%s" % \ (native_paths, cmd_and_args) diff --git a/poky/scripts/lib/wic/plugins/imager/direct.py b/poky/scripts/lib/wic/plugins/imager/direct.py index 2f0199940..55db826e9 100644 --- a/poky/scripts/lib/wic/plugins/imager/direct.py +++ b/poky/scripts/lib/wic/plugins/imager/direct.py @@ -429,14 +429,14 @@ class PartitionedImage(): self.offset += align_sectors if part.offset is not None: - offset = (part.offset * 1024) // self.sector_size + offset = part.offset // self.sector_size - if offset * self.sector_size != part.offset * 1024: - raise WicError("Could not place %s%s at offset %dK with sector size %d" % (part.disk, self.numpart, part.offset, self.sector_size)) + if offset * self.sector_size != part.offset: + raise WicError("Could not place %s%s at offset %d with sector size %d" % (part.disk, self.numpart, part.offset, self.sector_size)) delta = offset - self.offset if delta < 0: - raise WicError("Could not place %s%s at offset %dK: next free sector is %d (delta: %d)" % (part.disk, self.numpart, part.offset, self.offset, delta)) + raise WicError("Could not place %s%s at offset %d: next free sector is %d (delta: %d)" % (part.disk, self.numpart, part.offset, self.offset, delta)) logger.debug("Skipping %d sectors to place %s%s at offset %dK", delta, part.disk, self.numpart, part.offset) diff --git a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py index 14c172357..cdc72543c 100644 --- a/poky/scripts/lib/wic/plugins/source/bootimg-efi.py +++ b/poky/scripts/lib/wic/plugins/source/bootimg-efi.py @@ -212,8 +212,8 @@ class BootimgEFIPlugin(SourcePlugin): except KeyError: raise WicError("bootimg-efi requires a loader, none specified") - if get_bitbake_var("IMAGE_BOOT_FILES") is None: - logger.debug('No boot files defined in IMAGE_BOOT_FILES') + if get_bitbake_var("IMAGE_EFI_BOOT_FILES") is None: + logger.debug('No boot files defined in IMAGE_EFI_BOOT_FILES') else: boot_files = None for (fmt, id) in (("_uuid-%s", part.uuid), ("_label-%s", part.label), (None, None)): @@ -222,7 +222,7 @@ class BootimgEFIPlugin(SourcePlugin): else: var = "" - boot_files = get_bitbake_var("IMAGE_BOOT_FILES" + var) + boot_files = get_bitbake_var("IMAGE_EFI_BOOT_FILES" + var) if boot_files: break @@ -292,7 +292,7 @@ class BootimgEFIPlugin(SourcePlugin): (staging_kernel_dir, kernel, hdddir, kernel) exec_cmd(install_cmd) - if get_bitbake_var("IMAGE_BOOT_FILES"): + if get_bitbake_var("IMAGE_EFI_BOOT_FILES"): for src_path, dst_path in cls.install_task: install_cmd = "install -m 0644 -D %s %s" \ % (os.path.join(kernel_dir, src_path), diff --git a/poky/scripts/oe-publish-sdk b/poky/scripts/oe-publish-sdk index 4b70f436b..deb8ae180 100755 --- a/poky/scripts/oe-publish-sdk +++ b/poky/scripts/oe-publish-sdk @@ -94,7 +94,10 @@ def publish(args): logger.error('Failed to unpack %s to %s' % (dest_sdk, destination)) return ret else: - cmd = "ssh %s 'sh %s -p -y -d %s && rm -f %s'" % (host, dest_sdk, destdir, dest_sdk) + rm_or_not = " && rm -f %s" % dest_sdk + if args.keep_orig: + rm_or_not = "" + cmd = "ssh %s 'sh %s -p -y -d %s%s'" % (host, dest_sdk, destdir, rm_or_not) ret = subprocess.call(cmd, shell=True) if ret == 0: logger.info('Successfully unpacked %s to %s' % (dest_sdk, destdir)) @@ -106,7 +109,7 @@ 
diff --git a/poky/scripts/oe-publish-sdk b/poky/scripts/oe-publish-sdk
index 4b70f436b..deb8ae180 100755
--- a/poky/scripts/oe-publish-sdk
+++ b/poky/scripts/oe-publish-sdk
@@ -94,7 +94,10 @@ def publish(args):
             logger.error('Failed to unpack %s to %s' % (dest_sdk, destination))
             return ret
     else:
-        cmd = "ssh %s 'sh %s -p -y -d %s && rm -f %s'" % (host, dest_sdk, destdir, dest_sdk)
+        rm_or_not = " && rm -f %s" % dest_sdk
+        if args.keep_orig:
+            rm_or_not = ""
+        cmd = "ssh %s 'sh %s -p -y -d %s%s'" % (host, dest_sdk, destdir, rm_or_not)
         ret = subprocess.call(cmd, shell=True)
         if ret == 0:
             logger.info('Successfully unpacked %s to %s' % (dest_sdk, destdir))
@@ -106,7 +109,7 @@ def publish(args):
     if not is_remote:
         cmd = 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo "*.pyc\n*.pyo\npyshtables.py" > .gitignore; fi; git add -A .; git config user.email "oe@oe.oe" && git config user.name "OE" && git commit -q -m "init repo" || true' % (destination, destination)
     else:
-        cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc\n*.pyo\npyshtables.py' > .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
+        cmd = "ssh %s 'set -e; mkdir -p %s/layers; cd %s/layers; if [ ! -e .git ]; then git init .; cp .git/hooks/post-update.sample .git/hooks/post-commit; echo '*.pyc' > .gitignore; echo '*.pyo' >> .gitignore; echo 'pyshtables.py' >> .gitignore; fi; git add -A .; git config user.email 'oe@oe.oe' && git config user.name 'OE' && git commit -q -m \"init repo\" || true'" % (host, destdir, destdir)
     ret = subprocess.call(cmd, shell=True)
     if ret == 0:
         logger.info('SDK published successfully')
@@ -119,6 +122,7 @@ def main():
     parser = argparse_oe.ArgumentParser(description="OpenEmbedded extensible SDK publishing tool - writes server-side data to support the extensible SDK update process to a specified location")
     parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
     parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
+    parser.add_argument('-k', '--keep-orig', help='Keep the eSDK installer file on the remote host after publishing (by default it is deleted)', action='store_true')
     parser.add_argument('sdk', help='Extensible SDK to publish (path to .sh installer file)')
     parser.add_argument('dest', help='Destination to publish SDK to; can be local path or remote in the form of user@host:/path (in the latter case ssh/scp will be used).')
 
diff --git a/poky/scripts/runqemu b/poky/scripts/runqemu
index 7fb5f7db5..e62d869c2 100755
--- a/poky/scripts/runqemu
+++ b/poky/scripts/runqemu
@@ -456,6 +456,10 @@ class BaseConfig(object):
             if arg in self.fstypes + self.vmtypes + self.wictypes:
                 self.check_arg_fstype(arg)
             elif arg == 'nographic':
+                if ('sdl' in sys.argv):
+                    raise RunQemuError('Option nographic makes no sense alongside the sdl option.')
+                if ('gtk' in sys.argv):
+                    raise RunQemuError('Option nographic makes no sense alongside the gtk option.')
                 self.qemu_opt_script += ' -nographic'
                 self.kernel_cmdline_script += ' console=ttyS0'
             elif arg == 'sdl':
@@ -1515,6 +1519,11 @@ def main():
     try:
         config = BaseConfig()
 
+        renice = os.path.expanduser("~/bin/runqemu-renice")
+        if os.path.exists(renice):
+            logger.info('Using %s to renice' % renice)
+            subprocess.check_call([renice, str(os.getpid())])
+
         def sigterm_handler(signum, frame):
             logger.info("SIGTERM received")
             os.kill(config.qemupid, signal.SIGTERM)
--
cgit v1.2.3